From d595ff741ba6880b1916e18e699b81d88d21ee40 Mon Sep 17 00:00:00 2001
From: Peter Smith <peter.b.smith@fujitsu.com>
Date: Thu, 28 Jan 2021 16:36:09 +1100
Subject: [PATCH v21] Tablesync Solution1.

====

Features:

* The tablesync slot is now permanent instead of temporary.

* The tablesync worker now allows multiple transactions instead of a single transaction.

* A new state (SUBREL_STATE_FINISHEDCOPY) is persisted after a successful copy_table in the tablesync worker's LogicalRepSyncTableStart (an example query showing this state appears after this list).

* If a re-launched tablesync worker finds the state SUBREL_STATE_FINISHEDCOPY then it bypasses the initial copy_table phase.

* The tablesync worker now sets up replication origin tracking in LogicalRepSyncTableStart (similar to what is done for the apply worker). The origin is advanced when first created.

* Cleanup of tablesync resources:
- The tablesync slot cleanup (drop) code is added to the process_syncing_tables_for_sync function.
- The tablesync replication origin tracking is cleaned up in process_syncing_tables_for_apply.
- A tablesync function to clean up its own slot/origin is called from ProcessInterrupts. This is indirectly invoked by DropSubscription/AlterSubscription when they signal the tablesync worker to stop.

* Updates to PG docs.

* New TAP test case.
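
The new FINISHEDCOPY state can be observed from SQL while a subscription is
synchronizing. This is only an illustrative query against the existing
pg_subscription_rel catalog, using the state codes from the catalogs.sgml
change below ('f' marks a relation whose copy phase is finished):

    -- Illustrative only: per-table synchronization states on the subscriber.
    -- 'i' = init, 'd' = data copy, 'f' = finished copy, 's' = synchronized, 'r' = ready
    SELECT srrelid::regclass AS relation, srsubstate AS state
    FROM pg_subscription_rel
    ORDER BY 1;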

Known Issues:

* Dangling tablesync slots are possible if some race scenario occurs during Drop/AlterSubscription (a manual cleanup sketch follows below).
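
If a dangling tablesync slot is left behind by such a race, it can be dropped
manually on the publisher. A minimal sketch, assuming a subscription replication
slot named 'tap_sub' and using the generated name pattern
(<slot_name>_<suboid>_sync_<relid>) described in the logical-replication docs
change below; the numeric OIDs shown are hypothetical:

    -- Run on the publisher: list any leftover tablesync slots (in LIKE
    -- patterns '_' matches any single character, which is close enough here).
    SELECT slot_name FROM pg_replication_slots WHERE slot_name LIKE 'tap_sub_%_sync_%';
    -- Drop a leftover slot; the slot name here is an example only.
    SELECT pg_drop_replication_slot('tap_sub_16994_sync_16385');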
---
 doc/src/sgml/catalogs.sgml                  |   1 +
 doc/src/sgml/logical-replication.sgml       |  18 +-
 doc/src/sgml/ref/drop_subscription.sgml     |   6 +-
 src/backend/commands/subscriptioncmds.c     |  93 ++++---
 src/backend/replication/logical/origin.c    |   2 +-
 src/backend/replication/logical/tablesync.c | 368 ++++++++++++++++++++++++----
 src/backend/replication/logical/worker.c    |  27 +-
 src/backend/tcop/postgres.c                 |   6 +
 src/include/catalog/pg_subscription_rel.h   |   2 +
 src/include/replication/logicalworker.h     |   2 +
 src/include/replication/slot.h              |   3 +
 src/include/replication/worker_internal.h   |   2 +
 src/test/subscription/t/004_sync.pl         |  69 +++++-
 13 files changed, 503 insertions(+), 96 deletions(-)

diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index 865e826..920a39d 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -7665,6 +7665,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
        State code:
        <literal>i</literal> = initialize,
        <literal>d</literal> = data is being copied,
+       <literal>f</literal> = finished table copy,
        <literal>s</literal> = synchronized,
        <literal>r</literal> = ready (normal replication)
       </para></entry>
diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml
index a560ad6..e1b20ea 100644
--- a/doc/src/sgml/logical-replication.sgml
+++ b/doc/src/sgml/logical-replication.sgml
@@ -248,7 +248,18 @@
 
    <para>
     As mentioned earlier, each (active) subscription receives changes from a
-    replication slot on the remote (publishing) side.  Normally, the remote
+    replication slot on the remote (publishing) side.
+   </para>
+   <para>
+    Additional table synchronization slots are normally transient, created
+    internally and dropped automatically when they are no longer needed.
+    These table synchronization slots have generated names:
+    <quote><literal>%s_%u_sync_%u</literal></quote> (parameters: Subscription
+    <parameter>slot_name</parameter>, Subscription <parameter>oid</parameter>,
+    Table <parameter>relid</parameter>).
+   </para>
+   <para>
+    Normally, the remote
     replication slot is created automatically when the subscription is created
     using <command>CREATE SUBSCRIPTION</command> and it is dropped
     automatically when the subscription is dropped using <command>DROP
@@ -294,8 +305,9 @@
        using <command>ALTER SUBSCRIPTION</command> before attempting to drop
        the subscription.  If the remote database instance no longer exists, no
        further action is then necessary.  If, however, the remote database
-       instance is just unreachable, the replication slot should then be
-       dropped manually; otherwise it would continue to reserve WAL and might
+       instance is just unreachable, the replication slot (and any remaining
+       table synchronization slots) should then be dropped manually;
+       otherwise they would continue to reserve WAL and might
        eventually cause the disk to fill up.  Such cases should be carefully
        investigated.
       </para>
diff --git a/doc/src/sgml/ref/drop_subscription.sgml b/doc/src/sgml/ref/drop_subscription.sgml
index adbdeaf..aee9615 100644
--- a/doc/src/sgml/ref/drop_subscription.sgml
+++ b/doc/src/sgml/ref/drop_subscription.sgml
@@ -79,7 +79,8 @@ DROP SUBSCRIPTION [ IF EXISTS ] <replaceable class="parameter">name</replaceable
   <para>
    When dropping a subscription that is associated with a replication slot on
    the remote host (the normal state), <command>DROP SUBSCRIPTION</command>
-   will connect to the remote host and try to drop the replication slot as
+   will connect to the remote host and try to drop the replication slot (and
+   any remaining table synchronization slots) as
    part of its operation.  This is necessary so that the resources allocated
    for the subscription on the remote host are released.  If this fails,
    either because the remote host is not reachable or because the remote
@@ -89,7 +90,8 @@ DROP SUBSCRIPTION [ IF EXISTS ] <replaceable class="parameter">name</replaceable
    executing <literal>ALTER SUBSCRIPTION ... SET (slot_name = NONE)</literal>.
    After that, <command>DROP SUBSCRIPTION</command> will no longer attempt any
    actions on a remote host.  Note that if the remote replication slot still
-   exists, it should then be dropped manually; otherwise it will continue to
+   exists, it (and any related table synchronization slots) should then be
+   dropped manually; otherwise they will continue to
    reserve WAL and might eventually cause the disk to fill up.  See
    also <xref linkend="logical-replication-subscription-slot"/>.
   </para>
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index 082f785..e31ba6e 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -34,6 +34,7 @@
 #include "nodes/makefuncs.h"
 #include "replication/logicallauncher.h"
 #include "replication/origin.h"
+#include "replication/slot.h"
 #include "replication/walreceiver.h"
 #include "replication/walsender.h"
 #include "replication/worker_internal.h"
@@ -928,7 +929,6 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
 	char	   *err = NULL;
 	RepOriginId originid;
 	WalReceiverConn *wrconn = NULL;
-	StringInfoData cmd;
 	Form_pg_subscription form;
 
 	/*
@@ -1042,7 +1042,7 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
 	}
 	list_free(subworkers);
 
-	/* Clean up dependencies */
+	/* Clean up dependencies. */
 	deleteSharedDependencyRecordsFor(SubscriptionRelationId, subid, 0);
 
 	/* Remove any associated relation synchronization states. */
@@ -1055,61 +1055,92 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
 		replorigin_drop(originid, false);
 
 	/*
-	 * If there is no slot associated with the subscription, we can finish
-	 * here.
+	 * If there is a slot associated with the subscription, then drop the
+	 * replication slot at the publisher node using the replication
+	 * connection.
 	 */
-	if (!slotname)
+	if (slotname)
 	{
-		table_close(rel, NoLock);
-		return;
+		load_file("libpqwalreceiver", false);
+
+		wrconn = walrcv_connect(conninfo, true, subname, &err);
+		if (wrconn == NULL)
+			ereport(ERROR,
+					(errmsg("could not connect to publisher when attempting to "
+							"drop the replication slot \"%s\"", slotname),
+					 errdetail("The error was: %s", err),
+			/* translator: %s is an SQL ALTER command */
+					 errhint("Use %s to disassociate the subscription from the slot.",
+							 "ALTER SUBSCRIPTION ... SET (slot_name = NONE)")));
+
+		PG_TRY();
+		{
+			ReplicationSlotDropAtPubNode(wrconn, slotname, false /* missing_ok */ );
+		}
+		PG_FINALLY();
+		{
+			walrcv_disconnect(wrconn);
+		}
+		PG_END_TRY();
 	}
 
-	/*
-	 * Otherwise drop the replication slot at the publisher node using the
-	 * replication connection.
-	 */
+	table_close(rel, NoLock);
+}
+
+/*
+ * Drop the replication slot at the publisher node using the replication connection.
+ *
+ * missing_ok - if true then only issue a WARNING message if the slot cannot be dropped.
+ */
+void
+ReplicationSlotDropAtPubNode(WalReceiverConn *wrconn, char *slotname, bool missing_ok)
+{
+	StringInfoData cmd;
+
+	Assert(wrconn);
+
 	load_file("libpqwalreceiver", false);
 
 	initStringInfo(&cmd);
 	appendStringInfo(&cmd, "DROP_REPLICATION_SLOT %s WAIT", quote_identifier(slotname));
 
-	wrconn = walrcv_connect(conninfo, true, subname, &err);
-	if (wrconn == NULL)
-		ereport(ERROR,
-				(errmsg("could not connect to publisher when attempting to "
-						"drop the replication slot \"%s\"", slotname),
-				 errdetail("The error was: %s", err),
-		/* translator: %s is an SQL ALTER command */
-				 errhint("Use %s to disassociate the subscription from the slot.",
-						 "ALTER SUBSCRIPTION ... SET (slot_name = NONE)")));
-
 	PG_TRY();
 	{
 		WalRcvExecResult *res;
 
 		res = walrcv_exec(wrconn, cmd.data, 0, NULL);
 
-		if (res->status != WALRCV_OK_COMMAND)
-			ereport(ERROR,
+		if (res->status == WALRCV_OK_COMMAND)
+		{
+			/* NOTICE. Success. */
+			ereport(NOTICE,
+					(errmsg("dropped replication slot \"%s\" on publisher",
+							slotname)));
+		}
+		else if (res->status == WALRCV_ERROR && missing_ok)
+		{
+			/* WARNING. Error, but missing_ok = true. */
+			ereport(WARNING,
 					(errmsg("could not drop the replication slot \"%s\" on publisher",
 							slotname),
 					 errdetail("The error was: %s", res->err)));
+		}
 		else
-			ereport(NOTICE,
-					(errmsg("dropped replication slot \"%s\" on publisher",
-							slotname)));
+		{
+			/* ERROR. */
+			ereport(ERROR,
+					(errmsg("could not drop the replication slot \"%s\" on publisher",
+							slotname),
+					 errdetail("The error was: %s", res->err)));
+		}
 
 		walrcv_clear_result(res);
 	}
 	PG_FINALLY();
 	{
-		walrcv_disconnect(wrconn);
+		pfree(cmd.data);
 	}
 	PG_END_TRY();
-
-	pfree(cmd.data);
-
-	table_close(rel, NoLock);
 }
 
 /*
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index 9bd761a..77aae35 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -357,7 +357,7 @@ restart:
 		if (state->roident == roident)
 		{
 			/* found our slot, is it busy? */
-			if (state->acquired_by != 0)
+			if (state->acquired_by != 0 && state->acquired_by != MyProcPid)
 			{
 				ConditionVariable *cv;
 
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index 863d196..f8cf93e 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -31,8 +31,11 @@
  *		 table state to INIT.
  *	   - Tablesync worker starts; changes table state from INIT to DATASYNC while
  *		 copying.
- *	   - Tablesync worker finishes the copy and sets table state to SYNCWAIT;
- *		 waits for state change.
+ *	   - Tablesync worker does the initial table copy; there is a FINISHEDCOPY
+ *		 (sync worker specific) state to indicate when the copy phase has
+ *		 completed, so if the worker crashes with this (persisted) state then
+ *		 the copy will not be re-attempted when the worker is re-launched.
+ *	   - Tablesync worker then sets table state to SYNCWAIT; waits for state change.
  *	   - Apply worker periodically checks for tables in SYNCWAIT state.  When
  *		 any appear, it sets the table state to CATCHUP and starts loop-waiting
  *		 until either the table state is set to SYNCDONE or the sync worker
@@ -48,8 +51,8 @@
  *		 point it sets state to READY and stops tracking.  Again, there might
  *		 be zero changes in between.
  *
- *	  So the state progression is always: INIT -> DATASYNC -> SYNCWAIT ->
- *	  CATCHUP -> SYNCDONE -> READY.
+ *	  So the state progression is always: INIT -> DATASYNC -> FINISHEDCOPY
+ *	  -> SYNCWAIT -> CATCHUP -> SYNCDONE -> READY.
  *
  *	  The catalog pg_subscription_rel is used to keep information about
  *	  subscribed tables and their state.  Some transient state during data
@@ -59,6 +62,7 @@
  *	  Example flows look like this:
  *	   - Apply is in front:
  *		  sync:8
+ *			-> set in catalog FINISHEDCOPY
  *			-> set in memory SYNCWAIT
  *		  apply:10
  *			-> set in memory CATCHUP
@@ -74,6 +78,7 @@
  *
  *	   - Sync is in front:
  *		  sync:10
+ *			-> set in catalog FINISHEDCOPY
  *			-> set in memory SYNCWAIT
  *		  apply:8
  *			-> set in memory CATCHUP
@@ -98,11 +103,16 @@
 #include "miscadmin.h"
 #include "parser/parse_relation.h"
 #include "pgstat.h"
+#include "postmaster/interrupt.h"
 #include "replication/logicallauncher.h"
 #include "replication/logicalrelation.h"
+#include "replication/logicalworker.h"
 #include "replication/walreceiver.h"
 #include "replication/worker_internal.h"
+#include "replication/slot.h"
+#include "replication/origin.h"
 #include "storage/ipc.h"
+#include "storage/lmgr.h"
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
@@ -113,6 +123,42 @@ static bool table_states_valid = false;
 StringInfo	copybuf = NULL;
 
 /*
+ * Common code to drop the origin of a tablesync worker.
+ *
+ * There is a potential race condition if two processes attempt to call
+ * replorigin_drop for the same originid at the same time. The loser of
+ * that race would give an ERROR saying that it failed to find the
+ * expected originid.
+ *
+ * The TRY/CATCH below suppresses such errors, allowing the tablesync cleanup
+ * code to proceed.
+ */
+void
+tablesync_replorigin_drop(Oid subid, Oid relid, bool nowait)
+{
+	char		originname[NAMEDATALEN];
+	RepOriginId originid;
+
+	snprintf(originname, sizeof(originname), "pg_%u_%u", subid, relid);
+	originid = replorigin_by_name(originname, true);
+	if (OidIsValid(originid))
+	{
+		PG_TRY();
+		{
+			replorigin_drop(originid, nowait);
+		}
+		PG_CATCH();
+		{
+			ereport(WARNING,
+					errmsg("could not drop replication origin with OID %d, named \"%s\"",
+						   originid,
+						   originname));
+		}
+		PG_END_TRY();
+	}
+}
+
+/*
  * Exit routine for synchronization worker.
  */
 static void
@@ -260,6 +306,77 @@ invalidate_syncing_table_states(Datum arg, int cacheid, uint32 hashvalue)
 }
 
 /*
+ * The sync worker cleans up any slot / origin resources it may have created.
+ * This function is called from ProcessInterrupts() as a result of the
+ * tablesync worker being signalled.
+ */
+void
+tablesync_cleanup_at_shutdown(void)
+{
+	TimeLineID	tli;
+	Oid			subid = MySubscription->oid;
+	Oid			relid = MyLogicalRepWorker->relid;
+
+	/*
+	 * Cleanup the tablesync slot, if needed.
+	 *
+	 * If state is SYNCDONE or READY then the slot has already been dropped.
+	 */
+	if (wrconn != NULL &&
+		MyLogicalRepWorker->relstate != SUBREL_STATE_SYNCDONE &&
+		MyLogicalRepWorker->relstate != SUBREL_STATE_READY)
+	{
+		char		syncslotname[NAMEDATALEN] = {0};
+
+		/*
+		 * End wal streaming so the wrconn can be re-used to drop the slot.
+		 */
+		PG_TRY();
+		{
+			walrcv_endstreaming(wrconn, &tli);
+		}
+		PG_CATCH();
+		{
+			/*
+			 * It is possible that the walrcv_startstreaming was not yet
+			 * called (e.g. the interrupt initiating this cleanup may have
+			 * happened during the table COPY phase) so suppress any error
+			 * here to cope with that scenario.
+			 */
+		}
+		PG_END_TRY();
+
+		ReplicationSlotNameForTablesync(MySubscription->slotname,
+										subid, relid, syncslotname);
+
+		ReplicationSlotDropAtPubNode(wrconn, syncslotname, true /* missing_ok */ );
+	}
+
+	/*
+	 * Remove the tablesync's origin tracking if it exists.
+	 *
+	 * The origin APIs must be called within a transaction, and this
+	 * transaction will be ended within finish_sync_worker().
+	 */
+	if (!IsTransactionState())
+	{
+		StartTransactionCommand();
+	}
+
+	tablesync_replorigin_drop(subid, relid, false /* nowait */ );
+
+	/*
+	 * CommitTransactionCommand would normally attempt to advance the origin,
+	 * but now that the origin has been dropped that would fail, so reset the
+	 * replication origin session state here to prevent that error.
+	 */
+	replorigin_session_reset();
+	replorigin_session_origin = InvalidRepOriginId;
+
+	finish_sync_worker();		/* doesn't return. */
+}
+
+/*
  * Handle table synchronization cooperation from the synchronization
  * worker.
  *
@@ -270,30 +387,55 @@ invalidate_syncing_table_states(Datum arg, int cacheid, uint32 hashvalue)
 static void
 process_syncing_tables_for_sync(XLogRecPtr current_lsn)
 {
-	Assert(IsTransactionState());
+	bool		sync_done = false;
+	Oid			subid = MySubscription->oid;
+	Oid			relid = MyLogicalRepWorker->relid;
 
 	SpinLockAcquire(&MyLogicalRepWorker->relmutex);
+	sync_done = MyLogicalRepWorker->relstate == SUBREL_STATE_CATCHUP &&
+		current_lsn >= MyLogicalRepWorker->relstate_lsn;
+	SpinLockRelease(&MyLogicalRepWorker->relmutex);
 
-	if (MyLogicalRepWorker->relstate == SUBREL_STATE_CATCHUP &&
-		current_lsn >= MyLogicalRepWorker->relstate_lsn)
+	if (sync_done)
 	{
 		TimeLineID	tli;
+		char		syncslotname[NAMEDATALEN] = {0};
+
+		/* End wal streaming so wrconn can be re-used to drop the slot. */
+		walrcv_endstreaming(wrconn, &tli);
 
+		/*
+		 * Cleanup the tablesync slot.
+		 */
+		ReplicationSlotNameForTablesync(MySubscription->slotname, subid, relid, syncslotname);
+
+		ReplicationSlotDropAtPubNode(wrconn, syncslotname, false /* missing_ok */ );
+
+		/*
+		 * Change state to SYNCDONE.
+		 */
+		SpinLockAcquire(&MyLogicalRepWorker->relmutex);
 		MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCDONE;
 		MyLogicalRepWorker->relstate_lsn = current_lsn;
 
 		SpinLockRelease(&MyLogicalRepWorker->relmutex);
 
+		/*
+		 * UpdateSubscriptionRelState must be called within a transaction.
+		 * That transaction will be ended within finish_sync_worker().
+		 */
+		if (!IsTransactionState())
+		{
+			StartTransactionCommand();
+		}
+
 		UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
 								   MyLogicalRepWorker->relid,
 								   MyLogicalRepWorker->relstate,
 								   MyLogicalRepWorker->relstate_lsn);
 
-		walrcv_endstreaming(wrconn, &tli);
 		finish_sync_worker();
 	}
-	else
-		SpinLockRelease(&MyLogicalRepWorker->relmutex);
 }
 
 /*
@@ -412,6 +554,21 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
 					started_tx = true;
 				}
 
+				/*
+				 * Remove the tablesync origin tracking if it exists.
+				 *
+				 * In the normal case, the origin drop is done here instead of
+				 * in process_syncing_tables_for_sync because if the tablesync
+				 * worker attempted to drop its own origin, that would prevent
+				 * the origin from advancing properly when its transaction is
+				 * committed.
+				 */
+				tablesync_replorigin_drop(MyLogicalRepWorker->subid,
+										  rstate->relid, false /* nowait */ );
+
+				/*
+				 * Update the state to READY only after the origin cleanup.
+				 */
 				UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
 										   rstate->relid, rstate->state,
 										   rstate->lsn);
@@ -808,6 +965,42 @@ copy_table(Relation rel)
 }
 
 /*
+ * Determine the tablesync slot name.
+ *
+ * The returned slot name is either:
+ * - stored in the supplied buffer (syncslotname), or
+ * - palloc'ed in current memory context (if syncslotname = NULL).
+ */
+char *
+ReplicationSlotNameForTablesync(const char *subslotname, Oid suboid, Oid relid, char *syncslotname)
+{
+	/*
+	 * To build a slot name for the sync worker, we are limited to NAMEDATALEN -
+	 * 1 characters.  We cut the original slot name to NAMEDATALEN - 28 chars
+	 * and append _%u_sync_%u (1 + 10 + 6 + 10 + '\0').  (It's actually the
+	 * NAMEDATALEN on the remote that matters, but this scheme will also work
+	 * reasonably if that is different.)
+	 */
+	StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small");	/* for sanity */
+
+	if (syncslotname)
+	{
+		sprintf(syncslotname,
+				"%.*s_%u_sync_%u",
+				NAMEDATALEN - 28,
+				subslotname, suboid, relid);
+	}
+	else
+	{
+		syncslotname = psprintf("%.*s_%u_sync_%u",
+								NAMEDATALEN - 28,
+								subslotname, suboid, relid);
+	}
+
+	return syncslotname;
+}
+
+/*
  * Start syncing the table in the sync worker.
  *
  * If nothing needs to be done to sync the table, we exit the worker without
@@ -824,6 +1017,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
 	XLogRecPtr	relstate_lsn;
 	Relation	rel;
 	WalRcvExecResult *res;
+	char		originname[NAMEDATALEN];
+	RepOriginId originid;
 
 	/* Check the state of the table synchronization. */
 	StartTransactionCommand();
@@ -849,19 +1044,11 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
 			finish_sync_worker();	/* doesn't return */
 	}
 
-	/*
-	 * To build a slot name for the sync work, we are limited to NAMEDATALEN -
-	 * 1 characters.  We cut the original slot name to NAMEDATALEN - 28 chars
-	 * and append _%u_sync_%u (1 + 10 + 6 + 10 + '\0').  (It's actually the
-	 * NAMEDATALEN on the remote that matters, but this scheme will also work
-	 * reasonably if that is different.)
-	 */
-	StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small");	/* for sanity */
-	slotname = psprintf("%.*s_%u_sync_%u",
-						NAMEDATALEN - 28,
-						MySubscription->slotname,
-						MySubscription->oid,
-						MyLogicalRepWorker->relid);
+	/* Calculate the name of the tablesync slot. */
+	slotname = ReplicationSlotNameForTablesync(MySubscription->slotname,
+											   MySubscription->oid,
+											   MyLogicalRepWorker->relid,
+											   NULL /* use palloc */ );
 
 	/*
 	 * Here we use the slot name instead of the subscription name as the
@@ -874,7 +1061,33 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
 				(errmsg("could not connect to the publisher: %s", err)));
 
 	Assert(MyLogicalRepWorker->relstate == SUBREL_STATE_INIT ||
-		   MyLogicalRepWorker->relstate == SUBREL_STATE_DATASYNC);
+		   MyLogicalRepWorker->relstate == SUBREL_STATE_DATASYNC ||
+		   MyLogicalRepWorker->relstate == SUBREL_STATE_FINISHEDCOPY);
+
+	/* Assign the origin tracking record name. */
+	snprintf(originname, sizeof(originname), "pg_%u_%u", MySubscription->oid, MyLogicalRepWorker->relid);
+
+	if (MyLogicalRepWorker->relstate == SUBREL_STATE_FINISHEDCOPY)
+	{
+		/*
+		 * The COPY phase was previously done, but tablesync then crashed
+		 * before it was able to finish normally.
+		 */
+		StartTransactionCommand();
+
+		/*
+		 * The origin tracking record must already exist. It was created the
+		 * first time this tablesync worker was launched.
+		 */
+		originid = replorigin_by_name(originname, false /* missing_ok */ );
+		replorigin_session_setup(originid);
+		replorigin_session_origin = originid;
+		*origin_startpos = replorigin_session_get_progress(false);
+
+		CommitTransactionCommand();
+
+		goto copy_table_done;
+	}
 
 	SpinLockAcquire(&MyLogicalRepWorker->relmutex);
 	MyLogicalRepWorker->relstate = SUBREL_STATE_DATASYNC;
@@ -890,9 +1103,6 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
 	CommitTransactionCommand();
 	pgstat_report_stat(false);
 
-	/*
-	 * We want to do the table data sync in a single transaction.
-	 */
 	StartTransactionCommand();
 
 	/*
@@ -918,29 +1128,97 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
 	walrcv_clear_result(res);
 
 	/*
-	 * Create a new temporary logical decoding slot.  This slot will be used
-	 * for the catchup phase after COPY is done, so tell it to use the
-	 * snapshot to make the final data consistent.
+	 * Be sure to remove the newly created tablesync slot if the COPY fails.
 	 */
-	walrcv_create_slot(wrconn, slotname, true,
-					   CRS_USE_SNAPSHOT, origin_startpos);
+	PG_TRY();
+	{
+		/*
+		 * Create a new permanent logical decoding slot. This slot will be
+		 * used for the catchup phase after COPY is done, so tell it to use
+		 * the snapshot to make the final data consistent.
+		 */
+		walrcv_create_slot(wrconn, slotname, false /* permanent */ ,
+						   CRS_USE_SNAPSHOT, origin_startpos);
 
-	/* Now do the initial data copy */
-	PushActiveSnapshot(GetTransactionSnapshot());
-	copy_table(rel);
-	PopActiveSnapshot();
+		/* Now do the initial data copy */
+		PushActiveSnapshot(GetTransactionSnapshot());
+		copy_table(rel);
+		PopActiveSnapshot();
 
-	res = walrcv_exec(wrconn, "COMMIT", 0, NULL);
-	if (res->status != WALRCV_OK_COMMAND)
-		ereport(ERROR,
-				(errmsg("table copy could not finish transaction on publisher"),
-				 errdetail("The error was: %s", res->err)));
-	walrcv_clear_result(res);
+		res = walrcv_exec(wrconn, "COMMIT", 0, NULL);
+		if (res->status != WALRCV_OK_COMMAND)
+			ereport(ERROR,
+					(errmsg("table copy could not finish transaction on publisher"),
+					 errdetail("The error was: %s", res->err)));
+		walrcv_clear_result(res);
+
+		table_close(rel, NoLock);
+
+		/* Make the copy visible. */
+		CommandCounterIncrement();
+
+		/* Setup replication origin tracking. */
+		originid = replorigin_by_name(originname, true);
+		if (!OidIsValid(originid))
+		{
+			/*
+			 * Origin tracking does not exist, so create it now.
+			 *
+			 * Then advance it to the LSN obtained from walrcv_create_slot. This is
+			 * WAL logged for the purpose of recovery. Locks are to prevent
+			 * the replication origin from vanishing while advancing.
+			 */
+			originid = replorigin_create(originname);
+
+			LockRelationOid(ReplicationOriginRelationId, RowExclusiveLock);
+			replorigin_advance(originid, *origin_startpos, InvalidXLogRecPtr,
+							   true /* go backward */ , true /* WAL log */ );
+			UnlockRelationOid(ReplicationOriginRelationId, RowExclusiveLock);
+
+			replorigin_session_setup(originid);
+			replorigin_session_origin = originid;
+		}
+		else
+		{
+			ereport(ERROR,
+					(errcode(ERRCODE_DUPLICATE_OBJECT),
+					 errmsg("replication origin \"%s\" already exists",
+							originname)));
+		}
+
+		/*
+		 * Update the persisted state to indicate the COPY phase is done; make
+		 * it visible to others.
+		 */
+		UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
+								   MyLogicalRepWorker->relid,
+								   SUBREL_STATE_FINISHEDCOPY,
+								   MyLogicalRepWorker->relstate_lsn);
+
+		CommitTransactionCommand();
+	}
+	PG_CATCH();
+	{
+		/*
+		 * If something failed during the table copy then clean up the
+		 * created slot.
+		 */
+		ReplicationSlotDropAtPubNode(wrconn, slotname, false /* missing_ok */ );
+
+		pfree(slotname);
+		slotname = NULL;
+
+		PG_RE_THROW();
+	}
+	PG_END_TRY();
 
-	table_close(rel, NoLock);
+copy_table_done:
 
-	/* Make the copy visible. */
-	CommandCounterIncrement();
+	elog(DEBUG1,
+		 "LogicalRepSyncTableStart: '%s' origin_startpos lsn %X/%X",
+		 originname,
+		 (uint32) (*origin_startpos >> 32),
+		 (uint32) *origin_startpos);
 
 	/*
 	 * We are done with the initial data synchronization, update the state.
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index eb7db89..0ea1646 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -807,12 +807,8 @@ apply_handle_stream_stop(StringInfo s)
 	/* We must be in a valid transaction state */
 	Assert(IsTransactionState());
 
-	/* The synchronization worker runs in single transaction. */
-	if (!am_tablesync_worker())
-	{
-		/* Commit the per-stream transaction */
-		CommitTransactionCommand();
-	}
+	/* Commit the per-stream transaction */
+	CommitTransactionCommand();
 
 	in_streamed_transaction = false;
 
@@ -889,9 +885,7 @@ apply_handle_stream_abort(StringInfo s)
 			/* Cleanup the subxact info */
 			cleanup_subxact_info();
 
-			/* The synchronization worker runs in single transaction */
-			if (!am_tablesync_worker())
-				CommitTransactionCommand();
+			CommitTransactionCommand();
 			return;
 		}
 
@@ -918,8 +912,7 @@ apply_handle_stream_abort(StringInfo s)
 		/* write the updated subxact list */
 		subxact_info_write(MyLogicalRepWorker->subid, xid);
 
-		if (!am_tablesync_worker())
-			CommitTransactionCommand();
+		CommitTransactionCommand();
 	}
 }
 
@@ -1062,8 +1055,7 @@ apply_handle_stream_commit(StringInfo s)
 static void
 apply_handle_commit_internal(StringInfo s, LogicalRepCommitData *commit_data)
 {
-	/* The synchronization worker runs in single transaction. */
-	if (IsTransactionState() && !am_tablesync_worker())
+	if (IsTransactionState())
 	{
 		/*
 		 * Update origin state so we can restart streaming from correct
@@ -3112,3 +3104,12 @@ IsLogicalWorker(void)
 {
 	return MyLogicalRepWorker != NULL;
 }
+
+/*
+ * Is current process a logical replication tablesync worker?
+ */
+bool
+IsLogicalWorkerTablesync(void)
+{
+	return am_tablesync_worker();
+}
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index cb5a961..8b49dd1 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -3086,9 +3086,15 @@ ProcessInterrupts(void)
 					(errcode(ERRCODE_ADMIN_SHUTDOWN),
 					 errmsg("terminating autovacuum process due to administrator command")));
 		else if (IsLogicalWorker())
+		{
+			/* Tablesync workers do their own cleanup. */
+			if (IsLogicalWorkerTablesync())
+				tablesync_cleanup_at_shutdown(); /* does not return. */
+
 			ereport(FATAL,
 					(errcode(ERRCODE_ADMIN_SHUTDOWN),
 					 errmsg("terminating logical replication worker due to administrator command")));
+		}
 		else if (IsLogicalLauncher())
 		{
 			ereport(DEBUG1,
diff --git a/src/include/catalog/pg_subscription_rel.h b/src/include/catalog/pg_subscription_rel.h
index 06663b9..9027c42 100644
--- a/src/include/catalog/pg_subscription_rel.h
+++ b/src/include/catalog/pg_subscription_rel.h
@@ -61,6 +61,8 @@ DECLARE_UNIQUE_INDEX(pg_subscription_rel_srrelid_srsubid_index, 6117, on pg_subs
 #define SUBREL_STATE_INIT		'i' /* initializing (sublsn NULL) */
 #define SUBREL_STATE_DATASYNC	'd' /* data is being synchronized (sublsn
 									 * NULL) */
+#define SUBREL_STATE_FINISHEDCOPY 'f'	/* tablesync copy phase is completed
+										 * (sublsn NULL) */
 #define SUBREL_STATE_SYNCDONE	's' /* synchronization finished in front of
 									 * apply (sublsn set) */
 #define SUBREL_STATE_READY		'r' /* ready (sublsn set) */
diff --git a/src/include/replication/logicalworker.h b/src/include/replication/logicalworker.h
index 2ad61a0..585df5f 100644
--- a/src/include/replication/logicalworker.h
+++ b/src/include/replication/logicalworker.h
@@ -15,5 +15,7 @@
 extern void ApplyWorkerMain(Datum main_arg);
 
 extern bool IsLogicalWorker(void);
+extern bool IsLogicalWorkerTablesync(void);
+extern void tablesync_cleanup_at_shutdown(void);
 
 #endif							/* LOGICALWORKER_H */
diff --git a/src/include/replication/slot.h b/src/include/replication/slot.h
index 53f636c..db51cf2 100644
--- a/src/include/replication/slot.h
+++ b/src/include/replication/slot.h
@@ -15,6 +15,7 @@
 #include "storage/lwlock.h"
 #include "storage/shmem.h"
 #include "storage/spin.h"
+#include "replication/walreceiver.h"
 
 /*
  * Behaviour of replication slots, upon release or crash.
@@ -211,6 +212,8 @@ extern bool ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive);
 extern void ReplicationSlotsDropDBSlots(Oid dboid);
 extern void InvalidateObsoleteReplicationSlots(XLogSegNo oldestSegno);
 extern ReplicationSlot *SearchNamedReplicationSlot(const char *name);
+extern char *ReplicationSlotNameForTablesync(const char *subslotname, Oid suboid, Oid relid, char *syncslotname);
+extern void ReplicationSlotDropAtPubNode(WalReceiverConn *wrconn, char *slotname, bool missing_ok);
 
 extern void StartupReplicationSlots(void);
 extern void CheckPointReplicationSlots(void);
diff --git a/src/include/replication/worker_internal.h b/src/include/replication/worker_internal.h
index d046022..67bc911 100644
--- a/src/include/replication/worker_internal.h
+++ b/src/include/replication/worker_internal.h
@@ -84,6 +84,8 @@ extern void logicalrep_worker_wakeup_ptr(LogicalRepWorker *worker);
 extern int	logicalrep_sync_worker_count(Oid subid);
 
 extern char *LogicalRepSyncTableStart(XLogRecPtr *origin_startpos);
+extern void tablesync_replorigin_drop(Oid subid, Oid relid, bool nowait);
+
 void		process_syncing_tables(XLogRecPtr current_lsn);
 void		invalidate_syncing_table_states(Datum arg, int cacheid,
 											uint32 hashvalue);
diff --git a/src/test/subscription/t/004_sync.pl b/src/test/subscription/t/004_sync.pl
index e111ab9..ec17c38 100644
--- a/src/test/subscription/t/004_sync.pl
+++ b/src/test/subscription/t/004_sync.pl
@@ -3,7 +3,9 @@ use strict;
 use warnings;
 use PostgresNode;
 use TestLib;
-use Test::More tests => 7;
+use Test::More tests => 10;
+use Time::HiRes qw(usleep);
+use Scalar::Util qw(looks_like_number);
 
 # Initialize publisher node
 my $node_publisher = get_new_node('publisher');
@@ -149,6 +151,71 @@ $result = $node_subscriber->safe_psql('postgres',
 is($result, qq(20),
 	'changes for table added after subscription initialized replicated');
 
+##
+## slot integrity
+##
+## Manually create a slot with the same name that tablesync will want.
+## Expect tablesync ERROR when clash is detected.
+## Then remove the slot so tablesync can proceed.
+## Expect tablesync can now finish normally.
+##
+
+# drop the subscription
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+
+# empty the table tab_rep_next
+$node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep_next;");
+
+# drop the table tab_rep from publisher and subscriber
+$node_subscriber->safe_psql('postgres', "DROP TABLE tab_rep;");
+$node_publisher->safe_psql('postgres', "DROP TABLE tab_rep;");
+
+# recreate the subscription, but leave it disabled so that we can get its OID
+$node_subscriber->safe_psql('postgres',
+	"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub
+	with (enabled = false)"
+);
+
+# To build the name of the tablesync slot we need the subscription OID and the
+# table OID.
+my $subid = $node_subscriber->safe_psql('postgres',
+	"SELECT oid FROM pg_subscription WHERE subname = 'tap_sub';");
+ok(looks_like_number($subid), 'get the subscription OID');
+
+my $relid = $node_subscriber->safe_psql('postgres',
+	"SELECT 'tab_rep_next'::regclass::oid");
+ok(looks_like_number($relid), 'get the table OID');
+
+# name of the tablesync slot is 'slotname'_'suboid'_sync_'tableoid'.
+my $slotname = 'tap_sub_' . $subid . '_' . 'sync_' . $relid;
+
+# temporarily create a slot with the same name as the tablesync slot.
+$node_publisher->safe_psql('postgres',
+	"SELECT 'init' FROM pg_create_logical_replication_slot('$slotname', 'pgoutput', false);");
+
+# enable the subscription
+$node_subscriber->safe_psql('postgres',
+	"ALTER SUBSCRIPTION tap_sub ENABLE"
+);
+
+# the tablesync will get stuck in the data sync state because slot creation fails (the slot already exists).
+$node_subscriber->poll_query_until('postgres', $started_query)
+  or die "Timed out while waiting for subscriber to start sync";
+
+# now drop the offending slot so the tablesync can recover.
+$node_publisher->safe_psql('postgres',
+	"SELECT pg_drop_replication_slot('$slotname');");
+
+# wait for sync to finish
+$node_subscriber->poll_query_until('postgres', $synced_query)
+  or die "Timed out while waiting for subscriber to synchronize data";
+
+$result = $node_subscriber->safe_psql('postgres',
+	"SELECT count(*) FROM tab_rep_next");
+is($result, qq(20),
+	'data for table added after subscription initialized are now synced');
+
+# Cleanup
 $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
 
 $node_subscriber->stop('fast');
-- 
1.8.3.1

