diff --git a/src/backend/replication/logical/applyparallelworker.c b/src/backend/replication/logical/applyparallelworker.c
index 674ff9b9a3..df3aad43e1 100644
--- a/src/backend/replication/logical/applyparallelworker.c
+++ b/src/backend/replication/logical/applyparallelworker.c
@@ -863,16 +863,16 @@ HandleParallelApplyMessages(void)
 		void	   *data;
 		ParallelApplyWorkerInfo *winfo = (ParallelApplyWorkerInfo *) lfirst(lc);
 
-		res = shm_mq_receive(winfo->error_mq_handle, &nbytes, &data, true);
-
 		/*
 		 * The leader will detach from the error queue and set it to NULL
 		 * before preparing to stop all parallel apply workers, so we don't
-		 * need to handle error messages anymore.
+		 * need to handle error messages anymore. See logicalrep_worker_detach.
 		 */
 		if (!winfo->error_mq_handle)
 			continue;
 
+		res = shm_mq_receive(winfo->error_mq_handle, &nbytes, &data, true);
+
 		if (res == SHM_MQ_WOULD_BLOCK)
 			continue;
 		else if (res == SHM_MQ_SUCCESS)
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index 95036c5f2b..1368cf2dde 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -696,8 +696,8 @@ logicalrep_worker_detach(void)
 		/*
 		 * Detach from the error_mq_handle for all parallel apply workers
 		 * before terminating them to prevent the leader apply worker from
-		 * receiving the worker termination messages which will cause the
-		 * leader to exit.
+		 * receiving the worker termination messages and sending them to the
+		 * logs when the same is already done by the parallel apply workers.
 		 */
 		pa_detach_all_error_mq();
 
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 35deb3126c..56e3c6c1a2 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -561,19 +561,20 @@ end_replication_step(void)
 }
 
 /*
- * Handle streamed transactions for both the leader apply worker and the parallel
- * apply workers.
+ * Handle streamed transactions for both the leader apply worker and the
+ * parallel apply workers.
  *
- * In streaming case (receiving a block of streamed transaction), for serialize
- * mode, simply redirect it to a file for the proper toplevel transaction, and
- * for parallel mode, the leader apply worker will send the changes to parallel
- * apply workers and the parallel apply worker will define savepoints if
- * needed. (LOGICAL_REP_MSG_RELATION or LOGICAL_REP_MSG_TYPE changes will be
- * applied by both leader apply worker and parallel apply workers).
+ * In the streaming case (receiving a block of the streamed transaction), for
+ * serialize mode, simply redirect it to a file for the proper toplevel
+ * transaction, and for parallel mode, the leader apply worker will send the
+ * changes to parallel apply workers and the parallel apply worker will define
+ * savepoints if needed. (LOGICAL_REP_MSG_RELATION or LOGICAL_REP_MSG_TYPE
+ * changes will be applied by both leader apply worker and parallel apply
+ * workers).
  *
- * For non-streamed transactions, returns false.
- * For streamed transactions, returns true if in leader apply worker, false
- * otherwise.
+ * Returns true for streamed transactions (when the change is either serialized
+ * to file or sent to parallel apply worker), false otherwise (regular mode or
+ * needs to be processed by parallel apply worker).
  *
  * Exception: If the message being processed is LOGICAL_REP_MSG_RELATION
  * or LOGICAL_REP_MSG_TYPE, return false even if the message needs to be sent
@@ -1392,8 +1393,6 @@ apply_handle_origin(StringInfo s)
 				 errmsg_internal("ORIGIN message sent out of order")));
 }
 
-
-
 /*
  * Handle STREAM START message.
  */
@@ -1558,7 +1557,8 @@ apply_handle_stream_stop(StringInfo s)
 			/*
 			 * Lock before sending the STREAM_STOP message so that the leader
 			 * can hold the lock first and the parallel apply worker will wait
-			 * for leader to release the lock.
+			 * for leader to release the lock. See Locking Considerations atop
+			 * applyparallelworker.c.
 			 */
 			pa_lock_stream(winfo->shared->xid, AccessExclusiveLock);
 
@@ -1576,8 +1576,11 @@ apply_handle_stream_stop(StringInfo s)
 			/*
 			 * By the time parallel apply worker is processing the changes in
 			 * the current streaming block, the leader apply worker may have
-			 * sent multiple streaming blocks. So, try to lock only if there
-			 * is no message left in the queue.
+			 * sent multiple streaming blocks. This can lead to the parallel
+			 * apply worker starting to wait even when there are more chunks
+			 * of streams in the queue. So, try to lock only if there is no
+			 * message left in the queue. See Locking Considerations atop
+			 * applyparallelworker.c.
 			 */
 			if (pg_atomic_sub_fetch_u32(&MyParallelShared->pending_stream_count, 1) == 0)
 			{
@@ -1733,9 +1736,11 @@ apply_handle_stream_abort(StringInfo s)
 			 * XXX For the case of aborting the subtransaction, we only
 			 * increment the number of streaming blocks without releasing the
 			 * lock. This may slightly delay the processing of STREAM_ABORT
-			 * message but can ensure that the parallel apply worker will wait
-			 * on the lock for the next set of changes after processing the
-			 * STREAM_ABORT message.
+			 * message, as the parallel apply worker may already be waiting on
+			 * a lock after processing the STREAM_STOP message, but it ensures
+			 * that the parallel apply worker will wait on the lock for the
+			 * next set of changes after processing the STREAM_ABORT message
+			 * if it is not already waiting for the STREAM_STOP message.
 			 */
 			if (toplevel_xact)
 				pa_unlock_stream(xid, AccessExclusiveLock);
@@ -1759,8 +1764,11 @@ apply_handle_stream_abort(StringInfo s)
 			 *
 			 * By the time parallel apply worker is processing the changes in
 			 * the current streaming block, the leader apply worker may have
-			 * sent multiple streaming blocks. So, try to lock only if there is
-			 * no message left in the queue.
+			 * sent multiple streaming blocks. This can lead to the parallel
+			 * apply worker starting to wait even when there are more chunks
+			 * of streams in the queue. So, try to lock only if there is no
+			 * message left in the queue. See Locking Considerations atop
+			 * applyparallelworker.c.
 			 */
 			if (!toplevel_xact &&
 				pg_atomic_sub_fetch_u32(&MyParallelShared->pending_stream_count, 1) == 0)
@@ -3156,7 +3164,10 @@ store_flush_position(XLogRecPtr remote_lsn)
 {
 	FlushPosition *flushpos;
 
-	/* Skip for parallel apply workers. */
+	/*
+	 * Skip for parallel apply workers. The leader apply worker maintains the
+	 * lsn_mapping and is responsible for updating it.
+	 */
 	if (am_parallel_apply_worker())
 		return;
 
