diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 712385b..752e3b5 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -22,6 +22,7 @@
 #include "access/relscan.h"
 #include "access/xlog.h"
 #include "catalog/index.h"
+#include "catalog/pg_namespace.h"
 #include "commands/vacuum.h"
 #include "storage/indexfsm.h"
 #include "storage/ipc.h"
@@ -823,6 +824,11 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	}
 
 	/*
+	 * Check to see whether we need to issue one final WAL record for this
+	 * index, which may be needed for correctness on a hot standby node when
+	 * non-MVCC index scans could take place. This is now only required when
+	 * we perform a TOAST scan, and so applies only to TOAST indexes.
+	 *
 	 * If the WAL is replayed in hot standby, the replay process needs to get
 	 * cleanup locks on all index leaf pages, just as we've been doing here.
 	 * However, we won't issue any WAL records about pages that have no items
@@ -833,6 +839,7 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	 * against the last leaf page in the index, if that one wasn't vacuumed.
 	 */
 	if (XLogStandbyInfoActive() &&
+		rel->rd_rel->relnamespace == PG_TOAST_NAMESPACE &&
 		vstate.lastBlockVacuumed < vstate.lastBlockLocked)
 	{
 		Buffer		buf;
@@ -1031,6 +1038,20 @@ restart:
 		 */
 		if (ndeletable > 0)
 		{
+			BlockNumber	lastBlockVacuumed = InvalidBlockNumber;
+
+			/*
+			 * We may need to record the lastBlockVacuumed for use when
+			 * non-MVCC scans might be performed on the index on a
+			 * hot standby. See explanation in btree_xlog_vacuum().
+			 *
+			 * On a hot standby, a non-MVCC scan can only take place
+			 * when we access a TOAST index, so we need only record
+			 * the lastBlockVacuumed if we are vacuuming a TOAST index.
+			 */
+			if (rel->rd_rel->relnamespace == PG_TOAST_NAMESPACE)
+				lastBlockVacuumed = vstate->lastBlockVacuumed;
+
 			/*
 			 * Notice that the issued XLOG_BTREE_VACUUM WAL record includes an
 			 * instruction to the replay code to get cleanup lock on all pages
@@ -1043,7 +1064,7 @@ restart:
 			 * that.
 			 */
 			_bt_delitems_vacuum(rel, buf, deletable, ndeletable,
-								vstate->lastBlockVacuumed);
+								lastBlockVacuumed);
 
 			/*
 			 * Remember highest leaf page number we've issued a
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index bba4840..0d094ca 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -391,6 +391,19 @@ btree_xlog_vacuum(XLogReaderState *record)
 	BTPageOpaque opaque;
 
 	/*
+	 * If we are running non-MVCC scans using this index we need to do some
+	 * additional work to ensure correctness, which is known as a "pin scan"
+	 * described in more detail in next paragraphs. We used to do the extra
+	 * work in all cases, whereas we now avoid that work except when the index
+	 * is a toast index, since toast scans aren't fully MVCC compliant.
+	 * If lastBlockVacuumed is set to InvalidBlockNumber then we skip the
+	 * additional work required for the pin scan.
+	 *
+	 * Avoiding this extra work is important since it requires us to touch
+	 * every page in the index, so is an O(N) operation. Worse, it is an
+	 * operation performed in the foreground during redo, so it delays
+	 * replication directly.
+	 *
 	 * If queries might be active then we need to ensure every leaf page is
 	 * unpinned between the lastBlockVacuumed and the current block, if there
 	 * are any.  This prevents replay of the VACUUM from reaching the stage of
@@ -412,7 +425,7 @@ btree_xlog_vacuum(XLogReaderState *record)
 	 * isn't yet consistent; so we need not fear reading still-corrupt blocks
 	 * here during crash recovery.
 	 */
-	if (HotStandbyActiveInReplay())
+	if (HotStandbyActiveInReplay() && BlockNumberIsValid(xlrec->lastBlockVacuumed))
 	{
 		RelFileNode thisrnode;
 		BlockNumber thisblkno;
@@ -433,7 +446,8 @@ btree_xlog_vacuum(XLogReaderState *record)
 			 * XXX we don't actually need to read the block, we just need to
 			 * confirm it is unpinned. If we had a special call into the
 			 * buffer manager we could optimise this so that if the block is
-			 * not in shared_buffers we confirm it as unpinned.
+			 * not in shared_buffers we confirm it as unpinned. Optimizing
+			 * this is now moot, since in most cases we avoid the scan.
 			 */
 			buffer = XLogReadBufferExtended(thisrnode, MAIN_FORKNUM, blkno,
 											RBM_NORMAL_NO_LOG);
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index 9ebf446..b760833 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -331,8 +331,10 @@ typedef struct xl_btree_reuse_page
  * The WAL record can represent deletion of any number of index tuples on a
  * single index page when executed by VACUUM.
  *
- * The correctness requirement for applying these changes during recovery is
- * that we must do one of these two things for every block in the index:
+ * For MVCC scans, lastBlockVacuumed will be set to InvalidBlockNumber.
+ * For non-MVCC index scans there is an additional correctness requirement
+ * for applying these changes during recovery, which is that we must do one
+ * of these two things for every block in the index:
  *		* lock the block for cleanup and apply any required changes
  *		* EnsureBlockUnpinned()
  * The purpose of this is to ensure that no index scans started before we
