From 87d5f6bff7817367462a94a81128bfe243516310 Mon Sep 17 00:00:00 2001
From: Amit Kapila <amit.kapila@enterprisedb.com>
Date: Mon, 25 Nov 2019 16:26:17 +0530
Subject: [PATCH] Added/Changed comments and other cosmetic changes.

---
 doc/src/sgml/config.sgml             |   2 +-
 doc/src/sgml/ref/vacuum.sgml         |  26 +--
 src/backend/access/heap/vacuumlazy.c | 322 +++++++++++++++++------------------
 3 files changed, 175 insertions(+), 175 deletions(-)

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 51e7bb4a62..7e17d98fd8 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -2314,7 +2314,7 @@ include_dir 'conf.d'
          utility commands that support the use of parallel workers are
          <command>CREATE INDEX</command> only when building a B-tree index,
          and <command>VACUUM</command> without <literal>FULL</literal>
-         option. Parallel workers are taken from the pool of processes
+         option.  Parallel workers are taken from the pool of processes
          established by <xref linkend="guc-max-worker-processes"/>, limited
          by <xref linkend="guc-max-parallel-workers"/>.  Note that the requested
          number of workers may not actually be available at run time.
diff --git a/doc/src/sgml/ref/vacuum.sgml b/doc/src/sgml/ref/vacuum.sgml
index ae086b976b..9fee083233 100644
--- a/doc/src/sgml/ref/vacuum.sgml
+++ b/doc/src/sgml/ref/vacuum.sgml
@@ -231,21 +231,21 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ ANALYZE ] [ <replaceable class="paramet
       Perform vacuum index and cleanup index phases of <command>VACUUM</command>
       in parallel using <replaceable class="parameter">integer</replaceable>
       background workers (for the detail of each vacuum phases, please
-      refer to <xref linkend="vacuum-phases"/>). If the parallel degree
+      refer to <xref linkend="vacuum-phases"/>).  If the parallel degree
       <replaceable class="parameter">integer</replaceable> is omitted,
       then <command>VACUUM</command> decides the number of workers based
       on number of indexes that support parallel vacuum operation on the
       relation which is further limited by
-      <xref linkend="guc-max-parallel-workers-maintenance"/>. Please note
-      that it is not guaranteed that the number of parallel worker specified
+      <xref linkend="guc-max-parallel-workers-maintenance"/>.  Please note
+      that it is not guaranteed that the number of parallel workers specified
       in <replaceable class="parameter">integer</replaceable> will be used
-      during execution. It is possible for a vacuum to run with fewer workers
-      than specified, or even with no workers at all. Only one worker can
-      be used per index. So parallel workers are launched only when there
-      are at least <literal>2</literal> indexes in the table. Workers for
-      vacuum launches before starting each phases and exit at the end of
-      the phase. These behaviors might change in a future release. This
-      option can not use with <literal>FULL</literal> option.
+      during execution.  It is possible for a vacuum to run with fewer workers
+      than specified, or even with no workers at all.  Only one worker can
+      be used per index.  So parallel workers are launched only when there
+      are at least <literal>2</literal> indexes in the table.  Workers for
+      vacuum are launched before starting each phase and exit at the end of
+      the phase.  These behaviors might change in a future release.  This
+      option can't be used with the <literal>FULL</literal> option.
      </para>
     </listitem>
    </varlistentry>
@@ -270,8 +270,8 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ ANALYZE ] [ <replaceable class="paramet
      <para>
       Specifies a positive integer value passed to the selected option.
       The <replaceable class="parameter">integer</replaceable> value can
-      also be omitted, in which case the default value of the selected
-      option is used.
+      also be omitted, in which case the value is decided by the command
+      based on the option used.
      </para>
     </listitem>
    </varlistentry>
@@ -356,7 +356,7 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ ANALYZE ] [ <replaceable class="paramet
    </para>
 
    <para>
-     The <option>PARALLEL</option> option is used for only vacuum purpose.
+     The <option>PARALLEL</option> option is used only for vacuum purposes.
      Even if this option is specified with <option>ANALYZE</option> option
      it does not affect <option>ANALYZE</option>.
    </para>
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index c2fe56a4b2..17598a126a 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -23,20 +23,21 @@
  * the TID array, just enough to hold as many heap tuples as fit on one page.
  *
  * Lazy vacuum supports parallel execution with parallel worker processes.  In
- * parallel lazy vacuum, we perform both index vacuuming and index cleanup with
- * parallel worker processes.  Individual indexes are processed by one vacuum
- * process.  At the beginning of lazy vacuum (at lazy_scan_heap) we prepare the
- * parallel context and initialize the DSM segment that contains shared information
- * as well as the memory space for storing dead tuples.  When starting either
- * index vacuuming or index cleanup, we launch parallel worker processes.  Once
- * all indexes are processed the parallel worker processes exit.  And then the
- * leader process re-initializes the parallel context while keeping recorded
- * dead tuples so that the leader can launch parallel workers again in the next
- * time.  Note that all parallel workers live during either index vacuuming or
- * index cleanup but the leader process neither exits from the parallel mode
- * nor destroys the parallel context.  For updating the index statistics, since
- * any updates are not allowed during parallel mode we update the index
- * statistics after exited from the parallel mode.
+ * a parallel lazy vacuum, we perform both index vacuuming and index cleanup
+ * with parallel worker processes.  Individual indexes are processed by one
+ * vacuum process.  At the beginning of a lazy vacuum (at lazy_scan_heap) we
+ * prepare the parallel context and initialize the DSM segment that contains
+ * shared information as well as the memory space for storing dead tuples.
+ * When starting either index vacuuming or index cleanup, we launch parallel
+ * worker processes.  Once all indexes are processed the parallel worker
+ * processes exit.  And then the leader process re-initializes the parallel
+ * context while keeping recorded dead tuples so that the leader can launch
+ * parallel workers again the next time.  Note that all parallel workers
+ * live during either index vacuuming or index cleanup but the leader process
+ * neither exits from the parallel mode nor destroys the parallel context.  For
+ * updating the index statistics, since updates are not allowed during
+ * parallel mode we update the index statistics after exiting from the parallel
+ * mode.
  *
  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -130,7 +131,7 @@
 #define PREFETCH_SIZE			((BlockNumber) 32)
 
 /*
- * DSM keys for parallel lazy vacuum. Unlike other parallel execution code,
+ * DSM keys for parallel lazy vacuum.  Unlike other parallel execution code,
  * since we don't need to worry about DSM keys conflicting with plan_node_id
  * we can use small integers.
  */
@@ -146,15 +147,15 @@
  */
 
 /*
- * Macro to check if we are in a parallel lazy vacuum. If true, we are
+ * Macro to check if we are in a parallel lazy vacuum.  If true, we are
  * in the parallel mode and prepared the DSM segment.
  */
 #define ParallelVacuumIsActive(lps) (((LVParallelState *) (lps)) != NULL)
 
 /*
- * LVDeadTuples stores the dead tuple TIDs collected during heap scan.
- * This is allocated in the DSM segment when parallel lazy vacuum
- * mode, otherwise allocated in a local memory.
+ * LVDeadTuples stores the dead tuple TIDs collected during the heap scan.
+ * This is allocated in the DSM segment in parallel mode and in local memory
+ * in non-parallel mode.
  */
 typedef struct LVDeadTuples
 {
@@ -164,11 +165,12 @@ typedef struct LVDeadTuples
 	/* NB: this list is ordered by TID address */
 	ItemPointerData itemptrs[FLEXIBLE_ARRAY_MEMBER];	/* array of ItemPointerData */
 } LVDeadTuples;
+
 #define SizeOfLVDeadTuples offsetof(LVDeadTuples, itemptrs) + sizeof(ItemPointerData)
 
 /*
- * Shared information among parallel workers. So this is allocated in
- * the DSM segment.
+ * Shared information among parallel workers.  So this is allocated in the DSM
+ * segment.
  */
 typedef struct LVShared
 {
@@ -181,7 +183,7 @@ typedef struct LVShared
 
 	/*
 	 * An indication for vacuum workers of doing either index vacuuming or
-	 * index cleanup. first_time is true only if for_cleanup is true and
+	 * index cleanup.  first_time is true only if for_cleanup is true and
 	 * bulk-deletion is not performed yet.
 	 */
 	bool	for_cleanup;
@@ -201,11 +203,11 @@ typedef struct LVShared
 
 	/*
 	 * In single process lazy vacuum we could consume more memory during
-	 * index vacuuming or cleanup apart from the memory for heap scanning
-	 * if an index consume memory during ambulkdelete and amvacuumcleanup.
-	 * In parallel index vacuuming, since individual vacuum workers
-	 * consumes memory we set the new maitenance_work_mem for each workers
-	 * to not consume more memory than single process lazy vacuum.
+	 * index vacuuming or cleanup apart from the memory for heap scanning.
+	 * In parallel index vacuuming, since individual vacuum workers can
+	 * consume memory equal to maintenance_work_mem, the new
+	 * maintenance_work_mem for each worker is set such that the parallel
+	 * operation doesn't consume more memory than single process lazy vacuum.
 	 */
 	int		maintenance_work_mem_worker;
 
@@ -237,51 +239,31 @@ typedef struct LVShared
 
 	/* Shared index statistics data follows at end of struct */
 } LVShared;
+
 #define SizeOfLVShared offsetof(LVShared, bitmap) + sizeof(bits8)
 #define GetSharedIndStats(s) \
 	((LVSharedIndStats *)((char *)(s) + ((LVShared *)(s))->offset))
 #define IndStatsIsNull(s, i) \
 	(!(((LVShared *)(s))->bitmap[(i) >> 3] & (1 << ((i) & 0x07))))
 
-/*
- * Variables for cost-based vacuum delay for parallel index vacuuming.
- * The basic idea of cost-based vacuum delay for parallel index vacuuming
- * is to allow all parallel vacuum workers including the leader process
- * to have a shared view of cost related parameters (mainly VacuumCostBalance)
- * and allow each worker to update it and then based on that decide
- * whether it needs to sleep.  Besides, we allow any worker to sleep
- * only if it has performed the I/O above a certain threshold, which is
- * calculated based on the number of active workers (VacuumActiveNWorkers),
- * and the overall cost balance is more than VacuumCostLimit set by the
- * system.  Then we will allow the worker to sleep proportional to the work
- * done and reduce the VacuumSharedCostBalance by the amount which is
- * consumed by the current worker (VacuumCostBalanceLocal).  This can
- * avoid letting the workers sleep which has done less or no I/O as compared
- * to other workers, and therefore can ensure that workers who are doing
- * more I/O got throttled more.
- */
-pg_atomic_uint32	*VacuumSharedCostBalance = NULL;
-pg_atomic_uint32	*VacuumActiveNWorkers = NULL;
-int					VacuumCostBalanceLocal = 0;
-
 /*
  * Struct for an index bulk-deletion statistic used for parallel lazy
- * vacuum. This is allocated in the DSM segment.  IndexBulkDeleteResult
- * follows at end of struct.
+ * vacuum.  This is allocated in the DSM segment.
  */
 typedef struct LVSharedIndStats
 {
 	Size	size;
-	bool	updated;	/* are the stats updated */
+	bool	updated;	/* are the stats updated? */
 
-	/* Index bulk-deletion result data follows at end of struct */
+	/* IndexBulkDeleteResult data follows at end of struct */
 } LVSharedIndStats;
+
 #define SizeOfSharedIndStats(s) \
 	(sizeof(LVSharedIndStats) + ((LVSharedIndStats *)(s))->size)
 #define GetIndexBulkDeleteResult(s) \
 	((IndexBulkDeleteResult *)((char *)(s) + sizeof(LVSharedIndStats)))
 
-/* Struct for parallel lazy vacuum */
+/* Struct for maintaining a parallel vacuum state. */
 typedef struct LVParallelState
 {
 	ParallelContext	*pcxt;
@@ -337,6 +319,26 @@ static MultiXactId MultiXactCutoff;
 
 static BufferAccessStrategy vac_strategy;
 
+/*
+ * Variables for cost-based vacuum delay for parallel index vacuuming.
+ * The basic idea of cost-based vacuum delay for parallel index vacuuming
+ * is to allow all parallel vacuum workers including the leader process
+ * to have a shared view of cost related parameters (mainly VacuumCostBalance)
+ * and allow each worker to update it and then based on that decide
+ * whether it needs to sleep.  Besides, we allow any worker to sleep
+ * only if it has performed the I/O above a certain threshold, which is
+ * calculated based on the number of active workers (VacuumActiveNWorkers),
+ * and the overall cost balance is more than VacuumCostLimit set by the
+ * system.  Then we will allow the worker to sleep proportional to the work
+ * done and reduce the VacuumSharedCostBalance by the amount which is
+ * consumed by the current worker (VacuumCostBalanceLocal).  This can
+ * avoid letting the workers sleep which has done less or no I/O as compared
+ * to other workers, and therefore can ensure that workers who are doing
+ * more I/O got throttled more.
+ */
+pg_atomic_uint32	*VacuumSharedCostBalance = NULL;
+pg_atomic_uint32	*VacuumActiveNWorkers = NULL;
+int					VacuumCostBalanceLocal = 0;
 
 /* non-export function prototypes */
 static void lazy_scan_heap(Relation onerel, VacuumParams *params,
@@ -363,19 +365,6 @@ static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
 static int	vac_cmp_itemptr(const void *left, const void *right);
 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
 									 TransactionId *visibility_cutoff_xid, bool *all_frozen);
-static LVParallelState *begin_parallel_vacuum(LVRelStats *vacrelstats, Oid relid,
-											  BlockNumber nblocks, Relation *Irel,
-											  int nindexes, int nrequested);
-static void end_parallel_vacuum(LVParallelState *lps, Relation *Irel, int nindexes,
-								IndexBulkDeleteResult **stats);
-static void prepare_index_statistics(LVShared *lvshared, Relation *Irel, int nindexes,
-									 int nworkers);
-static void lazy_vacuum_indexes(LVRelStats *vacrelstats, Relation *Irel,
-								int nindexes, IndexBulkDeleteResult **stats,
-								LVParallelState *lps);
-static void lazy_cleanup_indexes(LVRelStats *vacrelstats, Relation *Irel,
-								 int nindexes, IndexBulkDeleteResult **stats,
-								 LVParallelState *lps);
 static void lazy_parallel_vacuum_or_cleanup_indexes(LVRelStats *vacrelstats, Relation *Irel,
 													int nindexes, IndexBulkDeleteResult **stats,
 													LVParallelState *lps);
@@ -383,11 +372,24 @@ static void vacuum_or_cleanup_indexes_worker(Relation *Irel, int nindexes,
 											 IndexBulkDeleteResult **stats,
 											 LVShared *lvshared,
 											 LVDeadTuples *dead_tuples);
+static void lazy_vacuum_indexes(LVRelStats *vacrelstats, Relation *Irel,
+								int nindexes, IndexBulkDeleteResult **stats,
+								LVParallelState *lps);
+static void lazy_cleanup_indexes(LVRelStats *vacrelstats, Relation *Irel,
+								 int nindexes, IndexBulkDeleteResult **stats,
+								 LVParallelState *lps);
 static void update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
 									int nindexes);
-static LVSharedIndStats *get_indstats(LVShared *lvshared, int n);
-static int compute_parallel_workers(Relation *Irel, int nindexes, int nrequested);
 static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex);
+static int compute_parallel_workers(Relation *Irel, int nindexes, int nrequested);
+static void prepare_index_statistics(LVShared *lvshared, Relation *Irel, int nindexes,
+									 int nworkers);
+static LVParallelState *begin_parallel_vacuum(LVRelStats *vacrelstats, Oid relid,
+											  BlockNumber nblocks, Relation *Irel,
+											  int nindexes, int nrequested);
+static void end_parallel_vacuum(LVParallelState *lps, Relation *Irel, int nindexes,
+								IndexBulkDeleteResult **stats);
+static LVSharedIndStats *get_indstats(LVShared *lvshared, int n);
 static bool skip_parallel_index_vacuum(Relation indrel, bool for_cleanup,
 									   bool first_time);
 
@@ -705,12 +707,12 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
  *
  *		If the table has at least two indexes and parallel lazy vacuum is
  *		requested, we execute both index vacuuming and index cleanup with
- *		parallel workers. In parallel lazy vacuum, we enter parallel mode and
+ *		parallel workers.  In parallel lazy vacuum, we enter parallel mode and
  *		then create both the parallel context and the DSM segment before starting
- *		heap scan so that we can record dead tuples to the DSM segment. All
+ *		heap scan so that we can record dead tuples to the DSM segment.  All
  *		parallel workers are launched at beginning of index vacuuming and index
- *		cleanup and they exit once done with all indexes. At the end of this
- *		function we exit from parallel mode. Index bulk-deletion results are
+ *		cleanup and they exit once done with all indexes.  At the end of this
+ *		function we exit from parallel mode.  Index bulk-deletion results are
  *		stored in the DSM segment and update index statistics as a whole after
  *		exited from parallel mode since all writes are not allowed during parallel
  *		mode.
@@ -784,8 +786,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	vacrelstats->latestRemovedXid = InvalidTransactionId;
 
 	/*
-	 * If parallel lazy vacuum is requested and we vacuum indexes, compute
-	 * the number of parallel vacuum worker to launch.
+	 * Compute the number of parallel vacuum workers to launch if the parallel
+	 * vacuum is requested and we need to vacuum the indexes.
 	 */
 	if (params->nworkers >= 0 && vacrelstats->useindex)
 		parallel_workers = compute_parallel_workers(Irel, nindexes,
@@ -1992,9 +1994,10 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup)
 }
 
 /*
- * Perform index vacuuming or index cleanup with parallel workers. This function
- * must be used by the parallel vacuum leader process. The caller must set
- * lps->lvshared->for_cleanup to indicate whether vacuuming or cleanup.
+ * Perform index vacuuming or index cleanup with parallel workers.  This
+ * function must be used by the parallel vacuum leader process.  The caller
+ * must set lps->lvshared->for_cleanup to indicate whether to perform vacuum
+ * or cleanup.
  */
 static void
 lazy_parallel_vacuum_or_cleanup_indexes(LVRelStats *vacrelstats, Relation *Irel,
@@ -2042,7 +2045,7 @@ lazy_parallel_vacuum_or_cleanup_indexes(LVRelStats *vacrelstats, Relation *Irel,
 						lps->pcxt->nworkers_launched, lps->pcxt->nworkers)));
 
 	/*
-	* Increment the active worker count. We cannot decrement until the
+	* Increment the active worker count.  We cannot decrement until the
 	* all parallel workers finish.
 	*/
 	pg_atomic_add_fetch_u32(VacuumActiveNWorkers, 1);
@@ -2220,11 +2223,11 @@ vacuum_or_cleanup_indexes_worker(Relation *Irel, int nindexes,
 }
 
 /*
- * Cleanup indexes.  This function must be used by the parallel vacuum
- * leader process in parallel vacuum case.
+ * Vacuum indexes.  We process the indexes serially unless we are doing
+ * parallel vacuum.
  */
 static void
-lazy_cleanup_indexes(LVRelStats *vacrelstats, Relation *Irel,
+lazy_vacuum_indexes(LVRelStats *vacrelstats, Relation *Irel,
 					int nindexes, IndexBulkDeleteResult **stats,
 					LVParallelState *lps)
 {
@@ -2233,25 +2236,19 @@ lazy_cleanup_indexes(LVRelStats *vacrelstats, Relation *Irel,
 	Assert(!IsParallelWorker());
 	Assert(nindexes > 0);
 
-	/*
-	 * If parallel vacuum is active we perform index cleanup with parallel
-	 * workers.
-	 */
+	/* Perform index vacuuming with parallel workers for parallel vacuum. */
 	if (ParallelVacuumIsActive(lps))
 	{
-		/* Tell parallel workers to do index cleanup */
-		lps->lvshared->for_cleanup = true;
-		lps->lvshared->first_time =
-			(vacrelstats->num_index_scans == 0);
+		/* Tell parallel workers to do index vacuuming */
+		lps->lvshared->for_cleanup = false;
+		lps->lvshared->first_time = false;
 
 		/*
-		 * Now we can provide a better estimate of total number of
-		 * surviving tuples (we assume indexes are more interested in that
-		 * than in the number of nominally live tuples).
+		 * We can only provide an approximate value of num_heap_tuples in
+		 * vacuum cases.
 		 */
-		lps->lvshared->reltuples = vacrelstats->new_rel_tuples;
-		lps->lvshared->estimated_count =
-			(vacrelstats->tupcount_pages < vacrelstats->rel_pages);
+		lps->lvshared->reltuples = vacrelstats->old_live_tuples;
+		lps->lvshared->estimated_count = true;
 
 		lazy_parallel_vacuum_or_cleanup_indexes(vacrelstats, Irel, nindexes,
 												stats, lps);
@@ -2259,20 +2256,19 @@ lazy_cleanup_indexes(LVRelStats *vacrelstats, Relation *Irel,
 	else
 	{
 		for (idx = 0; idx < nindexes; idx++)
-			lazy_cleanup_index(Irel[idx], &stats[idx],
-							   vacrelstats->new_rel_tuples,
-							   vacrelstats->tupcount_pages < vacrelstats->rel_pages);
+			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
+							  vacrelstats->old_live_tuples);
 	}
 }
 
 /*
- * Vacuum indexes. This function must be used by the parallel vacuum leader
- * process in parallel vacuum case.
+ * Cleanup indexes.  We process the indexes serially unless we are doing
+ * parallel vacuum.
  */
 static void
-lazy_vacuum_indexes(LVRelStats *vacrelstats, Relation *Irel,
-					int nindexes, IndexBulkDeleteResult **stats,
-					LVParallelState *lps)
+lazy_cleanup_indexes(LVRelStats *vacrelstats, Relation *Irel,
+					 int nindexes, IndexBulkDeleteResult **stats,
+					 LVParallelState *lps)
 {
 	int		idx;
 
@@ -2280,21 +2276,24 @@ lazy_vacuum_indexes(LVRelStats *vacrelstats, Relation *Irel,
 	Assert(nindexes > 0);
 
 	/*
-	 * If parallel vacuum is active we perform index vacuuming with
-	 * parallel workers.
+	 * If parallel vacuum is active we perform index cleanup with parallel
+	 * workers.
 	 */
 	if (ParallelVacuumIsActive(lps))
 	{
-		/* Tell parallel workers to do index vacuuming */
-		lps->lvshared->for_cleanup = false;
-		lps->lvshared->first_time = false;
+		/* Tell parallel workers to do index cleanup */
+		lps->lvshared->for_cleanup = true;
+		lps->lvshared->first_time =
+						(vacrelstats->num_index_scans == 0);
 
 		/*
-		 * We can only provide an approximate value of num_heap_tuples in
-		 * vacuum cases.
+		 * Now we can provide a better estimate of total number of
+		 * surviving tuples (we assume indexes are more interested in that
+		 * than in the number of nominally live tuples).
 		 */
-		lps->lvshared->reltuples = vacrelstats->old_live_tuples;
-		lps->lvshared->estimated_count = true;
+		lps->lvshared->reltuples = vacrelstats->new_rel_tuples;
+		lps->lvshared->estimated_count =
+					(vacrelstats->tupcount_pages < vacrelstats->rel_pages);
 
 		lazy_parallel_vacuum_or_cleanup_indexes(vacrelstats, Irel, nindexes,
 												stats, lps);
@@ -2302,8 +2301,9 @@ lazy_vacuum_indexes(LVRelStats *vacrelstats, Relation *Irel,
 	else
 	{
 		for (idx = 0; idx < nindexes; idx++)
-			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
-							  vacrelstats->old_live_tuples);
+			lazy_cleanup_index(Irel[idx], &stats[idx],
+							   vacrelstats->new_rel_tuples,
+							   vacrelstats->tupcount_pages < vacrelstats->rel_pages);
 	}
 }
 
@@ -3022,6 +3022,53 @@ compute_parallel_workers(Relation *Irel, int nindexes, int nrequested)
 	return parallel_workers;
 }
 
+/*
+ * Initialize variables for shared index statistics, set NULL bitmap and
+ * the struct size of each indexes.  Also this function sets the number of
+ * indexes that do not support parallel index vacuuming and that use
+ * maintenance_work_mem.  Since currently we don't support parallel vacuum
+ * for autovacuum we don't need to care about autovacuum_work_mem.
+ */
+static void
+prepare_index_statistics(LVShared *lvshared, Relation *Irel, int nindexes,
+						 int nworkers)
+{
+	char *p = (char *) GetSharedIndStats(lvshared);
+	int nindexes_mwm = 0;
+	int i;
+
+	Assert(!IsAutoVacuumWorkerProcess());
+
+	for (i = 0; i < nindexes; i++)
+	{
+		LVSharedIndStats *indstats;
+
+		if (Irel[i]->rd_indam->amparallelvacuumoptions ==
+			VACUUM_OPTION_NO_PARALLEL)
+		{
+			/* Set NULL as this index does not support parallel vacuum */
+			lvshared->bitmap[i >> 3] |= 0 << (i & 0x07);
+			continue;
+		}
+
+		if (Irel[i]->rd_indam->amusemaintenanceworkmem)
+			nindexes_mwm++;
+
+		/* Set the size for index statistics */
+		indstats = (LVSharedIndStats *) p;
+		lvshared->bitmap[i >> 3] |= 1 << (i & 0x07);
+		indstats->size = index_parallelvacuum_estimate(Irel[i]);
+
+		p += SizeOfSharedIndStats(indstats);
+	}
+
+	/* Compute the new maintenance_work_mem value for index vacuuming */
+	lvshared->maintenance_work_mem_worker =
+					(nindexes_mwm > 0) ?
+					maintenance_work_mem / Min(nworkers, nindexes_mwm) :
+					maintenance_work_mem;
+}
+
 /*
  * Enter parallel mode, allocate and initialize the DSM segment.
  */
@@ -3134,53 +3181,6 @@ begin_parallel_vacuum(LVRelStats *vacrelstats, Oid relid, BlockNumber nblocks,
 	return lps;
 }
 
-/*
- * Initialize variables for shared index statistics, set NULL bitmap and
- * the struct size of each indexes.  Also this function sets the number of
- * indexes that do not support parallel index vacuuming and that use
- * maintenance_work_mem.  Since currently we don't support parallel vacuum
- * for autovacuum we don't need to care about autovacuum_work_mem.
- */
-static void
-prepare_index_statistics(LVShared *lvshared, Relation *Irel, int nindexes,
-						 int nworkers)
-{
-	char *p = (char *) GetSharedIndStats(lvshared);
-	int nindexes_mwm = 0;
-	int i;
-
-	Assert(!IsAutoVacuumWorkerProcess());
-
-	for (i = 0; i < nindexes; i++)
-	{
-		LVSharedIndStats *indstats;
-
-		if (Irel[i]->rd_indam->amparallelvacuumoptions ==
-			VACUUM_OPTION_NO_PARALLEL)
-		{
-			/* Set NULL as this index does not support parallel vacuum */
-			lvshared->bitmap[i >> 3] |= 0 << (i & 0x07);
-			continue;
-		}
-
-		if (Irel[i]->rd_indam->amusemaintenanceworkmem)
-			nindexes_mwm++;
-
-		/* Set the size for index statistics */
-		indstats = (LVSharedIndStats *) p;
-		lvshared->bitmap[i >> 3] |= 1 << (i & 0x07);
-		indstats->size = index_parallelvacuum_estimate(Irel[i]);
-
-		p += SizeOfSharedIndStats(indstats);
-	}
-
-	/* Compute the new maitenance_work_mem value for index vacuuming */
-	lvshared->maintenance_work_mem_worker =
-		(nindexes_mwm > 0) ?
-		maintenance_work_mem / Min(nworkers, nindexes_mwm) :
-		maintenance_work_mem;
-}
-
 /*
  * Destroy the parallel context, and end parallel mode.
  *
-- 
2.16.2.windows.1

