commit eba1215ae9f06fb5ac94a9906cbd8d02c8481d5c
Author: Alexander Korotkov <a.korotkov@postgrespro.ru>
Date:   Mon May 7 19:13:19 2018 +0300

    Documentation and comment improvements for 857f9c36

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index eabe2a9235..785ecf922a 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -1893,15 +1893,35 @@ include_dir 'conf.d'
       </term>
       <listitem>
        <para>
-        When no tuples were deleted from the heap, B-tree indexes might still
-        be scanned during <command>VACUUM</command> cleanup stage by two
-        reasons.  The first reason is that B-tree index contains deleted pages
-        which can be recycled during cleanup.  The second reason is that B-tree
-        index statistics is stalled.  The criterion of stalled index statistics
-        is number of inserted tuples since previous statistics collection
-        is greater than <varname>vacuum_cleanup_index_scale_factor</varname>
-        fraction of total number of heap tuples.
+        When no tuples were deleted from the heap, B-tree indexes are still
+        scanned during <command>VACUUM</command> cleanup stage unless two
+        conditions are met: the index contains no deleted pages which can be
+        recycled during cleanup; and the index statistics are not stale.
+        In order to detect stale index statistics, the number of total heap
+        tuples during previous statistics collection is memorized in the index
+        meta-page.  Once the number of inserted tuples since previous
+        statistics collection is more than
+        <varname>vacuum_cleanup_index_scale_factor</varname> fraction of
+        the number of heap tuples memorized in the meta-page, index statistics
+        are considered stale.  Note that the number of heap tuples is written
+        to the meta-page at the first time when no dead tuples are found
+        during a <command>VACUUM</command> cycle.  Thus, skipping the B-tree
+        index scan during the cleanup stage is only possible in the second and
+        subsequent <command>VACUUM</command> cycles that detect no dead tuples.
        </para>
+
+       <para>
+        A zero value of <varname>vacuum_cleanup_index_scale_factor</varname>
+        means that index scans during <command>VACUUM</command> cleanup are
+        never skipped.  The default value is 0.1; the maximum value is 100.
+       </para>
+
+       <para>
+        Currently, <varname>vacuum_cleanup_index_scale_factor</varname>
+        influences only B-tree indexes, but in future versions it might be
+        applied to other index access methods too.
+       </para>
+
       </listitem>
      </varlistentry>
      </variablelist>
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 3bcc56e9d2..22b4a7578f 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -189,7 +189,7 @@ _bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact,
 	if (metad->btm_version < BTREE_VERSION)
 		_bt_upgrademetapage(metapg);
 
-	/* update cleanup-related infromation */
+	/* update cleanup-related information */
 	metad->btm_oldest_btpo_xact = oldestBtpoXact;
 	metad->btm_last_cleanup_num_heap_tuples = numHeapTuples;
 	MarkBufferDirty(metabuf);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index d894ba0374..27a3032e42 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -818,10 +818,11 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 		float8		cleanup_scale_factor;
 
 		/*
-		 * If table receives large enough amount of insertions and no cleanup
-		 * was performed, then index might appear to have stalled statistics.
-		 * In order to evade that, we perform cleanup when table receives
-		 * vacuum_cleanup_index_scale_factor fractions of insertions.
+		 * If table receives enough insertions and no cleanup was performed,
+		 * then the index would appear to have stale statistics.  If scale factor
+		 * is set, we avoid that by performing cleanup if the number of
+		 * inserted tuples exceeds vacuum_cleanup_index_scale_factor fraction
+		 * of original tuples count.
 		 */
 		relopts = (StdRdOptions *) info->index->rd_options;
 		cleanup_scale_factor = (relopts &&
@@ -870,8 +871,8 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 					 &oldestBtpoXact);
 
 		/*
-		 * Update cleanup-related information in metapage. These information
-		 * is used only for cleanup but keeping up them to date can avoid
+		 * Update cleanup-related information in metapage. This information
+		 * is used only for cleanup but keeping them up to date can avoid
 		 * unnecessary cleanup even after bulkdelete.
 		 */
 		_bt_update_meta_cleanup_info(info->index, oldestBtpoXact,
@@ -899,8 +900,8 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 	 * If btbulkdelete was called, we need not do anything, just return the
 	 * stats from the latest btbulkdelete call.  If it wasn't called, we might
 	 * still need to do a pass over the index, to recycle any newly-recyclable
-	 * pages and to obtain index statistics.  _bt_vacuum_needs_cleanup checks
-	 * is there are newly-recyclable or stalled index statistics.
+	 * pages or to obtain index statistics.  _bt_vacuum_needs_cleanup
+	 * determines if either is needed.
 	 *
 	 * Since we aren't going to actually delete any leaf items, there's no
 	 * need to go through all the vacuum-cycle-ID pushups.
