diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index 136b441e0c..02ebb718d7 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -1790,10 +1790,6 @@ postgres   27093  0.0  0.0  30096  2752 ?        Ss   11:34   0:00 postgres: ser
       <entry><literal>ParallelFinish</literal></entry>
       <entry>Waiting for parallel workers to finish computing.</entry>
      </row>
-     <row>
-      <entry><literal>ParallelVacuumFinish</literal></entry>
-      <entry>Waiting for parallel vacuum workers to finish index vacuum.</entry>
-     </row>
      <row>
       <entry><literal>ProcArrayGroupUpdate</literal></entry>
       <entry>Waiting for the group leader to clear the transaction ID at
@@ -7193,25 +7189,19 @@ FROM pg_stat_get_backend_idset() AS backendid;
        <structfield>indexes_total</structfield> <type>bigint</type>
       </para>
       <para>
-       Number of indexes that will be vacuumed or cleaned up. This value will be
-       <literal>0</literal> if the phase is not <literal>vacuuming indexes</literal>
-       or <literal>cleaning up indexes</literal>, <literal>INDEX_CLEANUP</literal>
-       is set to <literal>OFF</literal>, index vacuum is skipped due to very
-       few dead tuples in the table, or vacuum failsafe is triggered.
-       See <xref linkend="guc-vacuum-failsafe-age"/>
-       for more on vacuum failsafe.
+       Total number of indexes that will be vacuumed or cleaned up. This number is
+       reported as of the beginning of the <literal>vacuuming indexes</literal> phase
+       or the <literal>cleaning up indexes</literal> phase.
       </para></entry>
      </row>
 
      <row>
       <entry role="catalog_table_entry"><para role="column_definition">
-       <structfield>indexes_completed</structfield> <type>bigint</type>
+       <structfield>indexes_processed</structfield> <type>bigint</type>
       </para>
       <para>
-       Number of indexes vacuumed in the current vacuum cycle when the
-       phase is <literal>vacuuming indexes</literal>, or the number
-       of indexes cleaned up during the <literal>cleaning up indexes</literal>
-       phase.
+       Number of indexes processed. This counter only advances when the phase is
+       <literal>vacuuming indexes</literal> or <literal>cleaning up indexes</literal>.
       </para></entry>
      </row>
     </tbody>
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 28bac92591..8ede3c1956 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -2322,7 +2322,7 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
 	};
 	const int progress_end_index[] = {
 		PROGRESS_VACUUM_INDEX_TOTAL,
-		PROGRESS_VACUUM_INDEX_COMPLETED,
+		PROGRESS_VACUUM_INDEX_PROCESSED,
 		PROGRESS_VACUUM_NUM_INDEX_VACUUMS
 	};
 	int64       progress_start_val[2];
@@ -2340,8 +2340,8 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
 	}
 
 	/*
-	 * Report that we are now vacuuming indexes
-	 * and the number of indexes to vacuum.
+	 * Report that we are now vacuuming indexes and the number of indexes
+	 * to vacuum.
 	 */
 	progress_start_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_INDEX;
 	progress_start_val[1] = vacrel->nindexes;
@@ -2358,8 +2358,8 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
 														  old_live_tuples,
 														  vacrel);
 
-			/* Done vacuuming an index -- increment the indexes completed */
-			pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_COMPLETED,
+			/* Report the number of indexes vacuumed */
+			pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_PROCESSED,
 										 idx + 1);
 
 			if (lazy_check_wraparound_failsafe(vacrel))
@@ -2396,9 +2396,8 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
 	Assert(allindexes || vacrel->failsafe_active);
 
 	/*
-	 * Reset and report the total indexes to vacuum and the number of
-	 * indexes vacuumed.
-	 * Also, increase and report the number of index scans completed.
+	 * Increase and report the number of index scans. Also, reset the
+	 * progress counters.
 	 *
 	 * We deliberately include the case where we started a round of bulk
 	 * deletes that we weren't able to finish due to the failsafe triggering.
@@ -2646,20 +2645,18 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel)
 	{
 		const int   progress_index[] = {
 			PROGRESS_VACUUM_INDEX_TOTAL,
-			PROGRESS_VACUUM_INDEX_COMPLETED
+			PROGRESS_VACUUM_INDEX_PROCESSED
 		};
 		int64       progress_val[2] = {0, 0};
 
 		vacrel->failsafe_active = true;
 
-		/*
-		 * Disable index vacuuming, index cleanup, and heap rel truncation.
-		 *
-		 * Also, report that we are no longer tracking index vacuum/cleanup.
-		 */
+		/* Disable index vacuuming, index cleanup, and heap rel truncation */
 		vacrel->do_index_vacuuming = false;
 		vacrel->do_index_cleanup = false;
 		vacrel->do_rel_truncate = false;
+
+		/* Reset the progress counters */
 		pgstat_progress_update_multi_param(2, progress_index, progress_val);
 
 		ereport(WARNING,
@@ -2694,17 +2691,17 @@ lazy_cleanup_all_indexes(LVRelState *vacrel)
 	};
 	const int progress_end_index[] = {
 		PROGRESS_VACUUM_INDEX_TOTAL,
-		PROGRESS_VACUUM_INDEX_COMPLETED
+		PROGRESS_VACUUM_INDEX_PROCESSED
 	};
 	int64       progress_start_val[2];
-	int64       progress_end_val[2];
+	int64       progress_end_val[2] = {0, 0};
 
 	Assert(vacrel->do_index_cleanup);
 	Assert(vacrel->nindexes > 0);
 
 	/*
-	 * Report that we are now cleaning up indexes
-	 * and the number of indexes to cleanup.
+	 * Report that we are now cleaning up indexes and the number of indexes
+	 * to clean up.
 	 */
 	progress_start_val[0] = PROGRESS_VACUUM_PHASE_INDEX_CLEANUP;
 	progress_start_val[1] = vacrel->nindexes;
@@ -2721,8 +2718,8 @@ lazy_cleanup_all_indexes(LVRelState *vacrel)
 				lazy_cleanup_one_index(indrel, istat, reltuples,
 									   estimated_count, vacrel);
 
-			/* Done cleaning an index -- increment the indexes completed */
-			pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_COMPLETED,
+			/* Report the number of indexes cleaned up */
+			pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_PROCESSED,
 										 idx + 1);
 		}
 	}
@@ -2734,12 +2731,7 @@ lazy_cleanup_all_indexes(LVRelState *vacrel)
 											estimated_count);
 	}
 
-	/*
-	 * Reset and report the total number of indexes to cleanup
-	 * and the number of indexes cleaned.
-	 */
-	progress_end_val[0] = 0;
-	progress_end_val[1] = 0;
+	/* Reset the progress counters */
 	pgstat_progress_update_multi_param(2, progress_end_index, progress_end_val);
 }
 
diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index 66fd3a0730..b0c406fe7a 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -185,8 +185,6 @@ CreateParallelContext(const char *library_name, const char *function_name,
 	pcxt->library_name = pstrdup(library_name);
 	pcxt->function_name = pstrdup(function_name);
 	pcxt->error_context_stack = error_context_stack;
-	pcxt->parallel_progress_callback = NULL;
-	pcxt->parallel_progress_callback_arg = NULL;
 	shm_toc_initialize_estimator(&pcxt->estimator);
 	dlist_push_head(&pcxt_list, &pcxt->node);
 
@@ -1203,13 +1201,8 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
 
 		case 'P':				/* Parallel progress reporting */
 			{
-				/*
-				 * A Leader process that receives this message
-				 * must be ready to update progress.
-				 */
+				/* Call the progress reporting callback */
 				Assert(pcxt->parallel_progress_callback);
-
-				/* Report progress */
 				pcxt->parallel_progress_callback(pcxt->parallel_progress_callback_arg);
 
 				break;
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index fc642bc25e..5886ee8b7c 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -1181,7 +1181,7 @@ CREATE VIEW pg_stat_progress_vacuum AS
         S.param2 AS heap_blks_total, S.param3 AS heap_blks_scanned,
         S.param4 AS heap_blks_vacuumed, S.param5 AS index_vacuum_count,
         S.param6 AS max_dead_tuples, S.param7 AS num_dead_tuples,
-        S.param8 AS indexes_total, S.param9 AS indexes_completed
+        S.param8 AS indexes_total, S.param9 AS indexes_processed
     FROM pg_stat_get_progress_info('VACUUM') AS S
         LEFT JOIN pg_database D ON S.datid = D.oid;
 
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 9e5a300ba4..57dc5fd8f0 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -107,14 +107,11 @@ typedef struct PVShared
 	pg_atomic_uint32 idx;
 
 	/*
-	 * Counter for vacuuming and cleanup progress reporting.
-	 * This value is used to report index vacuum/cleanup progress
-	 * in parallel_vacuum_progress_report. We keep this
-	 * counter to avoid having to loop through
-	 * ParallelVacuumState->indstats to determine the number
-	 * of indexes completed.
+	 * Number of indexes processed in a parallel index bulk-deletion or a
+	 * parallel index cleanup. This counter is used to report the progress
+	 * information.
 	 */
-	pg_atomic_uint32 nindexes_completed;
+	pg_atomic_uint32 nindexes_processed;
 
 } PVShared;
 
@@ -284,10 +281,13 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
 	pcxt = CreateParallelContext("postgres", "parallel_vacuum_main",
 								 parallel_workers);
 	Assert(pcxt->nworkers > 0);
-	pvs->pcxt = pcxt;
+
+	/* Setup callback for updating the progress information */
 	pcxt->parallel_progress_callback = parallel_vacuum_update_progress;
 	pcxt->parallel_progress_callback_arg = pvs;
 
+	pvs->pcxt = pcxt;
+
 	/* Estimate size for index vacuum stats -- PARALLEL_VACUUM_KEY_INDEX_STATS */
 	est_indstats_len = mul_size(sizeof(PVIndStats), nindexes);
 	shm_toc_estimate_chunk(&pcxt->estimator, est_indstats_len);
@@ -379,7 +379,7 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
 	pg_atomic_init_u32(&(shared->cost_balance), 0);
 	pg_atomic_init_u32(&(shared->active_nworkers), 0);
 	pg_atomic_init_u32(&(shared->idx), 0);
-	pg_atomic_init_u32(&(shared->nindexes_completed), 0);
+	pg_atomic_init_u32(&(shared->nindexes_processed), 0);
 
 	shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_SHARED, shared);
 	pvs->shared = shared;
@@ -636,7 +636,7 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan
 
 	/* Reset the parallel index processing and progress counters */
 	pg_atomic_write_u32(&(pvs->shared->idx), 0);
-	pg_atomic_write_u32(&(pvs->shared->nindexes_completed), 0);
+	pg_atomic_write_u32(&(pvs->shared->nindexes_processed), 0);
 
 	/* Setup the shared cost-based vacuum delay and launch workers */
 	if (nworkers > 0)
@@ -907,16 +907,10 @@ parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel,
 	pvs->indname = NULL;
 
 	/*
-	 * Update index vacuum progress.
-	 *
-	 * When a parallel worker completes an
-	 * index vacuum, it sends a protocol message
-	 * to notify the leader. The leader then
-	 * updates the progress. See HandleParallelMessage().
-	 *
-	 * When a leader performs the index vacuum,
-	 * it can update the progress directly.
+	 * Update the index vacuum progress information. Since only the leader
+	 * updates the progress, parallel workers notify the leader to do so.
 	 */
+	pg_atomic_add_fetch_u32(&(pvs->shared->nindexes_processed), 1);
 	if (IsParallelWorker())
 		pq_putmessage('P', NULL, 0);
 	else
@@ -1107,21 +1101,17 @@ parallel_vacuum_error_callback(void *arg)
 }
 
 /*
- * Read pvs->shared->nindexes_completed and report the number of indexes
- * vacuumed so far.
- *
- * Note: This function should be called by the leader process only,
- * and it's up to the caller to ensure this.
+ * Update the number of indexes processed so far in the current index bulk-deletion
+ * or index cleanup.
  */
 void
 parallel_vacuum_update_progress(void *arg)
 {
-	ParallelVacuumState *pvs = (ParallelVacuumState *)arg;
+	ParallelVacuumState *pvs = (ParallelVacuumState *) arg;
 
 	Assert(!IsParallelWorker());
-	Assert(pvs->pcxt->parallel_progress_callback_arg);
+	Assert(pvs);
 
-	if (pvs)
-		pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_COMPLETED,
-									 pg_atomic_add_fetch_u32(&(pvs->shared->nindexes_completed), 1));
-}
\ No newline at end of file
+	pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_PROCESSED,
+								 pg_atomic_read_u32(&(pvs->shared->nindexes_processed)));
+}
diff --git a/src/include/access/parallel.h b/src/include/access/parallel.h
index 7ddc71dae2..f621d51c0d 100644
--- a/src/include/access/parallel.h
+++ b/src/include/access/parallel.h
@@ -20,10 +20,8 @@
 #include "storage/shm_mq.h"
 #include "storage/shm_toc.h"
 
-/* progress callback definition */
-typedef void (*ParallelProgressCallback) (void *parallel_progress_callback_state);
-
 typedef void (*parallel_worker_main_type) (dsm_segment *seg, shm_toc *toc);
+typedef void (*parallel_progress_callback_type) (void *arg);
 
 typedef struct ParallelWorkerInfo
 {
@@ -49,7 +47,7 @@ typedef struct ParallelContext
 	ParallelWorkerInfo *worker;
 	int			nknown_attached_workers;
 	bool	   *known_attached_workers;
-	ParallelProgressCallback parallel_progress_callback;
+	parallel_progress_callback_type parallel_progress_callback;
 	void		*parallel_progress_callback_arg;
 } ParallelContext;
 
diff --git a/src/include/commands/progress.h b/src/include/commands/progress.h
index 6b8b609a4f..23c38f2d0e 100644
--- a/src/include/commands/progress.h
+++ b/src/include/commands/progress.h
@@ -26,7 +26,7 @@
 #define PROGRESS_VACUUM_MAX_DEAD_TUPLES			5
 #define PROGRESS_VACUUM_NUM_DEAD_TUPLES			6
 #define PROGRESS_VACUUM_INDEX_TOTAL             7
-#define PROGRESS_VACUUM_INDEX_COMPLETED         8
+#define PROGRESS_VACUUM_INDEX_PROCESSED         8
 
 /* Phases of vacuum (as advertised via PROGRESS_VACUUM_PHASE) */
 #define PROGRESS_VACUUM_PHASE_SCAN_HEAP			1
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index c1b32dfa20..bca4da7036 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -2035,7 +2035,7 @@ pg_stat_progress_vacuum| SELECT s.pid,
     s.param6 AS max_dead_tuples,
     s.param7 AS num_dead_tuples,
     s.param8 AS indexes_total,
-    s.param9 AS indexes_completed
+    s.param9 AS indexes_processed
    FROM (pg_stat_get_progress_info('VACUUM'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20)
      LEFT JOIN pg_database d ON ((s.datid = d.oid)));
 pg_stat_recovery_prefetch| SELECT stats_reset,
