From 68f7b19a479319014a6ba39219fed524fc45ad34 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=B8=80=E6=8C=83?= <yizhi.fzh@alibaba-inc.com>
Date: Sun, 18 Apr 2021 22:02:54 +0800
Subject: [PATCH v1] Currently the set_append_rel_size doesn't consider the
 init partition

prune, so the estimated size may sometimes be wrong at a large scale.
In this patch I set rows = parentrel->tuples * clause selectivity.
In this case we can lose some accuracy when initial partition pruning
doesn't happen at all, but generally I think it would be OK.

Another strategy is that we should check whether initial partition
pruning can happen. If we are sure about that, we adopt the above way;
otherwise we can still use the local stats strategy.
---
 src/backend/optimizer/path/allpaths.c         | 13 ++-
 src/backend/optimizer/util/plancat.c          | 12 +--
 src/test/regress/expected/partition_join.out  | 14 +--
 src/test/regress/expected/select_parallel.out | 92 +++++++++----------
 4 files changed, 68 insertions(+), 63 deletions(-)

diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index edba5e49a8..31698370af 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -1178,7 +1178,15 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
 		int			i;
 
 		Assert(parent_rows > 0);
-		rel->rows = parent_rows;
+		if (rel->tuples > 0)
+			rel->rows = clamp_row_est(rel->tuples * clauselist_selectivity(root,
+															 rel->baserestrictinfo,
+															 rel->relid,
+															 JOIN_INNER,
+																		   NULL));
+		else
+			rel->rows = parent_rows;
+
 		rel->reltarget->width = rint(parent_size / parent_rows);
 		for (i = 0; i < nattrs; i++)
 			rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);
@@ -1187,7 +1195,8 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
 		 * Set "raw tuples" count equal to "rows" for the appendrel; needed
 		 * because some places assume rel->tuples is valid for any baserel.
 		 */
-		rel->tuples = parent_rows;
+		if (rel->tuples == 0)
+			rel->tuples = parent_rows;
 
 		/*
 		 * Note that we leave rel->pages as zero; this is important to avoid
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 345c7425f6..2aa2da0c66 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -143,13 +143,10 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
 		palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
 
 	/*
-	 * Estimate relation size --- unless it's an inheritance parent, in which
-	 * case the size we want is not the rel's own size but the size of its
-	 * inheritance tree.  That will be computed in set_append_rel_size().
+	 * Estimate relation size.
 	 */
-	if (!inhparent)
-		estimate_rel_size(relation, rel->attr_widths - rel->min_attr,
-						  &rel->pages, &rel->tuples, &rel->allvisfrac);
+	estimate_rel_size(relation, rel->attr_widths - rel->min_attr,
+					  &rel->pages, &rel->tuples, &rel->allvisfrac);
 
 	/* Retrieve the parallel_workers reloption, or -1 if not set. */
 	rel->rel_parallel_workers = RelationGetParallelWorkers(relation, -1);
@@ -1079,6 +1076,9 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
 			*tuples = rel->rd_rel->reltuples;
 			*allvisfrac = 0;
 			break;
+	    case RELKIND_PARTITIONED_TABLE:
+			*tuples = rel->rd_rel->reltuples;
+			break;
 		default:
 			/* else it has no disk storage; probably shouldn't get here? */
 			*pages = 0;
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index 27f7525b3e..d8243ed6e6 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -2120,17 +2120,17 @@ SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 JOIN prt2_n t2 ON (t1.c = t2.c) JOI
                         QUERY PLAN                        
 ----------------------------------------------------------
  Hash Join
-   Hash Cond: (t2.c = (t1.c)::text)
+   Hash Cond: (t3.c = (t1.c)::text)
    ->  Append
-         ->  Seq Scan on prt2_n_p1 t2_1
-         ->  Seq Scan on prt2_n_p2 t2_2
+         ->  Seq Scan on plt1_p1 t3_1
+         ->  Seq Scan on plt1_p2 t3_2
+         ->  Seq Scan on plt1_p3 t3_3
    ->  Hash
          ->  Hash Join
-               Hash Cond: (t3.c = (t1.c)::text)
+               Hash Cond: (t2.c = (t1.c)::text)
                ->  Append
-                     ->  Seq Scan on plt1_p1 t3_1
-                     ->  Seq Scan on plt1_p2 t3_2
-                     ->  Seq Scan on plt1_p3 t3_3
+                     ->  Seq Scan on prt2_n_p1 t2_1
+                     ->  Seq Scan on prt2_n_p2 t2_2
                ->  Hash
                      ->  Append
                            ->  Seq Scan on prt1_n_p1 t1_1
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 05ebcb284a..599e0399cc 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -12,20 +12,19 @@ set max_parallel_workers_per_gather=4;
 -- Parallel Append with partial-subplans
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                          QUERY PLAN                          
---------------------------------------------------------------
- Finalize Aggregate
+                       QUERY PLAN                       
+--------------------------------------------------------
+ Aggregate
    ->  Gather
          Workers Planned: 3
-         ->  Partial Aggregate
-               ->  Parallel Append
-                     ->  Parallel Seq Scan on d_star a_star_4
-                     ->  Parallel Seq Scan on f_star a_star_6
-                     ->  Parallel Seq Scan on e_star a_star_5
-                     ->  Parallel Seq Scan on b_star a_star_2
-                     ->  Parallel Seq Scan on c_star a_star_3
-                     ->  Parallel Seq Scan on a_star a_star_1
-(11 rows)
+         ->  Parallel Append
+               ->  Parallel Seq Scan on d_star a_star_4
+               ->  Parallel Seq Scan on f_star a_star_6
+               ->  Parallel Seq Scan on e_star a_star_5
+               ->  Parallel Seq Scan on b_star a_star_2
+               ->  Parallel Seq Scan on c_star a_star_3
+               ->  Parallel Seq Scan on a_star a_star_1
+(10 rows)
 
 select round(avg(aa)), sum(aa) from a_star a1;
  round | sum 
@@ -38,20 +37,19 @@ alter table c_star set (parallel_workers = 0);
 alter table d_star set (parallel_workers = 0);
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                          QUERY PLAN                          
---------------------------------------------------------------
- Finalize Aggregate
+                       QUERY PLAN                       
+--------------------------------------------------------
+ Aggregate
    ->  Gather
          Workers Planned: 3
-         ->  Partial Aggregate
-               ->  Parallel Append
-                     ->  Seq Scan on d_star a_star_4
-                     ->  Seq Scan on c_star a_star_3
-                     ->  Parallel Seq Scan on f_star a_star_6
-                     ->  Parallel Seq Scan on e_star a_star_5
-                     ->  Parallel Seq Scan on b_star a_star_2
-                     ->  Parallel Seq Scan on a_star a_star_1
-(11 rows)
+         ->  Parallel Append
+               ->  Seq Scan on d_star a_star_4
+               ->  Seq Scan on c_star a_star_3
+               ->  Parallel Seq Scan on f_star a_star_6
+               ->  Parallel Seq Scan on e_star a_star_5
+               ->  Parallel Seq Scan on b_star a_star_2
+               ->  Parallel Seq Scan on a_star a_star_1
+(10 rows)
 
 select round(avg(aa)), sum(aa) from a_star a2;
  round | sum 
@@ -66,20 +64,19 @@ alter table e_star set (parallel_workers = 0);
 alter table f_star set (parallel_workers = 0);
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                     QUERY PLAN                      
------------------------------------------------------
- Finalize Aggregate
+                  QUERY PLAN                   
+-----------------------------------------------
+ Aggregate
    ->  Gather
          Workers Planned: 3
-         ->  Partial Aggregate
-               ->  Parallel Append
-                     ->  Seq Scan on d_star a_star_4
-                     ->  Seq Scan on f_star a_star_6
-                     ->  Seq Scan on e_star a_star_5
-                     ->  Seq Scan on b_star a_star_2
-                     ->  Seq Scan on c_star a_star_3
-                     ->  Seq Scan on a_star a_star_1
-(11 rows)
+         ->  Parallel Append
+               ->  Seq Scan on d_star a_star_4
+               ->  Seq Scan on f_star a_star_6
+               ->  Seq Scan on e_star a_star_5
+               ->  Seq Scan on b_star a_star_2
+               ->  Seq Scan on c_star a_star_3
+               ->  Seq Scan on a_star a_star_1
+(10 rows)
 
 select round(avg(aa)), sum(aa) from a_star a3;
  round | sum 
@@ -97,20 +94,19 @@ alter table f_star reset (parallel_workers);
 set enable_parallel_append to off;
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                          QUERY PLAN                          
---------------------------------------------------------------
- Finalize Aggregate
+                       QUERY PLAN                       
+--------------------------------------------------------
+ Aggregate
    ->  Gather
          Workers Planned: 1
-         ->  Partial Aggregate
-               ->  Append
-                     ->  Parallel Seq Scan on a_star a_star_1
-                     ->  Parallel Seq Scan on b_star a_star_2
-                     ->  Parallel Seq Scan on c_star a_star_3
-                     ->  Parallel Seq Scan on d_star a_star_4
-                     ->  Parallel Seq Scan on e_star a_star_5
-                     ->  Parallel Seq Scan on f_star a_star_6
-(11 rows)
+         ->  Append
+               ->  Parallel Seq Scan on a_star a_star_1
+               ->  Parallel Seq Scan on b_star a_star_2
+               ->  Parallel Seq Scan on c_star a_star_3
+               ->  Parallel Seq Scan on d_star a_star_4
+               ->  Parallel Seq Scan on e_star a_star_5
+               ->  Parallel Seq Scan on f_star a_star_6
+(10 rows)
 
 select round(avg(aa)), sum(aa) from a_star a4;
  round | sum 
-- 
2.21.0

