The 2 recent patches, plus the original abstraction patch can be simply
cherry picked for gcc 11.
I have applied the 3 patches to the current gcc 11 release, and it
bootstrapped with no regressions on x86_64-pc-linux-gnu.
Andrew
>From 58289b678064c4b4e1efeb806f78c42d86ae31a4 Mon Sep 17 00:00:00 2001
From: Andrew MacLeod <amacl...@redhat.com>
Date: Fri, 7 May 2021 12:03:01 -0400
Subject: [PATCH 1/4] Clean up and virtualize the on-entry cache interface.
Cleanup/Virtualize the ssa_block_range class, and implement the current
vector approach as a derived class.
Allow memory allocation from the irange allocator obstack for easy freeing.
* gimple-range-cache.cc (ssa_block_ranges): Virtualize.
(sbr_vector): Renamed from ssa_block_cache.
(sbr_vector::sbr_vector): Allocate from obstack and initialize.
(ssa_block_ranges::~ssa_block_ranges): Remove.
(sbr_vector::set_bb_range): Use varying and undefined cached values.
(ssa_block_ranges::set_bb_varying): Remove.
(sbr_vector::get_bb_range): Adjust assert.
(sbr_vector::bb_range_p): Adjust assert.
(~block_range_cache): No freeing loop required.
(block_range_cache::get_block_ranges): Remove.
(block_range_cache::set_bb_range): Inline get_block_ranges.
(block_range_cache::set_bb_varying): Remove.
* gimple-range-cache.h (set_bb_varying): Remove prototype.
* value-range.h (irange_allocator::get_memory): New.
(cherry picked from commit 14b0f37a644d7b59e1737fb275ec4fff044972a8)
---
gcc/gimple-range-cache.cc | 165 ++++++++++++++++----------------------
gcc/gimple-range-cache.h | 1 -
gcc/value-range.h | 9 +++
3 files changed, 80 insertions(+), 95 deletions(-)
diff --git a/gcc/gimple-range-cache.cc b/gcc/gimple-range-cache.cc
index 38e4fe1c7c0..2be83d698ab 100644
--- a/gcc/gimple-range-cache.cc
+++ b/gcc/gimple-range-cache.cc
@@ -107,29 +107,53 @@ non_null_ref::process_name (tree name)
// -------------------------------------------------------------------------
-// This class implements a cache of ranges indexed by basic block. It
-// represents all that is known about an SSA_NAME on entry to each
-// block. It caches a range-for-type varying range so it doesn't need
-// to be reformed all the time. If a range is ever always associated
-// with a type, we can use that instead. Whenever varying is being
-// set for a block, the cache simply points to this cached one rather
-// than create a new one each time.
+// This class represents the API into a cache of ranges for an SSA_NAME.
+// Routines must be implemented to set, get, and query if a value is set.
class ssa_block_ranges
{
public:
- ssa_block_ranges (tree t, irange_allocator *allocator);
- ~ssa_block_ranges ();
-
- void set_bb_range (const basic_block bb, const irange &r);
- void set_bb_varying (const basic_block bb);
- bool get_bb_range (irange &r, const basic_block bb);
- bool bb_range_p (const basic_block bb);
+ virtual void set_bb_range (const basic_block bb, const irange &r) = 0;
+ virtual bool get_bb_range (irange &r, const basic_block bb) = 0;
+ virtual bool bb_range_p (const basic_block bb) = 0;
void dump(FILE *f);
-private:
- vec<irange *> m_tab;
- irange *m_type_range;
+};
+
+// Print the list of known ranges for file F in a nice format.
+
+void
+ssa_block_ranges::dump (FILE *f)
+{
+ basic_block bb;
+ int_range_max r;
+
+ FOR_EACH_BB_FN (bb, cfun)
+ if (get_bb_range (r, bb))
+ {
+ fprintf (f, "BB%d -> ", bb->index);
+ r.dump (f);
+ fprintf (f, "\n");
+ }
+}
+
+// This class implements the range cache as a linear vector, indexed by BB.
+// It caches a varying and undefined range which are used instead of
+// allocating new ones each time.
+
+class sbr_vector : public ssa_block_ranges
+{
+public:
+ sbr_vector (tree t, irange_allocator *allocator);
+
+ virtual void set_bb_range (const basic_block bb, const irange &r) OVERRIDE;
+ virtual bool get_bb_range (irange &r, const basic_block bb) OVERRIDE;
+ virtual bool bb_range_p (const basic_block bb) OVERRIDE;
+protected:
+ irange **m_tab; // Non growing vector.
+ int m_tab_size;
+ int_range<2> m_varying;
+ int_range<2> m_undefined;
tree m_type;
irange_allocator *m_irange_allocator;
};
@@ -137,55 +161,43 @@ private:
// Initialize a block cache for an ssa_name of type T.
-ssa_block_ranges::ssa_block_ranges (tree t, irange_allocator *allocator)
+sbr_vector::sbr_vector (tree t, irange_allocator *allocator)
{
gcc_checking_assert (TYPE_P (t));
m_type = t;
m_irange_allocator = allocator;
-
- m_tab.create (0);
- m_tab.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ m_tab_size = last_basic_block_for_fn (cfun) + 1;
+ m_tab = (irange **)allocator->get_memory (m_tab_size * sizeof (irange *));
+ memset (m_tab, 0, m_tab_size * sizeof (irange *));
// Create the cached type range.
- m_type_range = m_irange_allocator->allocate (2);
- m_type_range->set_varying (t);
-
- m_tab[ENTRY_BLOCK_PTR_FOR_FN (cfun)->index] = m_type_range;
-}
-
-// Destruct block range.
-
-ssa_block_ranges::~ssa_block_ranges ()
-{
- m_tab.release ();
+ m_varying.set_varying (t);
+ m_undefined.set_undefined ();
}
// Set the range for block BB to be R.
void
-ssa_block_ranges::set_bb_range (const basic_block bb, const irange &r)
+sbr_vector::set_bb_range (const basic_block bb, const irange &r)
{
- gcc_checking_assert ((unsigned) bb->index < m_tab.length ());
- irange *m = m_irange_allocator->allocate (r);
+ irange *m;
+ gcc_checking_assert (bb->index < m_tab_size);
+ if (r.varying_p ())
+ m = &m_varying;
+ else if (r.undefined_p ())
+ m = &m_undefined;
+ else
+ m = m_irange_allocator->allocate (r);
m_tab[bb->index] = m;
}
-// Set the range for block BB to the range for the type.
-
-void
-ssa_block_ranges::set_bb_varying (const basic_block bb)
-{
- gcc_checking_assert ((unsigned) bb->index < m_tab.length ());
- m_tab[bb->index] = m_type_range;
-}
-
// Return the range associated with block BB in R. Return false if
// there is no range.
bool
-ssa_block_ranges::get_bb_range (irange &r, const basic_block bb)
+sbr_vector::get_bb_range (irange &r, const basic_block bb)
{
- gcc_checking_assert ((unsigned) bb->index < m_tab.length ());
+ gcc_checking_assert (bb->index < m_tab_size);
irange *m = m_tab[bb->index];
if (m)
{
@@ -198,30 +210,12 @@ ssa_block_ranges::get_bb_range (irange &r, const basic_block bb)
// Return true if a range is present.
bool
-ssa_block_ranges::bb_range_p (const basic_block bb)
+sbr_vector::bb_range_p (const basic_block bb)
{
- gcc_checking_assert ((unsigned) bb->index < m_tab.length ());
+ gcc_checking_assert (bb->index < m_tab_size);
return m_tab[bb->index] != NULL;
}
-
-// Print the list of known ranges for file F in a nice format.
-
-void
-ssa_block_ranges::dump (FILE *f)
-{
- basic_block bb;
- int_range_max r;
-
- FOR_EACH_BB_FN (bb, cfun)
- if (get_bb_range (r, bb))
- {
- fprintf (f, "BB%d -> ", bb->index);
- r.dump (f);
- fprintf (f, "\n");
- }
-}
-
// -------------------------------------------------------------------------
// Initialize the block cache.
@@ -237,38 +231,36 @@ block_range_cache::block_range_cache ()
block_range_cache::~block_range_cache ()
{
- unsigned x;
- for (x = 0; x < m_ssa_ranges.length (); ++x)
- {
- if (m_ssa_ranges[x])
- delete m_ssa_ranges[x];
- }
delete m_irange_allocator;
// Release the vector itself.
m_ssa_ranges.release ();
}
-// Return a reference to the ssa_block_cache for NAME. If it has not been
-// accessed yet, allocate it first.
+// Set the range for NAME on entry to block BB to R.
+// If it has not been accessed yet, allocate it first.
-ssa_block_ranges &
-block_range_cache::get_block_ranges (tree name)
+void
+block_range_cache::set_bb_range (tree name, const basic_block bb,
+ const irange &r)
{
unsigned v = SSA_NAME_VERSION (name);
if (v >= m_ssa_ranges.length ())
m_ssa_ranges.safe_grow_cleared (num_ssa_names + 1);
if (!m_ssa_ranges[v])
- m_ssa_ranges[v] = new ssa_block_ranges (TREE_TYPE (name),
+ {
+ void *r = m_irange_allocator->get_memory (sizeof (sbr_vector));
+ m_ssa_ranges[v] = new (r) sbr_vector (TREE_TYPE (name),
m_irange_allocator);
- return *(m_ssa_ranges[v]);
+ }
+ m_ssa_ranges[v]->set_bb_range (bb, r);
}
// Return a pointer to the ssa_block_cache for NAME. If it has not been
// accessed yet, return NULL.
-ssa_block_ranges *
+inline ssa_block_ranges *
block_range_cache::query_block_ranges (tree name)
{
unsigned v = SSA_NAME_VERSION (name);
@@ -277,22 +269,7 @@ block_range_cache::query_block_ranges (tree name)
return m_ssa_ranges[v];
}
-// Set the range for NAME on entry to block BB to R.
-void
-block_range_cache::set_bb_range (tree name, const basic_block bb,
- const irange &r)
-{
- return get_block_ranges (name).set_bb_range (bb, r);
-}
-
-// Set the range for NAME on entry to block BB to varying.
-
-void
-block_range_cache::set_bb_varying (tree name, const basic_block bb)
-{
- return get_block_ranges (name).set_bb_varying (bb);
-}
// Return the range for NAME on entry to BB in R. Return true if there
// is one.
diff --git a/gcc/gimple-range-cache.h b/gcc/gimple-range-cache.h
index 2b36a02654b..bccb5cc2442 100644
--- a/gcc/gimple-range-cache.h
+++ b/gcc/gimple-range-cache.h
@@ -51,7 +51,6 @@ public:
~block_range_cache ();
void set_bb_range (tree name, const basic_block bb, const irange &r);
- void set_bb_varying (tree name, const basic_block bb);
bool get_bb_range (irange &r, tree name, const basic_block bb);
bool bb_range_p (tree name, const basic_block bb);
diff --git a/gcc/value-range.h b/gcc/value-range.h
index bfc54a2473f..54623b97a1f 100644
--- a/gcc/value-range.h
+++ b/gcc/value-range.h
@@ -639,6 +639,7 @@ public:
// Return a copy of SRC with the minimum amount of sub-ranges needed
// to represent it.
irange *allocate (const irange &src);
+ void *get_memory (unsigned num_bytes);
private:
DISABLE_COPY_AND_ASSIGN (irange_allocator);
struct obstack m_obstack;
@@ -656,6 +657,14 @@ irange_allocator::~irange_allocator ()
obstack_free (&m_obstack, NULL);
}
+// Provide a hunk of memory from the obstack
+inline void *
+irange_allocator::get_memory (unsigned num_bytes)
+{
+ void *r = obstack_alloc (&m_obstack, num_bytes);
+ return r;
+}
+
// Return a new range with NUM_PAIRS.
inline irange *
--
2.25.4
>From e65205de9c7302a2a7d903ba8703461009cdacda Mon Sep 17 00:00:00 2001
From: Andrew MacLeod <amacl...@redhat.com>
Date: Mon, 7 Jun 2021 13:12:01 -0400
Subject: [PATCH 2/4] Implement multi-bit aligned accessors for sparse bitmap.
Provide set/get routines to allow sparse bitmaps to be treated as an array
of multiple bit values. Only chunk sizes that are powers of 2 are supported.
* bitmap.c (bitmap_set_aligned_chunk): New.
(bitmap_get_aligned_chunk): New.
(test_aligned_chunk): New.
(bitmap_c_tests): Call test_aligned_chunk.
* bitmap.h (bitmap_set_aligned_chunk, bitmap_get_aligned_chunk): New.
(cherry picked from commit 5ad089a3c946aec655436fa3b0b50d6574b78197)
---
gcc/bitmap.c | 108 +++++++++++++++++++++++++++++++++++++++++++++++++++
gcc/bitmap.h | 7 ++++
2 files changed, 115 insertions(+)
diff --git a/gcc/bitmap.c b/gcc/bitmap.c
index 5a650cdfc1d..b915fdfbb54 100644
--- a/gcc/bitmap.c
+++ b/gcc/bitmap.c
@@ -1004,6 +1004,83 @@ bitmap_bit_p (const_bitmap head, int bit)
return (ptr->bits[word_num] >> bit_num) & 1;
}
+/* Set CHUNK_SIZE bits at a time in bitmap HEAD.
+ Store CHUNK_VALUE starting at bits CHUNK * chunk_size.
+ This is the set routine for viewing bitmap as a multi-bit sparse array. */
+
+void
+bitmap_set_aligned_chunk (bitmap head, unsigned int chunk,
+ unsigned int chunk_size, BITMAP_WORD chunk_value)
+{
+ // Ensure chunk size is a power of 2 and fits in BITMAP_WORD.
+ gcc_checking_assert (pow2p_hwi (chunk_size));
+ gcc_checking_assert (chunk_size < (sizeof (BITMAP_WORD) * CHAR_BIT));
+
+ // Ensure chunk_value is within range of chunk_size bits.
+ BITMAP_WORD max_value = (((BITMAP_WORD) 1) << chunk_size) - 1;
+ gcc_checking_assert (chunk_value <= max_value);
+
+ unsigned bit = chunk * chunk_size;
+ unsigned indx = bit / BITMAP_ELEMENT_ALL_BITS;
+ bitmap_element *ptr;
+ if (!head->tree_form)
+ ptr = bitmap_list_find_element (head, indx);
+ else
+ ptr = bitmap_tree_find_element (head, indx);
+ unsigned word_num = bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
+ unsigned bit_num = bit % BITMAP_WORD_BITS;
+ BITMAP_WORD bit_val = chunk_value << bit_num;
+ BITMAP_WORD mask = ~(max_value << bit_num);
+
+ if (ptr != 0)
+ {
+ ptr->bits[word_num] &= mask;
+ ptr->bits[word_num] |= bit_val;
+ return;
+ }
+
+ ptr = bitmap_element_allocate (head);
+ ptr->indx = bit / BITMAP_ELEMENT_ALL_BITS;
+ ptr->bits[word_num] = bit_val;
+ if (!head->tree_form)
+ bitmap_list_link_element (head, ptr);
+ else
+ bitmap_tree_link_element (head, ptr);
+}
+
+/* This is the get routine for viewing bitmap as a multi-bit sparse array.
+ Return a set of CHUNK_SIZE consecutive bits from HEAD, starting at bit
+ CHUNK * chunk_size. */
+
+BITMAP_WORD
+bitmap_get_aligned_chunk (const_bitmap head, unsigned int chunk,
+ unsigned int chunk_size)
+{
+ // Ensure chunk size is a power of 2, fits in BITMAP_WORD and is in range.
+ gcc_checking_assert (pow2p_hwi (chunk_size));
+ gcc_checking_assert (chunk_size < (sizeof (BITMAP_WORD) * CHAR_BIT));
+
+ BITMAP_WORD max_value = (((BITMAP_WORD) 1) << chunk_size) - 1;
+ unsigned bit = chunk * chunk_size;
+ unsigned int indx = bit / BITMAP_ELEMENT_ALL_BITS;
+ const bitmap_element *ptr;
+ unsigned bit_num;
+ unsigned word_num;
+
+ if (!head->tree_form)
+ ptr = bitmap_list_find_element (const_cast<bitmap> (head), indx);
+ else
+ ptr = bitmap_tree_find_element (const_cast<bitmap> (head), indx);
+ if (ptr == 0)
+ return 0;
+
+ bit_num = bit % BITMAP_WORD_BITS;
+ word_num = bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
+
+ // Return the CHUNK_SIZE bits of the chunk.
+ return (ptr->bits[word_num] >> bit_num) & max_value;
+}
+
#if GCC_VERSION < 3400
/* Table of number of set bits in a character, indexed by value of char. */
static const unsigned char popcount_table[] =
@@ -2857,6 +2934,33 @@ test_bitmap_single_bit_set_p ()
ASSERT_EQ (1066, bitmap_first_set_bit (b));
}
+/* Verify accessing aligned bit chunks works as expected. */
+
+static void
+test_aligned_chunk (unsigned num_bits)
+{
+ bitmap b = bitmap_gc_alloc ();
+ int limit = 1 << num_bits;
+
+ int index = 3;
+ for (int x = 0; x < limit; x++)
+ {
+ bitmap_set_aligned_chunk (b, index, num_bits, (BITMAP_WORD) x);
+ ASSERT_TRUE ((int) bitmap_get_aligned_chunk (b, index, num_bits) == x);
+ ASSERT_TRUE ((int) bitmap_get_aligned_chunk (b, index + 1,
+ num_bits) == 0);
+ ASSERT_TRUE ((int) bitmap_get_aligned_chunk (b, index - 1,
+ num_bits) == 0);
+ index += 3;
+ }
+ index = 3;
+ for (int x = 0; x < limit ; x++)
+ {
+ ASSERT_TRUE ((int) bitmap_get_aligned_chunk (b, index, num_bits) == x);
+ index += 3;
+ }
+}
+
/* Run all of the selftests within this file. */
void
@@ -2867,6 +2971,10 @@ bitmap_c_tests ()
test_clear_bit_in_middle ();
test_copying ();
test_bitmap_single_bit_set_p ();
+ /* Test 2, 4 and 8 bit aligned chunks. */
+ test_aligned_chunk (2);
+ test_aligned_chunk (4);
+ test_aligned_chunk (8);
}
} // namespace selftest
diff --git a/gcc/bitmap.h b/gcc/bitmap.h
index 84632af7009..c9fd8af49d8 100644
--- a/gcc/bitmap.h
+++ b/gcc/bitmap.h
@@ -438,6 +438,13 @@ extern bool bitmap_set_bit (bitmap, int);
/* Return true if a bit is set in a bitmap. */
extern int bitmap_bit_p (const_bitmap, int);
+/* Set and get multiple bit values in a sparse bitmap. This allows a bitmap to
+ function as a sparse array of bit patterns where the patterns are
+ multiples of power of 2. This is more efficient than performing this as
+ multiple individual operations. */
+void bitmap_set_aligned_chunk (bitmap, unsigned int, unsigned int, BITMAP_WORD);
+BITMAP_WORD bitmap_get_aligned_chunk (const_bitmap, unsigned int, unsigned int);
+
/* Debug functions to print a bitmap. */
extern void debug_bitmap (const_bitmap);
extern void debug_bitmap_file (FILE *, const_bitmap);
--
2.25.4
>From ec7a3bd4136639379666923573108146df024a19 Mon Sep 17 00:00:00 2001
From: Andrew MacLeod <amacl...@redhat.com>
Date: Mon, 7 Jun 2021 13:18:55 -0400
Subject: [PATCH 3/4] Implement a sparse bitmap representation for Rangers
on-entry cache.
Use a sparse representation for the on entry cache, and utilize it when
the number of basic blocks in the function exceeds param_evrp_sparse_threshold.
PR tree-optimization/100299
* gimple-range-cache.cc (class sbr_sparse_bitmap): New.
(sbr_sparse_bitmap::sbr_sparse_bitmap): New.
(sbr_sparse_bitmap::bitmap_set_quad): New.
(sbr_sparse_bitmap::bitmap_get_quad): New.
(sbr_sparse_bitmap::set_bb_range): New.
(sbr_sparse_bitmap::get_bb_range): New.
(sbr_sparse_bitmap::bb_range_p): New.
(block_range_cache::block_range_cache): Initialize bitmap obstack.
(block_range_cache::~block_range_cache): Destruct obstack.
(block_range_cache::set_bb_range): Decide when to utilize the
sparse on entry cache.
* gimple-range-cache.h (block_range_cache): Add bitmap obstack.
* params.opt (-param=evrp-sparse-threshold): New.
(cherry picked from commit 9858cd1a6827ee7a928318acb5e86389f79b4012)
---
gcc/gimple-range-cache.cc | 147 +++++++++++++++++++++++++++++++++++++-
gcc/gimple-range-cache.h | 1 +
gcc/params.opt | 4 ++
3 files changed, 149 insertions(+), 3 deletions(-)
diff --git a/gcc/gimple-range-cache.cc b/gcc/gimple-range-cache.cc
index 2be83d698ab..bc4b557b493 100644
--- a/gcc/gimple-range-cache.cc
+++ b/gcc/gimple-range-cache.cc
@@ -216,12 +216,140 @@ sbr_vector::bb_range_p (const basic_block bb)
return m_tab[bb->index] != NULL;
}
+// This class implements the on entry cache via a sparse bitmap.
+// It uses the quad bit routines to access 4 bits at a time.
+// A value of 0 (the default) means there is no entry, and a value of
+// 1 thru SBR_NUM represents an element in the m_range vector.
+// Varying is given the first value (1) and pre-cached.
+// SBR_NUM + 1 represents the value of UNDEFINED, and is never stored.
+// SBR_NUM is the number of values that can be cached.
+// Indexes are 1..SBR_NUM and are stored locally at m_range[0..SBR_NUM-1]
+
+#define SBR_NUM 14
+#define SBR_UNDEF SBR_NUM + 1
+#define SBR_VARYING 1
+
+class sbr_sparse_bitmap : public ssa_block_ranges
+{
+public:
+ sbr_sparse_bitmap (tree t, irange_allocator *allocator, bitmap_obstack *bm);
+ virtual void set_bb_range (const basic_block bb, const irange &r) OVERRIDE;
+ virtual bool get_bb_range (irange &r, const basic_block bb) OVERRIDE;
+ virtual bool bb_range_p (const basic_block bb) OVERRIDE;
+private:
+ void bitmap_set_quad (bitmap head, int quad, int quad_value);
+ int bitmap_get_quad (const_bitmap head, int quad);
+ irange_allocator *m_irange_allocator;
+ irange *m_range[SBR_NUM];
+ bitmap bitvec;
+ tree m_type;
+};
+
+// Initialize a block cache for an ssa_name of type T.
+
+sbr_sparse_bitmap::sbr_sparse_bitmap (tree t, irange_allocator *allocator,
+ bitmap_obstack *bm)
+{
+ gcc_checking_assert (TYPE_P (t));
+ m_type = t;
+ bitvec = BITMAP_ALLOC (bm);
+ m_irange_allocator = allocator;
+ // Pre-cache varying.
+ m_range[0] = m_irange_allocator->allocate (2);
+ m_range[0]->set_varying (t);
+ // Pre-cache zero and non-zero values for pointers.
+ if (POINTER_TYPE_P (t))
+ {
+ m_range[1] = m_irange_allocator->allocate (2);
+ m_range[1]->set_nonzero (t);
+ m_range[2] = m_irange_allocator->allocate (2);
+ m_range[2]->set_zero (t);
+ }
+ else
+ m_range[1] = m_range[2] = NULL;
+ // Clear SBR_NUM entries.
+ for (int x = 3; x < SBR_NUM; x++)
+ m_range[x] = 0;
+}
+
+// Set 4 bit values in a sparse bitmap. This allows a bitmap to
+// function as a sparse array of 4 bit values.
+// QUAD is the index, QUAD_VALUE is the 4 bit value to set.
+
+inline void
+sbr_sparse_bitmap::bitmap_set_quad (bitmap head, int quad, int quad_value)
+{
+ bitmap_set_aligned_chunk (head, quad, 4, (BITMAP_WORD) quad_value);
+}
+
+// Get a 4 bit value from a sparse bitmap. This allows a bitmap to
+// function as a sparse array of 4 bit values.
+// QUAD is the index.
+inline int
+sbr_sparse_bitmap::bitmap_get_quad (const_bitmap head, int quad)
+{
+ return (int) bitmap_get_aligned_chunk (head, quad, 4);
+}
+
+// Set the range on entry to basic block BB to R.
+
+void
+sbr_sparse_bitmap::set_bb_range (const basic_block bb, const irange &r)
+{
+ if (r.undefined_p ())
+ {
+ bitmap_set_quad (bitvec, bb->index, SBR_UNDEF);
+ return;
+ }
+
+ // Loop thru the values to see if R is already present.
+ for (int x = 0; x < SBR_NUM; x++)
+ if (!m_range[x] || r == *(m_range[x]))
+ {
+ if (!m_range[x])
+ m_range[x] = m_irange_allocator->allocate (r);
+ bitmap_set_quad (bitvec, bb->index, x + 1);
+ return;
+ }
+ // All values are taken, default to VARYING.
+ bitmap_set_quad (bitvec, bb->index, SBR_VARYING);
+ return;
+}
+
+// Return the range associated with block BB in R. Return false if
+// there is no range.
+
+bool
+sbr_sparse_bitmap::get_bb_range (irange &r, const basic_block bb)
+{
+ int value = bitmap_get_quad (bitvec, bb->index);
+
+ if (!value)
+ return false;
+
+ gcc_checking_assert (value <= SBR_UNDEF);
+ if (value == SBR_UNDEF)
+ r.set_undefined ();
+ else
+ r = *(m_range[value - 1]);
+ return true;
+}
+
+// Return true if a range is present.
+
+bool
+sbr_sparse_bitmap::bb_range_p (const basic_block bb)
+{
+ return (bitmap_get_quad (bitvec, bb->index) != 0);
+}
+
// -------------------------------------------------------------------------
// Initialize the block cache.
block_range_cache::block_range_cache ()
{
+ bitmap_obstack_initialize (&m_bitmaps);
m_ssa_ranges.create (0);
m_ssa_ranges.safe_grow_cleared (num_ssa_names);
m_irange_allocator = new irange_allocator;
@@ -234,6 +362,7 @@ block_range_cache::~block_range_cache ()
delete m_irange_allocator;
// Release the vector itself.
m_ssa_ranges.release ();
+ bitmap_obstack_release (&m_bitmaps);
}
// Set the range for NAME on entry to block BB to R.
@@ -249,9 +378,21 @@ block_range_cache::set_bb_range (tree name, const basic_block bb,
if (!m_ssa_ranges[v])
{
- void *r = m_irange_allocator->get_memory (sizeof (sbr_vector));
- m_ssa_ranges[v] = new (r) sbr_vector (TREE_TYPE (name),
- m_irange_allocator);
+ // Use sparse representation if there are too many basic blocks.
+ if (last_basic_block_for_fn (cfun) > param_evrp_sparse_threshold)
+ {
+ void *r = m_irange_allocator->get_memory (sizeof (sbr_sparse_bitmap));
+ m_ssa_ranges[v] = new (r) sbr_sparse_bitmap (TREE_TYPE (name),
+ m_irange_allocator,
+ &m_bitmaps);
+ }
+ else
+ {
+ // Otherwise use the default vector implementation.
+ void *r = m_irange_allocator->get_memory (sizeof (sbr_vector));
+ m_ssa_ranges[v] = new (r) sbr_vector (TREE_TYPE (name),
+ m_irange_allocator);
+ }
}
m_ssa_ranges[v]->set_bb_range (bb, r);
}
diff --git a/gcc/gimple-range-cache.h b/gcc/gimple-range-cache.h
index bccb5cc2442..43e576a51f9 100644
--- a/gcc/gimple-range-cache.h
+++ b/gcc/gimple-range-cache.h
@@ -61,6 +61,7 @@ private:
ssa_block_ranges &get_block_ranges (tree name);
ssa_block_ranges *query_block_ranges (tree name);
irange_allocator *m_irange_allocator;
+ bitmap_obstack m_bitmaps;
};
// This global cache is used with the range engine as markers for what
diff --git a/gcc/params.opt b/gcc/params.opt
index 2e4cbdd7a71..8ba281b4cfa 100644
--- a/gcc/params.opt
+++ b/gcc/params.opt
@@ -126,6 +126,10 @@ Maximum size (in bytes) of objects tracked bytewise by dead store elimination.
Common Joined UInteger Var(param_early_inlining_insns) Init(6) Optimization Param
Maximal estimated growth of function body caused by early inlining of single call.
+-param=evrp-sparse-threshold=
+Common Joined UInteger Var(param_evrp_sparse_threshold) Init(800) Optimization Param
+Maximum number of basic blocks before EVRP uses a sparse cache.
+
-param=evrp-mode=
Common Joined Var(param_evrp_mode) Enum(evrp_mode) Init(EVRP_MODE_EVRP_FIRST) Param Optimization
--param=evrp-mode=[legacy|ranger|legacy-first|ranger-first|ranger-trace|ranger-debug|trace|debug] Specifies the mode Early VRP should operate in.
--
2.25.4