All changes limited to hashtable_c++0x.cc file.
_Prime_rehash_policy::_M_next_bkt now really does what its comment
declares, that is to say:
// Return a prime no smaller than n.
_Prime_rehash_policy::_M_need_rehash now rehashes only when _M_next_size is
exceeded, not merely when it is reached.
PR libstdc++/87135
* src/c++11/hashtable_c++0x.cc:
(_Prime_rehash_policy::_M_next_bkt): Return a prime no smaller than
requested size, but not necessarily greater.
(_Prime_rehash_policy::_M_need_rehash): Rehash only if target size is
strictly greater than next resize threshold.
* testsuite/23_containers/unordered_map/modifiers/reserve.cc: Adapt test
to validate that there is no rehash as long as the number of insertions
is lower than or equal to the reserved number of elements.
unordered_map tests are successful. Is it OK to commit once all other tests
have completed?
François
diff --git a/libstdc++-v3/src/c++11/hashtable_c++0x.cc b/libstdc++-v3/src/c++11/hashtable_c++0x.cc
index a776a8506fe..ec6031b3f5b 100644
--- a/libstdc++-v3/src/c++11/hashtable_c++0x.cc
+++ b/libstdc++-v3/src/c++11/hashtable_c++0x.cc
@@ -46,10 +46,10 @@ namespace __detail
{
// Optimize lookups involving the first elements of __prime_list.
// (useful to speed-up, eg, constructors)
- static const unsigned char __fast_bkt[13]
- = { 2, 2, 3, 5, 5, 7, 7, 11, 11, 11, 11, 13, 13 };
+ static const unsigned char __fast_bkt[]
+ = { 2, 2, 2, 3, 5, 5, 7, 7, 11, 11, 11, 11, 13, 13 };
- if (__n <= 12)
+ if (__n < sizeof(__fast_bkt) / sizeof(unsigned char))
{
_M_next_resize =
__builtin_ceil(__fast_bkt[__n] * (long double)_M_max_load_factor);
@@ -65,9 +65,8 @@ namespace __detail
// iterator that can be dereferenced to get the last prime.
constexpr auto __last_prime = __prime_list + __n_primes - 1;
- // Look for 'n + 1' to make sure returned value will be greater than n.
const unsigned long* __next_bkt =
- std::lower_bound(__prime_list + 6, __last_prime, __n + 1);
+ std::lower_bound(__prime_list + 6, __last_prime, __n);
if (__next_bkt == __last_prime)
// Set next resize to the max value so that we never try to rehash again
@@ -95,7 +94,7 @@ namespace __detail
_M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
std::size_t __n_ins) const
{
- if (__n_elt + __n_ins >= _M_next_resize)
+ if (__n_elt + __n_ins > _M_next_resize)
{
long double __min_bkts = (__n_elt + __n_ins)
/ (long double)_M_max_load_factor;
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/reserve.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/reserve.cc
index e9cf7fd6f67..7f34325df87 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/reserve.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/modifiers/reserve.cc
@@ -18,23 +18,46 @@
// <http://www.gnu.org/licenses/>.
#include <unordered_map>
+
#include <testsuite_hooks.h>
void test01()
{
- const int N = 1000;
-
typedef std::unordered_map<int, int> Map;
Map m;
- m.reserve(N);
- std::size_t bkts = m.bucket_count();
- for (int i = 0; i != N; ++i)
+ // Make sure max load factor is 1 so that reserved elements is directly
+ // the bucket count.
+ m.max_load_factor(1);
+
+ int i = -1;
+ for (;;)
{
- m.insert(std::make_pair(i, i));
- // As long as we insert less than the reserved number of elements we
- // shouldn't experiment any rehash.
+ m.reserve(m.bucket_count());
+
+ std::size_t bkts = m.bucket_count();
+
+ m.reserve(bkts);
VERIFY( m.bucket_count() == bkts );
+
+ for (++i; i < bkts; ++i)
+ {
+ m.insert(std::make_pair(i, i));
+
+ // As long as we insert no more than the reserved number of elements
+ // we shouldn't experience any rehash.
+ VERIFY( m.bucket_count() == bkts );
+
+ VERIFY( m.load_factor() <= m.max_load_factor() );
+ }
+
+ // One more element should rehash.
+ m.insert(std::make_pair(i, i));
+ VERIFY( m.bucket_count() != bkts );
+ VERIFY( m.load_factor() <= m.max_load_factor() );
+
+ if (i > 1024)
+ break;
}
}