I think I completed this evolution.
I eventually used references to the node pointer as much as possible and
even used move semantics on it.
My prerequisites for this to work are that nullptr can be assigned to the
fancy pointer and that a fancy pointer to __node_type is implicitly
convertible to a fancy pointer to __node_base.
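To make those two prerequisites concrete, here is a minimal sketch of a
fancy pointer that satisfies them. It is only an illustration added here:
the name FancyPtr and its member value are made up and are not part of the
patch or of the new ext_ptr.cc tests, which use their own types. An
allocator whose allocator_traits<>::pointer is such a type is the kind of
thing those tests are meant to exercise.

  #include <cstddef>
  #include <memory>
  #include <type_traits>

  template<typename T>
    struct FancyPtr
    {
      using element_type = T;

      T* value = nullptr;

      FancyPtr() = default;
      FancyPtr(std::nullptr_t) noexcept { }            // nullptr is assignable
      explicit FancyPtr(T* p) noexcept : value(p) { }

      // FancyPtr<Derived> converts implicitly to FancyPtr<Base>, mirroring
      // the raw-pointer conversion (needed for node -> node-base assignments).
      template<typename U, typename = typename
               std::enable_if<std::is_convertible<U*, T*>::value>::type>
        FancyPtr(const FancyPtr<U>& other) noexcept : value(other.value) { }

      T& operator*() const noexcept { return *value; }
      T* operator->() const noexcept { return value; }
      explicit operator bool() const noexcept { return value != nullptr; }

      // Required for std::pointer_traits<FancyPtr<T>>::pointer_to.
      static FancyPtr pointer_to(T& t) noexcept
      { return FancyPtr(std::addressof(t)); }

      friend bool operator==(const FancyPtr& x, const FancyPtr& y) noexcept
      { return x.value == y.value; }
      friend bool operator!=(const FancyPtr& x, const FancyPtr& y) noexcept
      { return x.value != y.value; }
    };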
* include/bits/hashtable_policy.h (_Hashtable_base): Add _Alloc
template parameter.
(_ReuseOrAllocNode<>::__node_type): Remove.
(_ReuseOrAllocNode<>::__node_pointer): New.
(_ReuseOrAllocNode(__node_pointer, __hashtable_alloc&)): Adapt to
use latter.
(_ReuseOrAllocNode<>::operator()(_Arg&&)): Return latter.
(_AllocNode<>::__node_type): Remove.
(_AllocNode<>::__node_pointer): New.
(_AllocNode<>::operator()<>(_Arg&&)): Return latter.
(_Hash_node_base<>): Add _NodePtr template parameter.
(_Hash_node_value_base<>): Likewise.
(_Hash_node<>): Add _Ptr template parameter.
(_Hash_node<>::_M_next()): Remove.
(_Node_iterator_base<>): Use _NodePtr template parameter.
(operator==(const _Node_iterator_base&, const _Node_iterator_base&)):
Make inline friend.
(operator!=(const _Node_iterator_base&, const _Node_iterator_base&)):
Likewise.
(_Node_iterator<>): Use _NodePtr template parameter.
(_Node_const_iterator<>): Use _NodePtr template parameter.
(_Map_base<>::__node_type): Remove.
(_Map_base<>::__node_pointer): New.
(_Hash_code_base<>): Add _Alloc template parameter.
(_Hash_code_base<>::__pointer): New.
(_Hash_code_base<>::__node_pointer): New.
(_Hash_code_base<>::__node_ptr_arg_t): New.
(_Local_iterator_base<>): Add _Alloc template parameter. Inherit from
_Node_iterator_base<>.
(_Local_iterator_base<>::__base_node_iter): New.
(_Local_iterator_base<>::_M_cur): Remove.
(_Local_iterator_base<>::_M_incr()): Adapt.
(_Local_iterator_base<>::_M_curr()): Remove.
(operator==(const _Local_iterator_base<>&,
const _Local_iterator_base<>&)): Remove.
(operator!=(const _Local_iterator_base<>&,
const _Local_iterator_base<>&)): Remove.
(_Local_iterator<>): Add _Alloc and _NodePtr template parameters.
(_Local_const_iterator<>): Likewise.
(_Hashtable_base<>): Add _Alloc template parameter.
(_Hashtable_alloc<>::__node_pointer): New.
(_Hashtable_alloc<>::__bucket_pointer): New.
(_Hashtable_alloc<>::_M_allocate_node): Adapt.
(_Hashtable_alloc<>::_M_deallocate_node): Adapt.
(_Hashtable_alloc<>::_M_deallocate_node_ptr): Adapt.
(_Hashtable_alloc<>::_M_deallocate_nodes): Adapt.
(_Hashtable_alloc<>::_M_allocate_buckets): Adapt.
(_Hashtable_alloc<>::_M_deallocate_buckets): Adapt.
* include/bits/hashtable.h (_Hashtable<>): Adapt.
(_Hashtable<>::_M_begin()): Remove.
* include/debug/unordered_map: Adapt.
* include/debug/unordered_set: Adapt.
* testsuite/23_containers/unordered_map/allocator/ext_ptr.cc: New.
* testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc: New.
* testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc: New.
* testsuite/23_containers/unordered_set/allocator/ext_ptr.cc: New.
Tested under Linux x86_64.
Ok to commit?
François
On 19/04/20 7:31 pm, François Dumont wrote:
Here is my work in progress to use the allocator pointer type. This type
is used both as the node pointer and as the bucket pointer.
Rather than adapting _Local_iterator_base the way _Node_iterator_base was
adapted, I preferred to simply make it inherit from _Node_iterator_base.
It simplifies its implementation and avoids having to provide dedicated
comparison operators.
Now I wonder if I need to consider Phil Bouchard's comment regarding how
node pointers are passed, either by value or by reference. I already chose
to pass them as rvalue references on some occasions, and even as lvalue
references, as in the _M_bucket_index method. Do you think I need to
continue this way? Maybe I should use some conditional type: pass by value
for a raw pointer, and by reference otherwise?
François
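Regarding the pass-by-value versus pass-by-reference question in the quoted
mail, the patch below defines a conditional argument type (__node_ptr_arg_t)
for exactly that purpose. The following self-contained sketch shows the idea
in isolation; the names node_ptr_arg_t, Node and Fancy are only illustrative
and do not appear in the patch itself.

  #include <type_traits>

  // Raw pointers are cheap to copy, so pass them by value; any other
  // (fancy) pointer type is passed by const reference instead.
  template<typename NodePtr>
    using node_ptr_arg_t = typename std::conditional<
      std::is_pointer<NodePtr>::value,
      NodePtr,
      const NodePtr&>::type;

  struct Node { Node* next; };
  template<typename T> struct Fancy { T* value; };

  static_assert(std::is_same<node_ptr_arg_t<Node*>, Node*>::value,
                "a raw node pointer is passed by value");
  static_assert(std::is_same<node_ptr_arg_t<Fancy<Node>>,
                             const Fancy<Node>&>::value,
                "a fancy node pointer is passed by const reference");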
diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index b00319a668b..b735291bb56 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -171,7 +171,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typename _H1, typename _H2, typename _Hash,
typename _RehashPolicy, typename _Traits>
class _Hashtable
- : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
+ : public __detail::_Hashtable_base<_Key, _Value, _Alloc,
+ _ExtractKey, _Equal,
_H1, _H2, _Hash, _Traits>,
public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>,
@@ -182,9 +183,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>,
private __detail::_Hashtable_alloc<
- __alloc_rebind<_Alloc,
- __detail::_Hash_node<_Value,
- _Traits::__hash_cached::value>>>
+ __alloc_rebind<_Alloc, __detail::_Hash_node<
+ typename std::allocator_traits<_Alloc>::pointer, _Value,
+ _Traits::__hash_cached::value>>>
{
static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
"unordered container must have a non-const, non-volatile value_type");
@@ -195,8 +196,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
using __traits_type = _Traits;
using __hash_cached = typename __traits_type::__hash_cached;
- using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
+ using __hashtable_base = __detail::
+ _Hashtable_base<_Key, _Value, _Alloc, _ExtractKey,
+ _Equal, _H1, _H2, _Hash, _Traits>;
+
+ using __node_type = typename __hashtable_base::__node_type;
using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
+ using __node_pointer = typename __hashtable_base::__node_pointer;
+ using __node_ptr_arg_t = typename __hashtable_base::__node_ptr_arg_t;
using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
@@ -206,6 +213,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typename __hashtable_alloc::__node_alloc_traits;
using __node_base = typename __hashtable_alloc::__node_base;
using __bucket_type = typename __hashtable_alloc::__bucket_type;
+ using __bucket_pointer = typename __hashtable_alloc::__bucket_pointer;
+ using __bucket_ptr_traits = std::pointer_traits<__bucket_pointer>;
+ using __node_base_ptr_traits = std::pointer_traits<__bucket_type>;
public:
typedef _Key key_type;
@@ -232,10 +242,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__detail::_Identity,
__detail::_Select1st>::type;
- using __hashtable_base = __detail::
- _Hashtable_base<_Key, _Value, _ExtractKey,
- _Equal, _H1, _H2, _Hash, _Traits>;
-
using __hash_code_base = typename __hashtable_base::__hash_code_base;
using __hash_code = typename __hashtable_base::__hash_code;
using __ireturn_type = typename __hashtable_base::__ireturn_type;
@@ -262,8 +268,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
struct _Scoped_node
{
// Take ownership of a node with a constructed element.
- _Scoped_node(__node_type* __n, __hashtable_alloc* __h)
- : _M_h(__h), _M_node(__n) { }
+ _Scoped_node(__node_pointer&& __n, __hashtable_alloc* __h)
+ : _M_h(__h), _M_node(std::move(__n)) { }
// Allocate a node and construct an element within it.
template<typename... _Args>
@@ -279,7 +285,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Scoped_node& operator=(const _Scoped_node&) = delete;
__hashtable_alloc* _M_h;
- __node_type* _M_node;
+ __node_pointer _M_node;
};
template<typename _Ht>
@@ -306,7 +312,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Getting a bucket index from a node shall not throw because it is used
// in methods (erase, swap...) that shall not throw.
static_assert(noexcept(declval<const __hash_code_base_access&>()
- ._M_bucket_index((const __node_type*)nullptr,
+ ._M_bucket_index(declval<__node_ptr_arg_t>(),
(std::size_t)0)),
"Cache the hash code or qualify your functors involved"
" in hash code and bucket index computation with noexcept");
@@ -361,7 +367,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif
private:
- __bucket_type* _M_buckets = &_M_single_bucket;
+ __bucket_pointer _M_buckets
+ = __bucket_ptr_traits::pointer_to(_M_single_bucket);
size_type _M_bucket_count = 1;
__node_base _M_before_begin;
size_type _M_element_count = 0;
@@ -376,8 +383,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__bucket_type _M_single_bucket = nullptr;
bool
- _M_uses_single_bucket(__bucket_type* __bkts) const
- { return __builtin_expect(__bkts == &_M_single_bucket, false); }
+ _M_uses_single_bucket(__bucket_pointer __bkts) const
+ {
+ return __builtin_expect(std::__to_address(__bkts) == &_M_single_bucket,
+ false);
+ }
bool
_M_uses_single_bucket() const
@@ -386,20 +396,20 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__hashtable_alloc&
_M_base_alloc() { return *this; }
- __bucket_type*
+ __bucket_pointer
_M_allocate_buckets(size_type __bkt_count)
{
if (__builtin_expect(__bkt_count == 1, false))
{
_M_single_bucket = nullptr;
- return &_M_single_bucket;
+ return __bucket_ptr_traits::pointer_to(_M_single_bucket);
}
return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
}
void
- _M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
+ _M_deallocate_buckets(__bucket_pointer __bkts, size_type __bkt_count)
{
if (_M_uses_single_bucket(__bkts))
return;
@@ -413,13 +423,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Gets bucket begin, deals with the fact that non-empty buckets contain
// their before begin node.
- __node_type*
+ __node_pointer
_M_bucket_begin(size_type __bkt) const;
- __node_type*
- _M_begin() const
- { return static_cast<__node_type*>(_M_before_begin._M_nxt); }
-
// Assign *this using another _Hashtable instance. Whether elements
// are copied or moved depends on the _Ht reference.
template<typename _Ht>
@@ -523,7 +529,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hashtable&
operator=(initializer_list<value_type> __l)
{
- __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+ __reuse_or_alloc_node_gen_t __roan(std::move(_M_before_begin._M_nxt),
+ *this);
_M_before_begin._M_nxt = nullptr;
clear();
this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys());
@@ -540,11 +547,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Basic container operations
iterator
begin() noexcept
- { return iterator(_M_begin()); }
+ { return iterator(_M_before_begin._M_nxt); }
const_iterator
begin() const noexcept
- { return const_iterator(_M_begin()); }
+ { return const_iterator(_M_before_begin._M_nxt); }
iterator
end() noexcept
@@ -556,7 +563,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
const_iterator
cbegin() const noexcept
- { return const_iterator(_M_begin()); }
+ { return const_iterator(_M_before_begin._M_nxt); }
const_iterator
cend() const noexcept
@@ -674,7 +681,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
protected:
// Bucket index computation helpers.
size_type
- _M_bucket_index(__node_type* __n) const noexcept
+ _M_bucket_index(__node_ptr_arg_t __n) const noexcept
{ return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
size_type
@@ -683,45 +690,45 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Find and insert helper functions and types
// Find the node before the one matching the criteria.
- __node_base*
+ __bucket_type
_M_find_before_node(size_type, const key_type&, __hash_code) const;
- __node_type*
+ __node_pointer
_M_find_node(size_type __bkt, const key_type& __key,
__hash_code __c) const
{
- __node_base* __before_n = _M_find_before_node(__bkt, __key, __c);
+ __bucket_type __before_n = _M_find_before_node(__bkt, __key, __c);
if (__before_n)
- return static_cast<__node_type*>(__before_n->_M_nxt);
+ return __before_n->_M_nxt;
return nullptr;
}
// Insert a node at the beginning of a bucket.
- void
- _M_insert_bucket_begin(size_type, __node_type*);
+ __node_pointer
+ _M_insert_bucket_begin(size_type, __node_pointer&&);
// Remove the bucket first node
void
- _M_remove_bucket_begin(size_type __bkt, __node_type* __next_n,
+ _M_remove_bucket_begin(size_type __bkt, __node_ptr_arg_t __next_n,
size_type __next_bkt);
// Get the node before __n in the bucket __bkt
- __node_base*
- _M_get_previous_node(size_type __bkt, __node_base* __n);
+ __bucket_type
+ _M_get_previous_node(size_type __bkt, __node_ptr_arg_t __n);
// Insert node __n with key __k and hash code __code, in bucket __bkt
// if no rehash (assumes no element with same key already present).
// Takes ownership of __n if insertion succeeds, throws otherwise.
iterator
_M_insert_unique_node(const key_type& __k, size_type __bkt,
- __hash_code __code, __node_type* __n,
+ __hash_code __code, __node_pointer&& __n,
size_type __n_elt = 1);
// Insert node __n with key __k and hash code __code.
// Takes ownership of __n if insertion succeeds, throws otherwise.
iterator
- _M_insert_multi_node(__node_type* __hint, const key_type& __k,
- __hash_code __code, __node_type* __n);
+ _M_insert_multi_node(__node_pointer __hint, const key_type& __k,
+ __hash_code __code, __node_pointer&& __n);
template<typename... _Args>
std::pair<iterator, bool>
@@ -778,7 +785,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_M_erase(false_type, const key_type&);
iterator
- _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);
+ _M_erase(size_type __bkt, __bucket_type __prev_n, __node_pointer __n);
public:
// Emplace
@@ -838,7 +845,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
const key_type& __k = __nh._M_key();
__hash_code __code = this->_M_hash_code(__k);
size_type __bkt = _M_bucket_index(__k, __code);
- if (__node_type* __n = _M_find_node(__bkt, __k, __code))
+ if (__node_pointer __n = _M_find_node(__bkt, __k, __code))
{
__ret.node = std::move(__nh);
__ret.position = iterator(__n);
@@ -846,8 +853,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
else
{
- __ret.position
- = _M_insert_unique_node(__k, __bkt, __code, __nh._M_ptr);
+ __ret.position = _M_insert_unique_node(__k, __bkt, __code,
+ std::move(__nh._M_ptr));
__nh._M_ptr = nullptr;
__ret.inserted = true;
}
@@ -866,23 +873,23 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
const key_type& __k = __nh._M_key();
auto __code = this->_M_hash_code(__k);
- auto __ret
- = _M_insert_multi_node(__hint._M_cur, __k, __code, __nh._M_ptr);
+ auto __ret = _M_insert_multi_node(__hint._M_cur, __k, __code,
+ std::move(__nh._M_ptr));
__nh._M_ptr = nullptr;
return __ret;
}
private:
node_type
- _M_extract_node(size_t __bkt, __node_base* __prev_n)
+ _M_extract_node(size_t __bkt, __bucket_type __prev_n)
{
- __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
+ __node_pointer __n = __prev_n->_M_nxt;
if (__prev_n == _M_buckets[__bkt])
- _M_remove_bucket_begin(__bkt, __n->_M_next(),
- __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
+ _M_remove_bucket_begin(__bkt, __n->_M_nxt,
+ __n->_M_nxt ? _M_bucket_index(__n->_M_nxt) : 0);
else if (__n->_M_nxt)
{
- size_type __next_bkt = _M_bucket_index(__n->_M_next());
+ size_type __next_bkt = _M_bucket_index(__n->_M_nxt);
if (__next_bkt != __bkt)
_M_buckets[__next_bkt] = __prev_n;
}
@@ -910,7 +917,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
node_type __nh;
__hash_code __code = this->_M_hash_code(__k);
std::size_t __bkt = _M_bucket_index(__k, __code);
- if (__node_base* __prev_node = _M_find_before_node(__bkt, __k, __code))
+ if (__bucket_type __prev_node = _M_find_before_node(__bkt, __k, __code))
__nh = _M_extract_node(__bkt, __prev_node);
return __nh;
}
@@ -934,8 +941,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (_M_find_node(__bkt, __k, __code) == nullptr)
{
auto __nh = __src.extract(__pos);
- _M_insert_unique_node(__k, __bkt, __code, __nh._M_ptr,
- __n_elt);
+ _M_insert_unique_node(__k, __bkt, __code,
+ std::move(__nh._M_ptr), __n_elt);
__nh._M_ptr = nullptr;
__n_elt = 1;
}
@@ -981,10 +988,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
_M_bucket_begin(size_type __bkt) const
- -> __node_type*
+ -> __node_pointer
{
- __node_base* __n = _M_buckets[__bkt];
- return __n ? static_cast<__node_type*>(__n->_M_nxt) : nullptr;
+ __bucket_type __n = _M_buckets[__bkt];
+ return __n ? __n->_M_nxt : nullptr;
}
template<typename _Key, typename _Value,
@@ -1058,7 +1065,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
&& __this_alloc != __that_alloc)
{
// Replacement allocator cannot free existing storage.
- this->_M_deallocate_nodes(_M_begin());
+ this->_M_deallocate_nodes(_M_before_begin._M_nxt);
_M_before_begin._M_nxt = nullptr;
_M_deallocate_buckets();
_M_buckets = nullptr;
@@ -1099,7 +1106,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
_M_assign_elements(_Ht&& __ht)
{
- __bucket_type* __former_buckets = nullptr;
+ __bucket_pointer __former_buckets = nullptr;
std::size_t __former_bucket_count = _M_bucket_count;
const __rehash_state& __former_state = _M_rehash_policy._M_state();
@@ -1118,7 +1125,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__hashtable_base::operator=(std::forward<_Ht>(__ht));
_M_element_count = __ht._M_element_count;
_M_rehash_policy = __ht._M_rehash_policy;
- __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+ __reuse_or_alloc_node_gen_t
+ __roan(std::move(_M_before_begin._M_nxt), *this);
_M_before_begin._M_nxt = nullptr;
_M_assign(std::forward<_Ht>(__ht), __roan);
if (__former_buckets)
@@ -1150,7 +1158,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
_M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
{
- __bucket_type* __buckets = nullptr;
+ __bucket_pointer __buckets = nullptr;
if (!_M_buckets)
_M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
@@ -1161,16 +1169,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// First deal with the special first node pointed to by
// _M_before_begin.
- __node_type* __ht_n = __ht._M_begin();
- __node_type* __this_n
+ __node_pointer __ht_n = __ht._M_before_begin._M_nxt;
+ __node_pointer __this_n
= __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
this->_M_copy_code(__this_n, __ht_n);
_M_before_begin._M_nxt = __this_n;
- _M_buckets[_M_bucket_index(__this_n)] = &_M_before_begin;
+ _M_buckets[_M_bucket_index(__this_n)] =
+ __node_base_ptr_traits::pointer_to(_M_before_begin);
// Then deal with other nodes.
- __node_base* __prev_n = __this_n;
- for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
+ __node_pointer __prev_n = __this_n;
+ for (__ht_n = __ht_n->_M_nxt; __ht_n; __ht_n = __ht_n->_M_nxt)
{
__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
__prev_n->_M_nxt = __this_n;
@@ -1202,7 +1211,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_M_rehash_policy._M_reset();
_M_bucket_count = 1;
_M_single_bucket = nullptr;
- _M_buckets = &_M_single_bucket;
+ _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
_M_before_begin._M_nxt = nullptr;
_M_element_count = 0;
}
@@ -1216,7 +1225,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
_M_move_assign(_Hashtable&& __ht, true_type)
{
- this->_M_deallocate_nodes(_M_begin());
+ this->_M_deallocate_nodes(_M_before_begin._M_nxt);
_M_deallocate_buckets();
__hashtable_base::operator=(std::move(__ht));
_M_rehash_policy = __ht._M_rehash_policy;
@@ -1224,9 +1233,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_M_buckets = __ht._M_buckets;
else
{
- _M_buckets = &_M_single_bucket;
+ _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
_M_single_bucket = __ht._M_single_bucket;
}
+
_M_bucket_count = __ht._M_bucket_count;
_M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
_M_element_count = __ht._M_element_count;
@@ -1234,8 +1244,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Fix buckets containing the _M_before_begin pointers that can't be
// moved.
- if (_M_begin())
- _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+ if (_M_before_begin._M_nxt)
+ _M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+ __node_base_ptr_traits::pointer_to(_M_before_begin);
__ht._M_reset();
}
@@ -1290,23 +1301,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__map_base(__ht),
__rehash_base(__ht),
__hashtable_alloc(std::move(__ht._M_base_alloc())),
- _M_buckets(__ht._M_buckets),
+ _M_buckets(std::move(__ht._M_buckets)),
_M_bucket_count(__ht._M_bucket_count),
- _M_before_begin(__ht._M_before_begin._M_nxt),
+ _M_before_begin(std::move(__ht._M_before_begin._M_nxt)),
_M_element_count(__ht._M_element_count),
_M_rehash_policy(__ht._M_rehash_policy)
{
// Update, if necessary, buckets if __ht is using its single bucket.
- if (__ht._M_uses_single_bucket())
+ if (std::__to_address(_M_buckets) == &__ht._M_single_bucket)
{
- _M_buckets = &_M_single_bucket;
+ _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
_M_single_bucket = __ht._M_single_bucket;
}
// Update, if necessary, bucket pointing to before begin that hasn't
// moved.
- if (_M_begin())
- _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+ if (_M_before_begin._M_nxt)
+ _M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+ __node_base_ptr_traits::pointer_to(_M_before_begin);
__ht._M_reset();
}
@@ -1322,7 +1334,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__map_base(__ht),
__rehash_base(__ht),
__hashtable_alloc(__node_alloc_type(__a)),
- _M_buckets(),
+ _M_buckets(nullptr),
_M_bucket_count(__ht._M_bucket_count),
_M_element_count(__ht._M_element_count),
_M_rehash_policy(__ht._M_rehash_policy)
@@ -1351,17 +1363,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
if (__ht._M_uses_single_bucket())
{
- _M_buckets = &_M_single_bucket;
+ _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
_M_single_bucket = __ht._M_single_bucket;
}
else
- _M_buckets = __ht._M_buckets;
+ _M_buckets = std::move(__ht._M_buckets);
- _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
+ _M_before_begin._M_nxt = std::move(__ht._M_before_begin._M_nxt);
// Update, if necessary, bucket pointing to before begin that hasn't
// moved.
- if (_M_begin())
- _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+ if (_M_before_begin._M_nxt)
+ _M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+ __node_base_ptr_traits::pointer_to(_M_before_begin);
__ht._M_reset();
}
else
@@ -1413,13 +1426,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (!__x._M_uses_single_bucket())
{
_M_buckets = __x._M_buckets;
- __x._M_buckets = &__x._M_single_bucket;
+ __x._M_buckets =
+ __bucket_ptr_traits::pointer_to(__x._M_single_bucket);
}
}
else if (__x._M_uses_single_bucket())
{
__x._M_buckets = _M_buckets;
- _M_buckets = &_M_single_bucket;
+ _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
}
else
std::swap(_M_buckets, __x._M_buckets);
@@ -1431,12 +1445,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Fix buckets containing the _M_before_begin pointers that can't be
// swapped.
- if (_M_begin())
- _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+ if (_M_before_begin._M_nxt)
+ _M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+ __node_base_ptr_traits::pointer_to(_M_before_begin);
- if (__x._M_begin())
- __x._M_buckets[__x._M_bucket_index(__x._M_begin())]
- = &__x._M_before_begin;
+ if (__x._M_before_begin._M_nxt)
+ __x._M_buckets[__x._M_bucket_index(__x._M_before_begin._M_nxt)]
+ = __node_base_ptr_traits::pointer_to(__x._M_before_begin);
}
template<typename _Key, typename _Value,
@@ -1451,7 +1466,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
__hash_code __code = this->_M_hash_code(__k);
std::size_t __bkt = _M_bucket_index(__k, __code);
- __node_type* __p = _M_find_node(__bkt, __k, __code);
+ __node_pointer __p = _M_find_node(__bkt, __k, __code);
return __p ? iterator(__p) : end();
}
@@ -1467,7 +1482,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
__hash_code __code = this->_M_hash_code(__k);
std::size_t __bkt = _M_bucket_index(__k, __code);
- __node_type* __p = _M_find_node(__bkt, __k, __code);
+ __node_pointer __p = _M_find_node(__bkt, __k, __code);
return __p ? const_iterator(__p) : end();
}
@@ -1483,12 +1498,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
__hash_code __code = this->_M_hash_code(__k);
std::size_t __bkt = _M_bucket_index(__k, __code);
- __node_type* __p = _M_bucket_begin(__bkt);
+ __node_pointer __p = _M_bucket_begin(__bkt);
if (!__p)
return 0;
std::size_t __result = 0;
- for (;; __p = __p->_M_next())
+ for (;; __p = __p->_M_nxt)
{
if (this->_M_equals(__k, __code, __p))
++__result;
@@ -1497,7 +1512,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// found a non-equivalent value after an equivalent one it
// means that we won't find any new equivalent value.
break;
- if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
+ if (!__p->_M_nxt || _M_bucket_index(__p->_M_nxt) != __bkt)
break;
}
return __result;
@@ -1515,14 +1530,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
__hash_code __code = this->_M_hash_code(__k);
std::size_t __bkt = _M_bucket_index(__k, __code);
- __node_type* __p = _M_find_node(__bkt, __k, __code);
+ __node_pointer __p = _M_find_node(__bkt, __k, __code);
if (__p)
{
- __node_type* __p1 = __p->_M_next();
+ __node_pointer __p1 = __p->_M_nxt;
while (__p1 && _M_bucket_index(__p1) == __bkt
&& this->_M_equals(__k, __code, __p1))
- __p1 = __p1->_M_next();
+ __p1 = __p1->_M_nxt;
return std::make_pair(iterator(__p), iterator(__p1));
}
@@ -1542,14 +1557,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
__hash_code __code = this->_M_hash_code(__k);
std::size_t __bkt = _M_bucket_index(__k, __code);
- __node_type* __p = _M_find_node(__bkt, __k, __code);
+ __node_pointer __p = _M_find_node(__bkt, __k, __code);
if (__p)
{
- __node_type* __p1 = __p->_M_next();
+ __node_pointer __p1 = __p->_M_nxt;
while (__p1 && _M_bucket_index(__p1) == __bkt
&& this->_M_equals(__k, __code, __p1))
- __p1 = __p1->_M_next();
+ __p1 = __p1->_M_nxt;
return std::make_pair(const_iterator(__p), const_iterator(__p1));
}
@@ -1568,19 +1583,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
_M_find_before_node(size_type __bkt, const key_type& __k,
__hash_code __code) const
- -> __node_base*
+ -> __bucket_type
{
- __node_base* __prev_p = _M_buckets[__bkt];
+ __bucket_type __prev_p = _M_buckets[__bkt];
if (!__prev_p)
return nullptr;
- for (__node_type* __p = static_cast<__node_type*>(__prev_p->_M_nxt);;
- __p = __p->_M_next())
+ for (__node_pointer __p = __prev_p->_M_nxt;; __p = __p->_M_nxt)
{
if (this->_M_equals(__k, __code, __p))
return __prev_p;
- if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
+ if (!__p->_M_nxt || _M_bucket_index(__p->_M_nxt) != __bkt)
break;
__prev_p = __p;
}
@@ -1591,17 +1605,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typename _Alloc, typename _ExtractKey, typename _Equal,
typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
typename _Traits>
- void
+ auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_insert_bucket_begin(size_type __bkt, __node_type* __node)
+ _M_insert_bucket_begin(size_type __bkt, __node_pointer&& __node)
+ -> __node_pointer
{
if (_M_buckets[__bkt])
{
// Bucket is not empty, we just need to insert the new node
// after the bucket before begin.
__node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
- _M_buckets[__bkt]->_M_nxt = __node;
+ _M_buckets[__bkt]->_M_nxt = std::move(__node);
+ return _M_buckets[__bkt]->_M_nxt;
}
else
{
@@ -1609,12 +1625,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// beginning of the singly-linked list and the bucket will
// contain _M_before_begin pointer.
__node->_M_nxt = _M_before_begin._M_nxt;
- _M_before_begin._M_nxt = __node;
- if (__node->_M_nxt)
+ _M_before_begin._M_nxt = std::move(__node);
+ if (_M_before_begin._M_nxt->_M_nxt)
// We must update former begin bucket that is pointing to
// _M_before_begin.
- _M_buckets[_M_bucket_index(__node->_M_next())] = __node;
- _M_buckets[__bkt] = &_M_before_begin;
+ _M_buckets[_M_bucket_index(_M_before_begin._M_nxt->_M_nxt)] =
+ _M_before_begin._M_nxt;
+ _M_buckets[__bkt] =
+ __node_base_ptr_traits::pointer_to(_M_before_begin);
+ return _M_before_begin._M_nxt;
}
}
@@ -1625,7 +1644,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_remove_bucket_begin(size_type __bkt, __node_type* __next,
+ _M_remove_bucket_begin(size_type __bkt, __node_ptr_arg_t __next,
size_type __next_bkt)
{
if (!__next || __next_bkt != __bkt)
@@ -1636,7 +1655,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_M_buckets[__next_bkt] = _M_buckets[__bkt];
// Second update before begin node if necessary
- if (&_M_before_begin == _M_buckets[__bkt])
+ if (&_M_before_begin == std::__to_address(_M_buckets[__bkt]))
_M_before_begin._M_nxt = __next;
_M_buckets[__bkt] = nullptr;
}
@@ -1649,10 +1668,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_get_previous_node(size_type __bkt, __node_base* __n)
- -> __node_base*
+ _M_get_previous_node(size_type __bkt, __node_ptr_arg_t __n)
+ -> __bucket_type
{
- __node_base* __prev_n = _M_buckets[__bkt];
+ __bucket_type __prev_n = _M_buckets[__bkt];
while (__prev_n->_M_nxt != __n)
__prev_n = __prev_n->_M_nxt;
return __prev_n;
@@ -1674,12 +1693,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
__hash_code __code = this->_M_hash_code(__k);
size_type __bkt = _M_bucket_index(__k, __code);
- if (__node_type* __p = _M_find_node(__bkt, __k, __code))
+ if (__node_pointer __p = _M_find_node(__bkt, __k, __code))
// There is already an equivalent node, no insertion
return std::make_pair(iterator(__p), false);
// Insert the node
- auto __pos = _M_insert_unique_node(__k, __bkt, __code, __node._M_node);
+ auto __pos = _M_insert_unique_node(__k, __bkt, __code,
+ std::move(__node._M_node));
__node._M_node = nullptr;
return { __pos, true };
}
@@ -1700,8 +1720,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
__hash_code __code = this->_M_hash_code(__k);
- auto __pos
- = _M_insert_multi_node(__hint._M_cur, __k, __code, __node._M_node);
+ auto __pos = _M_insert_multi_node(__hint._M_cur, __k, __code,
+ std::move(__node._M_node));
__node._M_node = nullptr;
return __pos;
}
@@ -1714,7 +1734,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
_M_insert_unique_node(const key_type& __k, size_type __bkt,
- __hash_code __code, __node_type* __node,
+ __hash_code __code, __node_pointer&& __node,
size_type __n_elt)
-> iterator
{
@@ -1732,9 +1752,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
this->_M_store_code(__node, __code);
// Always insert at the beginning of the bucket.
- _M_insert_bucket_begin(__bkt, __node);
++_M_element_count;
- return iterator(__node);
+ return iterator(_M_insert_bucket_begin(__bkt, std::move(__node)));
}
template<typename _Key, typename _Value,
@@ -1744,8 +1763,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_insert_multi_node(__node_type* __hint, const key_type& __k,
- __hash_code __code, __node_type* __node)
+ _M_insert_multi_node(__node_pointer __hint, const key_type& __k,
+ __hash_code __code, __node_pointer&& __node)
-> iterator
{
const __rehash_state& __saved_state = _M_rehash_policy._M_state();
@@ -1760,34 +1779,39 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Find the node before an equivalent one or use hint if it exists and
// if it is equivalent.
- __node_base* __prev
- = __builtin_expect(__hint != nullptr, false)
- && this->_M_equals(__k, __code, __hint)
- ? __hint
- : _M_find_before_node(__bkt, __k, __code);
+ __bucket_type __prev;
+ if (__builtin_expect(__hint != nullptr, false)
+ && this->_M_equals(__k, __code, __hint))
+ __prev = __hint;
+ else
+ __prev = _M_find_before_node(__bkt, __k, __code);
+
if (__prev)
{
// Insert after the node before the equivalent one.
__node->_M_nxt = __prev->_M_nxt;
- __prev->_M_nxt = __node;
+ __prev->_M_nxt = std::move(__node);
if (__builtin_expect(__prev == __hint, false))
// hint might be the last bucket node, in this case we need to
// update next bucket.
- if (__node->_M_nxt
- && !this->_M_equals(__k, __code, __node->_M_next()))
+ if (__prev->_M_nxt->_M_nxt
+ && !this->_M_equals(__k, __code, __prev->_M_nxt->_M_nxt))
{
- size_type __next_bkt = _M_bucket_index(__node->_M_next());
+ size_type __next_bkt = _M_bucket_index(__prev->_M_nxt->_M_nxt);
if (__next_bkt != __bkt)
- _M_buckets[__next_bkt] = __node;
+ _M_buckets[__next_bkt] = __prev->_M_nxt;
}
+ ++_M_element_count;
+ return iterator(__prev->_M_nxt);
}
else
- // The inserted node has no equivalent in the hashtable. We must
- // insert the new node at the beginning of the bucket to preserve
- // equivalent elements' relative positions.
- _M_insert_bucket_begin(__bkt, __node);
- ++_M_element_count;
- return iterator(__node);
+ {
+ // The inserted node has no equivalent in the hashtable. We must
+ // insert the new node at the beginning of the bucket to preserve
+ // equivalent elements' relative positions.
+ ++_M_element_count;
+ return iterator(_M_insert_bucket_begin(__bkt, std::move(__node)));
+ }
}
// Insert v if no element with its key is already present.
@@ -1807,12 +1831,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__hash_code __code = this->_M_hash_code(__k);
size_type __bkt = _M_bucket_index(__k, __code);
- if (__node_type* __node = _M_find_node(__bkt, __k, __code))
+ if (__node_pointer __node = _M_find_node(__bkt, __k, __code))
return { iterator(__node), false };
_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
auto __pos
- = _M_insert_unique_node(__k, __bkt, __code, __node._M_node, __n_elt);
+ = _M_insert_unique_node(__k, __bkt, __code,
+ std::move(__node._M_node), __n_elt);
__node._M_node = nullptr;
return { __pos, true };
}
@@ -1837,8 +1862,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Second allocate new node so that we don't rehash if it throws.
_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
- auto __pos
- = _M_insert_multi_node(__hint._M_cur, __k, __code, __node._M_node);
+ auto __pos = _M_insert_multi_node(__hint._M_cur, __k, __code,
+ std::move(__node._M_node));
__node._M_node = nullptr;
return __pos;
}
@@ -1853,14 +1878,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
erase(const_iterator __it)
-> iterator
{
- __node_type* __n = __it._M_cur;
- std::size_t __bkt = _M_bucket_index(__n);
+ std::size_t __bkt = _M_bucket_index(__it._M_cur);
// Look for previous node to unlink it from the erased one, this
// is why we need buckets to contain the before begin to make
// this search fast.
- __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
- return _M_erase(__bkt, __prev_n, __n);
+ __bucket_type __prev_n = _M_get_previous_node(__bkt, __it._M_cur);
+ return _M_erase(__bkt, __prev_n, __it._M_cur);
}
template<typename _Key, typename _Value,
@@ -1870,21 +1894,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n)
+ _M_erase(size_type __bkt, __bucket_type __prev_n, __node_pointer __n)
-> iterator
{
if (__prev_n == _M_buckets[__bkt])
- _M_remove_bucket_begin(__bkt, __n->_M_next(),
- __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
+ _M_remove_bucket_begin(__bkt, __n->_M_nxt,
+ __n->_M_nxt ? _M_bucket_index(__n->_M_nxt) : 0);
else if (__n->_M_nxt)
{
- size_type __next_bkt = _M_bucket_index(__n->_M_next());
+ size_type __next_bkt = _M_bucket_index(__n->_M_nxt);
if (__next_bkt != __bkt)
_M_buckets[__next_bkt] = __prev_n;
}
__prev_n->_M_nxt = __n->_M_nxt;
- iterator __result(__n->_M_next());
+ iterator __result(__n->_M_nxt);
this->_M_deallocate_node(__n);
--_M_element_count;
@@ -1905,13 +1929,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
std::size_t __bkt = _M_bucket_index(__k, __code);
// Look for the node before the first matching node.
- __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
+ __bucket_type __prev_n = _M_find_before_node(__bkt, __k, __code);
if (!__prev_n)
return 0;
// We found a matching node, erase it.
- __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
- _M_erase(__bkt, __prev_n, __n);
+ _M_erase(__bkt, __prev_n, __prev_n->_M_nxt);
return 1;
}
@@ -1929,7 +1952,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
std::size_t __bkt = _M_bucket_index(__k, __code);
// Look for the node before the first matching node.
- __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
+ __bucket_type __prev_n = _M_find_before_node(__bkt, __k, __code);
if (!__prev_n)
return 0;
@@ -1939,12 +1962,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// We use one loop to find all matching nodes and another to deallocate
// them so that the key stays valid during the first loop. It might be
// invalidated indirectly when destroying nodes.
- __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
- __node_type* __n_last = __n;
+ __node_pointer __n = __prev_n->_M_nxt;
+ __node_pointer __n_last = __n;
std::size_t __n_last_bkt = __bkt;
do
{
- __n_last = __n_last->_M_next();
+ __n_last = __n_last->_M_nxt;
if (!__n_last)
break;
__n_last_bkt = _M_bucket_index(__n_last);
@@ -1955,7 +1978,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
size_type __result = 0;
do
{
- __node_type* __p = __n->_M_next();
+ __node_pointer __p = __n->_M_nxt;
this->_M_deallocate_node(__n);
__n = __p;
++__result;
@@ -1981,22 +2004,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
erase(const_iterator __first, const_iterator __last)
-> iterator
{
- __node_type* __n = __first._M_cur;
- __node_type* __last_n = __last._M_cur;
+ __node_pointer __n = __first._M_cur;
+ __node_pointer __last_n = __last._M_cur;
if (__n == __last_n)
return iterator(__n);
std::size_t __bkt = _M_bucket_index(__n);
- __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+ __bucket_type __prev_n = _M_get_previous_node(__bkt, __n);
bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
std::size_t __n_bkt = __bkt;
for (;;)
{
do
{
- __node_type* __tmp = __n;
- __n = __n->_M_next();
+ __node_pointer __tmp = __n;
+ __n = __n->_M_nxt;
this->_M_deallocate_node(__tmp);
--_M_element_count;
if (!__n)
@@ -2027,8 +2050,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
clear() noexcept
{
- this->_M_deallocate_nodes(_M_begin());
- __builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(__bucket_type));
+ this->_M_deallocate_nodes(_M_before_begin._M_nxt);
+ std::fill_n(_M_buckets, _M_bucket_count, nullptr);
_M_element_count = 0;
_M_before_begin._M_nxt = nullptr;
}
@@ -2088,20 +2111,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
_M_rehash_aux(size_type __bkt_count, true_type)
{
- __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
- __node_type* __p = _M_begin();
+ __bucket_pointer __new_buckets = _M_allocate_buckets(__bkt_count);
+ auto __before_begin_ptr =
+ __node_base_ptr_traits::pointer_to(_M_before_begin);
+ __node_pointer __p = _M_before_begin._M_nxt;
_M_before_begin._M_nxt = nullptr;
std::size_t __bbegin_bkt = 0;
while (__p)
{
- __node_type* __next = __p->_M_next();
+ __node_pointer __next = __p->_M_nxt;
std::size_t __bkt
= __hash_code_base::_M_bucket_index(__p, __bkt_count);
if (!__new_buckets[__bkt])
{
__p->_M_nxt = _M_before_begin._M_nxt;
_M_before_begin._M_nxt = __p;
- __new_buckets[__bkt] = &_M_before_begin;
+ __new_buckets[__bkt] = __before_begin_ptr;
if (__p->_M_nxt)
__new_buckets[__bbegin_bkt] = __p;
__bbegin_bkt = __bkt;
@@ -2130,18 +2155,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
_M_rehash_aux(size_type __bkt_count, false_type)
{
- __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
-
- __node_type* __p = _M_begin();
+ auto __new_buckets = _M_allocate_buckets(__bkt_count);
+ auto __before_begin_ptr =
+ __node_base_ptr_traits::pointer_to(_M_before_begin);
+ __node_pointer __p = _M_before_begin._M_nxt;
_M_before_begin._M_nxt = nullptr;
std::size_t __bbegin_bkt = 0;
std::size_t __prev_bkt = 0;
- __node_type* __prev_p = nullptr;
+ __node_pointer __prev_p{};
bool __check_bucket = false;
while (__p)
{
- __node_type* __next = __p->_M_next();
+ __node_pointer __next = __p->_M_nxt;
std::size_t __bkt
= __hash_code_base::_M_bucket_index(__p, __bkt_count);
@@ -2169,7 +2195,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (__prev_p->_M_nxt)
{
std::size_t __next_bkt
- = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+ = __hash_code_base::_M_bucket_index(__prev_p->_M_nxt,
__bkt_count);
if (__next_bkt != __prev_bkt)
__new_buckets[__next_bkt] = __prev_p;
@@ -2181,7 +2207,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
__p->_M_nxt = _M_before_begin._M_nxt;
_M_before_begin._M_nxt = __p;
- __new_buckets[__bkt] = &_M_before_begin;
+ __new_buckets[__bkt] = __before_begin_ptr;
if (__p->_M_nxt)
__new_buckets[__bbegin_bkt] = __p;
__bbegin_bkt = __bkt;
@@ -2200,7 +2226,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (__check_bucket && __prev_p->_M_nxt)
{
std::size_t __next_bkt
- = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+ = __hash_code_base::_M_bucket_index(__prev_p->_M_nxt,
__bkt_count);
if (__next_bkt != __prev_bkt)
__new_buckets[__next_bkt] = __prev_p;
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index ef120134914..1ad8193f595 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -52,7 +52,7 @@ namespace __detail
* @ingroup unordered_associative_containers
* @{
*/
- template<typename _Key, typename _Value,
+ template<typename _Key, typename _Value, typename _Alloc,
typename _ExtractKey, typename _Equal,
typename _H1, typename _H2, typename _Hash, typename _Traits>
struct _Hashtable_base;
@@ -107,24 +107,24 @@ namespace __detail
using __hashtable_alloc = _Hashtable_alloc<__node_alloc_type>;
using __node_alloc_traits =
typename __hashtable_alloc::__node_alloc_traits;
- using __node_type = typename __hashtable_alloc::__node_type;
+ using __node_pointer = typename __hashtable_alloc::__node_pointer;
public:
- _ReuseOrAllocNode(__node_type* __nodes, __hashtable_alloc& __h)
- : _M_nodes(__nodes), _M_h(__h) { }
+ _ReuseOrAllocNode(__node_pointer&& __nodes, __hashtable_alloc& __h)
+ : _M_nodes(std::move(__nodes)), _M_h(__h) { }
_ReuseOrAllocNode(const _ReuseOrAllocNode&) = delete;
~_ReuseOrAllocNode()
{ _M_h._M_deallocate_nodes(_M_nodes); }
template<typename _Arg>
- __node_type*
+ __node_pointer
operator()(_Arg&& __arg) const
{
if (_M_nodes)
{
- __node_type* __node = _M_nodes;
- _M_nodes = _M_nodes->_M_next();
+ __node_pointer __node = _M_nodes;
+ _M_nodes = _M_nodes->_M_nxt;
__node->_M_nxt = nullptr;
auto& __a = _M_h._M_node_allocator();
__node_alloc_traits::destroy(__a, __node->_M_valptr());
@@ -144,7 +144,7 @@ namespace __detail
}
private:
- mutable __node_type* _M_nodes;
+ mutable __node_pointer _M_nodes;
__hashtable_alloc& _M_h;
};
@@ -155,14 +155,14 @@ namespace __detail
{
private:
using __hashtable_alloc = _Hashtable_alloc<_NodeAlloc>;
- using __node_type = typename __hashtable_alloc::__node_type;
+ using __node_pointer = typename __hashtable_alloc::__node_pointer;
public:
_AllocNode(__hashtable_alloc& __h)
: _M_h(__h) { }
template<typename _Arg>
- __node_type*
+ __node_pointer
operator()(_Arg&& __arg) const
{ return _M_h._M_allocate_node(std::forward<_Arg>(__arg)); }
@@ -211,22 +211,27 @@ namespace __detail
* nodes also store a hash code. In some cases (e.g. strings) this
* may be a performance win.
*/
- struct _Hash_node_base
- {
- _Hash_node_base* _M_nxt;
+ template<typename _NodePtr>
+ struct _Hash_node_base
+ {
+ using __node_pointer = _NodePtr;
- _Hash_node_base() noexcept : _M_nxt() { }
+ __node_pointer _M_nxt;
- _Hash_node_base(_Hash_node_base* __next) noexcept : _M_nxt(__next) { }
- };
+ _Hash_node_base() noexcept : _M_nxt() { }
+
+ template<typename _Ptr>
+ _Hash_node_base(_Ptr&& __next) noexcept
+ : _M_nxt(std::forward<_Ptr>(__next)) { }
+ };
/**
* struct _Hash_node_value_base
*
* Node type with the value to store.
*/
- template<typename _Value>
- struct _Hash_node_value_base : _Hash_node_base
+ template<typename _NodePtr, typename _Value>
+ struct _Hash_node_value_base : _Hash_node_base<_NodePtr>
{
typedef _Value value_type;
@@ -252,7 +257,7 @@ namespace __detail
/**
* Primary template struct _Hash_node.
*/
- template<typename _Value, bool _Cache_hash_code>
+ template<typename _Ptr, typename _Value, bool _Cache_hash_code>
struct _Hash_node;
/**
@@ -260,84 +265,77 @@ namespace __detail
*
* Base class is __detail::_Hash_node_value_base.
*/
- template<typename _Value>
- struct _Hash_node<_Value, true> : _Hash_node_value_base<_Value>
- {
- std::size_t _M_hash_code;
-
- _Hash_node*
- _M_next() const noexcept
- { return static_cast<_Hash_node*>(this->_M_nxt); }
- };
+ template<typename _Ptr, typename _Value>
+ struct _Hash_node<_Ptr, _Value, true>
+ : _Hash_node_value_base<__ptr_rebind<_Ptr, _Hash_node<_Ptr, _Value, true>>,
+ _Value>
+ { std::size_t _M_hash_code; };
/**
* Specialization for nodes without caches, struct _Hash_node.
*
* Base class is __detail::_Hash_node_value_base.
*/
- template<typename _Value>
- struct _Hash_node<_Value, false> : _Hash_node_value_base<_Value>
- {
- _Hash_node*
- _M_next() const noexcept
- { return static_cast<_Hash_node*>(this->_M_nxt); }
- };
+ template<typename _Ptr, typename _Value>
+ struct _Hash_node<_Ptr, _Value, false>
+ : _Hash_node_value_base<__ptr_rebind<_Ptr, _Hash_node<_Ptr, _Value, false>>,
+ _Value>
+ { };
/// Base class for node iterators.
- template<typename _Value, bool _Cache_hash_code>
+ template<typename _NodePtr>
struct _Node_iterator_base
{
- using __node_type = _Hash_node<_Value, _Cache_hash_code>;
+ using __node_type = typename std::pointer_traits<_NodePtr>::element_type;
- __node_type* _M_cur;
+ _NodePtr _M_cur;
- _Node_iterator_base(__node_type* __p) noexcept
+ _Node_iterator_base(_NodePtr __p) noexcept
: _M_cur(__p) { }
+ _Node_iterator_base() noexcept
+ : _Node_iterator_base(nullptr) { }
void
_M_incr() noexcept
- { _M_cur = _M_cur->_M_next(); }
- };
+ { _M_cur = _M_cur->_M_nxt; }
- template<typename _Value, bool _Cache_hash_code>
- inline bool
- operator==(const _Node_iterator_base<_Value, _Cache_hash_code>& __x,
- const _Node_iterator_base<_Value, _Cache_hash_code >& __y)
- noexcept
- { return __x._M_cur == __y._M_cur; }
+ friend inline bool
+ operator==(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
+ noexcept
+ { return __x._M_cur == __y._M_cur; }
- template<typename _Value, bool _Cache_hash_code>
- inline bool
- operator!=(const _Node_iterator_base<_Value, _Cache_hash_code>& __x,
- const _Node_iterator_base<_Value, _Cache_hash_code>& __y)
- noexcept
- { return __x._M_cur != __y._M_cur; }
+ friend inline bool
+ operator!=(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
+ noexcept
+ { return __x._M_cur != __y._M_cur; }
+ };
/// Node iterators, used to iterate through all the hashtable.
- template<typename _Value, bool __constant_iterators, bool __cache>
+ template<typename _NodePtr, bool __constant_iterators>
struct _Node_iterator
- : public _Node_iterator_base<_Value, __cache>
+ : public _Node_iterator_base<_NodePtr>
{
private:
- using __base_type = _Node_iterator_base<_Value, __cache>;
+ using __base_type = _Node_iterator_base<_NodePtr>;
using __node_type = typename __base_type::__node_type;
public:
- typedef _Value value_type;
- typedef std::ptrdiff_t difference_type;
- typedef std::forward_iterator_tag iterator_category;
+ typedef typename __node_type::value_type value_type;
+ typedef typename std::pointer_traits<_NodePtr>::difference_type
+ difference_type;
+ typedef std::forward_iterator_tag iterator_category;
using pointer = typename std::conditional<__constant_iterators,
- const _Value*, _Value*>::type;
+ const value_type*, value_type*>::type;
using reference = typename std::conditional<__constant_iterators,
- const _Value&, _Value&>::type;
+ const value_type&, value_type&>::type;
_Node_iterator() noexcept
- : __base_type(0) { }
+ : __base_type(nullptr) { }
explicit
- _Node_iterator(__node_type* __p) noexcept
+ _Node_iterator(_NodePtr __p) noexcept
: __base_type(__p) { }
reference
@@ -365,31 +363,32 @@ namespace __detail
};
/// Node const_iterators, used to iterate through all the hashtable.
- template<typename _Value, bool __constant_iterators, bool __cache>
+ template<typename _NodePtr, bool __constant_iterators>
struct _Node_const_iterator
- : public _Node_iterator_base<_Value, __cache>
+ : public _Node_iterator_base<_NodePtr>
{
private:
- using __base_type = _Node_iterator_base<_Value, __cache>;
+ using __base_type = _Node_iterator_base<_NodePtr>;
using __node_type = typename __base_type::__node_type;
public:
- typedef _Value value_type;
- typedef std::ptrdiff_t difference_type;
- typedef std::forward_iterator_tag iterator_category;
+ typedef typename __node_type::value_type value_type;
+ typedef typename std::pointer_traits<_NodePtr>::difference_type
+ difference_type;
+ typedef std::forward_iterator_tag iterator_category;
- typedef const _Value* pointer;
- typedef const _Value& reference;
+ typedef const value_type* pointer;
+ typedef const value_type& reference;
_Node_const_iterator() noexcept
- : __base_type(0) { }
+ : __base_type(nullptr) { }
explicit
- _Node_const_iterator(__node_type* __p) noexcept
+ _Node_const_iterator(_NodePtr __p) noexcept
: __base_type(__p) { }
- _Node_const_iterator(const _Node_iterator<_Value, __constant_iterators,
- __cache>& __x) noexcept
+ _Node_const_iterator(const _Node_iterator<_NodePtr,
+ __constant_iterators>& __x) noexcept
: __base_type(__x._M_cur) { }
reference
@@ -662,17 +661,17 @@ namespace __detail
_H1, _H2, _Hash, _RehashPolicy, _Traits, true>
{
private:
- using __hashtable_base = __detail::_Hashtable_base<_Key, _Pair,
+ using __hashtable_base = __detail::_Hashtable_base<_Key, _Pair, _Alloc,
_Select1st,
_Equal, _H1, _H2, _Hash,
- _Traits>;
+ _Traits>;
using __hashtable = _Hashtable<_Key, _Pair, _Alloc,
_Select1st, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>;
using __hash_code = typename __hashtable_base::__hash_code;
- using __node_type = typename __hashtable_base::__node_type;
+ using __node_pointer = typename __hashtable_base::__node_pointer;
public:
using key_type = typename __hashtable_base::key_type;
@@ -706,7 +705,7 @@ namespace __detail
__hashtable* __h = static_cast<__hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
std::size_t __bkt = __h->_M_bucket_index(__k, __code);
- if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+ if (__node_pointer __node = __h->_M_find_node(__bkt, __k, __code))
return __node->_M_v().second;
typename __hashtable::_Scoped_node __node {
@@ -715,8 +714,8 @@ namespace __detail
std::tuple<const key_type&>(__k),
std::tuple<>()
};
- auto __pos
- = __h->_M_insert_unique_node(__k, __bkt, __code, __node._M_node);
+ auto __pos = __h->_M_insert_unique_node(__k, __bkt, __code,
+ std::move(__node._M_node));
__node._M_node = nullptr;
return __pos->second;
}
@@ -733,7 +732,7 @@ namespace __detail
__hashtable* __h = static_cast<__hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
std::size_t __bkt = __h->_M_bucket_index(__k, __code);
- if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+ if (__node_pointer __node = __h->_M_find_node(__bkt, __k, __code))
return __node->_M_v().second;
typename __hashtable::_Scoped_node __node {
@@ -742,8 +741,8 @@ namespace __detail
std::forward_as_tuple(std::move(__k)),
std::tuple<>()
};
- auto __pos
- = __h->_M_insert_unique_node(__k, __bkt, __code, __node._M_node);
+ auto __pos = __h->_M_insert_unique_node(__k, __bkt, __code,
+ std::move(__node._M_node));
__node._M_node = nullptr;
return __pos->second;
}
@@ -760,7 +759,7 @@ namespace __detail
__hashtable* __h = static_cast<__hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
std::size_t __bkt = __h->_M_bucket_index(__k, __code);
- __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
+ __node_pointer __p = __h->_M_find_node(__bkt, __k, __code);
if (!__p)
__throw_out_of_range(__N("_Map_base::at"));
@@ -779,7 +778,7 @@ namespace __detail
const __hashtable* __h = static_cast<const __hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
std::size_t __bkt = __h->_M_bucket_index(__k, __code);
- __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
+ __node_pointer __p = __h->_M_find_node(__bkt, __k, __code);
if (!__p)
__throw_out_of_range(__N("_Map_base::at"));
@@ -802,7 +801,8 @@ namespace __detail
_Equal, _H1, _H2, _Hash,
_RehashPolicy, _Traits>;
- using __hashtable_base = _Hashtable_base<_Key, _Value, _ExtractKey,
+ using __hashtable_base = _Hashtable_base<_Key, _Value, _Alloc,
+ _ExtractKey,
_Equal, _H1, _H2, _Hash,
_Traits>;
@@ -813,7 +813,7 @@ namespace __detail
using __unique_keys = typename __hashtable_base::__unique_keys;
using __ireturn_type = typename __hashtable_base::__ireturn_type;
- using __node_type = _Hash_node<_Value, _Traits::__hash_cached::value>;
+ using __node_type = typename __hashtable_base::__node_type;
using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
using __node_gen_type = _AllocNode<__node_alloc_type>;
@@ -948,7 +948,8 @@ namespace __detail
_Equal, _H1, _H2, _Hash,
_RehashPolicy, _Traits>;
- using __hashtable_base = _Hashtable_base<_Key, _Value, _ExtractKey,
+ using __hashtable_base = _Hashtable_base<_Key, _Value, _Alloc,
+ _ExtractKey,
_Equal, _H1, _H2, _Hash,
_Traits>;
@@ -1144,9 +1145,10 @@ namespace __detail
* Base class for local iterators, used to iterate within a bucket
* but not between buckets.
*/
- template<typename _Key, typename _Value, typename _ExtractKey,
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash,
- bool __cache_hash_code>
+ typename _NodePtr, bool __cache_hash_code>
struct _Local_iterator_base;
/**
@@ -1169,26 +1171,35 @@ namespace __detail
*
* Primary template is unused except as a hook for specializations.
*/
- template<typename _Key, typename _Value, typename _ExtractKey,
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash,
bool __cache_hash_code>
struct _Hash_code_base;
/// Specialization: ranged hash function, no caching hash codes. H1
/// and H2 are provided but ignored. We define a dummy hash code type.
- template<typename _Key, typename _Value, typename _ExtractKey,
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash>
- struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash, false>
+ struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
+ _Hash, false>
: private _Hashtable_ebo_helper<0, _ExtractKey>,
private _Hashtable_ebo_helper<1, _Hash>
{
private:
using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
using __ebo_hash = _Hashtable_ebo_helper<1, _Hash>;
+ using __pointer = typename std::allocator_traits<_Alloc>::pointer;
protected:
typedef void* __hash_code;
- typedef _Hash_node<_Value, false> __node_type;
+ typedef _Hash_node<__pointer, _Value, false> __node_type;
+ using __node_pointer = typename __node_type::__node_pointer;
+ using __node_ptr_arg_t = typename std::conditional<
+ std::__is_pointer<__node_pointer>::__value,
+ __node_pointer,
+ const __node_pointer&>::type;
// We need the default constructor for the local iterators and _Hashtable
// default constructor.
@@ -1208,17 +1219,17 @@ namespace __detail
{ return _M_ranged_hash()(__k, __bkt_count); }
std::size_t
- _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+ _M_bucket_index(__node_ptr_arg_t __p, std::size_t __bkt_count) const
noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>(),
(std::size_t)0)) )
{ return _M_ranged_hash()(_M_extract()(__p->_M_v()), __bkt_count); }
void
- _M_store_code(__node_type*, __hash_code) const
+ _M_store_code(__node_ptr_arg_t, __hash_code) const
{ }
void
- _M_copy_code(__node_type*, const __node_type*) const
+ _M_copy_code(__node_ptr_arg_t, __node_ptr_arg_t) const
{ }
void
@@ -1242,16 +1253,19 @@ namespace __detail
/// Specialization: ranged hash function, cache hash codes. This
/// combination is meaningless, so we provide only a declaration
/// and no definition.
- template<typename _Key, typename _Value, typename _ExtractKey,
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash>
- struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash, true>;
+ struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
+ _Hash, true>;
/// Specialization: hash function and range-hashing function, no
/// caching of hash codes.
/// Provides typedef and accessor required by C++ 11.
- template<typename _Key, typename _Value, typename _ExtractKey,
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
typename _H1, typename _H2>
- struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2,
+ struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
_Default_ranged_hash, false>
: private _Hashtable_ebo_helper<0, _ExtractKey>,
private _Hashtable_ebo_helper<1, _H1>,
@@ -1261,10 +1275,7 @@ namespace __detail
using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
using __ebo_h1 = _Hashtable_ebo_helper<1, _H1>;
using __ebo_h2 = _Hashtable_ebo_helper<2, _H2>;
-
- // Gives the local iterator implementation access to _M_bucket_index().
- friend struct _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2,
- _Default_ranged_hash, false>;
+ using __pointer = typename std::allocator_traits<_Alloc>::pointer;
public:
typedef _H1 hasher;
@@ -1275,7 +1286,17 @@ namespace __detail
protected:
typedef std::size_t __hash_code;
- typedef _Hash_node<_Value, false> __node_type;
+ typedef _Hash_node<__pointer, _Value, false> __node_type;
+ using __node_pointer = typename __node_type::__node_pointer;
+ using __node_ptr_arg_t = typename std::conditional<
+ std::__is_pointer<__node_pointer>::__value,
+ __node_pointer,
+ const __node_pointer&>::type;
+
+ // Gives the local iterator implementation access to _M_bucket_index().
+ friend struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+ _H1, _H2, _Default_ranged_hash,
+ __node_pointer, false>;
// We need the default constructor for the local iterators and _Hashtable
// default constructor.
@@ -1300,18 +1321,18 @@ namespace __detail
{ return _M_h2()(__c, __bkt_count); }
std::size_t
- _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+ _M_bucket_index(__node_ptr_arg_t __p, std::size_t __bkt_count) const
noexcept( noexcept(declval<const _H1&>()(declval<const _Key&>()))
&& noexcept(declval<const _H2&>()((__hash_code)0,
(std::size_t)0)) )
{ return _M_h2()(_M_h1()(_M_extract()(__p->_M_v())), __bkt_count); }
void
- _M_store_code(__node_type*, __hash_code) const
+ _M_store_code(__node_ptr_arg_t, __hash_code) const
{ }
void
- _M_copy_code(__node_type*, const __node_type*) const
+ _M_copy_code(__node_ptr_arg_t, __node_ptr_arg_t) const
{ }
void
@@ -1336,22 +1357,20 @@ namespace __detail
/// Specialization: hash function and range-hashing function,
/// caching hash codes. H is provided but ignored. Provides
/// typedef and accessor required by C++ 11.
- template<typename _Key, typename _Value, typename _ExtractKey,
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
typename _H1, typename _H2>
- struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2,
+ struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
_Default_ranged_hash, true>
: private _Hashtable_ebo_helper<0, _ExtractKey>,
private _Hashtable_ebo_helper<1, _H1>,
private _Hashtable_ebo_helper<2, _H2>
{
private:
- // Gives the local iterator implementation access to _M_h2().
- friend struct _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2,
- _Default_ranged_hash, true>;
-
using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
using __ebo_h1 = _Hashtable_ebo_helper<1, _H1>;
using __ebo_h2 = _Hashtable_ebo_helper<2, _H2>;
+ using __pointer = typename std::allocator_traits<_Alloc>::pointer;
public:
typedef _H1 hasher;
@@ -1362,7 +1381,17 @@ namespace __detail
protected:
typedef std::size_t __hash_code;
- typedef _Hash_node<_Value, true> __node_type;
+ typedef _Hash_node<__pointer, _Value, true> __node_type;
+ using __node_pointer = typename __node_type::__node_pointer;
+ using __node_ptr_arg_t = typename std::conditional<
+ std::__is_pointer<__node_pointer>::__value,
+ __node_pointer,
+ const __node_pointer&>::type;
+
+ // Gives the local iterator implementation access to _M_h2().
+ friend struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+ _H1, _H2, _Default_ranged_hash,
+ __node_pointer, true>;
// We need the default constructor for _Hashtable default constructor.
_Hash_code_base() = default;
@@ -1385,17 +1414,17 @@ namespace __detail
{ return _M_h2()(__c, __bkt_count); }
std::size_t
- _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+ _M_bucket_index(__node_ptr_arg_t __p, std::size_t __bkt_count) const
noexcept( noexcept(declval<const _H2&>()((__hash_code)0,
(std::size_t)0)) )
{ return _M_h2()(__p->_M_hash_code, __bkt_count); }
void
- _M_store_code(__node_type* __n, __hash_code __c) const
+ _M_store_code(__node_ptr_arg_t __n, __hash_code __c) const
{ __n->_M_hash_code = __c; }
void
- _M_copy_code(__node_type* __to, const __node_type* __from) const
+ _M_copy_code(__node_ptr_arg_t __to, __node_ptr_arg_t __from) const
{ __to->_M_hash_code = __from->_M_hash_code; }
void
@@ -1418,46 +1447,45 @@ namespace __detail
};
/// Partial specialization used when nodes contain a cached hash code.
- template<typename _Key, typename _Value, typename _ExtractKey,
- typename _H1, typename _H2, typename _Hash>
- struct _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, true>
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
+ typename _H1, typename _H2, typename _Hash,
+ typename _NodePtr>
+ struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+ _H1, _H2, _Hash, _NodePtr, true>
: private _Hashtable_ebo_helper<0, _H2>
+ , public _Node_iterator_base<_NodePtr>
{
protected:
using __base_type = _Hashtable_ebo_helper<0, _H2>;
- using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
+ using __base_node_iter = _Node_iterator_base<_NodePtr>;
+ using __hash_code_base = _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey,
_H1, _H2, _Hash, true>;
_Local_iterator_base() = default;
- _Local_iterator_base(const __hash_code_base& __base,
- _Hash_node<_Value, true>* __p,
+ _Local_iterator_base(const __hash_code_base& __base, _NodePtr __p,
std::size_t __bkt, std::size_t __bkt_count)
- : __base_type(__base._M_h2()),
- _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
+ : __base_type(__base._M_h2()), __base_node_iter(__p)
+ , _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
void
_M_incr()
{
- _M_cur = _M_cur->_M_next();
- if (_M_cur)
+ __base_node_iter::_M_incr();
+ if (this->_M_cur)
{
std::size_t __bkt
- = __base_type::_M_get()(_M_cur->_M_hash_code,
- _M_bucket_count);
+ = __base_type::_M_get()(this->_M_cur->_M_hash_code,
+ _M_bucket_count);
if (__bkt != _M_bucket)
- _M_cur = nullptr;
+ this->_M_cur = nullptr;
}
}
- _Hash_node<_Value, true>* _M_cur;
std::size_t _M_bucket;
std::size_t _M_bucket_count;
public:
- const void*
- _M_curr() const { return _M_cur; } // for equality ops
-
std::size_t
_M_get_bucket() const { return _M_bucket; } // for debug mode
};
@@ -1493,29 +1521,33 @@ namespace __detail
_M_h() const { return reinterpret_cast<const _Tp*>(this); }
};
- template<typename _Key, typename _Value, typename _ExtractKey,
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash>
using __hash_code_for_local_iter
- = _Hash_code_storage<_Hash_code_base<_Key, _Value, _ExtractKey,
+ = _Hash_code_storage<_Hash_code_base<_Key, _Value, _Alloc, _ExtractKey,
_H1, _H2, _Hash, false>>;
// Partial specialization used when hash codes are not cached
- template<typename _Key, typename _Value, typename _ExtractKey,
- typename _H1, typename _H2, typename _Hash>
- struct _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, false>
- : __hash_code_for_local_iter<_Key, _Value, _ExtractKey, _H1, _H2, _Hash>
+ template<typename _Key, typename _Value, typename _Alloc,
+ typename _ExtractKey, typename _H1, typename _H2,
+ typename _Hash, typename _NodePtr>
+ struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+ _H1, _H2, _Hash, _NodePtr, false>
+ : __hash_code_for_local_iter<_Key, _Value, _Alloc, _ExtractKey,
+ _H1, _H2, _Hash>
+ , public _Node_iterator_base<_NodePtr>
{
protected:
- using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, false>;
+ using __hash_code_base = _Hash_code_base<_Key, _Value, _Alloc,
+ _ExtractKey, _H1, _H2, _Hash, false>;
+ using __base_node_iter = _Node_iterator_base<_NodePtr>;
_Local_iterator_base() : _M_bucket_count(-1) { }
- _Local_iterator_base(const __hash_code_base& __base,
- _Hash_node<_Value, false>* __p,
+ _Local_iterator_base(const __hash_code_base& __base, _NodePtr __p,
std::size_t __bkt, std::size_t __bkt_count)
- : _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+ : __base_node_iter(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
{ _M_init(__base); }
~_Local_iterator_base()
@@ -1525,7 +1557,7 @@ namespace __detail
}
_Local_iterator_base(const _Local_iterator_base& __iter)
- : _M_cur(__iter._M_cur), _M_bucket(__iter._M_bucket),
+ : __base_node_iter(__iter._M_cur), _M_bucket(__iter._M_bucket),
_M_bucket_count(__iter._M_bucket_count)
{
if (_M_bucket_count != -1)
@@ -1537,7 +1569,7 @@ namespace __detail
{
if (_M_bucket_count != -1)
_M_destroy();
- _M_cur = __iter._M_cur;
+ this->_M_cur = __iter._M_cur;
_M_bucket = __iter._M_bucket;
_M_bucket_count = __iter._M_bucket_count;
if (_M_bucket_count != -1)
@@ -1548,17 +1580,16 @@ namespace __detail
void
_M_incr()
{
- _M_cur = _M_cur->_M_next();
- if (_M_cur)
+ __base_node_iter::_M_incr();
+ if (this->_M_cur)
{
- std::size_t __bkt = this->_M_h()->_M_bucket_index(_M_cur,
+ std::size_t __bkt = this->_M_h()->_M_bucket_index(this->_M_cur,
_M_bucket_count);
if (__bkt != _M_bucket)
- _M_cur = nullptr;
+ this->_M_cur = nullptr;
}
}
- _Hash_node<_Value, false>* _M_cur;
std::size_t _M_bucket;
std::size_t _M_bucket_count;
@@ -1570,42 +1601,22 @@ namespace __detail
_M_destroy() { this->_M_h()->~__hash_code_base(); }
public:
- const void*
- _M_curr() const { return _M_cur; } // for equality ops and debug mode
-
std::size_t
_M_get_bucket() const { return _M_bucket; } // for debug mode
};
- template<typename _Key, typename _Value, typename _ExtractKey,
- typename _H1, typename _H2, typename _Hash, bool __cache>
- inline bool
- operator==(const _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, __cache>& __x,
- const _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, __cache>& __y)
- { return __x._M_curr() == __y._M_curr(); }
-
- template<typename _Key, typename _Value, typename _ExtractKey,
- typename _H1, typename _H2, typename _Hash, bool __cache>
- inline bool
- operator!=(const _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, __cache>& __x,
- const _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, __cache>& __y)
- { return __x._M_curr() != __y._M_curr(); }
-
/// local iterators
- template<typename _Key, typename _Value, typename _ExtractKey,
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash,
- bool __constant_iterators, bool __cache>
+ typename _NodePtr, bool __constant_iterators, bool __cache>
struct _Local_iterator
- : public _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, __cache>
+ : public _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+ _H1, _H2, _Hash, _NodePtr, __cache>
{
private:
- using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, __cache>;
+ using __base_type = _Local_iterator_base<_Key, _Value, _Alloc,
+ _ExtractKey, _H1, _H2, _Hash, _NodePtr, __cache>;
using __hash_code_base = typename __base_type::__hash_code_base;
public:
typedef _Value value_type;
@@ -1621,7 +1632,7 @@ namespace __detail
_Local_iterator() = default;
_Local_iterator(const __hash_code_base& __base,
- _Hash_node<_Value, __cache>* __n,
+ _NodePtr __n,
std::size_t __bkt, std::size_t __bkt_count)
: __base_type(__base, __n, __bkt, __bkt_count)
{ }
@@ -1651,16 +1662,17 @@ namespace __detail
};
/// local const_iterators
- template<typename _Key, typename _Value, typename _ExtractKey,
+ template<typename _Key, typename _Value,
+ typename _Alloc, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash,
- bool __constant_iterators, bool __cache>
+ typename _NodePtr, bool __constant_iterators, bool __cache>
struct _Local_const_iterator
- : public _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, __cache>
+ : public _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+ _H1, _H2, _Hash, _NodePtr, __cache>
{
private:
- using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
- _H1, _H2, _Hash, __cache>;
+ using __base_type = _Local_iterator_base<_Key, _Value, _Alloc,
+ _ExtractKey, _H1, _H2, _Hash, _NodePtr, __cache>;
using __hash_code_base = typename __base_type::__hash_code_base;
public:
@@ -1673,14 +1685,15 @@ namespace __detail
_Local_const_iterator() = default;
_Local_const_iterator(const __hash_code_base& __base,
- _Hash_node<_Value, __cache>* __n,
+ _NodePtr __n,
std::size_t __bkt, std::size_t __bkt_count)
: __base_type(__base, __n, __bkt, __bkt_count)
{ }
- _Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
+ _Local_const_iterator(const _Local_iterator<_Key, _Value,
+ _Alloc, _ExtractKey,
_H1, _H2, _Hash,
- __constant_iterators,
+ _NodePtr, __constant_iterators,
__cache>& __x)
: __base_type(__x)
{ }
@@ -1719,13 +1732,13 @@ namespace __detail
* - __detail::_Hash_code_base
* - __detail::_Hashtable_ebo_helper
*/
- template<typename _Key, typename _Value,
+ template<typename _Key, typename _Value, typename _Alloc,
typename _ExtractKey, typename _Equal,
typename _H1, typename _H2, typename _Hash, typename _Traits>
- struct _Hashtable_base
- : public _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash,
- _Traits::__hash_cached::value>,
- private _Hashtable_ebo_helper<0, _Equal>
+ struct _Hashtable_base
+ : public _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2, _Hash,
+ _Traits::__hash_cached::value>,
+ private _Hashtable_ebo_helper<0, _Equal>
{
public:
typedef _Key key_type;
@@ -1739,31 +1752,29 @@ namespace __detail
using __constant_iterators = typename __traits_type::__constant_iterators;
using __unique_keys = typename __traits_type::__unique_keys;
- using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
+ using __hash_code_base = _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey,
_H1, _H2, _Hash,
__hash_cached::value>;
using __hash_code = typename __hash_code_base::__hash_code;
using __node_type = typename __hash_code_base::__node_type;
+ using __node_pointer = typename __hash_code_base::__node_pointer;
+ using __node_ptr_arg_t = typename __hash_code_base::__node_ptr_arg_t;
- using iterator = __detail::_Node_iterator<value_type,
- __constant_iterators::value,
- __hash_cached::value>;
+ using iterator = __detail::_Node_iterator<__node_pointer,
+ __constant_iterators::value>;
- using const_iterator = __detail::_Node_const_iterator<value_type,
- __constant_iterators::value,
- __hash_cached::value>;
+ using const_iterator = __detail::_Node_const_iterator<__node_pointer,
+ __constant_iterators::value>;
- using local_iterator = __detail::_Local_iterator<key_type, value_type,
- _ExtractKey, _H1, _H2, _Hash,
- __constant_iterators::value,
- __hash_cached::value>;
+ using local_iterator = __detail::_Local_iterator<key_type, value_type, _Alloc,
+ _ExtractKey, _H1, _H2, _Hash, __node_pointer,
+ __constant_iterators::value, __hash_cached::value>;
- using const_local_iterator = __detail::_Local_const_iterator<key_type,
- value_type,
- _ExtractKey, _H1, _H2, _Hash,
- __constant_iterators::value,
- __hash_cached::value>;
+ using const_local_iterator = __detail::_Local_const_iterator<
+ key_type, value_type, _Alloc,
+ _ExtractKey, _H1, _H2, _Hash, __node_pointer,
+ __constant_iterators::value, __hash_cached::value>;
using __ireturn_type = typename std::conditional<__unique_keys::value,
std::pair<iterator, bool>,
@@ -1774,17 +1785,17 @@ namespace __detail
template<typename _NodeT>
struct _Equal_hash_code
{
- static bool
- _S_equals(__hash_code, const _NodeT&)
- { return true; }
+ static bool
+ _S_equals(__hash_code, const _NodeT&)
+ { return true; }
};
- template<typename _Ptr2>
- struct _Equal_hash_code<_Hash_node<_Ptr2, true>>
+ template<typename _Ptr2, typename _Value2>
+ struct _Equal_hash_code<_Hash_node<_Ptr2, _Value2, true>>
{
- static bool
- _S_equals(__hash_code __c, const _Hash_node<_Ptr2, true>& __n)
- { return __c == __n._M_hash_code; }
+ static bool
+ _S_equals(__hash_code __c, const _Hash_node<_Ptr2, _Value2, true>& __n)
+ { return __c == __n._M_hash_code; }
};
protected:
@@ -1795,7 +1806,7 @@ namespace __detail
{ }
bool
- _M_equals(const _Key& __k, __hash_code __c, __node_type* __n) const
+ _M_equals(const _Key& __k, __hash_code __c, __node_ptr_arg_t __n) const
{
static_assert(__is_invocable<const _Equal&, const _Key&, const _Key&>{},
"key equality predicate must be invocable with two arguments of "
@@ -1854,8 +1865,8 @@ namespace __detail
_H1, _H2, _Hash, _RehashPolicy, _Traits, true>::
_M_equal(const __hashtable& __other) const
{
- using __node_base = typename __hashtable::__node_base;
- using __node_type = typename __hashtable::__node_type;
+ using __bucket_type = typename __hashtable::__bucket_type;
+ using __node_pointer = typename __hashtable::__node_pointer;
const __hashtable* __this = static_cast<const __hashtable*>(this);
if (__this->size() != __other.size())
return false;
@@ -1863,18 +1874,17 @@ namespace __detail
for (auto __itx = __this->begin(); __itx != __this->end(); ++__itx)
{
std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
- __node_base* __prev_n = __other._M_buckets[__ybkt];
+ __bucket_type __prev_n = __other._M_buckets[__ybkt];
if (!__prev_n)
return false;
- for (__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);;
- __n = __n->_M_next())
+ for (__node_pointer __n = __prev_n->_M_nxt;; __n = __n->_M_nxt)
{
if (__n->_M_v() == *__itx)
break;
if (!__n->_M_nxt
- || __other._M_bucket_index(__n->_M_next()) != __ybkt)
+ || __other._M_bucket_index(__n->_M_nxt) != __ybkt)
return false;
}
}
@@ -1906,8 +1916,8 @@ namespace __detail
_H1, _H2, _Hash, _RehashPolicy, _Traits, false>::
_M_equal(const __hashtable& __other) const
{
- using __node_base = typename __hashtable::__node_base;
- using __node_type = typename __hashtable::__node_type;
+ using __bucket_type = typename __hashtable::__bucket_type;
+ using __node_pointer = typename __hashtable::__node_pointer;
const __hashtable* __this = static_cast<const __hashtable*>(this);
if (__this->size() != __other.size())
return false;
@@ -1923,19 +1933,19 @@ namespace __detail
++__x_count;
std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
- __node_base* __y_prev_n = __other._M_buckets[__ybkt];
+ __bucket_type __y_prev_n = __other._M_buckets[__ybkt];
if (!__y_prev_n)
return false;
- __node_type* __y_n = static_cast<__node_type*>(__y_prev_n->_M_nxt);
- for (;; __y_n = __y_n->_M_next())
+ __node_pointer __y_n = __y_prev_n->_M_nxt;
+ for (;; __y_n = __y_n->_M_nxt)
{
if (__this->key_eq()(_ExtractKey()(__y_n->_M_v()),
_ExtractKey()(*__itx)))
break;
if (!__y_n->_M_nxt
- || __other._M_bucket_index(__y_n->_M_next()) != __ybkt)
+ || __other._M_bucket_index(__y_n->_M_nxt) != __ybkt)
return false;
}
@@ -1973,11 +1983,13 @@ namespace __detail
using __value_alloc_traits = typename __node_alloc_traits::template
rebind_traits<typename __node_type::value_type>;
- using __node_base = __detail::_Hash_node_base;
- using __bucket_type = __node_base*;
+ using __node_pointer = typename __node_alloc_traits::pointer;
+ using __node_base = __detail::_Hash_node_base<__node_pointer>;
+ using __bucket_type = __ptr_rebind<__node_pointer, __node_base>;
using __bucket_alloc_type =
__alloc_rebind<__node_alloc_type, __bucket_type>;
using __bucket_alloc_traits = std::allocator_traits<__bucket_alloc_type>;
+ using __bucket_pointer = typename __bucket_alloc_traits::pointer;
_Hashtable_alloc() = default;
_Hashtable_alloc(const _Hashtable_alloc&) = default;
@@ -1998,27 +2010,27 @@ namespace __detail
// Allocate a node and construct an element within it.
template<typename... _Args>
- __node_type*
+ __node_pointer
_M_allocate_node(_Args&&... __args);
// Destroy the element within a node and deallocate the node.
void
- _M_deallocate_node(__node_type* __n);
+ _M_deallocate_node(__node_pointer __n);
// Deallocate a node.
void
- _M_deallocate_node_ptr(__node_type* __n);
+ _M_deallocate_node_ptr(__node_pointer __n);
// Deallocate the linked list of nodes pointed to by __n.
// The elements within the nodes are destroyed.
void
- _M_deallocate_nodes(__node_type* __n);
+ _M_deallocate_nodes(__node_pointer __n);
- __bucket_type*
+ __bucket_pointer
_M_allocate_buckets(std::size_t __bkt_count);
void
- _M_deallocate_buckets(__bucket_type*, std::size_t __bkt_count);
+ _M_deallocate_buckets(__bucket_pointer, std::size_t __bkt_count);
};
// Definitions of class template _Hashtable_alloc's out-of-line member
@@ -2027,17 +2039,16 @@ namespace __detail
template<typename... _Args>
auto
_Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
- -> __node_type*
+ -> __node_pointer
{
auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
- __node_type* __n = std::__to_address(__nptr);
__try
{
- ::new ((void*)__n) __node_type;
+ ::new ((void*)std::__to_address(__nptr)) __node_type;
__node_alloc_traits::construct(_M_node_allocator(),
- __n->_M_valptr(),
+ __nptr->_M_valptr(),
std::forward<_Args>(__args)...);
- return __n;
+ return __nptr;
}
__catch(...)
{
@@ -2048,55 +2059,51 @@ namespace __detail
template<typename _NodeAlloc>
void
- _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_type* __n)
+ _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_pointer __nptr)
{
- __node_alloc_traits::destroy(_M_node_allocator(), __n->_M_valptr());
- _M_deallocate_node_ptr(__n);
+ __node_alloc_traits::destroy(_M_node_allocator(), __nptr->_M_valptr());
+ _M_deallocate_node_ptr(__nptr);
}
template<typename _NodeAlloc>
void
- _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_type* __n)
+ _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_pointer __nptr)
{
- typedef typename __node_alloc_traits::pointer _Ptr;
- auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n);
- __n->~__node_type();
- __node_alloc_traits::deallocate(_M_node_allocator(), __ptr, 1);
+ __nptr->~__node_type();
+ __node_alloc_traits::deallocate(_M_node_allocator(), __nptr, 1);
}
template<typename _NodeAlloc>
void
- _Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_type* __n)
+ _Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_pointer __nptr)
{
- while (__n)
+ while (__nptr)
{
- __node_type* __tmp = __n;
- __n = __n->_M_next();
+ __node_pointer __tmp = __nptr;
+ __nptr = __nptr->_M_nxt;
_M_deallocate_node(__tmp);
}
}
template<typename _NodeAlloc>
- typename _Hashtable_alloc<_NodeAlloc>::__bucket_type*
+ auto
_Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __bkt_count)
+ -> __bucket_pointer
{
__bucket_alloc_type __alloc(_M_node_allocator());
auto __ptr = __bucket_alloc_traits::allocate(__alloc, __bkt_count);
- __bucket_type* __p = std::__to_address(__ptr);
- __builtin_memset(__p, 0, __bkt_count * sizeof(__bucket_type));
- return __p;
+ std::fill_n(__ptr, __bkt_count, nullptr);
+ return __ptr;
}
template<typename _NodeAlloc>
void
- _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts,
+ _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_pointer __bkts,
std::size_t __bkt_count)
{
- typedef typename __bucket_alloc_traits::pointer _Ptr;
- auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
__bucket_alloc_type __alloc(_M_node_allocator());
- __bucket_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
+ __bucket_alloc_traits::deallocate(__alloc, __bkts, __bkt_count);
}
//@} hashtable-detail
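
To make the two recurring idioms in the hashtable_policy.h changes above easier to follow, here is a small standalone sketch. It is not part of the patch, and the names node_ptr_arg_t, FancyPtr and zero_buckets are mine. It shows the std::conditional selection that passes raw node pointers by value but fancy pointers by const reference, and why _M_allocate_buckets switches from __builtin_memset to std::fill_n once the bucket elements may be class-type pointers:

#include <algorithm>
#include <cstddef>
#include <type_traits>

// Pass raw node pointers by value, class-type (fancy) pointers by const&.
template<typename NodePtr>
  using node_ptr_arg_t = typename std::conditional<
    std::is_pointer<NodePtr>::value,
    NodePtr,
    const NodePtr&>::type;

// Hypothetical stand-in for an allocator's fancy pointer type.
struct FancyPtr
{
  void* raw = nullptr;
  FancyPtr& operator=(std::nullptr_t) { raw = nullptr; return *this; }
};

static_assert(std::is_same<node_ptr_arg_t<int*>, int*>::value,
              "raw pointers go by value");
static_assert(std::is_same<node_ptr_arg_t<FancyPtr>, const FancyPtr&>::value,
              "fancy pointers go by const reference");

// memset cannot zero an array of class-type pointers; std::fill_n assigns
// nullptr through each element instead, relying on the fancy pointer being
// assignable from nullptr (one of the stated prerequisites).
void
zero_buckets(FancyPtr* buckets, std::size_t count)
{ std::fill_n(buckets, count, nullptr); }

int main()
{
  FancyPtr buckets[4];
  zero_buckets(buckets, 4);
}
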
diff --git a/libstdc++-v3/include/debug/unordered_map b/libstdc++-v3/include/debug/unordered_map
index 17fbba3aade..0635e9e8100 100644
--- a/libstdc++-v3/include/debug/unordered_map
+++ b/libstdc++-v3/include/debug/unordered_map
@@ -621,7 +621,7 @@ namespace __debug
[__victim](_Base_const_iterator __it) { return __it == __victim; });
this->_M_invalidate_local_if(
[__victim](_Base_const_local_iterator __it)
- { return __it._M_curr() == __victim._M_cur; });
+ { return __it == __victim; });
}
_Base_iterator
@@ -1228,7 +1228,7 @@ namespace __debug
[__victim](_Base_const_iterator __it) { return __it == __victim; });
this->_M_invalidate_local_if(
[__victim](_Base_const_local_iterator __it)
- { return __it._M_curr() == __victim._M_cur; });
+ { return __it == __victim; });
}
_Base_iterator
diff --git a/libstdc++-v3/include/debug/unordered_set b/libstdc++-v3/include/debug/unordered_set
index 4d30852186c..d1ebea98e8a 100644
--- a/libstdc++-v3/include/debug/unordered_set
+++ b/libstdc++-v3/include/debug/unordered_set
@@ -506,7 +506,7 @@ namespace __debug
[__victim](_Base_const_iterator __it) { return __it == __victim; });
this->_M_invalidate_local_if(
[__victim](_Base_const_local_iterator __it)
- { return __it._M_curr() == __victim._M_cur; });
+ { return __it == __victim; });
}
_Base_iterator
@@ -1067,7 +1067,7 @@ namespace __debug
[__victim](_Base_const_iterator __it) { return __it == __victim; });
this->_M_invalidate_local_if(
[__victim](_Base_const_local_iterator __it)
- { return __it._M_curr() == __victim._M_cur; });
+ { return __it == __victim; });
}
_Base_iterator
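
The debug-container hunks above work because _Local_iterator_base now inherits from _Node_iterator_base, whose hidden-friend operator== compares the stored node pointers, so the old _M_curr() accessor is no longer needed and a local iterator can be compared with a node iterator directly. A minimal sketch of that pattern, with my own names rather than the libstdc++ types:

#include <cassert>

// A common base holds the node pointer and defines hidden-friend
// comparisons; any two iterators deriving from it -- here a container
// iterator and a local (bucket) iterator -- compare directly.
template<typename NodePtr>
  struct node_iterator_base
  {
    NodePtr cur = nullptr;

    node_iterator_base() = default;
    explicit node_iterator_base(NodePtr p) : cur(p) { }

    friend bool
    operator==(const node_iterator_base& x, const node_iterator_base& y)
    { return x.cur == y.cur; }

    friend bool
    operator!=(const node_iterator_base& x, const node_iterator_base& y)
    { return !(x == y); }
  };

template<typename NodePtr>
  struct node_iterator : node_iterator_base<NodePtr>
  { using node_iterator_base<NodePtr>::node_iterator_base; };

template<typename NodePtr>
  struct local_iterator : node_iterator_base<NodePtr>
  { using node_iterator_base<NodePtr>::node_iterator_base; };

int main()
{
  int node = 42;
  node_iterator<int*> it(&node);
  local_iterator<int*> lit(&node);
  assert(it == lit); // found through the common base's hidden friends
}
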
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..e9d7ada7151
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+ std::size_t
+ operator()(const T& t) const noexcept
+ { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_map<T, int, H, E,
+ CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+ typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+ typedef std::unordered_map<T, int, H, E, alloc_type> test_type;
+ test_type v;
+ v.insert({ T(), 0 });
+ VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+ test01();
+}
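
For context, the ext_ptr.cc tests instantiate the containers with __gnu_test::CustomPointerAlloc, i.e. an allocator whose pointer type is a class rather than T*. Roughly, such an allocator looks like the hypothetical sketch below (my own minimal version, not the real testsuite class); it only demonstrates the allocator_traits round trip through a class-type pointer, not the full iterator interface the containers also need:

#include <cassert>
#include <cstddef>
#include <memory>

// Hypothetical minimal fancy pointer; the real CustomPointerAlloc pointer
// type is richer (it also behaves as a random access iterator).
template<typename T>
  struct Ptr
  {
    T* value = nullptr;

    Ptr() = default;
    Ptr(std::nullptr_t) { }
    explicit Ptr(T* p) : value(p) { }

    T& operator*() const { return *value; }
    T* operator->() const { return value; }
    explicit operator bool() const { return value != nullptr; }

    // Needed so std::pointer_traits<Ptr<T>>::pointer_to works.
    static Ptr pointer_to(T& t) { return Ptr(std::addressof(t)); }
  };

template<typename T>
  struct FancyAlloc
  {
    using value_type = T;
    using pointer = Ptr<T>;

    FancyAlloc() = default;
    template<typename U> FancyAlloc(const FancyAlloc<U>&) { }

    pointer allocate(std::size_t n)
    { return pointer(std::allocator<T>().allocate(n)); }

    void deallocate(pointer p, std::size_t n)
    { std::allocator<T>().deallocate(p.operator->(), n); }

    friend bool operator==(const FancyAlloc&, const FancyAlloc&) { return true; }
    friend bool operator!=(const FancyAlloc&, const FancyAlloc&) { return false; }
  };

int main()
{
  using traits = std::allocator_traits<FancyAlloc<int>>;
  FancyAlloc<int> a;
  traits::pointer p = traits::allocate(a, 1); // p is Ptr<int>, not int*
  traits::construct(a, std::addressof(*p), 7);
  assert(*p == 7);
  traits::destroy(a, std::addressof(*p));
  traits::deallocate(a, p, 1);
}
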
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..4a895a6302c
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+ std::size_t
+ operator()(const T& t) const noexcept
+ { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multimap<T, int, H, E,
+ CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+ typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+ typedef std::unordered_multimap<T, int, H, E, alloc_type> test_type;
+ test_type v;
+ v.insert({ T(), 0 });
+ VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+ test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..36b5e10cc7b
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
@@ -0,0 +1,56 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_set>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+ std::size_t
+ operator()(const T& t) const noexcept
+ { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multiset<T, H, E, CustomPointerAlloc<T>>;
+
+void test01()
+{
+ typedef CustomPointerAlloc<T> alloc_type;
+ typedef std::unordered_multiset<T, H, E, alloc_type> test_type;
+ test_type v;
+ v.insert(T());
+ VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+ test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
index f6b908ac03e..479104709fb 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
@@ -15,10 +15,7 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
-// This test fails to compile since C++17 (see xfail-if below) so we can only
-// do a "run" test for C++11 and C++14, and a "compile" test for C++17 and up.
-// { dg-do run { target { c++11_only || c++14_only } } }
-// { dg-do compile { target c++17 } }
+// { dg-do run { target { c++11 } } }
#include <unordered_set>
#include <memory>
@@ -26,15 +23,22 @@
#include <testsuite_allocator.h>
struct T { int i; };
-bool operator==(const T& l, const T& r) { return l.i == r.i; }
-struct H { std::size_t operator()(const T& t) const noexcept { return t.i; }
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+ std::size_t
+ operator()(const T& t) const noexcept
+ { return t.i; }
};
-struct E : std::equal_to<T> { };
+
+struct E : std::equal_to<T>
+{ };
using __gnu_test::CustomPointerAlloc;
-// { dg-xfail-if "node reinsertion assumes raw pointers" { c++17 } }
-// TODO when removing this xfail change the test back to "dg-do run".
template class std::unordered_set<T, H, E, CustomPointerAlloc<T>>;
void test01()