#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H_include_area

#include __TBB_STD_SWAP_HEADER

#if __TBB_INITIALIZER_LISTS_PRESENT
#include <initializer_list>
#endif
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    // ... (diagnostic support headers)
#endif
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
    // ... (tuple support for piecewise construction)
#endif

namespace tbb {
namespace interface5 {
    // Forward declaration
    template<typename Key, typename T,
             typename HashCompare = tbb_hash_compare<Key>,
             typename A = tbb_allocator<std::pair<const Key, T> > >
    class concurrent_hash_map;
        // hash_map_base: segment table geometry
        static size_type const embedded_block = 1;
        static size_type const embedded_buckets = 1<<embedded_block;
        static size_type const first_block = 8;
        static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
        //! Segment pointers table type
        typedef segment_ptr_t segments_table_t[pointers_per_table];

        //! Zero segment (embedded buckets)
        bucket my_embedded_segment[embedded_buckets];

#if __TBB_STATISTICS
        atomic<unsigned> my_info_resizes;          // concurrent ones
        mutable atomic<unsigned> my_info_restarts; // race collisions
        atomic<unsigned> my_info_rehashes;         // invocations of rehash_bucket
#endif
        // inside the hash_map_base() constructor:
        std::memset(my_table, 0, sizeof(my_table));
        std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment));
        for( size_type i = 0; i < embedded_block; i++ ) // fill the table
            my_table[i] = my_embedded_segment + segment_base(i);
        my_mask = embedded_buckets - 1;
        __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
#if __TBB_STATISTICS
        my_info_restarts = 0; // race collisions
        my_info_rehashes = 0; // invocations of rehash_bucket
#endif
        static segment_index_t segment_index_of( size_type index ) { return segment_index_t( __TBB_Log2( index|1 ) ); }
        static segment_index_t segment_base( segment_index_t k ) { return (segment_index_t(1)<<k & ~segment_index_t(1)); }
        static size_type segment_size( segment_index_t k ) { return size_type(1)<<k; } // fake value for k==0
        static bool is_valid( void *ptr ) { return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63); }
        //! Initialize buckets
        static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
            if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
            else for(size_type i = 0; i < sz; i++, ptr++) {
                *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
                ptr->node_list = rehash_req;
            }
        }
        ~enable_segment_failsafe() {
            if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        }
        //! Enable segment
        template<typename Allocator>
        void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
            typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
            typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
            bucket_allocator_type bucket_allocator(allocator);
            size_type sz;
            __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
            if( k >= first_block ) {
                sz = segment_size( k );
                segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
                init_buckets( ptr, sz, is_initial );
                itt_hide_store_word( my_table[k], ptr );
            } else { // the first block
                __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
                sz = segment_size( first_block );
                segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
                init_buckets( ptr, sz - embedded_buckets, is_initial );
                ptr -= segment_base(embedded_block);
                for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
                    itt_hide_store_word( my_table[i], ptr + segment_base(i) );
            }
            itt_store_word_with_release( my_mask, sz-1 );
        }
        //! Deallocate the segment (or the pointers to the embedded buckets)
        template<typename Allocator>
        void delete_segment(segment_index_t s, const Allocator& allocator) {
            typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
            typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
            bucket_allocator_type bucket_allocator(allocator);
            segment_ptr_t buckets_ptr = my_table[s];
            size_type sz = segment_size( s ? s : 1 );

            if( s >= first_block) // the first segment or the next
                bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
            else if( s == embedded_block && embedded_block != first_block )
                bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
                                                    segment_size(first_block) - embedded_buckets);
            if( s >= embedded_block ) my_table[s] = 0;
        }
        //! Get bucket by (masked) hashcode
        bucket *get_bucket( hashcode_t h ) const {
            segment_index_t s = segment_index_of( h );
            h -= segment_base(s);
            segment_ptr_t seg = my_table[s];
            __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
            return &seg[h];
        }
        // internal serial rehashing helper
        void mark_rehashed_levels( hashcode_t h ) {
            segment_index_t s = segment_index_of( h );
            while( segment_ptr_t seg = my_table[++s] )
                if( seg[h].node_list == rehash_req ) {
                    seg[h].node_list = empty_rehashed;
                    mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
                }
        }
        //! Check for mask race
        bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
            hashcode_t m_now, m_old = m;
            m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
            if( m_old != m_now )
                return check_rehashing_collision( h, m_old, m = m_now );
            return false;
        }

        //! Process mask race, check for rehashing collision
        bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
            if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
                for( ++m_old; !(h & m_old); m_old <<= 1 ) // find next applicable mask after m_old
                    ;
                m_old = (m_old<<1) - 1; // get full mask from a bit
                if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
                    return true;
            }
            return false;
        }
        //! Insert a node and check for load factor. @return segment index to enable.
        segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
            size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
            add_to_bucket( b, n );
            // check load factor
            if( sz >= mask ) {
                segment_index_t new_seg = __TBB_Log2( mask+1 ); // optimized segment_index_of
                __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
                static const segment_ptr_t is_allocating = (segment_ptr_t)2;
                if( !itt_hide_load_word(my_table[new_seg])
                  && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
                    return new_seg; // The value must be processed
            }
            return 0;
        }
        //! Prepare enough segments for number of buckets
        template<typename Allocator>
        void reserve(size_type buckets, const Allocator& allocator) {
            if( !buckets-- ) return;
            bool is_initial = !my_size;
            for( size_type m = my_mask; buckets > m; m = my_mask )
                enable_segment( segment_index_of( m+1 ), allocator, is_initial );
        }
        // inside internal_swap( hash_map_base &table ):
        for(size_type i = 0; i < embedded_buckets; i++)
            std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
        for(size_type i = embedded_block; i < pointers_per_table; i++)
            std::swap(this->my_table[i], table.my_table[i]);
#if __TBB_CPP11_RVALUE_REF_PRESENT
        void internal_move(hash_map_base&& other) {
            my_mask = other.my_mask;
            other.my_mask = embedded_buckets - 1;
            my_size = other.my_size;
            other.my_size = 0;

            for(size_type i = 0; i < embedded_buckets; ++i) {
                my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
                other.my_embedded_segment[i].node_list = NULL;
            }

            for(size_type i = embedded_block; i < pointers_per_table; ++i) {
                my_table[i] = other.my_table[i];
                other.my_table[i] = NULL;
            }
        }
#endif // __TBB_CPP11_RVALUE_REF_PRESENT

    // Forward declaration
    template<typename Iterator>
    class hash_map_range;
    //! Meets requirements of a forward iterator for STL
    template<typename Container, typename Value>
    class hash_map_iterator
        : public std::iterator<std::forward_iterator_tag,Value>
    {
        typedef typename Container::node node;
        template<typename C, typename T, typename U>
        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename U>
        friend class hash_map_iterator;
        void advance_to_next_bucket() {
            size_t k = my_index+1;
            __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
            while( k <= my_map->my_mask ) {
                if( k&(k-2) ) // not the beginning of a segment
                    ++my_bucket;
                else my_bucket = my_map->get_bucket( k );
                my_node = static_cast<node*>( my_bucket->node_list );
                if( hash_map_base::is_valid(my_node) ) {
                    my_index = k; return;
                }
                ++k;
            }
            my_bucket = 0; my_node = 0; my_index = k; // the end
        }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
        template<typename Key, typename T, typename HashCompare, typename A>
        friend class interface5::concurrent_hash_map;
#else
    public: // workaround
#endif
        //! concurrent_hash_map over which we are iterating.
        const Container *my_map;

        hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );

    public:
        hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
            my_map(other.my_map),
            my_index(other.my_index),
            my_bucket(other.my_bucket),
            my_node(other.my_node)
        {}

        //! Return reference to associated value in hash table.
        Value& operator*() const { return my_node->value(); }
    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
        my_map(&map), my_index(index), my_bucket(b), my_node( static_cast<node*>(n) )
    {
        if( b && !hash_map_base::is_valid(n) )
            advance_to_next_bucket();
    }

    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
        my_node = static_cast<node*>( my_node->next );
        if( !my_node ) advance_to_next_bucket();
        return *this;
    }
    template<typename Container, typename T, typename U>
    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node == j.my_node && i.my_map == j.my_map;
    }

    template<typename Container, typename T, typename U>
    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node != j.my_node || i.my_map != j.my_map;
    }
    //! Range class used with concurrent_hash_map
    template<typename Iterator>
    class hash_map_range {
        typedef typename Iterator::map_type map_type;
        Iterator my_begin;
        Iterator my_end;
        mutable Iterator my_midpoint;
        size_t my_grainsize;
        //! Set my_midpoint to point approximately half way between my_begin and my_end.
        void set_midpoint() const;
    public:
        //! Type for size of a range
        typedef std::size_t size_type;
        //! True if range is empty.
        bool empty() const { return my_begin==my_end; }
        //! True if range can be partitioned into two subranges.
        bool is_divisible() const { return my_midpoint!=my_end; }
        //! Split range.
        hash_map_range( hash_map_range& r, split ) :
            my_end(r.my_end), my_grainsize(r.my_grainsize)
        {
            r.my_end = my_begin = r.my_midpoint;
            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        //! type conversion
        template<typename U>
        hash_map_range( hash_map_range<U>& r ) :
            my_begin(r.my_begin), my_end(r.my_end),
            my_midpoint(r.my_midpoint), my_grainsize(r.my_grainsize)
        {}
        //! Init range with container and grainsize specified
        hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
            my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
            my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
            my_grainsize( grainsize_ )
        {
            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
            set_midpoint();
        }
        const Iterator& begin() const { return my_begin; }
        const Iterator& end() const { return my_end; }
        //! The grain size for this range.
        size_type grainsize() const { return my_grainsize; }
    };
    template<typename Iterator>
    void hash_map_range<Iterator>::set_midpoint() const {
        // Split by groups of nodes
        size_t m = my_end.my_index-my_begin.my_index;
        if( m > my_grainsize ) {
            m = my_begin.my_index + m/2u;
            hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
            my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
        } else {
            my_midpoint = my_end;
        }
        __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
            "my_begin is after my_midpoint" );
        __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
            "my_midpoint is after my_end" );
        __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
            "[my_begin, my_midpoint) range should not be empty" );
    }
#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
    #pragma warning( push )
    #pragma warning( disable: 4127 )
#endif

//! Unordered map from Key to T.
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;
    class node : public node_base {
        tbb::aligned_space<value_type> my_value;
    public:
        value_type* storage() { return my_value.begin(); }
        value_type& value() { return *storage(); }
    };

    void delete_node( node_base *n ) {
        node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
        node_allocator_traits::destroy(my_allocator, static_cast<node*>(n));
        node_allocator_traits::deallocate(my_allocator, static_cast<node*>(n), 1);
    }
        // inside ~node_scoped_guard(): free a node that was allocated but never published
        node_allocator_traits::destroy(my_alloc, my_node);
        node_allocator_traits::deallocate(my_alloc, my_node, 1);
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    template<typename... Args>
    static node* create_node(node_allocator_type& allocator, Args&&... args)
#else
    template<typename Arg1, typename Arg2>
    static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
#endif
    {
        node* node_ptr = node_allocator_traits::allocate(allocator, 1);
        node_allocator_traits::construct(allocator, node_ptr);
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
        node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
#else
        node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
#endif
        return node_ptr;
    }

    static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
        return create_node(allocator, key, *t);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
        return create_node(allocator, key, std::move(*const_cast<T*>(t)));
    }
#endif

    static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
        // Emplace construct an empty T object inside the pair
        return create_node(allocator, std::piecewise_construct,
                           std::forward_as_tuple(key), std::forward_as_tuple());
#endif
    }
    static node* do_not_allocate_node(node_allocator_type&, const Key&, const T*){
        __TBB_ASSERT(false,"this dummy function should not be called");
        return NULL;
    }
    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
            n = static_cast<node*>( n->next );
        return n;
    }
        // inside bucket_accessor::acquire( concurrent_hash_map *base, const hashcode_t h, bool writer ):
        if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
            && try_acquire( my_b->mutex, /*write=*/true ) )
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        bucket_accessor b_old( this, h & mask );

        mask = (mask<<1) | 1; // get full mask for new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) {
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node ptr can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to next item
        }
    }
    //! Construct empty table.
    explicit concurrent_hash_map( const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a) {}

    explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) {}

    //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
    concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a) { reserve( n, my_allocator ); }

    concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) { reserve( n, my_allocator ); }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map &table )
        : internal::hash_map_base(),
          my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
    { internal_copy(table); }

    concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a )
        : internal::hash_map_base(), my_allocator(a) { internal_copy(table); }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move constructor
    concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
        : internal::hash_map_base(), my_allocator(a)
    {
        if (a == table.get_allocator()){
            internal_move(std::move(table));
        }else{
            internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
        }
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Construction with copying iteration range and given allocator instance
    template<typename I>
    concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    { internal_copy(first, last, std::distance(first, last)); }

    template<typename I>
    concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
    { internal_copy(first, last, std::distance(first, last)); }

#if __TBB_INITIALIZER_LISTS_PRESENT
    concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a)
    { internal_copy(il.begin(), il.end(), il.size()); }

    concurrent_hash_map( std::initializer_list<value_type> il, const HashCompare& compare, const allocator_type& a = allocator_type() )
        : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
    { internal_copy(il.begin(), il.end(), il.size()); }
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move Assignment
    concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
        if(this != &table) {
            typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type;
            internal_move_assign(std::move(table), pocma_type());
        }
        return *this;
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Assignment from an initializer list
    concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
        clear();
        internal_copy(il.begin(), il.end(), il.size());
        return *this;
    }
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    //! Rehashes and optionally resizes the whole table.
    void rehash(size_type n = 0);
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }

    iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
    iterator end() { return iterator( *this, 0, 0, 0 ); }
    const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
    const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }
    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );
    }

    //! Find item and acquire a read lock on the item.
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
    }

    //! Find item and acquire a write lock on the item.
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );
    }

    //! Insert item by copying if there is no such key present already.
    bool insert( const value_type &value ) {
        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
    }
#if __TBB_CPP11_RVALUE_REF_PRESENT
#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    //! Insert item in place (if not present) and acquire a read lock on the item.
    template<typename... Args>
    bool emplace( const_accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }

    //! Insert item in place (if not present) and acquire a write lock on the item.
    template<typename... Args>
    bool emplace( accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }

    //! Insert item in place if there is no such key present already.
    template<typename... Args>
    bool emplace( Args&&... args ) {
        return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Insert range [first, last)
    template<typename I>
    void insert( I first, I last ) {
        for ( ; first != last; ++first )
            insert( *first );
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Insert initializer list
    void insert( std::initializer_list<value_type> il ) {
        insert( il.begin(), il.end() );
    }
#endif //__TBB_INITIALIZER_LISTS_PRESENT
    //! Erase item. Return true if item was erased by particularly this call.
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor );
    }

    //! Erase item by accessor.
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor );
    }
    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write,
                 node* (*allocate_node)(node_allocator_type&, const Key&, const T*), node *tmp_n = 0 );
#if __TBB_CPP11_RVALUE_REF_PRESENT
    template<typename Accessor>
    bool generic_move_insert( Accessor && result, value_type && value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, accessor_location(result),
                      is_write_access_needed(result), &allocate_node_move_construct );
    }

#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    template<typename Accessor, typename... Args>
    bool generic_emplace( Accessor && result, Args &&... args ) {
        result.release();
        node * node_ptr = create_node(my_allocator, std::forward<Args>(args)...);
        return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result),
                      is_write_access_needed(result), &do_not_allocate_node, node_ptr );
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Copy "source" to *this, where *this must start out empty.
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy( I first, I last, size_type reserve_size );
#if __TBB_CPP11_RVALUE_REF_PRESENT
    // inside internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_false_type):
    if (this->my_allocator == other.my_allocator) {
        internal_move(std::move(other));
    } else { // do per element move
        internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
    }
    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
        hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        bucket *b = get_bucket( h & m );
        if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req ) {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if( b->node_list == internal::rehash_req )
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, /*write=*/false );
        }
        n = search_bucket( key, b );
        if( n )
            return n->storage();
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
namespace internal {

template<template<typename...> typename Map, typename Key, typename T, typename... Args>
using hash_map_t = Map<
    Key, T,
    std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
                        pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
    std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
                        pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > > >;
} // namespace internal

// Deduction guide for the constructor from two iterators and hash_compare/allocator
template<typename I, typename... Args>
concurrent_hash_map(I, I, Args...)
-> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>, internal::iterator_mapped_t<I>, Args...>;

// Deduction guide for the constructor from an initializer_list and hash_compare/allocator
template<typename Key, typename T, typename CompareOrAllocator>
concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
-> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;
#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t,
        const_accessor *result, bool write,
        node* (*allocate_node)(node_allocator_type&, const Key&, const T*), node *tmp_n ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    bool return_value;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    segment_index_t grow_segment = 0;
    node *n;
    restart:
    {// lock scope
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );
        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n ) {
                    tmp_n = allocate_node(my_allocator, key, t);
                }
                if( !b.is_writer() && !b.upgrade_to_writer() ) {
                    // Rerun search_bucket, in case another thread inserted the item during the upgrade.
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) )
                    goto restart; // b.release() is done in ~b()
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b.release() is done in ~b()
                return false;
            }
            return_value = true;
        }
    exists:
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->try_acquire( n->mutex, write ) ) {
            for( tbb::internal::atomic_backoff backoff(true);; ) {
                if( result->try_acquire( n->mutex, write ) ) break;
                if( !backoff.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
                    m = (hashcode_t) itt_load_word_with_acquire( my_mask );
                    goto restart;
                }
            }
        }
    }// lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // [opt] grow the container
    if( grow_segment ) {
#if __TBB_STATISTICS
        my_info_resizes++; // concurrent ones
#endif
        enable_segment( grow_segment, my_allocator );
    }
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}
template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    hashcode_t const h = item_accessor.my_hash;
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    do {
        // get bucket
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // someone else was first
            if( check_mask_race( h, m ) )
                continue;
            item_accessor.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        my_size--;
        break;
    } while(true);
    if( !item_accessor.is_writer() ) // need to get exclusive lock
        item_accessor.upgrade_to_writer(); // return value means nothing here
    item_accessor.release();
    delete_node( n ); // Only one thread can delete it
    return true;
}
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
restart:
    {// lock scope
        // get bucket
        bucket_accessor b( this, h & m );
    search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but mask could be changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    delete_node( n ); // Only one thread can delete it due to write lock on the bucket
    return true;
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
    if(this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
        tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
        std::swap(this->my_hash_compare, table.my_hash_compare);
        internal_swap(table);
    }
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz, my_allocator );
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // size or first index of the last segment
    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0,
            "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash bucket; a previously rehashed bucket may affect this one
            hashcode_t h = b; bucket *b_old = bp;
            do {
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
                if( (c & mask) != h ) { // should be rehashed
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0,
            "concurrent or unexpectedly terminated operation during rehash() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
#if __TBB_USE_OPTIONAL_RTTI
            typeid(*this).name(),
#else
            "concurrent_hash_map",
#endif
            current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0,
            "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if __TBB_STATISTICS
    printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
        " concurrent: resizes=%u rehashes=%u restarts=%u\n",
        current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
        unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
    my_info_resizes = 0; // concurrent ones
    my_info_restarts = 0; // race collisions
    my_info_rehashes = 0; // invocations of rehash_bucket
#endif
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
#if __TBB_USE_OPTIONAL_RTTI
            typeid(*this).name(),
#else
            "concurrent_hash_map",
#endif
            current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    do { // flush the table segment by segment
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        delete_segment(s, my_allocator);
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized version
        reserve( source.my_size, my_allocator );
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
                add_to_bucket( dst, node_ptr);
                ++my_size;
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end(), source.my_size );
}
template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last, size_type reserve_size) {
    reserve( reserve_size, my_allocator );
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( (*first).first );
        bucket *b = get_bucket( h & m );
        node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
        add_to_bucket( b, node_ptr );
        ++my_size;
    }
}
template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a,
                       const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) )
            return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a,
                       const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{    return !(a == b); }
template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{    a.swap( b ); }

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning( pop )
#endif // warning 4127 is back

} // namespace interface5

using interface5::concurrent_hash_map;

} // namespace tbb

#undef __TBB_concurrent_hash_map_H_include_area

#endif /* __TBB_concurrent_hash_map_H */
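A minimal end-to-end usage sketch of the container declared above (illustrative only: the word list, names, and counts are made up, and the TBB headers and runtime are assumed to be available):

// Count word occurrences in parallel, then read the table out serially.
#include <cstddef>
#include <cstdio>
#include <string>
#include "tbb/concurrent_hash_map.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"

typedef tbb::concurrent_hash_map<std::string, int> WordTable;

int main() {
    const std::string words[] = { "alpha", "beta", "alpha", "gamma", "beta", "alpha" };
    const std::size_t n = sizeof(words) / sizeof(words[0]);
    WordTable table;

    // Concurrent inserts: the accessor keeps a write lock on its element
    // until it goes out of scope, so the increment is race-free.
    tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n),
        [&](const tbb::blocked_range<std::size_t>& r) {
            for (std::size_t i = r.begin(); i != r.end(); ++i) {
                WordTable::accessor a;
                table.insert(a, words[i]);   // mapped value is value-initialized on first insertion
                a->second += 1;
            }
        });

    // Serial read-out; no other thread touches the table here.
    for (WordTable::iterator it = table.begin(); it != table.end(); ++it)
        std::printf("%s: %d\n", it->first.c_str(), it->second);
    return 0;
}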
friend bool is_write_access_needed(accessor const &)
~const_accessor()
Destroy result after releasing the underlying reference.
bool is_writer
If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock...
bucket * get_bucket(hashcode_t h) const
Get bucket by (masked) hashcode.
static void init_buckets(segment_ptr_t ptr, size_type sz, bool is_initial)
Initialize buckets.
bool generic_move_insert(Accessor &&result, value_type &&value)
internal::hash_map_range< const_iterator > const_range_type
bool find(const_accessor &result, const Key &key) const
Find item and acquire a read lock on the item.
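An illustrative sketch of the two find() overloads; the helper names (get_count, bump_if_present) are hypothetical:

#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

// Read-only lookup: const_accessor holds a reader lock until it is destroyed or released.
bool get_count(const StringTable& table, const std::string& key, int& out) {
    StringTable::const_accessor ca;
    if (!table.find(ca, key)) return false;   // not found
    out = ca->second;                         // safe while 'ca' is alive
    return true;
}

// Mutating lookup: accessor holds a writer lock on the found element.
bool bump_if_present(StringTable& table, const std::string& key) {
    StringTable::accessor a;
    if (!table.find(a, key)) return false;
    a->second += 1;
    return true;
}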
size_type grainsize() const
The grain size for this range.
void internal_move_assign(concurrent_hash_map &&other, tbb::internal::traits_false_type)
hash_map_iterator(const hash_map_iterator< Container, typename Container::value_type > &other)
Range class used with concurrent_hash_map.
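A hedged sketch of feeding range() to tbb::parallel_for; parallel_sum is a hypothetical helper, and the table must not be modified concurrently while it is traversed:

#include <atomic>
#include <string>
#include "tbb/concurrent_hash_map.h"
#include "tbb/parallel_for.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

// Sum all mapped values by letting TBB split the table into per-task sub-ranges.
int parallel_sum(StringTable& table) {
    std::atomic<int> total(0);
    tbb::parallel_for(table.range(), [&](const StringTable::range_type& r) {
        int local = 0;
        for (StringTable::iterator it = r.begin(); it != r.end(); ++it)
            local += it->second;
        total.fetch_add(local, std::memory_order_relaxed);
    });
    return total.load();
}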
size_t segment_index_t
Segment index type.
#define __TBB_USE_OPTIONAL_RTTI
static bool is_valid(void *ptr)
hash_map_range(hash_map_range< U > &r)
type conversion
bool check_rehashing_collision(const hashcode_t h, hashcode_t m_old, hashcode_t m) const
Process mask race, check for rehashing collision.
bool is_divisible() const
True if range can be partitioned into two subranges.
void allocator_swap(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
hash_map_node_base node_base
Node base type.
friend const_accessor * accessor_location(const_accessor &a)
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
void internal_move_assign(concurrent_hash_map &&other, tbb::internal::traits_true_type)
void set_midpoint() const
Set my_midpoint to point approximately half way between my_begin and my_end.
HashCompare my_hash_compare
void rehash(size_type n=0)
Rehashes and optionally resizes the whole table.
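A sketch of calling rehash() before a serial bulk load; the bucket count and helper name are arbitrary. Pre-sizing reduces concurrent segment growth and deferred per-bucket rehashing during later insertions:

#include <string>
#include <utility>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<int, std::string> IdTable;

void preload(IdTable& table) {
    table.rehash(1 << 16);   // request at least 65536 buckets (serial, not thread-safe)
    for (int i = 0; i < 50000; ++i)
        table.insert(std::make_pair(i, std::string("value")));
}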
tbb::internal::allocator_rebind< Allocator, node >::type node_allocator_type
friend const_accessor * accessor_location(accessor_not_used const &)
void internal_copy(const concurrent_hash_map &source)
Copy "source" to *this, where *this must start out empty.
const Iterator & begin() const
bool find(accessor &result, const Key &key)
Find item and acquire a write lock on the item.
bool emplace(const_accessor &result, Args &&... args)
Insert item by copying if there is no such key present already and acquire a read lock on the item...
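An illustrative emplace() sketch (keys and values are arbitrary); the value_type is constructed in place from the forwarded arguments:

#include <string>
#include <utility>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<int, std::string> IdTable;

void emplace_examples(IdTable& table) {
    // Construct the pair in place; no accessor, so no lock is kept afterwards.
    table.emplace(1, "one");

    // Keep a write lock on the (possibly new) element while adjusting it.
    IdTable::accessor a;
    if (table.emplace(a, 2, std::string(3, 'x')))   // inserted "xxx" under key 2
        a->second += "!";
}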
friend bool operator==(const hash_map_iterator< C, T > &i, const hash_map_iterator< C, U > &j)
static node * allocate_node_move_construct(node_allocator_type &allocator, const Key &key, const T *t)
allocator_traits< Alloc >::template rebind_alloc< T >::other type
void swap(concurrent_hash_map< Key, T, HashCompare, A > &a, concurrent_hash_map< Key, T, HashCompare, A > &b)
concurrent_hash_map(const HashCompare &compare, const allocator_type &a=allocator_type())
void allocator_move_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
concurrent_hash_map::value_type value_type
Type of value.
Iterator::reference reference
static segment_index_t segment_base(segment_index_t k)
const_pointer internal_fast_find(const Key &key) const
Fast find when no concurrent erasure is used. For internal use inside TBB only!
enable_segment_failsafe(segments_table_t &table, segment_index_t k)
bool erase(const_accessor &item_accessor)
Erase item by const_accessor.
Dummy type that distinguishes splitting constructor from copy constructor.
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
static node * allocate_node_copy_construct(node_allocator_type &allocator, const Key &key, const T *t)
friend bool operator!=(const hash_map_iterator< C, T > &i, const hash_map_iterator< C, U > &j)
void mark_rehashed_levels(hashcode_t h)
bool exclude(const_accessor &item_accessor)
delete item by accessor
Base class for types that should not be copied or assigned.
void internal_move(hash_map_base &&other)
void itt_store_word_with_release(tbb::atomic< T > &dst, U src)
const_iterator end() const
Iterator::map_type map_type
Allows write access to elements and combines data access, locking, and garbage collection.
bool insert(value_type &&value)
Insert item by copying if there is no such key present already.
friend bool is_write_access_needed(accessor_not_used const &)
bool emplace(Args &&... args)
Insert item by copying if there is no such key present already.
concurrent_hash_map(const allocator_type &a=allocator_type())
Construct empty table.
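A sketch of constructing a table with a non-default allocator; ScalableTable and make_table are hypothetical names, and the TBB scalable allocator is assumed to be available:

#include <string>
#include <utility>
#include "tbb/concurrent_hash_map.h"
#include "tbb/scalable_allocator.h"

// The last template argument selects the allocator; tbb_allocator is the default.
typedef tbb::concurrent_hash_map<
    int, std::string,
    tbb::tbb_hash_compare<int>,
    tbb::scalable_allocator<std::pair<const int, std::string> > > ScalableTable;

void make_table() {
    ScalableTable::allocator_type alloc;
    ScalableTable table(alloc);   // the "Construct empty table" overload above
    table.insert(std::make_pair(1, std::string("one")));
}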
Value * operator->() const
static void add_to_bucket(bucket *b, node_base *n)
Add node.
concurrent_hash_map(I first, I last, const HashCompare &compare, const allocator_type &a=allocator_type())
size_type bucket_count() const
Returns the current number of buckets.
const_range_type range(size_type grainsize=1) const
concurrent_hash_map(size_type n, const allocator_type &a=allocator_type())
Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
void reserve(size_type buckets, const Allocator &allocator)
Prepare enough segments for number of buckets.
reference operator*() const
Return reference to associated value in hash table.
concurrent_hash_map * my_ch_map
bool insert(const_accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a read lock on the item...
pointer operator->() const
Return pointer to associated value in hash table.
auto last(Container &c) -> decltype(begin(c))
The scoped locking pattern.
void acquire(spin_rw_mutex &m, bool write=true)
Acquire lock on given mutex.
bool erase(accessor &item_accessor)
Erase item by accessor.
friend bool is_write_access_needed(const_accessor const &)
Iterator::value_type value_type
bool insert(const value_type &value)
Insert item by copying if there is no such key present already.
const_pointer operator->() const
Return pointer to associated value in hash table.
void acquire(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
find a bucket by masked hashcode, optionally rehash, and acquire the lock
node_scoped_guard(node *n, node_allocator_type &alloc)
void __TBB_store_with_release(volatile T &location, V value)
bool operator!=(const hash_map_iterator< Container, T > &i, const hash_map_iterator< Container, U > &j)
static hash_map_node_base *const rehash_req
Incompleteness flag value.
void allocator_copy_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
hash_compare that is default argument for concurrent_hash_map
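A sketch of a user-supplied HashCompare replacing the default tbb_hash_compare; CaseInsensitiveCompare is hypothetical and must provide hash() and equal() such that keys comparing equal hash identically:

#include <cstddef>
#include <string>
#include "tbb/concurrent_hash_map.h"

struct CaseInsensitiveCompare {
    static char lower(char c) { return (c >= 'A' && c <= 'Z') ? char(c - 'A' + 'a') : c; }
    std::size_t hash(const std::string& s) const {
        std::size_t h = 0;
        for (std::size_t i = 0; i < s.size(); ++i)
            h = h * 31 + std::size_t(lower(s[i]));
        return h;
    }
    bool equal(const std::string& a, const std::string& b) const {
        if (a.size() != b.size()) return false;
        for (std::size_t i = 0; i < a.size(); ++i)
            if (lower(a[i]) != lower(b[i])) return false;
        return true;
    }
};

typedef tbb::concurrent_hash_map<std::string, int, CaseInsensitiveCompare> CiTable;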
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
spin_rw_mutex mutex_t
Mutex type.
allocator_type get_allocator() const
return allocator object
bool insert(const_accessor &result, value_type &&value)
Insert item by copying if there is no such key present already and acquire a read lock on the item...
size_t hashcode_t
Type of a hash code.
tbb::internal::allocator_traits< node_allocator_type > node_allocator_traits
Unordered map from Key to T.
Bucket accessor: finds a bucket by masked hashcode, optionally rehashes it, acquires a lock, and provides access to the bucket.
size_type size() const
Number of items in table.
node_allocator_type & my_alloc
Iterator::difference_type difference_type
static segment_index_t segment_index_of(size_type index)
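The segment helpers implement the two-level bucket index: bucket i lives in segment segment_index_of(i) at offset i - segment_base(segment_index_of(i)). A standalone re-implementation of that arithmetic, for illustration only (log2_floor stands in for __TBB_Log2):

#include <cstddef>
#include <cstdio>

static std::size_t log2_floor(std::size_t x) { std::size_t r = 0; while (x >>= 1) ++r; return r; }
static std::size_t segment_index_of(std::size_t index) { return log2_floor(index | 1); }
static std::size_t segment_base(std::size_t k) { return (std::size_t(1) << k) & ~std::size_t(1); }
static std::size_t segment_size(std::size_t k) { return std::size_t(1) << k; } // "fake value" for k==0, as in the header

int main() {
    for (std::size_t i = 0; i < 16; ++i) {
        std::size_t s = segment_index_of(i);
        std::printf("bucket %2zu -> segment %zu, offset %zu (segment size %zu)\n",
                    i, s, i - segment_base(s), segment_size(s));
    }
    return 0;
}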
bool check_mask_race(const hashcode_t h, hashcode_t &m) const
Check for mask race.
Meets requirements of a forward iterator for STL.
tick_count::interval_t operator-(const tick_count &t1, const tick_count &t0)
bool is_writer()
check whether bucket is locked for write
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
~concurrent_hash_map()
Clear table and destroy it.
std::pair< I, I > internal_equal_range(const Key &key, I end) const
Returns an iterator for an item defined by the key, or for the next item after it (if upper==true) ...
bool insert(const_accessor &result, const Key &key)
Insert item (if not already present) and acquire a read lock on the item.
bucket * segment_ptr_t
Segment pointer.
bool empty() const
True if range is empty.
hash_map_iterator & operator=(const hash_map_iterator< Container, typename Container::value_type > &other)
const Container * my_map
concurrent_hash_map over which we are iterating.
const_reference operator*() const
Return reference to associated value in hash table.
void release()
Set to null.
T itt_hide_load_word(const T &src)
const bucket * my_bucket
Pointer to bucket.
concurrent_hash_map(const concurrent_hash_map &table, const allocator_type &a)
base class of concurrent_hash_map
internal::hash_map_iterator< concurrent_hash_map, value_type > iterator
Combines data access, locking, and garbage collection.
hash_map_iterator()
Construct undefined iterator.
size_t my_index
Index in hash table for current item.
node * my_node
Pointer to node that has current item.
std::pair< const Key, T > value_type
bool operator==(const hash_map_iterator< Container, T > &i, const hash_map_iterator< Container, U > &j)
size_type count(const Key &key) const
Return count of items (0 or 1)
node * search_bucket(const key_type &key, bucket *b) const
ptrdiff_t difference_type
void internal_swap(hash_map_base &table)
Swap hash_map_bases.
Fast, unfair, spinning reader-writer lock with backoff and writer-preference.
static node * do_not_allocate_node(node_allocator_type &, const Key &, const T *)
bool empty() const
True if result is empty.
void rehash_bucket(bucket *b_new, const hashcode_t h)
concurrent_hash_map(const concurrent_hash_map &table)
Copy constructor.
std::pair< const_iterator, const_iterator > equal_range(const Key &key) const
hash_map_base::node_base node_base
hash_map_base::bucket bucket
bool generic_emplace(Accessor &&result, Args &&... args)
hash_map_node_base * next
Next node in chain.
auto first(Container &c) -> decltype(begin(c))
hash_map_iterator operator++(int)
Post increment.
static node * allocate_node_default_construct(node_allocator_type &allocator, const Key &key, const T *)
void delete_node(node_base *n)
hash_map_range(const map_type &map, size_type grainsize_=1)
Init range with container and grainsize specified.
bool empty() const
True if size()==0.
internal::hash_map_iterator< concurrent_hash_map, const value_type > const_iterator
internal::hash_map_range< iterator > range_type
static hash_map_node_base *const empty_rehashed
Rehashed empty bucket flag.
void advance_to_next_bucket()
static size_type segment_size(segment_index_t k)
size_type max_size() const
Upper bound on size.
hash_map_range(hash_map_range &r, split)
Split range.
void delete_segment(segment_index_t s, const Allocator &allocator)
tbb::aligned_space< value_type > my_value
intptr_t __TBB_Log2(uintptr_t x)
size_t hashcode_t
Type of a hash code.
std::size_t size_type
Type for size of a range.
concurrent_hash_map(I first, I last, const allocator_type &a=allocator_type())
Construction with copying iteration range and given allocator instance.
const value_type & const_reference
const_iterator begin() const
bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node *(*allocate_node)(node_allocator_type &, const Key &, const T *), node *tmp_n=0)
Insert or find item and optionally acquire a lock on the item.
node_allocator_type my_allocator
#define __TBB_FORWARDING_REF(A)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
bool insert(accessor &result, value_type &&value)
Insert item by copying if there is no such key present already and acquire a write lock on the item...
T itt_load_word_with_acquire(const tbb::atomic< T > &src)
void swap(concurrent_hash_map &table)
swap two instances. Iterators are invalidated
void itt_hide_store_word(T &dst, T src)
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.
const value_type * const_pointer
const concurrent_hash_map::value_type value_type
Type of value.
Value & operator*() const
size_t size_type
Size type.
Class that implements exponential backoff.
bool insert(accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a write lock on the item...
concurrent_hash_map & operator=(const concurrent_hash_map &table)
Assignment.
atomic< T > & as_atomic(T &t)
atomic< size_type > my_size
Size of container in stored items.
bool emplace(accessor &result, Args &&... args)
Insert item by copying if there is no such key present already and acquire a write lock on the item...
void enable_segment(segment_index_t k, const Allocator &allocator, bool is_initial=false)
Enable segment.
bucket_accessor(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
concurrent_hash_map(size_type n, const HashCompare &compare, const allocator_type &a=allocator_type())
concurrent_hash_map(std::initializer_list< value_type > il, const HashCompare &compare, const allocator_type &a=allocator_type())
hash_map_iterator & operator++()
range_type range(size_type grainsize=1)
call_clear_on_leave(concurrent_hash_map *a_ch_map)
void insert(I first, I last)
Insert range [first, last)
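A sketch of the bulk insert overloads (the source data and helper name are arbitrary); each element is routed through the single-element insert(), and the initializer_list form assumes C++11 support:

#include <map>
#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<int, std::string> IdTable;

void load(IdTable& table, const std::map<int, std::string>& src) {
    table.insert(src.begin(), src.end());                         // iterator-range overload
    table.insert({ {100, "hundred"}, {101, "hundred one"} });     // initializer_list overload
}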
static node * create_node(node_allocator_type &allocator, Args &&... args)
segment_ptr_t * my_segment_ptr
T __TBB_load_with_acquire(const volatile T &location)
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
hash_map_base::size_type size_type
const_accessor()
Create empty result.
bool erase(const Key &key)
Erase item.
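An illustrative erase() sketch showing removal by key and removal through an accessor that already holds the element:

#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

void erase_examples(StringTable& table) {
    // Erase by key: returns true only for the call that actually removed the item.
    bool removed = table.erase("obsolete");
    (void)removed;

    // Erase through an accessor that currently locks the element.
    StringTable::accessor a;
    if (table.find(a, "stale"))
        table.erase(a);   // the accessor is released as part of the erase
}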
bool insert(accessor &result, const Key &key)
Insert item (if not already present) and acquire a write lock on the item.
Identifiers declared inside namespace internal should never be used directly by client code...
segment_index_t insert_new_node(bucket *b, node_base *n, hashcode_t mask)
Insert a node and check for load factor.
concurrent_hash_map(concurrent_hash_map &&table, const allocator_type &a)
Move constructor.
const Iterator & end() const
bucket my_embedded_segment[embedded_buckets]
Zero segment.
~enable_segment_failsafe()
std::pair< iterator, iterator > equal_range(const Key &key)
spin_rw_mutex mutex_t
Mutex type for buckets.
void move(tbb_thread &t1, tbb_thread &t2)