28 #if __TBB_STATISTICS_STDOUT 35 #if __TBB_NUMA_SUPPORT 38 binding_handler* binding_handler_ptr;
40 numa_binding_observer( task_arena* ta,
int numa_id,
int num_slots )
41 : task_scheduler_observer(*ta)
42 , my_numa_node_id(numa_id)
43 , binding_handler_ptr(
tbb::
internal::construct_binding_handler(num_slots))
47 tbb::internal::bind_thread_to_node(
55 ~numa_binding_observer(){
56 tbb::internal::destroy_binding_handler(binding_handler_ptr);
61 int numa_id,
int num_slots ) {
62 numa_binding_observer* binding_observer = NULL;
64 if (numa_id >= 0 && numa_topology::nodes_count() > 1) {
65 binding_observer =
new numa_binding_observer(ta, numa_id, num_slots);
66 __TBB_ASSERT(binding_observer,
"Failure during NUMA binding observer allocation and construction");
67 binding_observer->observe(
true);
69 return binding_observer;
72 void destroy_binding_observer( numa_binding_observer* binding_observer ) {
73 __TBB_ASSERT(binding_observer,
"Trying to deallocate NULL pointer");
74 binding_observer->observe(
false);
75 delete binding_observer;
83 my_arena_index = index;
86 if ( is_master && my_inbox.is_idle_state(
true ) ) {
89 my_inbox.set_is_idle(
false );
91 #if __TBB_TASK_GROUP_CONTEXT 94 my_dummy_task->prefix().context = a->my_default_ctx;
96 #if __TBB_TASK_PRIORITY 103 my_ref_top_priority = &a->my_top_priority;
104 my_ref_reload_epoch = &a->my_reload_epoch;
106 my_local_reload_epoch = *my_ref_reload_epoch;
112 return !slot &&
as_atomic( slot ).compare_and_swap( &s, NULL ) == NULL;
116 if ( lower >= upper )
return out_of_arena;
119 if ( index < lower || index >= upper ) index = s.
my_random.
get() % (upper - lower) + lower;
122 for (
size_t i = index; i < upper; ++i )
123 if (
occupy_slot(my_slots[i].my_scheduler, s) )
return i;
124 for (
size_t i = lower; i < index; ++i )
125 if (
occupy_slot(my_slots[i].my_scheduler, s) )
return i;
129 template <
bool as_worker>
132 size_t index = as_worker ? out_of_arena : occupy_free_slot_in_range( s, 0, my_num_reserved_slots );
133 if ( index == out_of_arena ) {
135 index = occupy_free_slot_in_range( s, my_num_reserved_slots, my_num_slots );
137 if ( index == out_of_arena )
142 atomic_update( my_limit, (
unsigned)(index + 1), std::less<unsigned>() );
154 size_t index = occupy_free_slot<
true>(
s );
155 if ( index == out_of_arena )
158 __TBB_ASSERT( index >= my_num_reserved_slots,
"Workers cannot occupy reserved slots" );
161 #if !__TBB_FP_CONTEXT 162 my_cpu_ctl_env.set_env();
165 #if __TBB_ARENA_OBSERVER 166 __TBB_ASSERT( !s.my_last_local_observer,
"There cannot be notified local observers when entering arena" );
167 my_observers.notify_entry_observers( s.my_last_local_observer,
true );
182 "Worker cannot leave arena while its task pool is not reset" );
186 if ( is_recall_requested() )
199 #if __TBB_ARENA_OBSERVER 200 my_observers.notify_exit_observers( s.my_last_local_observer,
true );
201 s.my_last_local_observer = NULL;
203 #if __TBB_TASK_PRIORITY 204 if ( s.my_offloaded_tasks )
205 orphan_offloaded_tasks( s );
208 ++s.my_counters.arena_roundtrips;
209 *my_slots[index].my_counters += s.my_counters;
210 s.my_counters.reset();
223 on_thread_leaving<ref_worker>();
227 __TBB_ASSERT( !my_guard,
"improperly allocated arena?" );
230 #if __TBB_TASK_PRIORITY 231 __TBB_ASSERT( !my_reload_epoch && !my_orphaned_tasks && !my_skipped_fifo_priority,
"New arena object is not zeroed" );
236 my_num_slots = num_arena_slots(num_slots);
237 my_num_reserved_slots = num_reserved_slots;
238 my_max_num_workers = num_slots-num_reserved_slots;
239 my_references = ref_external;
240 #if __TBB_TASK_PRIORITY 241 my_bottom_priority = my_top_priority = normalized_normal_priority;
244 #if __TBB_ARENA_OBSERVER 245 my_observers.my_arena =
this;
247 #if __TBB_PREVIEW_RESUMABLE_TASKS 248 my_co_cache.init(4 * num_slots);
250 __TBB_ASSERT ( my_max_num_workers <= my_num_slots, NULL );
252 for(
unsigned i = 0; i < my_num_slots; ++i ) {
253 __TBB_ASSERT( !my_slots[i].my_scheduler && !my_slots[i].task_pool, NULL );
256 #if __TBB_PREVIEW_RESUMABLE_TASKS 257 __TBB_ASSERT( !my_slots[i].my_scheduler_is_recalled, NULL );
259 ITT_SYNC_CREATE(my_slots + i, SyncType_Scheduler, SyncObj_WorkerTaskPool);
260 mailbox(i+1).construct();
262 my_slots[i].hint_for_pop = i;
263 #if __TBB_PREVIEW_CRITICAL_TASKS 264 my_slots[i].hint_for_critical = i;
267 my_slots[i].my_counters =
new (
NFS_Allocate(1,
sizeof(statistics_counters), NULL) ) statistics_counters;
270 my_task_stream.initialize(my_num_slots);
271 ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream);
272 #if __TBB_PREVIEW_CRITICAL_TASKS 273 my_critical_task_stream.initialize(my_num_slots);
274 ITT_SYNC_CREATE(&my_critical_task_stream, SyncType_Scheduler, SyncObj_CriticalTaskStream);
276 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 277 my_local_concurrency_mode =
false;
278 my_global_concurrency_mode =
false;
280 #if !__TBB_FP_CONTEXT 281 my_cpu_ctl_env.get_env();
289 size_t n = allocation_size(num_arena_slots(num_slots));
290 unsigned char* storage = (
unsigned char*)
NFS_Allocate( 1, n, NULL );
292 memset( storage, 0, n );
293 return *
new( storage + num_arena_slots(num_slots) *
sizeof(
mail_outbox) )
arena(m, num_slots, num_reserved_slots);
298 __TBB_ASSERT( !my_references,
"There are threads in the dying arena" );
299 __TBB_ASSERT( !my_num_workers_requested && !my_num_workers_allotted,
"Dying arena requests workers" );
300 __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers,
"Inconsistent state of a dying arena" );
301 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 304 #if !__TBB_STATISTICS_EARLY_DUMP 308 intptr_t drained = 0;
309 for (
unsigned i = 0; i < my_num_slots; ++i ) {
310 __TBB_ASSERT( !my_slots[i].my_scheduler,
"arena slot is not empty" );
314 my_slots[i].free_task_pool();
316 NFS_Free( my_slots[i].my_counters );
318 drained += mailbox(i+1).drain();
320 __TBB_ASSERT( my_task_stream.drain()==0,
"Not all enqueued tasks were executed");
321 #if __TBB_PREVIEW_RESUMABLE_TASKS 323 my_co_cache.cleanup();
325 #if __TBB_PREVIEW_CRITICAL_TASKS 326 __TBB_ASSERT( my_critical_task_stream.drain()==0,
"Not all critical tasks were executed");
328 #if __TBB_COUNT_TASK_NODES 329 my_market->update_task_node_count( -drained );
332 my_market->release(
false,
false );
333 #if __TBB_TASK_GROUP_CONTEXT 334 __TBB_ASSERT( my_default_ctx,
"Master thread never entered the arena?" );
335 my_default_ctx->~task_group_context();
338 #if __TBB_ARENA_OBSERVER 339 if ( !my_observers.empty() )
340 my_observers.clear();
342 void* storage = &mailbox(my_num_slots);
344 __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );
346 #if TBB_USE_ASSERT > 1 347 memset( storage, 0, allocation_size(my_num_slots) );
353 void arena::dump_arena_statistics () {
354 statistics_counters total;
355 for(
unsigned i = 0; i < my_num_slots; ++i ) {
356 #if __TBB_STATISTICS_EARLY_DUMP 359 *my_slots[i].my_counters += s->my_counters;
364 total += *my_slots[i].my_counters;
365 dump_statistics( *my_slots[i].my_counters, i );
368 dump_statistics( *my_slots[0].my_counters, 0 );
369 #if __TBB_STATISTICS_STDOUT 370 #if !__TBB_STATISTICS_TOTALS_ONLY 371 printf(
"----------------------------------------------\n" );
373 dump_statistics( total, workers_counters_total );
374 total += *my_slots[0].my_counters;
375 dump_statistics( total, arena_counters_total );
376 #if !__TBB_STATISTICS_TOTALS_ONLY 377 printf(
"==============================================\n" );
383 #if __TBB_TASK_PRIORITY 388 inline bool arena::may_have_tasks (
generic_scheduler*
s,
bool& tasks_present,
bool& dequeuing_possible ) {
392 if ( s->my_pool_reshuffling_pending ) {
395 tasks_present =
true;
398 if ( s->my_offloaded_tasks ) {
399 tasks_present =
true;
400 if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {
412 ++my_abandonment_epoch;
413 __TBB_ASSERT( s.my_offloaded_task_list_tail_link && !*s.my_offloaded_task_list_tail_link, NULL );
416 orphans =
const_cast<task*
>(my_orphaned_tasks);
417 *s.my_offloaded_task_list_tail_link = orphans;
418 }
while (
as_atomic(my_orphaned_tasks).compare_and_swap(s.my_offloaded_tasks, orphans) != orphans );
419 s.my_offloaded_tasks = NULL;
421 s.my_offloaded_task_list_tail_link = NULL;
429 if ( !my_task_stream.empty(
p) )
440 if ( has_enqueued_tasks() ) {
441 advertise_new_work<work_enqueued>();
442 #if __TBB_TASK_PRIORITY 446 if ( !my_task_stream.empty(
p) ) {
447 if ( p < my_bottom_priority || p > my_top_priority )
448 my_market->update_arena_priority(*
this,
p);
461 case SNAPSHOT_FULL: {
465 if( my_pool_state.compare_and_swap( busy, SNAPSHOT_FULL )==SNAPSHOT_FULL ) {
473 #if __TBB_TASK_PRIORITY 475 intptr_t top_priority = my_top_priority;
479 for( k=0; k<n; ++k ) {
486 if( my_pool_state!=busy )
490 bool work_absent = k == n;
491 #if __TBB_PREVIEW_CRITICAL_TASKS 492 bool no_critical_tasks = my_critical_task_stream.empty(0);
493 work_absent &= no_critical_tasks;
495 #if __TBB_TASK_PRIORITY 498 bool tasks_present = !work_absent || my_orphaned_tasks;
499 bool dequeuing_possible =
false;
504 uintptr_t abandonment_epoch = my_abandonment_epoch;
510 the_context_state_propagation_mutex.lock();
511 work_absent = !may_have_tasks( my_slots[0].my_scheduler, tasks_present, dequeuing_possible );
512 the_context_state_propagation_mutex.unlock();
525 for( k = 1; work_absent && k < n; ++k ) {
526 if( my_pool_state!=busy )
528 work_absent = !may_have_tasks( my_slots[k].my_scheduler, tasks_present, dequeuing_possible );
531 work_absent = work_absent
533 && abandonment_epoch == my_abandonment_epoch;
537 if( my_pool_state==busy ) {
538 #if __TBB_TASK_PRIORITY 539 bool no_fifo_tasks = my_task_stream.empty(top_priority);
540 work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks)
541 && top_priority == my_top_priority && reload_epoch == my_reload_epoch;
543 bool no_fifo_tasks = my_task_stream.empty(0);
544 work_absent = work_absent && no_fifo_tasks;
547 #if __TBB_TASK_PRIORITY 548 if ( top_priority > my_bottom_priority ) {
549 if ( my_market->lower_arena_priority(*
this, top_priority - 1, reload_epoch)
550 && !my_task_stream.empty(top_priority) )
552 atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>());
555 else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {
559 int current_demand = (
int)my_max_num_workers;
560 if( my_pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy )==busy ) {
563 my_market->adjust_demand( *
this, -current_demand );
564 restore_priority_if_need();
568 #if __TBB_TASK_PRIORITY 573 my_pool_state.compare_and_swap( SNAPSHOT_FULL, busy );
585 #if __TBB_COUNT_TASK_NODES 586 intptr_t arena::workers_task_node_count() {
588 for(
unsigned i = 1; i < my_num_slots; ++i ) {
591 result += s->my_task_node_count;
599 #if __TBB_RECYCLE_TO_ENQUEUE 610 __TBB_ASSERT( ref_count!=0,
"attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
611 __TBB_ASSERT( ref_count>0,
"attempt to enqueue task whose parent has a ref_count<0" );
616 #if __TBB_PREVIEW_CRITICAL_TASKS 618 #if __TBB_TASK_PRIORITY 621 bool is_critical = internal::is_critical( t ); 632 #if __TBB_TASK_ISOLATION 642 advertise_new_work<work_spawned>();
648 #if __TBB_TASK_PRIORITY 649 intptr_t
p = prio ? normalize_priority(
priority_t(prio)) : normalized_normal_priority;
650 assert_priority_valid(p);
651 #if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD 654 my_task_stream.push( &t, p, random );
656 if ( p != my_top_priority )
657 my_market->update_arena_priority( *
this, p );
659 __TBB_ASSERT_EX(prio == 0,
"the library is not configured to respect the task priority");
660 #if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD 663 my_task_stream.push( &t, 0, random );
666 advertise_new_work<work_enqueued>();
667 #if __TBB_TASK_PRIORITY 668 if ( p != my_top_priority )
669 my_market->update_arena_priority( *
this, p );
676 : my_scheduler(*s), my_orig_ctx(NULL), same_arena(same) {
678 my_orig_state.my_properties = my_scheduler.my_properties;
679 my_orig_state.my_innermost_running_task = my_scheduler.my_innermost_running_task;
680 mimic_outermost_level(a, type);
683 #if __TBB_PREVIEW_RESUMABLE_TASKS 684 my_scheduler.my_properties.genuine =
true;
685 my_scheduler.my_current_is_recalled = NULL;
687 mimic_outermost_level(a, type);
692 #if __TBB_TASK_GROUP_CONTEXT 693 my_scheduler.my_dummy_task->prefix().context = my_orig_ctx;
696 my_scheduler.my_properties = my_orig_state.my_properties;
697 my_scheduler.my_innermost_running_task = my_orig_state.my_innermost_running_task;
699 my_scheduler.nested_arena_exit();
701 #if __TBB_TASK_PRIORITY 702 my_scheduler.my_local_reload_epoch = *my_orig_state.my_ref_reload_epoch;
718 #if __TBB_PREVIEW_CRITICAL_TASKS 721 #if __TBB_TASK_GROUP_CONTEXT 734 #if __TBB_TASK_PRIORITY 735 if ( my_offloaded_tasks )
736 my_arena->orphan_offloaded_tasks( *
this );
737 my_offloaded_tasks = NULL;
739 attach_arena( a, slot_index,
true );
745 if( !is_worker() && slot_index >= my_arena->my_num_reserved_slots )
746 my_arena->my_market->adjust_demand(*my_arena, -1);
747 #if __TBB_ARENA_OBSERVER 748 my_last_local_observer = 0;
749 my_arena->my_observers.notify_entry_observers( my_last_local_observer,
false );
751 #if __TBB_PREVIEW_RESUMABLE_TASKS 757 #if __TBB_ARENA_OBSERVER 758 my_arena->my_observers.notify_exit_observers( my_last_local_observer,
false );
760 #if __TBB_TASK_PRIORITY 761 if ( my_offloaded_tasks )
762 my_arena->orphan_offloaded_tasks( *
this );
764 if( !is_worker() && my_arena_index >= my_arena->my_num_reserved_slots )
765 my_arena->my_market->adjust_demand(*my_arena, 1);
767 __TBB_ASSERT(my_arena->my_slots[my_arena_index].my_scheduler,
"A slot is already empty");
769 my_arena->my_exit_monitors.notify_one();
773 my_dummy_task->prefix().ref_count++;
775 local_wait_for_all(*my_dummy_task, NULL);
776 my_dummy_task->prefix().ref_count--;
779 #if __TBB_PREVIEW_RESUMABLE_TASKS 780 class resume_task :
public task {
787 if (s->prepare_resume(my_target)) {
788 s->resume(my_target);
792 prefix().state = task::to_resume;
817 void internal_suspend(
void* suspend_callback,
void* user_callback) {
820 bool is_recalled = *s.
my_arena_slot->my_scheduler_is_recalled;
823 generic_scheduler::callback_t callback = {
824 (generic_scheduler::suspend_callback_t)suspend_callback, user_callback, &s };
825 target.set_post_resume_action(generic_scheduler::PRA_CALLBACK, &callback);
829 void internal_resume(task::suspend_point tag) {
848 task::suspend_point internal_current_suspend_point() {
860 namespace interface7 {
865 if( my_max_concurrency < 1 )
866 #if __TBB_NUMA_SUPPORT 867 my_max_concurrency = tbb::internal::numa_topology::default_concurrency(numa_id());
871 __TBB_ASSERT( my_master_slots <= (
unsigned)my_max_concurrency,
"Number of slots reserved for master should not exceed arena concurrency");
876 #if __TBB_TASK_GROUP_CONTEXT 880 new_arena->my_default_ctx->capture_fp_settings();
884 if(
as_atomic(my_arena).compare_and_swap(new_arena, NULL) != NULL) {
889 #if __TBB_TASK_GROUP_CONTEXT 892 #if __TBB_TASK_GROUP_CONTEXT || __TBB_NUMA_SUPPORT 894 #if __TBB_NUMA_SUPPORT 895 my_arena->my_numa_binding_observer = tbb::internal::construct_binding_observer(
896 static_cast<task_arena*>(
this), numa_id(), my_arena->my_num_slots);
898 #if __TBB_TASK_GROUP_CONTEXT 899 new_arena->my_default_ctx->my_version_and_traits |= my_version_and_traits & exact_exception_flag;
900 as_atomic(my_context) = new_arena->my_default_ctx;
911 #if __TBB_NUMA_SUPPORT 912 if( my_arena->my_numa_binding_observer != NULL ) {
913 tbb::internal::destroy_binding_observer(my_arena->my_numa_binding_observer);
914 my_arena->my_numa_binding_observer = NULL;
917 my_arena->my_market->release(
true,
false );
920 #if __TBB_TASK_GROUP_CONTEXT 935 #if __TBB_TASK_GROUP_CONTEXT 936 my_context = my_arena->my_default_ctx;
937 my_version_and_traits |= my_context->my_version_and_traits & exact_exception_flag;
939 my_master_slots = my_arena->my_num_reserved_slots;
940 my_max_concurrency = my_master_slots + my_arena->my_max_num_workers;
951 #if __TBB_TASK_GROUP_CONTEXT 954 "The task will not be executed because default task_group_context of task_arena is cancelled. Has previously enqueued task thrown an exception?");
956 my_arena->enqueue_task( t, prio, s->
my_random );
959 class delegated_task :
public task {
975 #if __TBB_TASK_GROUP_CONTEXT 976 orig_ctx = t->prefix().context;
977 t->prefix().context = s.
my_arena->my_default_ctx;
983 ~outermost_context() {
984 #if __TBB_TASK_GROUP_CONTEXT 986 t->prefix().context = orig_ctx;
1000 #if __TBB_PREVIEW_RESUMABLE_TASKS 1001 reference_count old_ref_count = __TBB_FetchAndStoreW(&prefix.ref_count, 1);
1003 if (old_ref_count == internal::abandon_flag + 2) {
1006 tbb::task::resume(prefix.abandoned_scheduler);
1011 my_monitor.
notify(*
this);
1015 : my_delegate(d), my_monitor(s), my_root(t) {}
1017 bool operator()(uintptr_t ctx)
const {
return (
void*)ctx == (
void*)&my_delegate; }
1025 bool same_arena = s->
my_arena == my_arena;
1028 index1 = my_arena->occupy_free_slot<
false>(*s);
1031 #if __TBB_USE_OPTIONAL_RTTI 1045 (internal::forward< graph_funct >(deleg_funct->
my_func)), 0);
1050 #if __TBB_TASK_GROUP_CONTEXT 1052 #if __TBB_FP_CONTEXT 1059 delegated_task(d, my_arena->my_exit_monitors, &root),
1063 my_arena->my_exit_monitors.prepare_wait(waiter, (uintptr_t)&d);
1065 my_arena->my_exit_monitors.cancel_wait(waiter);
1068 index2 = my_arena->occupy_free_slot<
false>(*s);
1070 my_arena->my_exit_monitors.cancel_wait(waiter);
1073 #if TBB_USE_EXCEPTIONS 1079 my_arena->my_exit_monitors.commit_wait(waiter);
1084 my_arena->my_exit_monitors.notify_one();
1086 #if TBB_USE_EXCEPTIONS 1089 TbbRethrowException(pe);
1092 #if __TBB_USE_OPTIONAL_RTTI 1100 #if TBB_USE_EXCEPTIONS 1106 #if TBB_USE_EXCEPTIONS 1110 if (my_version_and_traits & exact_exception_flag)
throw;
1124 class wait_task :
public task {
1153 while( my_arena->num_workers_active() )
1158 &&
as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL) == NULL ) {
1167 if( !my_arena->num_workers_active() && !my_arena->my_slots[0].my_scheduler)
1178 #if __TBB_TASK_ISOLATION 1183 isolation_guard(
isolation_tag &isolation ) : guarded( isolation ), previous_value( isolation ) {}
1184 ~isolation_guard() {
1185 guarded = previous_value;
1192 __TBB_ASSERT( s,
"this_task_arena::isolate() needs an initialized scheduler" );
1197 isolation_guard guard( current_isolation );
1198 current_isolation = isolation? isolation :
reinterpret_cast<isolation_tag>(&
d);
#define GATHER_STATISTIC(x)
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
nested_arena_context(generic_scheduler *s, arena *a, size_t slot_index, bool type, bool same)
task is running, and will be destroyed after method execute() completes.
const isolation_tag no_isolation
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
bool is_worker() const
True if running on a worker thread, false otherwise.
static const intptr_t num_priority_levels
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id head
void __TBB_EXPORTED_METHOD internal_attach()
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
isolation_tag isolation
The tag used for task isolation.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
void __TBB_EXPORTED_METHOD internal_terminate()
static int __TBB_EXPORTED_FUNC internal_max_concurrency(const task_arena *)
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
unsigned char state
A task::state_type, stored as a byte for compactness.
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
Class representing where mail is put.
task object is freshly allocated or recycled.
static const unsigned ref_external
Reference increment values for externals and workers.
T1 atomic_update(tbb::atomic< T1 > &dst, T2 newValue, Pred compare)
Atomically replaces value of dst with newValue if they satisfy condition of compare predicate...
binary_semaphore for concurrent monitor
Base class for types that should not be copied or assigned.
virtual void local_wait_for_all(task &parent, task *child)=0
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
task * my_dummy_task
Fake root task created by slave threads.
generic_scheduler & my_scheduler
#define __TBB_CONTEXT_ARG(arg1, context)
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
bool type
Indicates that a scheduler acts as a master or a worker.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id parent
Exception container that preserves the exact copy of the original exception.
void __TBB_EXPORTED_METHOD internal_wait() const
bool is_quiescent_local_task_pool_reset() const
static int __TBB_EXPORTED_FUNC internal_current_slot()
task **__TBB_atomic task_pool
static generic_scheduler * local_scheduler_if_initialized()
void detach()
Detach inbox from its outbox.
void make_critical(task &t)
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
scheduler_state my_orig_state
void __TBB_store_with_release(volatile T &location, V value)
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
int my_max_concurrency
Concurrency level for deferred initialization.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
Smart holder for the empty task class with automatic destruction.
bool outermost_level() const
True if the scheduler is on the outermost dispatch level.
market * my_market
The market that owns this arena.
Base class for user-defined tasks.
static generic_scheduler * create_worker(market &m, size_t index, bool geniune)
Initialize a scheduler for a worker thread.
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
void mimic_outermost_level(arena *a, bool type)
static const int priority_critical
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available...
scheduler_properties my_properties
intptr_t isolation_tag
A tag for task isolation.
int ref_count() const
The internal reference count.
static unsigned default_num_threads()
virtual task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation))=0
Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption)...
Memory prefix to a task object.
Bit-field representing properties of a sheduler.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p sync_releasing
market * my_market
The market I am in.
void const char const char int ITT_FORMAT __itt_group_sync p
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
static generic_scheduler * local_scheduler_weak()
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
A functor that spawns a task.
unsigned short affinity_id
An id as used for specifying affinity.
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert ...
void __TBB_EXPORTED_METHOD internal_initialize()
void free_arena()
Completes arena shutdown, destructs and deallocates it.
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
#define __TBB_ISOLATION_ARG(arg1, isolation)
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
bool is_out_of_work()
Check if there is job anywhere in arena.
arena(market &, unsigned max_num_workers, unsigned num_reserved_slots)
Constructor.
atomic< unsigned > my_references
Reference counter for the arena.
task_group_context * my_orig_ctx
bool __TBB_EXPORTED_METHOD is_group_execution_cancelled() const
Returns true if the context received cancellation request.
static const size_t out_of_arena
void __TBB_EXPORTED_METHOD internal_execute(delegate_base &) const
void create_coroutine(coroutine_type &c, size_t stack_size, void *arg)
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t isolation=0)
void notify(const P &predicate)
Notify waiting threads of the event that satisfies the given predicate.
task is in ready pool, or is going to be put there, or was just taken off.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id tail
void const char const char int ITT_FORMAT __itt_group_sync s
bool outermost
Indicates that a scheduler is on outermost level.
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
#define ITT_SYNC_CREATE(obj, type, name)
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
Used to form groups of tasks.
A fast random number generator.
Work stealing task scheduler.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
void attach_arena(arena *, size_t index, bool is_master)
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
#define ITT_NOTIFY(name, obj)
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
task_group_context * context()
This method is deprecated and will be removed in the future.
T __TBB_load_relaxed(const volatile T &location)
int current_thread_index()
Returns the index, aka slot number, of the calling thread in its current arena.
static bool occupy_slot(generic_scheduler *&slot, generic_scheduler &s)
state_type state() const
Current execution state.
atomic< T > & as_atomic(T &t)
void advertise_new_work()
If necessary, raise a flag that there is new job in the arena.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type type
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
T __TBB_load_with_acquire(const volatile T &location)
size_t __TBB_EXPORTED_FUNC NFS_GetLineSize()
Cache/sector line size.
Set if ref_count might be changed by another thread. Used for debugging.
intptr_t reference_count
A reference count.
bool is_critical(task &t)
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
bool is_idle_state(bool value) const
Indicate whether thread that reads this mailbox is idle.
internal::arena * my_arena
NULL if not currently initialized.
unsigned short get()
Get a random number.
static unsigned num_arena_slots(unsigned num_slots)
void restore_priority_if_need()
If enqueued tasks found, restore arena priority and task presence status.
static void one_time_init()
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d __itt_event ITT_FORMAT __itt_group_mark d void const wchar_t const wchar_t int ITT_FORMAT __itt_group_sync __itt_group_fsync x void const wchar_t int const wchar_t int int ITT_FORMAT __itt_group_sync __itt_group_fsync x void ITT_FORMAT __itt_group_sync __itt_group_fsync p void ITT_FORMAT __itt_group_sync __itt_group_fsync p void size_t ITT_FORMAT lu no args __itt_obj_prop_t 
__itt_obj_state_t ITT_FORMAT d const char ITT_FORMAT s const char ITT_FORMAT s __itt_frame ITT_FORMAT p __itt_counter ITT_FORMAT p __itt_counter unsigned long long ITT_FORMAT lu __itt_counter unsigned long long ITT_FORMAT lu __itt_counter __itt_clock_domain unsigned long long void ITT_FORMAT p const wchar_t ITT_FORMAT S __itt_mark_type const wchar_t ITT_FORMAT S __itt_mark_type const char ITT_FORMAT s __itt_mark_type ITT_FORMAT d __itt_caller ITT_FORMAT p __itt_caller ITT_FORMAT p no args const __itt_domain __itt_clock_domain unsigned long long __itt_id ITT_FORMAT lu const __itt_domain __itt_clock_domain unsigned long long __itt_id __itt_id void ITT_FORMAT p const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_clock_domain unsigned long long __itt_id __itt_string_handle __itt_scope scope
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
void on_thread_leaving()
Notification that worker or master leaves its arena.
void nested_arena_entry(arena *, size_t)
bool worker_outermost_level() const
True if the scheduler is on the outermost dispatch level in a worker thread.
#define __TBB_CONTEXT_ARG1(context)