#ifndef _TBB_scheduler_H
#define _TBB_scheduler_H

#include "../rml/include/rml_tbb.h"

#if __TBB_SURVIVE_THREAD_SWITCH

#if __TBB_PREVIEW_RESUMABLE_TASKS

template<typename SchedulerTraits> class custom_scheduler;
#define EmptyTaskPool ((task**)0)
#define LockedTaskPool ((task**)~(intptr_t)0)

#if __TBB_PREVIEW_CRITICAL_TASKS
    bool has_taken_critical_task : 1;

#if __TBB_PREVIEW_RESUMABLE_TASKS

#if __TBB_PREVIEW_RESUMABLE_TASKS
#elif __TBB_PREVIEW_CRITICAL_TASKS

#if __TBB_SCHEDULER_OBSERVER
    observer_proxy* my_last_global_observer;

#if __TBB_ARENA_OBSERVER
    observer_proxy* my_last_local_observer;

#if __TBB_TASK_PRIORITY
    volatile intptr_t *my_ref_top_priority;
    volatile uintptr_t *my_ref_reload_epoch;

#if __TBB_PREVIEW_RESUMABLE_TASKS
    tbb::atomic<bool>* my_current_is_recalled;

#if __TBB_PREVIEW_CRITICAL_TASKS

    uintptr_t my_rsb_stealing_threshold;

    static const size_t null_arena_index = ~size_t(0);
    inline bool is_task_pool_published () const;
    inline bool is_local_task_pool_quiescent () const;
    inline bool is_quiescent_local_task_pool_empty () const;
    inline bool is_quiescent_local_task_pool_reset () const;
#if __TBB_HOARD_NONLOCAL_TASKS
    task* my_nonlocal_free_list;

#if __TBB_COUNT_TASK_NODES
    intptr_t my_task_node_count;

#if __TBB_PREVIEW_RESUMABLE_TASKS
    enum post_resume_action {

    typedef void(*suspend_callback_t)(void*, task::suspend_point);

        suspend_callback_t suspend_callback;
        task::suspend_point tag;
            if (suspend_callback) {
                __TBB_ASSERT(suspend_callback && user_callback && tag, NULL);
                suspend_callback(user_callback, tag);
    post_resume_action my_post_resume_action;
    void* my_post_resume_arg;

    void set_post_resume_action(post_resume_action, void* arg);
    void do_post_resume_action();
    bool resume_original_scheduler();
    friend void recall_function(task::suspend_point tag);

    void init_stack_info ();
        return my_stealing_threshold < (uintptr_t)&anchor && (uintptr_t)__TBB_get_bsp() < my_rsb_stealing_threshold;

        return my_stealing_threshold < (uintptr_t)&anchor;
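// A minimal standalone sketch (not part of scheduler.h) of the stack-depth heuristic that
// can_steal() relies on: stealing is allowed only while the address of a stack local stays
// above a precomputed threshold, i.e. while enough stack headroom remains. The names
// stealing_limiter, init and steal_fraction are illustrative assumptions for this example.
#include <cstddef>
#include <cstdint>

struct stealing_limiter {
    std::uintptr_t stealing_threshold = 0;

    // Reserve a fraction of the stack for task execution; refuse to steal below it.
    // Assumes a downward-growing stack, which holds on mainstream platforms.
    void init(const char* stack_base, std::size_t stack_size, unsigned steal_fraction = 2) {
        stealing_threshold =
            reinterpret_cast<std::uintptr_t>(stack_base) - stack_size / steal_fraction;
    }

    bool can_steal() const {
        int anchor; // the address of a local variable approximates the current stack depth
        return stealing_threshold < reinterpret_cast<std::uintptr_t>(&anchor);
    }
};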
    void publish_task_pool();
    void leave_task_pool();
    inline void reset_task_pool_and_leave ();

    void unlock_task_pool( arena_slot* victim_arena_slot, task** victim_task_pool ) const;
    void acquire_task_pool() const;
    void release_task_pool() const;
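// Illustrative sketch, not the TBB implementation: the declarations above suggest a task pool
// that is locked by publishing a sentinel pointer in the arena slot. The sketch below shows one
// way such a protocol can look with std::atomic; EmptyPool, LockedPool and slot_pool are
// assumptions made for this example only.
#include <atomic>
#include <cstdint>

using task_ptr = void*;
static task_ptr* const EmptyPool  = nullptr;
static task_ptr* const LockedPool = reinterpret_cast<task_ptr*>(~std::uintptr_t(0));

std::atomic<task_ptr*> slot_pool{EmptyPool};   // pointer published in the arena slot

// Lock the pool by swapping in the sentinel; returns the real pool pointer we now own.
task_ptr* acquire_pool() {
    for (;;) {
        task_ptr* p = slot_pool.exchange(LockedPool, std::memory_order_acquire);
        if (p != LockedPool)
            return p;      // we hold the lock; p may be EmptyPool if nothing was published
        // another thread holds the lock; spin and retry
    }
}

// Unlock by restoring the real pointer with release semantics.
void release_pool(task_ptr* p) {
    slot_pool.store(p, std::memory_order_release);
}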
    task* prepare_for_spawning( task* t );
    inline void commit_spawned_tasks( size_t new_tail );
    inline void commit_relocated_tasks( size_t new_tail );
#if __TBB_TASK_ISOLATION
    task* get_task( size_t T );

#if __TBB_PREVIEW_CRITICAL_TASKS
    bool handled_as_critical( task& t );

    static const size_t min_task_pool_size = 64;

    size_t prepare_task_pool( size_t n );
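// Hypothetical sketch of the kind of growth policy prepare_task_pool() implies: keep the deque
// at least min_pool_size entries large and grow it geometrically whenever n more slots would not
// fit, relocating the live tasks. task_deque_sketch and its fields are assumptions for this
// example, not TBB internals.
#include <cstddef>
#include <cstdlib>
#include <cstring>

struct task_deque_sketch {
    void**      pool     = nullptr;
    std::size_t capacity = 0;
    std::size_t head     = 0;
    std::size_t tail     = 0;
    static constexpr std::size_t min_pool_size = 64;

    // Ensure room for n more entries; returns the tail index to start filling at.
    std::size_t prepare(std::size_t n) {
        if (tail + n <= capacity)
            return tail;
        std::size_t new_capacity = capacity ? capacity : min_pool_size;
        while (tail - head + n > new_capacity)
            new_capacity *= 2;                          // grow geometrically
        void** new_pool = static_cast<void**>(std::malloc(new_capacity * sizeof(void*)));
        // Allocation-failure handling is omitted in this sketch.
        if (tail > head)
            std::memcpy(new_pool, pool + head, (tail - head) * sizeof(void*)); // relocate live tasks
        std::free(pool);
        pool     = new_pool;
        tail    -= head;                                // indices are rebased after relocation
        head     = 0;
        capacity = new_capacity;
        return tail;
    }
};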
    bool cleanup_master( bool blocking_terminate );
    static void cleanup_worker( void* arg, bool worker );

#if TBB_USE_ASSERT > 1
    void assert_task_pool_valid() const;

    void attach_arena( arena*, size_t index, bool is_master );
    void nested_arena_entry( arena*, size_t );
    void nested_arena_exit();
    void wait_until_empty();
    void local_spawn( task* first, task*& next );
    void local_spawn_root_and_wait( task* first, task*& next );
    void cleanup_scheduler();
    task& allocate_task( size_t number_of_bytes,
                         __TBB_CONTEXT_ARG(task* parent, task_group_context* context) );

    template<free_task_hint h>
    void free_task( task& t );
    inline void deallocate_task( task& t );

    inline bool is_worker() const;
    inline bool outermost_level() const;
    inline bool master_outermost_level () const;
    inline bool worker_outermost_level () const;

    unsigned max_threads_in_arena();

#if __TBB_COUNT_TASK_NODES
    intptr_t get_task_node_count( bool count_arena_workers = false );

    void free_nonlocal_small_task( task& t );
#if __TBB_TASK_GROUP_CONTEXT
    uintptr_t my_context_state_propagation_epoch;
    tbb::atomic<uintptr_t> my_local_ctx_list_update;

#if __TBB_TASK_PRIORITY
    inline intptr_t effective_reference_priority () const;

    task* my_offloaded_tasks;
    task** my_offloaded_task_list_tail_link;
    uintptr_t my_local_reload_epoch;
    volatile bool my_pool_reshuffling_pending;

    inline void offload_task ( task& t, intptr_t task_priority );
    void cleanup_local_context_list ();
    template <typename T>

    __TBB_ASSERT(is_alive(ctx), "referenced task_group_context was destroyed");
    static const char *msg = "task_group_context is invalid";

#if __TBB_TASK_PRIORITY
#if TBB_USE_ASSERT > 1

    ::rml::server::execution_resource_t master_exec_resource;

#if __TBB_TASK_GROUP_CONTEXT
    tbb::atomic<uintptr_t> my_nonlocal_ctx_list_update;

#if __TBB_SURVIVE_THREAD_SWITCH
    cilk_state_t my_cilk_state;

    mutable statistics_counters my_counters;
    task** tp = my_arena_slot->task_pool;

    __TBB_ASSERT( is_local_task_pool_quiescent(), "Task pool is not quiescent" );

    __TBB_ASSERT( is_local_task_pool_quiescent(), "Task pool is not quiescent" );

    return my_properties.outermost;

    return !is_worker() && outermost_level();

    return is_worker() && outermost_level();
#if __TBB_TASK_GROUP_CONTEXT

    my_inbox.attach( my_arena->mailbox(id) );

    return my_arena->my_num_slots;

#if __TBB_COUNT_TASK_NODES
    --my_task_node_count;

#if __TBB_COUNT_TASK_NODES
inline intptr_t generic_scheduler::get_task_node_count( bool count_arena_workers ) {
    return my_task_node_count + (count_arena_workers ? my_arena->workers_task_node_count() : 0);

    __TBB_ASSERT ( new_tail <= my_arena_slot->my_task_pool_size, "task deque end was overwritten" );

    "Task pool must be locked when calling commit_relocated_tasks()" );
template<free_task_hint hint>
#if __TBB_HOARD_NONLOCAL_TASKS
#if __TBB_PREVIEW_RESUMABLE_TASKS

        p.next = my_free_list;
    } else if( !(h&local_task) && p.origin && uintptr_t(p.origin) < uintptr_t(4096) ) {
    } else if( !(h&local_task) && p.origin ) {
#if __TBB_HOARD_NONLOCAL_TASKS
        p.next = my_nonlocal_free_list;
        my_nonlocal_free_list = &t;

        free_nonlocal_small_task(t);
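// Hypothetical sketch of the routing that the free_task() fragments above perform: a freed task
// is pushed onto the calling thread's free list when this thread allocated it, cached on a
// separate non-local list when hoarding is enabled, and otherwise handed back or deallocated.
// The types below are assumptions for this example, not the TBB task allocator.
struct task_stub {
    task_stub* next   = nullptr;
    void*      origin = nullptr;   // allocating scheduler, or nullptr for a "big" task
};

struct scheduler_free_lists {
    void*      self;                         // identity of this scheduler
    task_stub* my_free_list          = nullptr;
    task_stub* my_nonlocal_free_list = nullptr;

    void free_task(task_stub& t, bool hoard_nonlocal) {
        if (t.origin == self) {              // locally allocated: cheapest path
            t.next = my_free_list;
            my_free_list = &t;
        } else if (t.origin && hoard_nonlocal) {
            t.next = my_nonlocal_free_list;  // keep foreign small tasks for later reuse
            my_nonlocal_free_list = &t;
        } else if (t.origin) {
            return_to_origin(t);             // hand the task back to the allocating scheduler
        } else {
            deallocate(t);                   // big task: release the memory directly
        }
    }

    void return_to_origin(task_stub&) {}     // placeholders for the sketch
    void deallocate(task_stub&)      {}
};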
#if __TBB_TASK_PRIORITY
inline intptr_t generic_scheduler::effective_reference_priority () const {
    return !worker_outermost_level() ||
           my_arena->my_num_workers_allotted < my_arena->num_workers_active()
               ? *my_ref_top_priority : my_arena->my_top_priority;
inline void generic_scheduler::offload_task ( task& t, intptr_t ) {
    __TBB_ASSERT( !is_proxy(t), "The proxy task cannot be offloaded" );
    __TBB_ASSERT( my_offloaded_task_list_tail_link && !*my_offloaded_task_list_tail_link, NULL );

    my_offloaded_tasks = &t;
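// Minimal sketch of the intrusive list that my_offloaded_tasks and
// my_offloaded_task_list_tail_link describe: lower-priority tasks are prepended to a singly
// linked list while a pointer to the terminating link is kept, so the whole list can later be
// detached in O(1). offload_list and offload_node are illustrative names, not TBB internals.
struct offload_node {
    offload_node* next_offloaded = nullptr;
};

struct offload_list {
    offload_node*  head      = nullptr;
    offload_node** tail_link = &head;        // points at the link that terminates the list

    void offload(offload_node& n) {
        n.next_offloaded = head;             // push on the front
        if (head == nullptr)
            tail_link = &n.next_offloaded;   // the first element also becomes the tail
        head = &n;
    }

    // Detach the whole list, e.g. to orphan offloaded tasks to the arena.
    offload_node* take_all() {
        offload_node* all = head;
        head = nullptr;
        tail_link = &head;
        return all;
    }
};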
#if __TBB_PREVIEW_RESUMABLE_TASKS
inline void generic_scheduler::set_post_resume_action(post_resume_action pra, void* arg) {
    __TBB_ASSERT(my_post_resume_action == PRA_NONE, "Post resume action has already been set.");
    my_post_resume_action = pra;
    my_post_resume_arg = arg;
    if (my_properties.outermost && my_wait_task == my_dummy_task) {
        if (my_properties.genuine) {
            target.set_post_resume_action(PRA_NOTIFY, my_current_is_recalled);

        target.set_post_resume_action(PRA_CLEANUP, this);
        my_target_on_exit = &target;

    my_wait_task->prefix().abandoned_scheduler = this;
    target.set_post_resume_action(PRA_ABANDON, my_wait_task);
inline bool generic_scheduler::resume_original_scheduler() {
    if (!prepare_resume(target)) {

        "The post resume action is not set. Has prepare_resume been called?");
#if __TBB_SCHEDULER_OBSERVER
    target.my_last_global_observer = my_last_global_observer;
#if __TBB_ARENA_OBSERVER
    target.my_last_local_observer = my_last_local_observer;
#if __TBB_TASK_PRIORITY
    if (my_offloaded_tasks)
        my_arena->orphan_offloaded_tasks(*this);

    my_co_context.resume(target.my_co_context);

    do_post_resume_action();
    if (this == my_arena_slot->my_scheduler) {
        my_arena_slot->my_scheduler_is_recalled->store<tbb::relaxed>(false);
inline void generic_scheduler::do_post_resume_action() {
    __TBB_ASSERT(my_post_resume_action != PRA_NONE, "The post resume action is not set.");
    switch (my_post_resume_action) {

        task_prefix& wait_task_prefix = static_cast<task*>(my_post_resume_arg)->prefix();

        if (old_ref_count == 1) {

            tbb::task::resume(wait_task_prefix.abandoned_scheduler);

        callback_t callback = *static_cast<callback_t*>(my_post_resume_arg);

        to_cleanup->my_arena->my_co_cache.push(to_cleanup);

        tbb::atomic<bool>& scheduler_recall_flag = *static_cast<tbb::atomic<bool>*>(my_post_resume_arg);
        scheduler_recall_flag = true;

    my_post_resume_action = PRA_NONE;
    my_post_resume_arg = NULL;
struct recall_functor {
    tbb::atomic<bool>* scheduler_recall_flag;

    recall_functor(tbb::atomic<bool>* recall_flag_) :
        scheduler_recall_flag(recall_flag_) {}

    void operator()(task::suspend_point) {
        *scheduler_recall_flag = true;
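// Usage-level illustration of the preview API that recall_functor and the post-resume machinery
// serve, assuming a TBB build with the resumable-tasks preview enabled: a task suspends its
// dispatch loop, hands its suspend_point to some asynchronous activity, and is resumed later.
// register_completion_handler is an assumption made for this example; tbb::task::suspend and
// tbb::task::resume are the preview entry points.
#define TBB_PREVIEW_RESUMABLE_TASKS 1
#include <tbb/task.h>
#include <thread>

// Stand-in for an asynchronous completion source (I/O, timer, event loop, ...).
void register_completion_handler(tbb::task::suspend_point tag) {
    // Simulate a completion that arrives later on another thread.
    std::thread([tag] { tbb::task::resume(tag); }).detach();
}

void wait_for_async_result() {
    tbb::task::suspend([](tbb::task::suspend_point tag) {
        // Runs after the dispatch loop has been left; the worker is free to take other tasks.
        register_completion_handler(tag);
    });
    // Execution continues here on some thread once resume(tag) has been called.
}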
    s.do_post_resume_action();

    s.resume(*s.my_target_on_exit);
#if __TBB_TASK_GROUP_CONTEXT

template <bool report_tasks>

    curr_cpu_ctl_env = guard_cpu_ctl_env;

    if ( curr_cpu_ctl_env != guard_cpu_ctl_env )

    if ( report_tasks && curr_ctx )

    generic_scheduler::assert_context_valid( ctx );

    if ( ctl != curr_cpu_ctl_env ) {
        curr_cpu_ctl_env = ctl;

    if ( report_tasks && ctx != curr_ctx ) {

    void restore_default() {
#if __TBB_FP_CONTEXT
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) {
            curr_cpu_ctl_env = guard_cpu_ctl_env;
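// Standalone sketch of the RAII pattern that context_guard_helper implements for floating-point
// settings: capture the caller's FP control state on entry, switch to a task context's captured
// state while it runs, and restore the original state on exit. This sketch only handles the x86
// MXCSR register via _mm_getcsr/_mm_setcsr and is an illustration, not the TBB cpu_ctl_env code.
#include <xmmintrin.h>

class fp_ctl_guard {
    unsigned guard_csr;   // state captured when the guard was constructed
    unsigned curr_csr;    // state currently installed
public:
    fp_ctl_guard() : guard_csr(_mm_getcsr()), curr_csr(guard_csr) {}

    // Install the FP settings captured for a task's context, if they differ.
    void set_ctx(unsigned task_csr) {
        if (task_csr != curr_csr) {
            curr_csr = task_csr;
            _mm_setcsr(task_csr);
        }
    }

    // Return to the state the owner thread had before dispatching tasks.
    void restore_default() {
        if (curr_csr != guard_csr) {
            curr_csr = guard_csr;
            _mm_setcsr(guard_csr);
        }
    }

    ~fp_ctl_guard() { restore_default(); }
};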
void * __TBB_get_bsp()
Retrieves the current RSE backing store pointer. IA64 specific.
#define GATHER_STATISTIC(x)
bool can_steal()
Returns true if stealing is allowed.
#define __TBB_store_release
executing
task is running, and will be destroyed after method execute() completes.
custom_scheduler
A scheduler with a customized evaluation loop.
task * next_offloaded
Pointer to the next offloaded lower priority task.
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
bool is_worker() const
True if running on a worker thread, false otherwise.
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
#define __TBB_ISOLATION_EXPR(isolation)
small_local_task
Bitwise-OR of local_task and small_task.
void co_local_wait_for_all(void *)
tbb::task * next
"next" field for list of task
context_list_node_t * my_prev
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
void commit_relocated_tasks(size_t new_tail)
Makes relocated tasks visible to thieves and releases the local task pool.
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
void free_task(task &t)
Put task on free list.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
uintptr_t my_stealing_threshold
Position in the call stack specifying its maximal filling when stealing is still allowed.
unsigned char state
A task::state_type, stored as a byte for compactness.
int depth
Obsolete. Used to be scheduling depth before TBB 2.2.
allocated
task object is freshly allocated or recycled.
static const unsigned ref_external
Reference increment values for externals and workers.
T punned_cast(U *ptr)
Cast between unrelated pointer types.
virtual void local_wait_for_all(task &parent, task *child)=0
task * my_dummy_task
Fake root task created by slave threads.
bool my_auto_initialized
True if *this was created by automatic TBB initialization.
#define __TBB_CONTEXT_ARG(arg1, context)
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
task * my_return_list
List of small tasks that have been returned to this scheduler by other schedulers.
__TBB_atomic intptr_t my_small_task_count
Number of small tasks that have been allocated by this scheduler.
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
no_cache
Disable caching for a small task.
local_task
Task is known to have been allocated by this scheduler.
bool type
Indicates that a scheduler acts as a master or a worker.
void reset_task_pool_and_leave()
Resets head and tail indices to 0, and leaves task pool.
void __TBB_store_relaxed(volatile T &location, V value)
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
bool is_quiescent_local_task_pool_reset() const
void resume(co_context &target)
static bool is_version_3_task(task &t)
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
small_task
Task is known to be a small task.
void __TBB_store_with_release(volatile T &location, V value)
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
freed
task object is on free list, or is going to be put there, or was just taken off.
bool outermost_level() const
True if the scheduler is on the outermost dispatch level.
task
Base class for user-defined tasks.
void init_stack_info()
Sets up the data necessary for the stealing limiting heuristics.
bool is_quiescent_local_task_pool_empty() const
bool is_task_pool_published() const
void commit_spawned_tasks(size_t new_tail)
Makes newly spawned tasks visible to thieves.
scheduler_properties my_properties
affinity_id my_affinity_id
The mailbox id assigned to this scheduler.
internal::cpu_ctl_env_space my_cpu_ctl_env
Space for platform-specific FPU settings.
void attach_mailbox(affinity_id id)
intptr_t isolation_tag
A tag for task isolation.
spin_mutex
A lock that occupies a single byte.
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
task * my_free_list
Free list of small tasks that can be reused.
task_prefix
Memory prefix to a task object.
scheduler_properties
Bit-field representing properties of a scheduler.
market * my_market
The market I am in.
intrusive_list_node
Data structure to be inherited by the types that can form intrusive lists.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
unsigned max_threads_in_arena()
Returns the concurrency limit of the current arena.
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
bool is_local_task_pool_quiescent() const
static bool is_proxy(const task &t)
True if t is a task_proxy.
unsigned short affinity_id
An id as used for specifying affinity.
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
mail_inbox
Class representing source of mail.
intptr_t my_priority
Priority level of the task group (in normalized representation)
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
#define __TBB_ISOLATION_ARG(arg1, isolation)
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
internal::string_index my_name
Description of algorithm for scheduler based instrumentation.
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
auto first(Container &c) -> decltype(begin(c))
free_task_hint
Optimization hint to free_task that enables it to omit unnecessary tests and code.
ready
task is in ready pool, or is going to be put there, or was just taken off.
bool outermost
Indicates that a scheduler is on outermost level.
context_list_node_t * my_next
void deallocate_task(task &t)
Return task object to the memory allocator.
task_group_context
Used to form groups of tasks.
FastRandom
A fast random number generator.
generic_scheduler
Work stealing task scheduler.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
scheduler * owner
Obsolete. The scheduler that owns the task.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
#define ITT_NOTIFY(name, obj)
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
T __TBB_load_relaxed(const volatile T &location)
static task * plugged_return_list()
Special value used to mark my_return_list as not taking any more entries.
state_type state() const
Current execution state.
void poison_pointer(T *__TBB_atomic &)
long my_ref_count
Reference count for scheduler.
void assert_task_pool_valid() const
#define ITT_TASK_BEGIN(type, name, id)
unsigned char
Reserved bits.
intptr_t reference_count
A reference count.
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
static const kind_type dying
void on_thread_leaving()
Notification that worker or master leaves its arena.
bool worker_outermost_level() const
True if the scheduler is on the outermost dispatch level in a worker thread.