#if __TBB_STATISTICS_STDOUT
// ...
#endif

void generic_scheduler::attach_arena( arena* a, size_t index, bool is_master ) {
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif
#if __TBB_TASK_PRIORITY
    // ...
    my_ref_top_priority = &a->my_top_priority;
    my_ref_reload_epoch = &a->my_reload_epoch;
    // ...
    my_local_reload_epoch = *my_ref_reload_epoch;
#endif
}
static bool occupy_slot( generic_scheduler*& slot, generic_scheduler& s ) {
    return !slot && as_atomic( slot ).compare_and_swap( &s, NULL ) == NULL;
}
size_t arena::occupy_free_slot_in_range( generic_scheduler& s, size_t lower, size_t upper ) {
    if ( lower >= upper )
        return out_of_arena;
    // Start the search from the slot the scheduler occupied last time, if it falls into the range.
    size_t index = s.my_arena_index;
    if ( index < lower || index >= upper )
        index = s.my_random.get() % (upper - lower) + lower;
    // Scan from index to the end of the range, then wrap around to its beginning.
    for ( size_t i = index; i < upper; ++i )
        if ( occupy_slot(my_slots[i].my_scheduler, s) )
            return i;
    for ( size_t i = lower; i < index; ++i )
        if ( occupy_slot(my_slots[i].my_scheduler, s) )
            return i;
    return out_of_arena;
}
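// Illustration (not from arena.cpp): the wrap-around probing used by
// occupy_free_slot_in_range above, reduced to standalone C++11. The names
// SlotArray and try_occupy are hypothetical; like the TBB code, a slot is
// claimed by atomically swinging its owner pointer from null to the caller.

#include <atomic>
#include <cstddef>

struct SlotArray {
    std::atomic<void*>* slots;   // element == nullptr means "slot is free"
    std::size_t size;

    bool try_occupy( std::size_t i, void* owner ) {
        void* expected = nullptr;
        return slots[i].compare_exchange_strong( expected, owner );
    }

    // Scan [start, size), then wrap around to [0, start), mirroring the two loops above.
    std::size_t occupy_from( std::size_t start, void* owner ) {
        for ( std::size_t i = start; i < size; ++i )
            if ( try_occupy( i, owner ) ) return i;
        for ( std::size_t i = 0; i < start; ++i )
            if ( try_occupy( i, owner ) ) return i;
        return size;   // plays the role of out_of_arena
    }
};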
template <bool as_worker>
size_t arena::occupy_free_slot( generic_scheduler& s ) {
    // Firstly, masters try to occupy one of the reserved slots.
    size_t index = as_worker ? out_of_arena : occupy_free_slot_in_range( s, 0, my_num_reserved_slots );
    if ( index == out_of_arena ) {
        // Secondly, all threads try to occupy the non-reserved slots.
        index = occupy_free_slot_in_range( s, my_num_reserved_slots, my_num_slots );
        if ( index == out_of_arena )
            return out_of_arena;
    }
    // Grow the published slot limit so that thieves inspect the newly occupied slot.
    atomic_update( my_limit, (unsigned)(index + 1), std::less<unsigned>() );
    return index;
}
void arena::process( generic_scheduler& s ) {
    // ...
    size_t index = occupy_free_slot</*as_worker*/true>( s );
    if ( index == out_of_arena )
        goto quit;

    __TBB_ASSERT( index >= my_num_reserved_slots, "Workers cannot occupy reserved slots" );
    // ...
#if !__TBB_FP_CONTEXT
    my_cpu_ctl_env.set_env();
#endif
#if __TBB_ARENA_OBSERVER
    __TBB_ASSERT( !s.my_last_local_observer, "There cannot be notified local observers when entering arena" );
    my_observers.notify_entry_observers( s.my_last_local_observer, /*worker=*/true );
#endif
    // ... (dispatch loop)
        __TBB_ASSERT( s.is_quiescent_local_task_pool_reset(),
                      "Worker cannot leave arena while its task pool is not reset" );
        // ...
        if ( is_recall_requested() )
            break;
    // ...
#if __TBB_ARENA_OBSERVER
    my_observers.notify_exit_observers( s.my_last_local_observer, /*worker=*/true );
    s.my_last_local_observer = NULL;
#endif
#if __TBB_TASK_PRIORITY
    if ( s.my_offloaded_tasks )
        orphan_offloaded_tasks( s );
#endif
#if __TBB_STATISTICS
    ++s.my_counters.arena_roundtrips;
    *my_slots[index].my_counters += s.my_counters;
    s.my_counters.reset();
#endif
    // ...
quit:
    // ...
    on_thread_leaving<ref_worker>();
}
arena::arena ( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
    __TBB_ASSERT( !my_guard, "improperly allocated arena?" );
    // ...
#if __TBB_TASK_PRIORITY
    __TBB_ASSERT( !my_reload_epoch && !my_orphaned_tasks && !my_skipped_fifo_priority,
                  "New arena object is not zeroed" );
#endif
    // ...
    my_num_slots = num_arena_slots(num_slots);
    my_num_reserved_slots = num_reserved_slots;
    my_max_num_workers = num_slots - num_reserved_slots;
    my_references = ref_external; // accounts for the master
#if __TBB_TASK_PRIORITY
    my_bottom_priority = my_top_priority = normalized_normal_priority;
#endif
    // ...
#if __TBB_ARENA_OBSERVER
    my_observers.my_arena = this;
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    my_co_cache.init(4 * num_slots);
#endif
    __TBB_ASSERT ( my_max_num_workers <= my_num_slots, NULL );
    // Construct the slots; mark the internal synchronization objects for the tools.
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
        __TBB_ASSERT( !my_slots[i].my_scheduler && !my_slots[i].task_pool, NULL );
        // ...
#if __TBB_PREVIEW_RESUMABLE_TASKS
        __TBB_ASSERT( !my_slots[i].my_scheduler_is_recalled, NULL );
#endif
        ITT_SYNC_CREATE(my_slots + i, SyncType_Scheduler, SyncObj_WorkerTaskPool);
        mailbox(i+1).construct();
        // ...
        my_slots[i].hint_for_pop = i;
#if __TBB_PREVIEW_CRITICAL_TASKS
        my_slots[i].hint_for_critical = i;
#endif
#if __TBB_STATISTICS
        my_slots[i].my_counters = new ( NFS_Allocate(1, sizeof(statistics_counters), NULL) ) statistics_counters;
#endif
    }
    my_task_stream.initialize(my_num_slots);
    ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream);
#if __TBB_PREVIEW_CRITICAL_TASKS
    my_critical_task_stream.initialize(my_num_slots);
    ITT_SYNC_CREATE(&my_critical_task_stream, SyncType_Scheduler, SyncObj_CriticalTaskStream);
#endif
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    my_local_concurrency_mode = false;
    my_global_concurrency_mode = false;
#endif
#if !__TBB_FP_CONTEXT
    my_cpu_ctl_env.get_env();
#endif
}
arena& arena::allocate_arena( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
    // ...
    size_t n = allocation_size(num_arena_slots(num_slots));
    unsigned char* storage = (unsigned char*)NFS_Allocate( 1, n, NULL );
    // Zero all slots to indicate that they are empty.
    memset( storage, 0, n );
    // The arena object is constructed right after the mailboxes that share its allocation.
    return *new( storage + num_arena_slots(num_slots) * sizeof(mail_outbox) )
        arena(m, num_slots, num_reserved_slots);
}
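// Illustration (not from arena.cpp): allocate_arena above carves one zeroed,
// cache-aligned allocation into "N mail_outbox objects followed by the arena
// object" and constructs the arena with placement new at the computed offset.
// A minimal standalone sketch of the same layout trick; Outbox, Header and
// make_block are hypothetical names, and plain malloc stands in for NFS_Allocate.

#include <cstdlib>
#include <cstring>
#include <new>

struct Outbox { char pad[64]; };        // stand-in for a cache-line sized mailbox
struct Header { unsigned num_slots; };  // stand-in for the arena object

Header* make_block( unsigned n ) {
    std::size_t bytes = n * sizeof(Outbox) + sizeof(Header);
    void* storage = std::malloc( bytes );
    std::memset( storage, 0, bytes );   // zeroed memory marks every slot as empty
    // Outboxes occupy the front of the block; the header is constructed right after them,
    // so the block can later be recovered from the header address (cf. &mailbox(my_num_slots)).
    return new ( static_cast<char*>(storage) + n * sizeof(Outbox) ) Header{ n };
}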
void arena::free_arena () {
    __TBB_ASSERT( !my_references, "There are threads in the dying arena" );
    __TBB_ASSERT( !my_num_workers_requested && !my_num_workers_allotted, "Dying arena requests workers" );
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, "Inconsistent state of a dying arena" );
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    // ...
#endif
#if !__TBB_STATISTICS_EARLY_DUMP
    // ...
#endif
    intptr_t drained = 0;
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
        __TBB_ASSERT( !my_slots[i].my_scheduler, "arena slot is not empty" );
        // ...
        my_slots[i].free_task_pool();
#if __TBB_STATISTICS
        NFS_Free( my_slots[i].my_counters );
#endif
        drained += mailbox(i+1).drain();
    }
    __TBB_ASSERT( my_task_stream.drain()==0, "Not all enqueued tasks were executed");
#if __TBB_PREVIEW_RESUMABLE_TASKS
    // ...
    my_co_cache.cleanup();
#endif
#if __TBB_PREVIEW_CRITICAL_TASKS
    __TBB_ASSERT( my_critical_task_stream.drain()==0, "Not all critical tasks were executed");
#endif
#if __TBB_COUNT_TASK_NODES
    my_market->update_task_node_count( -drained );
#endif
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_default_ctx, "Master thread never entered the arena?" );
    my_default_ctx->~task_group_context();
    // ...
#endif
#if __TBB_ARENA_OBSERVER
    if ( !my_observers.empty() )
        my_observers.clear();
#endif
    void* storage = &mailbox(my_num_slots);
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );
    this->~arena();
#if TBB_USE_ASSERT > 1
    memset( storage, 0, allocation_size(my_num_slots) );
#endif
    NFS_Free( storage );
}
void arena::dump_arena_statistics () {
    statistics_counters total;
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
#if __TBB_STATISTICS_EARLY_DUMP
        generic_scheduler* s = my_slots[i].my_scheduler;
        if ( s )
            *my_slots[i].my_counters += s->my_counters;
#endif
        if ( i != 0 ) {
            total += *my_slots[i].my_counters;
            dump_statistics( *my_slots[i].my_counters, i );
        }
    }
    dump_statistics( *my_slots[0].my_counters, 0 );
#if __TBB_STATISTICS_STDOUT
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "----------------------------------------------\n" );
#endif
    dump_statistics( total, workers_counters_total );
    total += *my_slots[0].my_counters;
    dump_statistics( total, arena_counters_total );
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "==============================================\n" );
#endif
#endif /* __TBB_STATISTICS_STDOUT */
}
#if __TBB_TASK_PRIORITY
// Checks whether the given scheduler may still hold tasks that this arena could execute.
inline bool arena::may_have_tasks ( generic_scheduler* s, bool& tasks_present, bool& dequeuing_possible ) {
    // ...
    if ( s->my_pool_reshuffling_pending ) {
        // The primary task pool is nonempty and may contain tasks at the current priority level.
        tasks_present = true;
        return true;
    }
    if ( s->my_offloaded_tasks ) {
        tasks_present = true;
        if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {
            // The offload area may still contain tasks at the current priority level.
            return true;
        }
    }
    return false;
}
void arena::orphan_offloaded_tasks ( generic_scheduler& s ) {
    // ...
    ++my_abandonment_epoch;
    __TBB_ASSERT( s.my_offloaded_task_list_tail_link && !*s.my_offloaded_task_list_tail_link, NULL );
    task* orphans;
    do {
        orphans = const_cast<task*>( my_orphaned_tasks );
        *s.my_offloaded_task_list_tail_link = orphans;
    } while ( as_atomic(my_orphaned_tasks).compare_and_swap(s.my_offloaded_tasks, orphans) != orphans );
    s.my_offloaded_tasks = NULL;
    // ...
    s.my_offloaded_task_list_tail_link = NULL;
}
#endif /* __TBB_TASK_PRIORITY */
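// Illustration (not from arena.cpp): the do/while loop in orphan_offloaded_tasks
// above splices a privately linked chain onto a shared list head with a CAS retry
// loop. The same pattern in standalone C++11; Node and push_chain are hypothetical.

#include <atomic>

struct Node { Node* next; };

// Prepend the chain [first .. last] onto 'head'; retried until the CAS succeeds.
void push_chain( std::atomic<Node*>& head, Node* first, Node* last ) {
    Node* old_head = head.load( std::memory_order_relaxed );
    do {
        last->next = old_head;   // link our tail to the currently observed head
    } while ( !head.compare_exchange_weak( old_head, first,
                                           std::memory_order_release,
                                           std::memory_order_relaxed ) );
}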
bool arena::has_enqueued_tasks() {
    // Look for enqueued tasks at all priority levels.
    for ( int p = 0; p < num_priority_levels; ++p )
        if ( !my_task_stream.empty(p) )
            return true;
    return false;
}

void arena::restore_priority_if_need() {
    if ( has_enqueued_tasks() ) {
        advertise_new_work<work_enqueued>();
#if __TBB_TASK_PRIORITY
        for ( int p = 0; p < num_priority_levels; ++p )
            if ( !my_task_stream.empty(p) ) {
                if ( p < my_bottom_priority || p > my_top_priority )
                    my_market->update_arena_priority( *this, p );
            }
#endif
    }
}
bool arena::is_out_of_work() {
    // ...
    switch ( my_pool_state ) {
        // ...
        case SNAPSHOT_FULL: {
            // Use a unique value as the "busy" marker to avoid ABA problems.
            const pool_state_t busy = pool_state_t(&busy);
            // Request permission to take a snapshot of the arena's task pools.
            if ( my_pool_state.compare_and_swap( busy, SNAPSHOT_FULL ) == SNAPSHOT_FULL ) {
                // Got permission; take the snapshot.
                size_t n = my_limit;
                // ...
#if __TBB_TASK_PRIORITY
                intptr_t top_priority = my_top_priority;
                uintptr_t reload_epoch = __TBB_load_with_acquire( my_reload_epoch );
#endif
                // Check whether the individual task pools are empty.
                size_t k;
                for ( k = 0; k < n; ++k ) {
                    // ...
                    if ( my_pool_state != busy )
                        return false; // the work was published
                }
                bool work_absent = k == n;
#if __TBB_PREVIEW_CRITICAL_TASKS
                bool no_critical_tasks = my_critical_task_stream.empty(0);
                work_absent &= no_critical_tasks;
#endif
#if __TBB_TASK_PRIORITY
                bool tasks_present = !work_absent || my_orphaned_tasks;
                bool dequeuing_possible = false;
                if ( work_absent ) {
                    // ...
                    uintptr_t abandonment_epoch = my_abandonment_epoch;
                    // The lock below excludes concurrency with task group state propagation
                    // and guarantees the lifetime of the master thread's scheduler.
                    the_context_state_propagation_mutex.lock();
                    work_absent = !may_have_tasks( my_slots[0].my_scheduler, tasks_present, dequeuing_possible );
                    the_context_state_propagation_mutex.unlock();
                    // ...
                    for ( k = 1; work_absent && k < n; ++k ) {
                        if ( my_pool_state != busy )
                            return false; // the work was published
                        work_absent = !may_have_tasks( my_slots[k].my_scheduler, tasks_present, dequeuing_possible );
                    }
                    work_absent = work_absent
                                  // ...
                                  && abandonment_epoch == my_abandonment_epoch;
                }
#endif /* __TBB_TASK_PRIORITY */
                // Test and test-and-set.
                if ( my_pool_state == busy ) {
#if __TBB_TASK_PRIORITY
                    bool no_fifo_tasks = my_task_stream.empty(top_priority);
                    work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks)
                                  && top_priority == my_top_priority && reload_epoch == my_reload_epoch;
#else
                    bool no_fifo_tasks = my_task_stream.empty(0);
                    work_absent = work_absent && no_fifo_tasks;
#endif
                    if ( work_absent ) {
#if __TBB_TASK_PRIORITY
                        if ( top_priority > my_bottom_priority ) {
                            if ( my_market->lower_arena_priority(*this, top_priority - 1, reload_epoch)
                                 && !my_task_stream.empty(top_priority) )
                            {
                                atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>() );
                            }
                        }
                        else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {
#endif /* __TBB_TASK_PRIORITY */
                            // Save the demand before switching the state, to avoid racing advertise_new_work.
                            int current_demand = (int)my_max_num_workers;
                            if ( my_pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy ) == busy ) {
                                // This thread switched the pool off and must tell the market there is no work.
                                my_market->adjust_demand( *this, -current_demand );
                                restore_priority_if_need();
                                return true;
                            }
                            // ...
#if __TBB_TASK_PRIORITY
                        }
#endif
                    }
                    // Undo the previous SNAPSHOT_FULL --> busy transition, unless another thread already did.
                    my_pool_state.compare_and_swap( SNAPSHOT_FULL, busy );
                }
            }
            // ...
        }
        // ...
    }
}
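// Illustration (not from arena.cpp): the SNAPSHOT_FULL -> busy -> SNAPSHOT_EMPTY
// transitions above form a small test-and-test-and-set state machine. A reduced
// standalone sketch with std::atomic; check_empty and publish_work are hypothetical
// names, and the real emptiness test is the slot/stream scan performed above.

#include <atomic>
#include <cstdint>

enum : std::uintptr_t { EMPTY = 0, FULL = 1 };
std::atomic<std::uintptr_t> pool_state{ FULL };

bool check_empty( bool pools_empty ) {
    if ( pool_state.load() == EMPTY )
        return true;                        // pool already switched off
    std::uintptr_t full = FULL;
    const std::uintptr_t busy = reinterpret_cast<std::uintptr_t>(&full); // unique token, avoids ABA
    if ( !pool_state.compare_exchange_strong( full, busy ) )
        return false;                       // another thread is snapshotting, or state changed
    if ( pools_empty ) {
        std::uintptr_t expected = busy;
        return pool_state.compare_exchange_strong( expected, EMPTY );   // this thread switched the pool off
    }
    std::uintptr_t expected = busy;
    pool_state.compare_exchange_strong( expected, FULL );   // undo FULL -> busy unless a producer already did
    return false;
}

void publish_work() {
    pool_state.store( FULL );               // producers simply force the state back to FULL
}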
#if __TBB_COUNT_TASK_NODES
intptr_t arena::workers_task_node_count() {
    intptr_t result = 0;
    for ( unsigned i = 1; i < my_num_slots; ++i ) {
        if ( generic_scheduler* s = my_slots[i].my_scheduler )
            result += s->my_task_node_count;
    }
    return result;
}
#endif /* __TBB_COUNT_TASK_NODES */

void arena::enqueue_task( task& t, intptr_t prio, FastRandom& random )
{
#if __TBB_RECYCLE_TO_ENQUEUE
    // ...
#endif
    // ... (validate the parent's reference count before enqueuing)
    __TBB_ASSERT( ref_count!=0, "attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
    __TBB_ASSERT( ref_count>0, "attempt to enqueue task whose parent has a ref_count<0" );
    // ...
#if __TBB_PREVIEW_CRITICAL_TASKS
    // ... (critical tasks take a separate path into my_critical_task_stream)
#if __TBB_TASK_ISOLATION
    // ...
#endif
    // ...
        advertise_new_work<work_spawned>();
    // ...
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */
#if __TBB_TASK_PRIORITY
    intptr_t p = prio ? normalize_priority( priority_t(prio) ) : normalized_normal_priority;
    assert_priority_valid(p);
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#else
    my_task_stream.push( &t, p, random );
#endif
    if ( p != my_top_priority )
        my_market->update_arena_priority( *this, p );
#else /* !__TBB_TASK_PRIORITY */
    __TBB_ASSERT_EX( prio == 0, "the library is not configured to respect the task priority" );
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#else
    my_task_stream.push( &t, 0, random );
#endif
#endif /* !__TBB_TASK_PRIORITY */
    advertise_new_work<work_enqueued>();
#if __TBB_TASK_PRIORITY
    if ( p != my_top_priority )
        my_market->update_arena_priority( *this, p );
#endif /* __TBB_TASK_PRIORITY */
}
generic_scheduler::nested_arena_context::nested_arena_context( generic_scheduler* s, arena* a, size_t slot_index, bool type, bool same )
    : my_scheduler(*s), my_orig_ctx(NULL), same_arena(same) {
    if ( same_arena ) {
        my_orig_state.my_properties = my_scheduler.my_properties;
        my_orig_state.my_innermost_running_task = my_scheduler.my_innermost_running_task;
        mimic_outermost_level(a, type);
    } else {
        // ...
#if __TBB_PREVIEW_RESUMABLE_TASKS
        my_scheduler.my_properties.genuine = true;
        my_scheduler.my_current_is_recalled = NULL;
#endif
        mimic_outermost_level(a, type);
        // ...
    }
}

generic_scheduler::nested_arena_context::~nested_arena_context() {
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    my_scheduler.my_dummy_task->prefix().context = my_orig_ctx;
#endif
    my_scheduler.my_properties = my_orig_state.my_properties;
    my_scheduler.my_innermost_running_task = my_orig_state.my_innermost_running_task;
    if ( !same_arena ) {
        my_scheduler.nested_arena_exit();
        // ...
#if __TBB_TASK_PRIORITY
        my_scheduler.my_local_reload_epoch = *my_orig_state.my_ref_reload_epoch;
#endif
        // ...
    }
}
void generic_scheduler::nested_arena_entry( arena* a, size_t slot_index ) {
    // ... (#if __TBB_PREVIEW_CRITICAL_TASKS and __TBB_TASK_GROUP_CONTEXT sections elided)
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
    my_offloaded_tasks = NULL;
#endif
    // ...
#if __TBB_ARENA_OBSERVER
    my_last_local_observer = 0;
    my_arena->my_observers.notify_entry_observers( my_last_local_observer, /*worker=*/false );
#endif
    // ... (#if __TBB_PREVIEW_RESUMABLE_TASKS section elided)
}

void generic_scheduler::nested_arena_exit() {
#if __TBB_ARENA_OBSERVER
    my_arena->my_observers.notify_exit_observers( my_last_local_observer, /*worker=*/false );
#endif
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
#endif
    // ...
}
#if __TBB_PREVIEW_RESUMABLE_TASKS
class resume_task : public task {
    // ...
    task* execute() __TBB_override {
        // ...
        if ( s->prepare_resume(my_target) ) {
            s->resume(my_target);
            // ...
        }
        // ...
        prefix().state = task::to_resume;
        // ...
    }
    // ...
};

void internal_suspend( void* suspend_callback, void* user_callback ) {
    // ...
    bool is_recalled = *s.my_arena_slot->my_scheduler_is_recalled;
    // ...
    generic_scheduler::callback_t callback = {
        (generic_scheduler::suspend_callback_t)suspend_callback, user_callback, &s };
    target.set_post_resume_action(generic_scheduler::PRA_CALLBACK, &callback);
    // ...
}

void internal_resume( task::suspend_point tag ) {
    // ...
}

task::suspend_point internal_current_suspend_point() {
    // ...
}
#endif /* __TBB_PREVIEW_RESUMABLE_TASKS */
namespace interface7 {
namespace internal {

void task_arena_base::internal_initialize() {
    // ...
    if ( my_max_concurrency < 1 )
        my_max_concurrency = (int)governor::default_num_threads();
    __TBB_ASSERT( my_master_slots <= (unsigned)my_max_concurrency,
                  "Number of slots reserved for master should not exceed arena concurrency" );
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    // ...
    new_arena->my_default_ctx->capture_fp_settings();
#endif
    // ... (another thread may win the race to install my_arena)
#if __TBB_TASK_GROUP_CONTEXT
    // ...
    new_arena->my_default_ctx->my_version_and_traits |= my_version_and_traits & exact_exception_flag;
    as_atomic(my_context) = new_arena->my_default_ctx;
#endif
    // ...
}

void task_arena_base::internal_attach() {
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    my_context = my_arena->my_default_ctx;
    my_version_and_traits |= my_context->my_version_and_traits & exact_exception_flag;
#endif
    // ...
}

void task_arena_base::internal_enqueue( task& t, intptr_t prio ) const {
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( !my_arena->my_default_ctx->is_group_execution_cancelled(),
        "The task will not be executed because default task_group_context of task_arena is cancelled. Has previously enqueued task thrown an exception?" );
#endif
    // ...
}
class delegated_task : public task {
    // ...
    task* execute() __TBB_override {
        // Mimic an outermost master while the delegated functor runs.
        struct outermost_context : internal::no_copy {
            // ...
            outermost_context( delegated_task* _t, generic_scheduler& _s ) : t(_t), s(_s) {
                // ...
#if __TBB_TASK_GROUP_CONTEXT
                orig_ctx = t->prefix().context;
                t->prefix().context = s.my_arena->my_default_ctx;
#endif
                // ...
            }
            ~outermost_context() {
#if __TBB_TASK_GROUP_CONTEXT
                // Restore the original context so that a pending exception is registered in it.
                t->prefix().context = orig_ctx;
#endif
                // ...
            }
        } scope(this, s);
        // ...
    }
    void finalize() {
        // ...
#if __TBB_PREVIEW_RESUMABLE_TASKS
        reference_count old_ref_count = __TBB_FetchAndStoreW( &prefix.ref_count, 1 );
        // ...
        if ( old_ref_count == internal::abandon_flag + 2 ) {
            // The waiting thread abandoned its wait; resume its suspended scheduler instead.
            tbb::task::resume(prefix.abandoned_scheduler);
        }
#endif
        // ...
    }
public:
    delegated_task( internal::delegate_base& d, concurrent_monitor& s, task* t )
        : my_delegate(d), my_monitor(s), my_root(t) {}
    // Matches only notifications addressed to this particular delegate.
    bool operator()( uintptr_t ctx ) const { return (void*)ctx == (void*)&my_delegate; }
};
void task_arena_base::internal_execute( internal::delegate_base& d ) const {
    // ...
#if __TBB_USE_OPTIONAL_RTTI
    // ... (a delegate wrapping a flow-graph functor is enqueued directly:
    //      ... (internal::forward< graph_funct >(deleg_funct->my_func)), 0); )
#endif
    // ...
#if TBB_USE_EXCEPTIONS
    // ...
    TbbRethrowException(pe);
    // ...
#endif
    // ...
#if TBB_USE_EXCEPTIONS
    // Rethrow the original exception when exact exception propagation is requested.
    if ( my_version_and_traits & exact_exception_flag )
        throw;
    // ...
#endif
    // ...
}
#if __TBB_TASK_ISOLATION
// RAII guard that restores the previous isolation tag when the scope is left.
class isolation_guard : no_copy {
    isolation_tag& guarded;
    isolation_tag previous_value;
public:
    isolation_guard( isolation_tag& isolation ) : guarded( isolation ), previous_value( isolation ) {}
    ~isolation_guard() {
        guarded = previous_value;
    }
};
#endif /* __TBB_TASK_ISOLATION */

void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t isolation ) {
    generic_scheduler* s = governor::local_scheduler_weak();
    __TBB_ASSERT( s, "this_task_arena::isolate() needs an initialized scheduler" );
    // Temporarily change the isolation tag of the running task; the guard restores it on exit.
    isolation_tag& current_isolation = s->my_innermost_running_task->prefix().isolation;
    isolation_guard guard( current_isolation );
    current_isolation = isolation ? isolation : reinterpret_cast<isolation_tag>(&d);
    d();
}
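// Illustration (not from arena.cpp): isolate_within_arena is the engine behind the
// public this_task_arena::isolate() call. A minimal usage sketch, assuming a TBB
// release that provides this_task_arena::isolate:

#include <tbb/parallel_for.h>
#include <tbb/task_arena.h>

void process( int n ) {
    tbb::parallel_for( 0, n, []( int ) {
        // While waiting inside the nested loop, this thread may only pick up tasks
        // spawned within the isolated region, not unrelated outer-level work.
        tbb::this_task_arena::isolate( [] {
            tbb::parallel_for( 0, 100, []( int ) { /* nested work */ } );
        } );
    } );
}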
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available...
delegated_task(internal::delegate_base &d, concurrent_monitor &s, task *t)
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
void __TBB_EXPORTED_METHOD internal_attach()
static generic_scheduler * local_scheduler_weak()
bool is_idle_state(bool value) const
Indicate whether thread that reads this mailbox is idle.
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
Base class for user-defined tasks.
A fast random number generator.
Bit-field representing properties of a scheduler.
task is in ready pool, or is going to be put there, or was just taken off.
internal::arena * my_arena
NULL if not currently initialized.
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
bool outermost_level() const
True if the scheduler is on the outermost dispatch level.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
bool operator()(uintptr_t ctx) const
#define ITT_SYNC_CREATE(obj, type, name)
static const intptr_t num_priority_levels
concurrent_monitor & my_monitor
unsigned num_workers_active() const
The number of workers active in the arena.
bool is_worker() const
True if running on a worker thread, false otherwise.
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
void __TBB_EXPORTED_METHOD internal_terminate()
unsigned char state
A task::state_type, stored as a byte for compactness.
void __TBB_EXPORTED_METHOD internal_wait() const
void __TBB_store_with_release(volatile T &location, V value)
A functor that spawns a task.
intptr_t reference_count
A reference count.
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t isolation=0)
void notify_one()
Notify one thread about the event.
generic_scheduler & my_scheduler
bool is_critical(task &t)
bool commit_wait(thread_context &thr)
Commit wait if event count has not changed; otherwise, cancel wait.
const isolation_tag no_isolation
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
Exception container that preserves the exact copy of the original exception.
arena(market &, unsigned max_num_workers, unsigned num_reserved_slots)
Constructor.
void __TBB_EXPORTED_METHOD internal_initialize()
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
task **__TBB_atomic task_pool
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
#define __TBB_ISOLATION_ARG(arg1, isolation)
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
isolation_tag isolation
The tag used for task isolation.
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert ...
static bool occupy_slot(generic_scheduler *&slot, generic_scheduler &s)
intptr_t isolation_tag
A tag for task isolation.
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
unsigned my_num_slots
The number of slots in the arena.
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
task_group_context * my_orig_ctx
bool is_quiescent_local_task_pool_reset() const
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
Class representing where mail is put.
task * execute() __TBB_override
Should be overridden by derived classes.
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
void free_arena()
Completes arena shutdown, destructs and deallocates it.
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
bool is_out_of_work()
Check if there is job anywhere in arena.
int my_max_concurrency
Concurrency level for deferred initialization.
static unsigned num_arena_slots(unsigned num_slots)
Work stealing task scheduler.
Set if ref_count might be changed by another thread. Used for debugging.
#define GATHER_STATISTIC(x)
void attach_mailbox(affinity_id id)
#define __TBB_CONTEXT_ARG1(context)
Memory prefix to a task object.
atomic< T > & as_atomic(T &t)
size_t __TBB_EXPORTED_FUNC NFS_GetLineSize()
Cache/sector line size.
state_type state() const
Current execution state.
task_group_context * context()
This method is deprecated and will be removed in the future.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
static const int priority_critical
scheduler_properties my_properties
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
Smart holder for the empty task class with automatic destruction.
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
T1 atomic_update(tbb::atomic< T1 > &dst, T2 newValue, Pred compare)
Atomically replaces value of dst with newValue if they satisfy condition of compare predicate...
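This is the helper behind calls such as atomic_update( my_limit, index + 1, std::less<unsigned>() ) in occupy_free_slot above. A hedged sketch of such a compare-and-swap update loop, written against std::atomic with a hypothetical name rather than the internal tbb::atomic-based template:

#include <atomic>
#include <functional>

// Replace dst with new_value only while pred(dst, new_value) holds, keeping the
// "winning" value under concurrent updates (e.g. std::less keeps the maximum).
template <typename T, typename Pred>
T atomic_update_sketch( std::atomic<T>& dst, T new_value, Pred pred ) {
    T old_value = dst.load( std::memory_order_relaxed );
    while ( pred( old_value, new_value ) &&
            !dst.compare_exchange_weak( old_value, new_value ) ) {
        // old_value was refreshed by the failed CAS; re-check the predicate and retry.
    }
    return old_value;
}

With std::less as the predicate, concurrent callers leave the largest proposed value in dst, which is how my_limit only ever grows.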
void on_thread_leaving()
Notification that worker or master leaves its arena.
void adjust_demand(arena &, int delta)
Request that arena's need in workers should be adjusted.
market * my_market
The market I am in.
atomic< unsigned > my_references
Reference counter for the arena.
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
void prepare_wait(thread_context &thr, uintptr_t ctx=0)
prepare wait by inserting 'thr' into the wait queue
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
Used to form groups of tasks.
#define ITT_NOTIFY(name, obj)
static unsigned default_num_threads()
internal::delegate_base & my_delegate
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
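A minimal sketch of the spin loop this helper names, using std::atomic purely for illustration (the real helper takes a volatile reference and backs off between polls):

#include <atomic>

template <typename T, typename U>
void spin_wait_while_eq_sketch( const std::atomic<T>& location, U value ) {
    while ( location.load( std::memory_order_acquire ) == value ) {
        // spin; a production version would pause/yield with exponential backoff
    }
}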
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
int ref_count() const
The internal reference count.
Base class for types that should not be copied or assigned.
void cancel_wait(thread_context &thr)
Cancel the wait. Removes the thread from the wait queue if not removed yet.
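The prepare_wait / commit_wait / cancel_wait entries describe the two-phase parking protocol that internal_execute and delegated_task use on concurrent_monitor. A self-contained toy with the same shape (the names and the epoch mechanism are mine, not the TBB implementation):

#include <condition_variable>
#include <mutex>

// Toy monitor: sleep only if no notification arrived between prepare and commit.
class toy_monitor {
    std::mutex m;
    std::condition_variable cv;
    unsigned epoch = 0;
public:
    unsigned prepare_wait() {                 // remember the epoch before re-checking the condition
        std::lock_guard<std::mutex> lock( m );
        return epoch;
    }
    void cancel_wait() {}                     // nothing to undo in this toy version
    void commit_wait( unsigned my_epoch ) {   // sleep unless a notification already happened
        std::unique_lock<std::mutex> lock( m );
        cv.wait( lock, [&]{ return epoch != my_epoch; } );
    }
    void notify_all_waiters() {
        { std::lock_guard<std::mutex> lock( m ); ++epoch; }
        cv.notify_all();
    }
};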
static generic_scheduler * create_worker(market &m, size_t index, bool geniune)
Initialize a scheduler for a worker thread.
task * my_dummy_task
Fake root task created by slave threads.
void make_critical(task &t)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
void nested_arena_entry(arena *, size_t)
static const size_t out_of_arena
nested_arena_context(generic_scheduler *s, arena *a, size_t slot_index, bool type, bool same)
bool type
Indicates that a scheduler acts as a master or a worker.
virtual task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation))=0
Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption)...
scheduler_state my_orig_state
void create_coroutine(coroutine_type &c, size_t stack_size, void *arg)
void set_is_idle(bool value)
Indicate whether thread that reads this mailbox is idle.
T __TBB_load_relaxed(const volatile T &location)
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
task object is freshly allocated or recycled.
wait_task(binary_semaphore &sema)
void detach()
Detach inbox from its outbox.
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
static int __TBB_EXPORTED_FUNC internal_max_concurrency(const task_arena *)
static void one_time_init()
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
T __TBB_load_with_acquire(const volatile T &location)
binary_semaphore & my_signal
void attach_arena(arena *, size_t index, bool is_master)
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
task is running, and will be destroyed after method execute() completes.
binary_semaphore for concurrent monitor
static int __TBB_EXPORTED_FUNC internal_current_slot()
bool worker_outermost_level() const
True if the scheduler is on the outermost dispatch level in a worker thread.
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
bool outermost
Indicates that a scheduler is on outermost level.
void mimic_outermost_level(arena *a, bool type)
static const unsigned ref_external
Reference increment values for externals and workers.
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
unsigned short affinity_id
An id as used for specifying affinity.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
#define __TBB_CONTEXT_ARG(arg1, context)
void notify(const P &predicate)
Notify waiting threads of the event that satisfies the given predicate.
unsigned short get()
Get a random number.
task * execute() __TBB_override
Should be overridden by derived classes.
virtual void local_wait_for_all(task &parent, task *child)=0
market * my_market
The market that owns this arena.
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
static generic_scheduler * local_scheduler_if_initialized()
void __TBB_EXPORTED_METHOD internal_execute(delegate_base &) const
void restore_priority_if_need()
If enqueued tasks found, restore arena priority and task presence status.