#ifndef _TBB_custom_scheduler_H
#define _TBB_custom_scheduler_H
// ...
#if __TBB_x86_32||__TBB_x86_64
// ...
static const bool has_slow_atomic = false;
// ...
template<typename SchedulerTraits>
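// Illustrative sketch (not the TBB code): a SchedulerTraits-style policy class bundles
// compile-time flags such as itt_possible and has_slow_atomic; the scheduler template is
// instantiated with one of these classes and branches on the constants, so the compiler
// drops the unused paths. Names below are hypothetical stand-ins.
struct ExampleSchedulerTraits {
    static const bool itt_possible    = true;   // emit profiler (ITT) sync notifications
    static const bool has_slow_atomic = false;  // atomic read-modify-write is cheap here
};

template<typename Traits>
void example_notify(const void* addr) {
    if (Traits::itt_possible) {
        // A real build would issue an ITT notification here; for traits classes that set
        // itt_possible to false this branch is constant-folded away.
        (void)addr;
    }
}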
// custom_scheduler<SchedulerTraits>::tally_completion_of_predecessor:
// decrements ref_count of a predecessor task s and, once the count reaches zero,
// makes s available for execution (preferably via the bypass slot).
    if( SchedulerTraits::itt_possible )
        /* ... */;
    if( SchedulerTraits::has_slow_atomic && p.ref_count==1 )
        /* ... */;
#if __TBB_PREVIEW_RESUMABLE_TASKS
    if (old_ref_count == internal::abandon_flag + 2) {
        // The waiting scheduler was abandoned; resume it now that the count has dropped.
        tbb::task::resume(p.abandoned_scheduler);
    }
#endif
    if (old_ref_count > 1) {
        // More references remain: the predecessor is not ready yet.
        // ...
    if( SchedulerTraits::itt_possible )
        /* ... */;
#if __TBB_TASK_ISOLATION
    // ...
#endif
#if __TBB_RECYCLE_TO_ENQUEUE
    if (p.state==task::to_enqueue) {
        my_arena->enqueue_task( s, 0, my_random );
    }
#endif
    if( bypass_slot==NULL )
        /* ... */;
#if __TBB_PREVIEW_CRITICAL_TASKS
    // The bypass slot is already occupied: spawn its previous occupant instead.
    local_spawn( bypass_slot, bypass_slot->prefix().next );
#endif
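// Illustrative sketch (simplified, not the TBB implementation): the core idea of
// tally_completion_of_predecessor is to decrement the parent's reference count with
// release semantics and, if this thread saw the count reach zero, hand the parent back
// through the bypass slot so the dispatch loop runs it next without a round trip
// through the task pool. All names below are hypothetical.
#include <atomic>

struct demo_task {
    std::atomic<long> ref_count{0};
};

void demo_spawn(demo_task*) { /* would push to a ready pool in a real scheduler */ }

void demo_tally_completion(demo_task& parent, demo_task*& bypass_slot) {
    // fetch_sub(release) publishes this child's effects; the acquire fence below pairs
    // with it so the thread that takes the parent sees everything the children did.
    if (parent.ref_count.fetch_sub(1, std::memory_order_release) == 1) {
        std::atomic_thread_fence(std::memory_order_acquire);
        if (bypass_slot == nullptr)
            bypass_slot = &parent;   // run the parent next (task bypass)
        else
            demo_spawn(&parent);     // bypass slot already taken: spawn instead
    }
}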
// allocate_scheduler: construct the scheduler object via placement new in zeroed storage.
    std::memset( p, 0, sizeof(scheduler_type) );
    scheduler_type* s = new( p ) scheduler_type( m, genuine );
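// Illustrative sketch (assumption: the surrounding allocate_scheduler obtains raw,
// suitably aligned storage first). The zero-then-placement-new pattern shown above,
// with hypothetical names:
#include <cstring>
#include <new>

struct demo_scheduler {
    int id;
    explicit demo_scheduler(int i) : id(i) {}
};

demo_scheduler* demo_allocate(int id) {
    void* p = ::operator new(sizeof(demo_scheduler));  // raw storage
    std::memset(p, 0, sizeof(demo_scheduler));         // give every field a defined initial state
    return new (p) demo_scheduler(id);                 // placement new runs the constructor in place
}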
template<typename SchedulerTraits>
// receive_or_steal_task: try getting a task from the mailbox or stealing one from another scheduler.
    bool outermost_worker_level = worker_outermost_level();
    bool outermost_dispatch_level = outermost_worker_level || master_outermost_level();
    bool can_steal_here = can_steal();
    bool outermost_current_worker_level = outermost_worker_level;
#if __TBB_PREVIEW_RESUMABLE_TASKS
    outermost_current_worker_level &= my_properties.genuine;
#endif
    // Mark the mailbox as idle for the duration of the search.
    my_inbox.set_is_idle( true );
#if __TBB_HOARD_NONLOCAL_TASKS
    // ...
#endif
#if __TBB_TASK_PRIORITY
    if ( outermost_dispatch_level ) {
        // Propagate a FIFO priority level that was skipped earlier to the arena/market.
        if ( intptr_t skipped_priority = my_arena->my_skipped_fifo_priority ) {
            // ...
            if ( my_arena->my_skipped_fifo_priority.compare_and_swap(0, skipped_priority) == skipped_priority
                 && skipped_priority > my_arena->my_top_priority )
            {
                my_market->update_arena_priority( *my_arena, skipped_priority );
            }
        }
    }
#endif
    size_t n = my_arena->my_limit-1;
    // Stealing loop. failure_count == -1 is used only when ITT notifications are possible
    // and means that a sync_prepare has not yet been issued.
    for( int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count ) {
        // ...
        if( completion_ref_count == 1 ) {
            // The wait is over: all children of the awaited task have completed.
            if( SchedulerTraits::itt_possible ) {
                if( failure_count!=-1 ) {
                    ITT_NOTIFY(sync_prepare, &completion_ref_count);
                    // ...
                }
                ITT_NOTIFY(sync_acquired, &completion_ref_count);
            }
            // ...
        }
        if ( outermost_current_worker_level ) {
            // A worker at the outermost level leaves when the arena's worker quota has shrunk.
            if ( my_arena->my_num_workers_allotted < my_arena->num_workers_active() ) {
                if ( SchedulerTraits::itt_possible && failure_count != -1 )
                    /* ... */;
            }
        }
#if __TBB_PREVIEW_RESUMABLE_TASKS
        else if ( *my_arena_slot->my_scheduler_is_recalled ) {
            // The recall flag for this slot has been raised; stop searching for work.
            if ( my_inbox.is_idle_state( true ) )
                my_inbox.set_is_idle( false );
            // ...
        }
#endif
#if __TBB_TASK_PRIORITY
        const int p = int(my_arena->my_top_priority);
#else /* !__TBB_TASK_PRIORITY */
        static const int p = 0;
#endif
        // Check if there are tasks mailed to this thread via the task-to-thread affinity mechanism.
        if ( n && !my_inbox.empty() ) {
            // ...
#if __TBB_TASK_ISOLATION
            if ( isolation != no_isolation && !t && !my_inbox.empty()
                 && my_inbox.is_idle_state( true ) ) {
                // The mailbox is not empty, so the task pool cannot be marked quiescent yet.
                my_inbox.set_is_idle( false );
            }
#endif
        }
        // ...
        // Check the starvation-resistant (FIFO) stream of enqueued tasks.
        /* ... */ !my_arena->my_task_stream.empty(p) && (
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
            // ...
#endif
            t = my_arena->my_task_stream.pop( p, my_arena_slot->hint_for_pop )
        /* ... */
        ITT_NOTIFY(sync_acquired, &my_arena->my_task_stream);
#if __TBB_TASK_PRIORITY
        // Check whether previously offloaded lower-priority tasks can be reloaded.
        // ...
        __TBB_ASSERT( !is_proxy(*t), "The proxy task cannot be offloaded" );
#endif /* __TBB_TASK_PRIORITY */
        // ...
#if __TBB_PREVIEW_CRITICAL_TASKS
        // ...
        ITT_NOTIFY(sync_acquired, &my_arena->my_critical_task_stream);
#endif // __TBB_PREVIEW_CRITICAL_TASKS
        // ...
#if __TBB_ARENA_OBSERVER
        my_arena->my_observers.notify_entry_observers( my_last_local_observer, is_worker() );
#endif
#if __TBB_SCHEDULER_OBSERVER
        the_global_observer_list.notify_entry_observers( my_last_global_observer, is_worker() );
#endif /* __TBB_SCHEDULER_OBSERVER */
        if ( SchedulerTraits::itt_possible && failure_count != -1 ) {
            // ...
        }
        // ...
        if( SchedulerTraits::itt_possible && failure_count==-1 ) {
            // The first steal attempt failed: notify the profiler that the thread started spinning.
            // ...
        }
        // The back-off threshold scales with the number of arena slots.
        const int failure_threshold = 2*int(n+1);
        if( failure_count>=failure_threshold ) {
            // ...
            failure_count = failure_threshold;
        }
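// Illustrative sketch (not the TBB code): the loop above counts failed steal attempts and,
// once failure_count reaches a threshold proportional to the number of potential victims,
// starts yielding the CPU instead of merely spinning. A stand-alone version of that policy,
// with hypothetical names:
#include <cstddef>
#include <thread>

void demo_backoff(int& failure_count, std::size_t n_slots) {
    const int failure_threshold = 2 * static_cast<int>(n_slots + 1);
    if (++failure_count >= failure_threshold) {
        std::this_thread::yield();          // let other threads run before retrying
        failure_count = failure_threshold;  // clamp so repeated failures do not overflow
    }
    // A real scheduler would also execute a CPU pause instruction on every iteration.
}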
#if __TBB_TASK_PRIORITY
        // Check if there are tasks abandoned by other workers.
        if ( my_arena->my_orphaned_tasks ) {
            // Advance the epoch before seizing the list pointer.
            ++my_arena->my_abandonment_epoch;
            task* orphans = (task*)__TBB_FetchAndStoreW( &my_arena->my_orphaned_tasks, 0 );
            // ...
            my_local_reload_epoch--;
            t = reload_tasks( orphans, link, __TBB_ISOLATION_ARG( effective_reference_priority(), isolation ) );
            // ...
            *link = my_offloaded_tasks;
            if ( !my_offloaded_tasks )
                my_offloaded_task_list_tail_link = link;
            my_offloaded_tasks = orphans;
            // ...
            __TBB_ASSERT( !my_offloaded_tasks == !my_offloaded_task_list_tail_link, NULL );
            if( SchedulerTraits::itt_possible )
                /* ... */;
            __TBB_ASSERT( !is_proxy(*t), "The proxy task cannot be offloaded" );
        }
#endif /* __TBB_TASK_PRIORITY */
        // After enough failed attempts, yield the CPU.
        const int yield_threshold = 100;
        if( yield_count++ >= yield_threshold ) {
            // ...
#if __TBB_TASK_PRIORITY
            if( outermost_current_worker_level || my_arena->my_top_priority > my_arena->my_bottom_priority ) {
                if ( my_arena->is_out_of_work() && outermost_current_worker_level ) {
#else /* !__TBB_TASK_PRIORITY */
                if ( outermost_current_worker_level && my_arena->is_out_of_work() ) {
#endif /* !__TBB_TASK_PRIORITY */
                    if( SchedulerTraits::itt_possible )
                        /* ... */;
                    // ...
                }
#if __TBB_TASK_PRIORITY
            }
            if ( my_offloaded_tasks ) {
                // ...
                my_local_reload_epoch--;
                // ...
            }
#endif
        }
        // ...
#if __TBB_TASK_PRIORITY
        if ( !outermost_worker_level && *my_ref_top_priority > my_arena->my_top_priority ) {
            // ...
            my_ref_top_priority = &my_arena->my_top_priority;
            // Only outermost workers are expected to use the arena priority reference.
            __TBB_ASSERT(my_ref_reload_epoch == &my_arena->my_reload_epoch, NULL);
        }
#endif
        n = my_arena->my_limit-1;
    } // end of the stealing loop
    if ( my_inbox.is_idle_state( true ) )
        my_inbox.set_is_idle( false );
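// Illustrative sketch (simplified, hypothetical names): receive_or_steal_task marks the
// thread's mailbox as idle while it hunts for work and clears the flag again when it leaves,
// as the code above does. The shape of that protocol:
#include <atomic>

struct demo_inbox {
    std::atomic<bool> idle{false};
    std::atomic<int>  pending{0};
    bool empty() const { return pending.load(std::memory_order_acquire) == 0; }
};

bool demo_look_for_mail(demo_inbox& inbox) {
    inbox.idle.store(true, std::memory_order_release);      // advertise that we are searching
    bool found = !inbox.empty();
    if (found)
        inbox.idle.store(false, std::memory_order_relaxed);  // found work: no longer idle
    return found;
}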
template<typename SchedulerTraits>
// process_bypass_loop: implements the bypass loop of the dispatch loop (local_wait_for_all).
#if __TBB_TASK_ISOLATION
        __TBB_ASSERT( /* ... */, "A task from another isolated region is going to be executed" );
#endif /* __TBB_TASK_ISOLATION */
        // ...
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_ASSERT
        // ...
#endif
        assert_task_pool_valid();
#if __TBB_PREVIEW_CRITICAL_TASKS
        // If a critical task is available, take it and spawn the current task instead.
        __TBB_ASSERT( /* ... */, "Received task must be critical one" );
        ITT_NOTIFY(sync_acquired, &my_arena->my_critical_task_stream);
        // ...
        my_innermost_running_task = t;
        // ...
#endif
#if __TBB_TASK_PRIORITY
        intptr_t p = priority(*t);
        if ( p != *my_ref_top_priority /* ... */ ) {
            assert_priority_valid(p);
            if ( p != my_arena->my_top_priority ) {
                my_market->update_arena_priority( *my_arena, p );
            }
            if ( p < effective_reference_priority() ) {
                // The task's priority is below the current reference priority: offload it.
                if ( !my_offloaded_tasks ) {
                    // ...
                    *my_offloaded_task_list_tail_link = NULL;
                }
                offload_task( *t, p );
                // ...
                if ( is_task_pool_published() ) {
#if __TBB_PREVIEW_CRITICAL_TASKS
        // ...
#endif
        my_innermost_running_task = t;
        // ...
#if __TBB_TASK_GROUP_CONTEXT
        // ...
#endif
        GATHER_STATISTIC( my_counters.avg_arena_concurrency += my_arena->num_workers_active() );
        GATHER_STATISTIC( my_counters.avg_assigned_workers += my_arena->my_num_workers_allotted );
#if __TBB_TASK_PRIORITY
        // ...
        GATHER_STATISTIC( my_counters.avg_market_prio += my_market->my_global_top_priority );
#endif /* __TBB_TASK_PRIORITY */
        // ... (the task's execute() method is invoked here)
        __TBB_ASSERT( /* ... */, "if task::execute() returns task, it must be marked as allocated" );
        // ...
        affinity_id next_affinity = t_next->prefix().affinity;
        if (next_affinity != 0 && next_affinity != my_affinity_id)
            /* ... */;
        assert_task_pool_valid();
        // Dispatch on the state the task was left in by execute().
        switch( t->state() ) {
            // ... (case task::executing: the task completed normally and is destroyed)
            free_task<no_hint>( *t );
            assert_task_pool_valid();
            // ...
#if __TBB_RECYCLE_TO_ENQUEUE
        case task::to_enqueue:
#endif
            __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
            // ...
            assert_task_pool_valid();
            // ... (case task::reexecute)
            __TBB_ASSERT( t_next, "reexecution requires that method execute() return another task" );
            __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
            // ...
            assert_task_pool_valid();
            // ...
#if __TBB_PREVIEW_RESUMABLE_TASKS
        case task::to_resume:
            // ...
            free_task<no_hint>(*t);
            __TBB_ASSERT(!my_properties.genuine && my_properties.outermost,
                         "Only a coroutine on outermost level can be left.");
            // ...
#endif
            // ...
            __TBB_ASSERT( false, "task is in READY state upon return from method execute()" );
template<typename SchedulerTraits>
// local_wait_for_all( task& parent, task* child ): the scheduler loop that dispatches tasks.
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif
    assert_task_pool_valid();
    // ...
    if( SchedulerTraits::itt_possible )
        /* ... */;
    task* old_innermost_running_task = my_innermost_running_task;
    // ...
    // Cleanup is needed only for a master thread waiting on its dummy task at the outermost level.
    bool cleanup = !is_worker() && &parent==my_dummy_task;
    // ...
    __TBB_ASSERT(my_properties.outermost || my_innermost_running_task!=my_dummy_task,
                 "The outermost property should be set out of a dispatch loop");
    my_properties.outermost &= my_innermost_running_task==my_dummy_task;
#if __TBB_PREVIEW_CRITICAL_TASKS
    my_properties.has_taken_critical_task |= is_critical(*my_innermost_running_task);
#endif
#if __TBB_TASK_PRIORITY
    // Save the current priority/reload-epoch references so they can be restored on exit.
    volatile intptr_t *old_ref_top_priority = my_ref_top_priority;
    // ...
    volatile uintptr_t *old_ref_reload_epoch = my_ref_reload_epoch;
    if ( !outermost_level() ) {
        // ...
        my_ref_reload_epoch = &my_arena->my_reload_epoch;
        if (my_ref_reload_epoch != old_ref_reload_epoch)
            my_local_reload_epoch = *my_ref_reload_epoch - 1;
    }
#endif
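// Illustrative sketch (not the TBB code): the save/override/restore of my_ref_top_priority
// and my_ref_reload_epoch above is the classic "remember the old pointer, point at the
// arena-wide value for the nested loop, put it back on exit" pattern. An RAII guard with
// hypothetical types expresses the same idea:
struct demo_ref_guard {
    const long*& slot;   // member being temporarily redirected
    const long*  saved;  // original value, restored in the destructor
    demo_ref_guard(const long*& s, const long* nested_ref) : slot(s), saved(s) { slot = nested_ref; }
    ~demo_ref_guard() { slot = saved; }
};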
#if __TBB_TASK_ISOLATION
    isolation_tag isolation = my_innermost_running_task->prefix().isolation;
    // ...
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    tbb::atomic<bool> recall_flag;
    // ...
    if (outermost_level() && my_wait_task == NULL && my_properties.genuine) {
        __TBB_ASSERT(my_arena_slot->my_scheduler == this, NULL);
        __TBB_ASSERT(my_arena_slot->my_scheduler_is_recalled == NULL, NULL);
        // Publish the recall flag in the arena slot (resumable-tasks support).
        my_arena_slot->my_scheduler_is_recalled = &recall_flag;
        my_current_is_recalled = &recall_flag;
    } else {
        __TBB_ASSERT(my_arena_slot->my_scheduler_is_recalled != NULL, NULL);
    }
    task* old_wait_task = my_wait_task;
    // ...
#endif
#if TBB_USE_EXCEPTIONS
    // ...
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    my_innermost_running_task = old_innermost_running_task;
    my_properties = old_properties;
    my_wait_task = old_wait_task;
    // ...
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    if ( &recall_flag != my_arena_slot->my_scheduler_is_recalled ) {
        __TBB_ASSERT( my_arena_slot->my_scheduler_is_recalled != NULL, "A broken recall flag" );
        if ( *my_arena_slot->my_scheduler_is_recalled ) {
            if ( !resume_original_scheduler() ) {
                // This coroutine was asked to finish rather than resume the original scheduler.
                __TBB_ASSERT( !my_properties.genuine && my_properties.outermost,
                              "Only a coroutine on outermost level can be left." );
                my_innermost_running_task = old_innermost_running_task;
                my_properties = old_properties;
                my_wait_task = old_wait_task;
                // ...
            }
        }
    }
#endif
    // ...
    __TBB_ASSERT( is_task_pool_published() || is_quiescent_local_task_pool_reset(), NULL );
    // ...
    assert_task_pool_valid();
    // ...
#if __TBB_HOARD_NONLOCAL_TASKS
    // Drain the hoarded non-local free list before leaving.
    for (; my_nonlocal_free_list; my_nonlocal_free_list = t ) {
        // ...
        free_nonlocal_small_task( *my_nonlocal_free_list );
    }
#endif
    __TBB_ASSERT( !is_task_pool_published() && is_quiescent_local_task_pool_reset(), NULL );
    // ...
    my_innermost_running_task = old_innermost_running_task;
    my_properties = old_properties;
#if __TBB_TASK_PRIORITY
    my_ref_top_priority = old_ref_top_priority;
    if(my_ref_reload_epoch != old_ref_reload_epoch)
        my_local_reload_epoch = *old_ref_reload_epoch-1;
    my_ref_reload_epoch = old_ref_reload_epoch;
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    if (&recall_flag != my_arena_slot->my_scheduler_is_recalled) {
        // Suspend the current execution context (resumable-tasks support).
        tbb::task::suspend(recall_functor(&recall_flag));
        if (my_inbox.is_idle_state(true))
            my_inbox.set_is_idle(false);
    }
    // ...
    __TBB_ASSERT(&recall_flag == my_arena_slot->my_scheduler_is_recalled, NULL);
    __TBB_ASSERT(!(my_wait_task->prefix().ref_count & internal::abandon_flag), NULL);
    my_wait_task = old_wait_task;
    // ...
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    if ( *my_arena_slot->my_scheduler_is_recalled )
        /* ... */;
    if ( &recall_flag == my_arena_slot->my_scheduler_is_recalled || old_wait_task != NULL )
        /* ... */;
    tbb::task::suspend( recall_functor(&recall_flag) );
    if ( my_inbox.is_idle_state(true) )
        my_inbox.set_is_idle(false);
#endif
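// Illustrative sketch (loose analogy, not the preview API): the recall flag above is a shared
// boolean that one scheduler raises to tell the scheduler that originally owned the arena slot
// to come back. A minimal stand-alone flag of that shape; TBB suspends a coroutine instead of
// busy-waiting:
#include <atomic>
#include <thread>

struct demo_recall_flag {
    std::atomic<bool> raised{false};
    void raise() { raised.store(true, std::memory_order_release); }
    void wait()  {
        while (!raised.load(std::memory_order_acquire))
            std::this_thread::yield();   // placeholder for suspending the execution context
    }
};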
#if TBB_USE_EXCEPTIONS
    // An exception escaped from a task body: register it in the task group context.
    TbbCatchAll( my_innermost_running_task->prefix().context );
    // ...
    t = my_innermost_running_task;
    if ( /* ... */
#if __TBB_RECYCLE_TO_ENQUEUE
         || t->state() == task::to_enqueue
#endif
       ) {
        // ...
    }
#endif /* TBB_USE_EXCEPTIONS */
    if( SchedulerTraits::itt_possible )
        /* ... */;
    if( SchedulerTraits::itt_possible )
        ITT_NOTIFY(sync_acquired, &t->prefix().ref_count);
    // ...
#if __TBB_PREVIEW_RESUMABLE_TASKS
    my_wait_task = old_wait_task;
    if (my_wait_task == NULL) {
        __TBB_ASSERT(outermost_level(), "my_wait_task could be NULL only on outermost level");
        if (&recall_flag != my_arena_slot->my_scheduler_is_recalled) {
            // ...
            tbb::task::suspend(recall_functor(&recall_flag));
            if (my_inbox.is_idle_state(true))
                my_inbox.set_is_idle(false);
        }
        __TBB_ASSERT(my_arena_slot->my_scheduler == this, NULL);
        my_arena_slot->my_scheduler_is_recalled = NULL;
        my_current_is_recalled = NULL;
    }
#endif
    my_innermost_running_task = old_innermost_running_task;
    my_properties = old_properties;
#if __TBB_TASK_PRIORITY
    my_ref_top_priority = old_ref_top_priority;
    if(my_ref_reload_epoch != old_ref_reload_epoch)
        my_local_reload_epoch = *old_ref_reload_epoch-1;
    my_ref_reload_epoch = old_ref_reload_epoch;
#endif
785 "Worker thread exits nested dispatch loop prematurely" );
793 #if __TBB_TASK_GROUP_CONTEXT 796 if ( parent_ctx->my_cancellation_requested ) {
798 if ( master_outermost_level() && parent_ctx == default_context() ) {
801 parent_ctx->my_cancellation_requested = 0;
809 context_guard.restore_default();
810 TbbRethrowException( pe );
    __TBB_ASSERT(!is_worker() || !CancellationInfoPresent(*my_dummy_task),
                 "Worker's dummy task context modified");
    __TBB_ASSERT(!master_outermost_level() || !CancellationInfoPresent(*my_dummy_task),
                 "Unexpected exception or cancellation data in the master's dummy task");
#endif /* __TBB_TASK_GROUP_CONTEXT */
    assert_task_pool_valid();
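// Illustrative sketch (simplified): the TbbCatchAll / TbbRethrowException pair used in
// local_wait_for_all captures an exception that escaped a task body into the task group
// context and rethrows it later in the thread that waits for the group. std::exception_ptr
// gives the same capture-now, rethrow-elsewhere behavior; names are hypothetical.
#include <exception>

struct demo_context {
    std::exception_ptr pending;   // exception registered by whichever task threw first
};

void demo_run_task(demo_context& ctx, void (*body)()) {
    try { body(); }
    catch (...) { if (!ctx.pending) ctx.pending = std::current_exception(); }
}

void demo_wait(demo_context& ctx) {
    if (ctx.pending) std::rethrow_exception(ctx.pending);  // surfaces in the waiting thread
}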