Intel(R) Threading Building Blocks Doxygen Documentation
version 4.2.3
35 extern generic_scheduler* (*AllocateSchedulerPtr)( market&, bool );
41 #if __TBB_TASK_GROUP_CONTEXT
42 context_state_propagation_mutex_type the_context_state_propagation_mutex;
44 uintptr_t the_context_state_propagation_epoch = 0;
55 #if __TBB_TASK_GROUP_CONTEXT
59 #if __TBB_TASK_PRIORITY
78 #if _MSC_VER && !defined(__INTEL_COMPILER)
81 #pragma warning(disable:4355)
89 , my_co_context(m.worker_stack_size(), genuine ? NULL : this)
91 , my_small_task_count(1)
93 , my_cilk_state(cs_none)
100 #if __TBB_PREVIEW_CRITICAL_TASKS
103 #if __TBB_PREVIEW_RESUMABLE_TASKS
105 my_current_is_recalled = NULL;
106 my_post_resume_action = PRA_NONE;
107 my_post_resume_arg = NULL;
113 #if __TBB_TASK_PRIORITY
114 my_ref_top_priority = &m.my_global_top_priority;
115 my_ref_reload_epoch = &m.my_global_reload_epoch;
117 #if __TBB_TASK_GROUP_CONTEXT
119 my_context_state_propagation_epoch = the_context_state_propagation_epoch;
120 my_context_list_head.my_prev = &my_context_list_head;
121 my_context_list_head.my_next = &my_context_list_head;
122 ITT_SYNC_CREATE(&my_context_list_mutex, SyncType_Scheduler, SyncObj_ContextsList);
128 #if _MSC_VER && !defined(__INTEL_COMPILER)
130 #endif // warning 4355 is back
132 #if TBB_USE_ASSERT > 1
143 for ( size_t i = 0; i < H; ++i )
144 __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
145 for ( size_t i = H; i < T; ++i ) {
149 tp[i]->prefix().extra_state == es_task_proxy, "task in the deque has invalid state" );
153 __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
164 #if defined(_MSC_VER)&&_MSC_VER<1400 && !_WIN64
166 __asm mov eax, fs:[0x18]
169 NT_TIB *pteb = (NT_TIB*)NtCurrentTeb();
171 __TBB_ASSERT( &pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB" );
172 __TBB_ASSERT( stack_size > 0, "stack_size not initialized?" );
190 void *stack_base = &stack_size;
191 #if __linux__ && !__bg__
195 size_t np_stack_size = 0;
197 void *stack_limit = NULL;
199 #if __TBB_PREVIEW_RESUMABLE_TASKS
201 stack_limit = my_co_context.get_stack_limit();
202 __TBB_ASSERT( (uintptr_t)stack_base > (uintptr_t)stack_limit, "stack size must be positive" );
204 stack_size = size_t((char*)stack_base - (char*)stack_limit);
208 pthread_attr_t np_attr_stack;
209 if( !stack_limit && 0 == pthread_getattr_np(pthread_self(), &np_attr_stack) ) {
210 if ( 0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size) ) {
212 pthread_attr_t attr_stack;
213 if ( 0 == pthread_attr_init(&attr_stack) ) {
214 if ( 0 == pthread_attr_getstacksize(&attr_stack, &stack_size) ) {
215 if ( np_stack_size < stack_size ) {
218 rsb_base = stack_limit;
219 stack_size = np_stack_size/2;
221 stack_limit = (char*)stack_limit + stack_size;
227 pthread_attr_destroy(&attr_stack);
230 my_rsb_stealing_threshold = (uintptr_t)((char*)rsb_base + stack_size/2);
235 stack_size = size_t((char*)stack_base - (char*)stack_limit);
237 pthread_attr_destroy(&np_attr_stack);
240 __TBB_ASSERT( stack_size > 0, "stack size must be positive" );
245 #if __TBB_TASK_GROUP_CONTEXT
251 void generic_scheduler::cleanup_local_context_list () {
253 bool wait_for_concurrent_destroyers_to_leave = false;
254 uintptr_t local_count_snapshot = my_context_state_propagation_epoch;
255 my_local_ctx_list_update.store<relaxed>(1);
263 if ( my_nonlocal_ctx_list_update.load<relaxed>() || local_count_snapshot != the_context_state_propagation_epoch )
264 lock.acquire(my_context_list_mutex);
268 while ( node != &my_context_list_head ) {
275 wait_for_concurrent_destroyers_to_leave = true;
278 my_local_ctx_list_update.store<release>(0);
280 if ( wait_for_concurrent_destroyers_to_leave )
297 #if __TBB_PREVIEW_CRITICAL_TASKS
300 #if __TBB_TASK_GROUP_CONTEXT
301 cleanup_local_context_list();
305 #if __TBB_HOARD_NONLOCAL_TASKS
306 while( task* t = my_nonlocal_free_list ) {
308 my_nonlocal_free_list = p.next;
325 #if __TBB_COUNT_TASK_NODES
326 my_market->update_task_node_count( my_task_node_count );
340 #if __TBB_HOARD_NONLOCAL_TASKS
341 if( (t = my_nonlocal_free_list) ) {
354 __TBB_ASSERT( t, "another thread emptied the my_return_list" );
360 #if __TBB_COUNT_TASK_NODES
361 ++my_task_node_count;
367 #if __TBB_PREFETCHING
370 #if __TBB_HOARD_NONLOCAL_TASKS
386 #if __TBB_COUNT_TASK_NODES
387 ++my_task_node_count;
392 #if __TBB_TASK_GROUP_CONTEXT
415 task* old = s.my_return_list;
421 if( as_atomic(s.my_return_list).compare_and_swap(&t, old ) == old ) {
422 #if __TBB_PREFETCHING
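Lines 415-421 return a freed small task to the scheduler that allocated it by pushing it onto that scheduler's my_return_list with a compare-and-swap loop. A generic sketch of the same lock-free push written with std::atomic (the node type and function name are illustrative, not TBB's):

#include <atomic>

struct node { node* next; };

// Sketch: Treiber-style lock-free push; retry the CAS until the head has not
// changed between reading it and installing the new node.
void push_front( std::atomic<node*>& list_head, node* n ) {
    node* old_head = list_head.load( std::memory_order_relaxed );
    do {
        n->next = old_head;   // link the new node in front of the observed head
    } while ( !list_head.compare_exchange_weak( old_head, n,
                                                std::memory_order_release,
                                                std::memory_order_relaxed ) );
}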
439 if ( T + num_tasks <= my_arena_slot->my_task_pool_size )
457 for ( size_t i = H; i < T; ++i )
466 if ( new_size < 2 * my_arena_slot->my_task_pool_size )
472 for ( size_t i = H; i < T; ++i )
494 bool sync_prepare_done = false;
510 else if( !sync_prepare_done ) {
513 sync_prepare_done = true;
536 task** victim_task_pool;
537 bool sync_prepare_done = false;
539 victim_task_pool = victim_arena_slot->task_pool;
545 if( sync_prepare_done )
556 else if( !sync_prepare_done ) {
559 sync_prepare_done = true;
563 #if __TBB_STEALING_ABORT_ON_CONTENTION
564 if(!backoff.bounded_pause()) {
580 "not really locked victim's task pool?" );
581 return victim_task_pool;
585 task** victim_task_pool ) const {
586 __TBB_ASSERT( victim_arena_slot, "empty victim arena slot pointer" );
599 __TBB_ASSERT( ref_count >= 0, "attempt to spawn task whose parent has a ref_count<0" );
600 __TBB_ASSERT( ref_count != 0, "attempt to spawn task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
606 "backwards compatibility to TBB 2.0 tasks is broken" );
607 #if __TBB_TASK_ISOLATION
619 #if __TBB_TASK_PRIORITY
631 #if __TBB_PREVIEW_CRITICAL_TASKS
632 bool generic_scheduler::handled_as_critical( task& t ) {
635 #if __TBB_TASK_ISOLATION
641 my_arena->my_critical_task_stream.push(
656 if ( &first->prefix().next == &next ) {
665 #if __TBB_PREVIEW_CRITICAL_TASKS
666 if( !handled_as_critical( *first ) )
697 #if __TBB_PREVIEW_CRITICAL_TASKS
698 if( !handled_as_critical( *t ) )
704 if( size_t num_tasks = tasks.size() ) {
726 #if __TBB_TASK_GROUP_CONTEXT
728 "all the root tasks in list must share the same context");
749 s->my_arena->enqueue_task(t, (intptr_t)prio, s->my_random );
752 #if __TBB_TASK_PRIORITY
753 class auto_indicator : no_copy {
754 volatile bool& my_indicator;
756 auto_indicator ( volatile bool& indicator ) : my_indicator(indicator) { my_indicator = true; }
757 ~auto_indicator () { my_indicator = false; }
765 #if __TBB_TASK_ISOLATION
767 bool tasks_omitted = false;
768 while ( !t && T>H0 ) {
769 t = get_task( --T, isolation, tasks_omitted );
770 if ( !tasks_omitted ) {
776 if ( t && tasks_omitted ) {
806 #if __TBB_TASK_ISOLATION
821 __TBB_ASSERT( my_offloaded_tasks, "At least one task is expected to be already offloaded" );
828 auto_indicator indicator( my_pool_reshuffling_pending );
838 for ( size_t src = H0; src < T0; ++src ) {
842 intptr_t p = priority( *t );
843 if ( p < *my_ref_top_priority ) {
844 offload_task( *t, p );
860 #if __TBB_TASK_ISOLATION
871 task **link = &offloaded_tasks;
872 while ( task *t = *link ) {
873 task** next_ptr = &t->prefix().next_offloaded;
875 if ( priority(*t) >= top_priority ) {
876 tasks.push_back( t );
879 task* next = *next_ptr;
880 t->prefix().owner = this;
888 if ( link == &offloaded_tasks ) {
889 offloaded_tasks = NULL;
891 offloaded_task_list_link = NULL;
898 offloaded_task_list_link = link;
901 size_t num_tasks = tasks.size();
914 if ( t ) --num_tasks;
922 uintptr_t reload_epoch = *my_ref_reload_epoch;
925 || my_local_reload_epoch - reload_epoch > uintptr_t(-1)/2,
926 "Reload epoch counter overflow?" );
927 if ( my_local_reload_epoch == reload_epoch )
930 intptr_t top_priority = effective_reference_priority();
932 task *t = reload_tasks( my_offloaded_tasks, my_offloaded_task_list_tail_link, __TBB_ISOLATION_ARG( top_priority, isolation ) );
949 my_local_reload_epoch = reload_epoch;
954 #if __TBB_TASK_ISOLATION
961 || is_local_task_pool_quiescent(), "Is it safe to get a task at position T?" );
963 task* result = my_arena_slot->task_pool_ptr[T];
964 __TBB_ASSERT( !is_poisoned( result ), "The poisoned task is going to be processed" );
965 #if __TBB_TASK_ISOLATION
970 if ( !omit && !is_proxy( *result ) )
973 tasks_omitted = true;
978 if ( !result || !is_proxy( *result ) )
982 task_proxy& tp = static_cast<task_proxy&>(*result);
986 __TBB_ASSERT( is_version_3_task( *t ), "backwards compatibility with TBB 2.0 broken" );
987 my_innermost_running_task = t;
988 #if __TBB_TASK_ISOLATION
990 if ( !tasks_omitted )
994 t->note_affinity( my_affinity_id );
1000 free_task<small_task>( tp );
1001 #if __TBB_TASK_ISOLATION
1002 if ( tasks_omitted )
1003 my_arena_slot->task_pool_ptr[T] = NULL;
1013 size_t H0 = (size_t)-1, T = T0;
1014 task* result = NULL;
1015 bool task_pool_empty = false;
1024 if ( (intptr_t)H0 > (intptr_t)T ) {
1028 && H0 == T + 1, "victim/thief arbitration algorithm failure" );
1031 task_pool_empty = true;
1033 } else if ( H0 == T ) {
1036 task_pool_empty = true;
1045 #if __TBB_TASK_ISOLATION
1046 result = get_task( T, isolation, tasks_omitted );
1050 } else if ( !tasks_omitted ) {
1058 } while ( !result && !task_pool_empty );
1060 #if __TBB_TASK_ISOLATION
1061 if ( tasks_omitted ) {
1062 if ( task_pool_empty ) {
1125 free_task<no_cache_small_task>(tp);
1144 task* result = NULL;
1147 bool tasks_omitted = false;
1159 result = victim_pool[H-1];
1167 task_proxy& tp = *static_cast<task_proxy*>(result);
1175 tasks_omitted = true;
1176 } else if ( !tasks_omitted ) {
1182 } while ( !result );
1186 ITT_NOTIFY( sync_acquired, (void*)((uintptr_t)&victim_slot + sizeof( uintptr_t )) );
1188 if ( tasks_omitted ) {
1190 victim_pool[H-1] = NULL;
1195 #if __TBB_PREFETCHING
1199 if ( tasks_omitted )
1205 #if __TBB_PREVIEW_CRITICAL_TASKS
1213 if( my_arena->my_critical_task_stream.empty(0) )
1215 task* critical_task = NULL;
1218 #if __TBB_TASK_ISOLATION
1220 critical_task = my_arena->my_critical_task_stream.pop_specific( 0, start_lane, isolation );
1226 return critical_task;
1239 free_task<no_cache_small_task>(*tp);
1246 __TBB_ASSERT ( my_arena_index < my_arena->my_num_slots, "arena slot index is out-of-bound" );
1250 "entering arena without tasks to share" );
1271 __TBB_ASSERT( !genuine || index, "workers should have index > 0" );
1272 s->my_arena_index = index;
1273 s->my_dummy_task->prefix().ref_count = 2;
1277 s->init_stack_info();
1288 task& t = *s->my_dummy_task;
1291 #if __TBB_TASK_GROUP_CONTEXT
1294 #if __TBB_FP_CONTEXT
1295 s->default_context()->capture_fp_settings();
1298 s->init_stack_info();
1299 context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
1300 s->my_market->my_masters.push_front( *s );
1305 s->attach_arena( a, 0, true );
1306 s->my_arena_slot->my_scheduler = s;
1307 #if __TBB_TASK_GROUP_CONTEXT
1308 a->my_default_ctx = s->default_context();
1311 __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
1315 s->my_market->register_master( s->master_exec_resource );
1318 #if __TBB_ARENA_OBSERVER
1319 __TBB_ASSERT( !a || a->my_observers.empty(), "Just created arena cannot have any observers associated with it" );
1321 #if __TBB_SCHEDULER_OBSERVER
1322 the_global_observer_list.notify_entry_observers( s->my_last_global_observer, false );
1329 __TBB_ASSERT( !s.my_arena_slot, "cleaning up attached worker" );
1330 #if __TBB_SCHEDULER_OBSERVER
1332 the_global_observer_list.notify_exit_observers( s.my_last_global_observer, true );
1334 s.cleanup_scheduler();
1360 #if __TBB_ARENA_OBSERVER
1362 a->my_observers.notify_exit_observers( my_last_local_observer, false );
1364 #if __TBB_SCHEDULER_OBSERVER
1365 the_global_observer_list.notify_exit_observers( my_last_global_observer, false );
1368 m->unregister_master( master_exec_resource );
1372 #if __TBB_STATISTICS
1377 #if __TBB_TASK_GROUP_CONTEXT
1379 default_context()->~task_group_context();
1382 context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
1392 return m->release( a != NULL, blocking_terminate );
void release_task_pool() const
Unlocks the local task pool.
static void sign_on(generic_scheduler *s)
Register TBB scheduler instance in thread-local storage.
task **__TBB_atomic task_pool_ptr
Task pool of the scheduler that owns this slot.
Set if ref_count might be changed by another thread. Used for debugging.
#define __TBB_cl_prefetch(p)
Used to form groups of tasks.
static bool is_shared(intptr_t tat)
True if the proxy is stored both in its sender's pool and in the destination mailbox.
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
bool is_critical(task &t)
task * steal_task_from(__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
Steal task from another scheduler's ready pool.
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
task object is freshly allocated or recycled.
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
void atomic_fence()
Sequentially consistent full memory fence.
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
unsigned short affinity_id
An id as used for specifying affinity.
#define __TBB_ISOLATION_EXPR(isolation)
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
void allocate_task_pool(size_t n)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
void __TBB_store_with_release(volatile T &location, V value)
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
#define ITT_NOTIFY(name, obj)
task **__TBB_atomic task_pool
#define __TBB_cl_evict(p)
static const intptr_t num_priority_levels
__TBB_atomic size_t tail
Index of the element following the last ready task in the deque.
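head and tail delimit the ready tasks in an arena slot's deque: the owning thread adds and removes work at the tail, while thieves take work from the head. A deliberately simplified, mutex-based sketch of who touches which end (the real pool uses lock-free fast paths over task** plus the head/tail indices listed here):

#include <deque>
#include <mutex>

// Sketch only: illustrates the two ends of a per-thread task pool, not TBB's
// actual lock-free implementation.
struct toy_task_pool {
    std::deque<void*> tasks;
    std::mutex        m;

    void spawn( void* t ) {                  // owner: push at the tail
        std::lock_guard<std::mutex> l( m );
        tasks.push_back( t );
    }
    void* take() {                           // owner: pop from the tail (LIFO, cache-friendly)
        std::lock_guard<std::mutex> l( m );
        if ( tasks.empty() ) return nullptr;
        void* t = tasks.back(); tasks.pop_back(); return t;
    }
    void* steal() {                          // thief: pop from the head
        std::lock_guard<std::mutex> l( m );
        if ( tasks.empty() ) return nullptr;
        void* t = tasks.front(); tasks.pop_front(); return t;
    }
};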
uintptr_t my_stealing_threshold
Position in the call stack specifying its maximal filling when stealing is still allowed.
bool is_worker() const
True if running on a worker thread, false otherwise.
static const size_t quick_task_size
If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
unsigned char state
A task::state_type, stored as a byte for compactness.
void push_back(const T &val)
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
size_t my_task_pool_size
Capacity of the primary task pool (number of elements - pointers to task).
task_proxy * pop(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get next piece of mail, or NULL if mailbox is empty.
mail_outbox * outbox
Mailbox to which this was mailed.
void assert_task_pool_valid() const
tbb::task * parent
The task whose reference count includes me.
static const size_t min_task_pool_size
state_type state() const
Current execution state.
task is in ready pool, or is going to be put there, or was just taken off.
void copy_memory(T *dst) const
Copies the contents of the vector into the dst array.
intptr_t reference_count
A reference count.
__TBB_atomic intptr_t my_small_task_count
Number of small tasks that have been allocated by this scheduler.
static const kind_type detached
void unlock_task_pool(arena_slot *victim_arena_slot, task **victim_task_pool) const
Unlocks victim's task pool.
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
void assert_task_valid(const task *)
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
intptr_t isolation_tag
A tag for task isolation.
auto first(Container &c) -> decltype(begin(c))
market * my_market
The market I am in.
bool is_quiescent_local_task_pool_reset() const
bool is_quiescent_local_task_pool_empty() const
#define GATHER_STATISTIC(x)
void commit_relocated_tasks(size_t new_tail)
Makes relocated tasks visible to thieves and releases the local task pool.
static bool is_version_3_task(task &t)
static const intptr_t pool_bit
Class that implements exponential backoff.
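The stealing retry loops above (for example around line 564) throttle contention with exponential backoff. A minimal sketch of such a backoff object, assuming a simple spin-then-yield policy (thresholds are illustrative; TBB's own backoff class differs in detail):

#include <thread>

// Sketch: spin a geometrically growing number of iterations, then start yielding.
class toy_backoff {
    int count;
public:
    toy_backoff() : count(1) {}
    void pause() {
        if ( count <= 16 ) {
            for ( volatile int i = 0; i < count; ++i )
                ;                        // ideally a CPU pause instruction per iteration
            count *= 2;                  // back off twice as long next time
        } else {
            std::this_thread::yield();   // waited long enough: give up the time slice
        }
    }
};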
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
virtual void __TBB_EXPORTED_METHOD note_affinity(affinity_id id)
Invoked by scheduler to notify task that it ran on unexpected thread.
void push(task_proxy *t)
Push task_proxy onto the mailbox queue of another thread.
void local_spawn(task *first, task *&next)
__TBB_atomic size_t head
Index of the first ready task in the deque.
void init_stack_info()
Sets up the data necessary for the stealing limiting heuristics.
void enqueue(task &, void *reserved) __TBB_override
For internal use only.
void acquire_task_pool() const
Locks the local task pool.
Memory prefix to a task object.
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
unsigned short get()
Get a random number.
task * extract_task()
Returns a pointer to the encapsulated task or NULL, and frees proxy if necessary.
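A task_proxy can be reachable from both the sender's task pool and the destination mailbox; the low bits of its pointer-sized tag record which of the two references is still live (pool_bit, mailbox_bit, with location_mask covering both, as listed here). A hedged sketch of that tagged-pointer idea (the concrete bit values and helper names are assumptions for illustration, not taken from the TBB source):

#include <cstdint>

// Sketch: pack "who still references this proxy" into the low bits of a tag word.
const intptr_t pool_bit      = 1;                       // assumed: referenced from the spawner's pool
const intptr_t mailbox_bit   = 2;                       // assumed: referenced from the target mailbox
const intptr_t location_mask = pool_bit | mailbox_bit;

inline bool is_shared( intptr_t tat ) {
    return ( tat & location_mask ) == location_mask;    // both references still present
}
inline void* encapsulated_task( intptr_t tat ) {
    return (void*)( tat & ~location_mask );             // strip tag bits to recover the task pointer
}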
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
T __TBB_load_relaxed(const volatile T &location)
context_list_node_t * my_next
atomic< T > & as_atomic(T &t)
bool is_local_task_pool_quiescent() const
isolation_tag isolation
The tag used for task isolation.
task * my_dummy_task
Fake root task created by slave threads.
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
void publish_task_pool()
Used by workers to enter the task pool.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
static bool is_proxy(const task &t)
True if t is a task_proxy.
void reset_task_pool_and_leave()
Resets head and tail indices to 0, and leaves task pool.
task ** lock_task_pool(arena_slot *victim_arena_slot) const
Locks victim's task pool, and returns pointer to it. The pointer can be NULL.
void set_ref_count(int count)
Set reference count.
bool cleanup_master(bool blocking_terminate)
Perform necessary cleanup when a master thread stops using TBB.
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
void spin_wait_until_eq(const volatile T &location, const U value)
Spin UNTIL the value of the variable is equal to a given value.
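spin_wait_until_eq is used above while waiting for a short concurrent operation to finish (for instance, for concurrent context destroyers to leave in cleanup_local_context_list). A minimal sketch with std::atomic instead of TBB's volatile-based original:

#include <atomic>
#include <thread>

// Sketch: spin until `location` equals `value`; start yielding once the wait drags on.
template<typename T, typename U>
void toy_spin_wait_until_eq( const std::atomic<T>& location, const U value ) {
    int spins = 0;
    while ( location.load( std::memory_order_acquire ) != static_cast<T>( value ) )
        if ( ++spins > 100 )
            std::this_thread::yield();
}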
void spawn(task &first, task *&next) __TBB_override
For internal use only.
void spawn_root_and_wait(task &first, task *&next) __TBB_override
For internal use only.
#define __TBB_FetchAndDecrementWrelease(P)
tbb::task * next
"next" field for list of task
#define ITT_SYNC_CREATE(obj, type, name)
Smart holder for the empty task class with automatic destruction.
generic_scheduler *(* AllocateSchedulerPtr)(market &, bool)
Pointer to the scheduler factory function.
scheduler_properties my_properties
generic_scheduler(market &, bool)
void free_nonlocal_small_task(task &t)
Free a small task t that was allocated by a different scheduler.
static generic_scheduler * create_worker(market &m, size_t index, bool genuine)
Initialize a scheduler for a worker thread.
task * get_mailbox_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempt to get a task from the mailbox.
void fill_with_canary_pattern(size_t, size_t)
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
#define __TBB_ISOLATION_ARG(arg1, isolation)
affinity_id my_affinity_id
The mailbox id assigned to this scheduler.
void deallocate_task(task &t)
Return task object to the memory allocator.
bool recipient_is_idle()
True if thread that owns this mailbox is looking for work.
void poison_pointer(T *__TBB_atomic &)
static const intptr_t mailbox_bit
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
Base class for user-defined tasks.
Work stealing task scheduler.
void Scheduler_OneTimeInitialization(bool itt_present)
Defined in scheduler.cpp.
void commit_spawned_tasks(size_t new_tail)
Makes newly spawned tasks visible to thieves.
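Newly spawned tasks become visible to thieves only when the tail index is published with a release store (__TBB_store_with_release above); a thief that reads tail with acquire semantics is then guaranteed to see the task pointers written before the store. A hedged sketch of that publication step with std::atomic:

#include <atomic>
#include <cstddef>

// Sketch: fill the slot first, then publish the new tail with release semantics
// so an acquire-load of tail on another thread also sees the filled slot.
void commit_one_spawned_task( void** pool, std::atomic<std::size_t>& tail, void* t ) {
    std::size_t T = tail.load( std::memory_order_relaxed );
    pool[T] = t;                                       // not yet visible to thieves
    tail.store( T + 1, std::memory_order_release );    // now it is
}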
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
bool is_task_pool_published() const
#define __TBB_control_consistency_helper()
void local_spawn_root_and_wait(task *first, task *&next)
static const intptr_t location_mask
virtual void local_wait_for_all(task &parent, task *child)=0
static const kind_type binding_required
void * __TBB_get_bsp()
Retrieves the current RSE backing store pointer. IA64 specific.
task * get_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get a task from the local pool.
Represents acquisition of a mutex.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
void pause()
Pause for a while.
#define __TBB_get_object_ref(class_name, member_name, member_addr)
Returns address of the object containing a member with the given name and address.
static const kind_type dying
void destroy()
Destroy and deallocate this scheduler object.
task * prepare_for_spawning(task *t)
Checks if t is affinitized to another thread, and if so, bundles it as proxy.
static void sign_off(generic_scheduler *s)
Unregister TBB scheduler instance from thread-local storage.
task * my_free_list
Free list of small tasks that can be reused.
bool outermost
Indicates that a scheduler is on outermost level.
size_t worker_stack_size() const
Returns the requested stack size of worker threads.
Set if the task has been stolen.
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
void cleanup_scheduler()
Cleans up this scheduler (the scheduler might be destroyed).
void leave_task_pool()
Leave the task pool.
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
void __TBB_store_relaxed(volatile T &location, V value)
size_t prepare_task_pool(size_t n)
Makes sure that the task pool can accommodate at least n more elements.
static const unsigned ref_external
Reference increment values for externals and workers.
static task * plugged_return_list()
Special value used to mark my_return_list as not taking any more entries.
generic_scheduler * allocate_scheduler(market &m, bool genuine)
task * my_return_list
List of small tasks that have been returned to this scheduler by other schedulers.
atomic< unsigned > my_limit
The maximal number of currently busy slots.
A scheduler with a customized evaluation loop.
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
virtual ~scheduler()=0
Pure virtual destructor.
Base class for types that should not be copied or assigned.
void on_thread_leaving()
Notification that worker or master leaves its arena.
Vector that grows without reallocations, and stores items in the reverse order.
task object is on free list, or is going to be put there, or was just taken off.
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
scheduler * owner
Obsolete. The scheduler that owns the task.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
const isolation_tag no_isolation
task * steal_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempts to steal a task from a randomly chosen thread/scheduler.
#define __TBB_CONTEXT_ARG(arg1, context)
#define __TBB_PREVIEW_RESUMABLE_TASKS
Copyright © 2005-2019 Intel Corporation. All Rights Reserved.