Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
arena.cpp
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #include "tbb/global_control.h" // thread_stack_size
18 
19 #include "scheduler.h"
20 #include "governor.h"
21 #include "arena.h"
22 #include "itt_notify.h"
23 #include "semaphore.h"
25 
26 #include <functional>
27 
28 #if __TBB_STATISTICS_STDOUT
29 #include <cstdio>
30 #endif
31 
32 namespace tbb {
33 namespace internal {
34 
35 // put it here in order to enable compiler to inline it into arena::process and nested_arena_entry
36 void generic_scheduler::attach_arena( arena* a, size_t index, bool is_master ) {
37  __TBB_ASSERT( a->my_market == my_market, NULL );
38  my_arena = a;
39  my_arena_index = index;
40  my_arena_slot = a->my_slots + index;
41  attach_mailbox( affinity_id(index+1) );
42  if ( is_master && my_inbox.is_idle_state( true ) ) {
43  // Master enters an arena with its own task to be executed. It means that the master is not
44  // going to enter the stealing loop and take affinity tasks.
45  my_inbox.set_is_idle( false );
46  }
47 #if __TBB_TASK_GROUP_CONTEXT
48  // Context to be used by root tasks by default (if the user has not specified one).
49  if( !is_master )
50  my_dummy_task->prefix().context = a->my_default_ctx;
51 #endif /* __TBB_TASK_GROUP_CONTEXT */
52 #if __TBB_TASK_PRIORITY
53  // In the current implementation master threads continue processing even when
54  // there are other masters with higher priority. Only TBB worker threads are
55  // redistributed between arenas based on the latters' priority. Thus master
56  // threads use arena's top priority as a reference point (in contrast to workers
57  // that use my_market->my_global_top_priority).
58  if( is_master ) {
59  my_ref_top_priority = &a->my_top_priority;
60  my_ref_reload_epoch = &a->my_reload_epoch;
61  }
62  my_local_reload_epoch = *my_ref_reload_epoch;
63  __TBB_ASSERT( !my_offloaded_tasks, NULL );
64 #endif /* __TBB_TASK_PRIORITY */
65 }
66 
67 inline static bool occupy_slot( generic_scheduler*& slot, generic_scheduler& s ) {
68  return !slot && as_atomic( slot ).compare_and_swap( &s, NULL ) == NULL;
69 }
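occupy_slot succeeds only when the CAS observes NULL: as_atomic(slot).compare_and_swap returns the previous value, so a non-NULL result means another scheduler won the race. A minimal stand-alone sketch of the same test-then-CAS idiom, written against std::atomic with placeholder types rather than the arena's own, is:

    #include <atomic>

    struct scheduler;  // placeholder for generic_scheduler in this sketch

    inline bool occupy_slot_sketch( std::atomic<scheduler*>& slot, scheduler& s ) {
        scheduler* expected = nullptr;
        // Cheap test first, then a single CAS to claim the slot.
        return slot.load( std::memory_order_relaxed ) == nullptr &&
               slot.compare_exchange_strong( expected, &s );
    }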
70 
71 size_t arena::occupy_free_slot_in_range( generic_scheduler& s, size_t lower, size_t upper ) {
72  if ( lower >= upper ) return out_of_arena;
73  // Start search for an empty slot from the one we occupied the last time
74  size_t index = s.my_arena_index;
75  if ( index < lower || index >= upper ) index = s.my_random.get() % (upper - lower) + lower;
76  __TBB_ASSERT( index >= lower && index < upper, NULL );
77  // Find a free slot
78  for ( size_t i = index; i < upper; ++i )
79  if ( occupy_slot(my_slots[i].my_scheduler, s) ) return i;
80  for ( size_t i = lower; i < index; ++i )
81  if ( occupy_slot(my_slots[i].my_scheduler, s) ) return i;
82  return out_of_arena;
83 }
84 
85 template <bool as_worker>
86 size_t arena::occupy_free_slot( generic_scheduler& s ) {
87  // Firstly, masters try to occupy reserved slots
88  size_t index = as_worker ? out_of_arena : occupy_free_slot_in_range( s, 0, my_num_reserved_slots );
89  if ( index == out_of_arena ) {
90  // Secondly, all threads try to occupy all non-reserved slots
91  index = occupy_free_slot_in_range( s, my_num_reserved_slots, my_num_slots );
92  // Likely this arena is already saturated
93  if ( index == out_of_arena )
94  return out_of_arena;
95  }
96 
97  ITT_NOTIFY(sync_acquired, my_slots + index);
98  atomic_update( my_limit, (unsigned)(index + 1), std::less<unsigned>() );
99  return index;
100 }
101 
102 void arena::process( generic_scheduler& s ) {
103  __TBB_ASSERT( is_alive(my_guard), NULL );
104  __TBB_ASSERT( governor::is_set(&s), NULL );
107 
108  __TBB_ASSERT( my_num_slots > 1, NULL );
109 
110  size_t index = occupy_free_slot</*as_worker*/true>( s );
111  if ( index == out_of_arena )
112  goto quit;
113 
114  __TBB_ASSERT( index >= my_num_reserved_slots, "Workers cannot occupy reserved slots" );
115  s.attach_arena( this, index, /*is_master*/false );
116 
117 #if !__TBB_FP_CONTEXT
118  my_cpu_ctl_env.set_env();
119 #endif
120 
121 #if __TBB_ARENA_OBSERVER
122  __TBB_ASSERT( !s.my_last_local_observer, "There cannot be notified local observers when entering arena" );
123  my_observers.notify_entry_observers( s.my_last_local_observer, /*worker=*/true );
124 #endif /* __TBB_ARENA_OBSERVER */
125 
126  // Task pool can be marked as non-empty if the worker occupies the slot left by a master.
127  if ( s.my_arena_slot->task_pool != EmptyTaskPool ) {
128  __TBB_ASSERT( s.my_inbox.is_idle_state(false), NULL );
129  s.local_wait_for_all( *s.my_dummy_task, NULL );
130  __TBB_ASSERT( s.my_inbox.is_idle_state(true), NULL );
131  }
132 
133  for ( ;; ) {
136  __TBB_ASSERT( is_alive(my_guard), NULL );
137  __TBB_ASSERT( s.is_quiescent_local_task_pool_reset(),
138  "Worker cannot leave arena while its task pool is not reset" );
139  __TBB_ASSERT( s.my_arena_slot->task_pool == EmptyTaskPool, "Empty task pool is not marked appropriately" );
140  // This check prevents relinquishing more workers than necessary because
141  // of the non-atomicity of the decision-making procedure
142  if ( is_recall_requested() )
143  break;
144  // Try to steal a task.
145  // Passing reference count is technically unnecessary in this context,
146  // but omitting it here would add checks inside the function.
147  task* t = s.receive_or_steal_task( __TBB_ISOLATION_ARG( s.my_dummy_task->prefix().ref_count, no_isolation ) );
148  if (t) {
149  // A side effect of receive_or_steal_task is that my_innermost_running_task can be set.
150  // But for the outermost dispatch loop it has to be a dummy task.
151  s.my_innermost_running_task = s.my_dummy_task;
152  s.local_wait_for_all( *s.my_dummy_task, t );
153  }
154  }
155 #if __TBB_ARENA_OBSERVER
156  my_observers.notify_exit_observers( s.my_last_local_observer, /*worker=*/true );
157  s.my_last_local_observer = NULL;
158 #endif /* __TBB_ARENA_OBSERVER */
159 #if __TBB_TASK_PRIORITY
160  if ( s.my_offloaded_tasks )
161  orphan_offloaded_tasks( s );
162 #endif /* __TBB_TASK_PRIORITY */
163 #if __TBB_STATISTICS
164  ++s.my_counters.arena_roundtrips;
165  *my_slots[index].my_counters += s.my_counters;
166  s.my_counters.reset();
167 #endif /* __TBB_STATISTICS */
168  __TBB_store_with_release( my_slots[index].my_scheduler, (generic_scheduler*)NULL );
169  s.my_arena_slot = 0; // detached from slot
170  s.my_inbox.detach();
171  __TBB_ASSERT( s.my_inbox.is_idle_state(true), NULL );
174  __TBB_ASSERT( is_alive(my_guard), NULL );
175 quit:
176  // In contrast to earlier versions of TBB (before 3.0 U5), it is now possible
177  // that the arena may be temporarily left unpopulated by threads. See comments in
178  // arena::on_thread_leaving() for more details.
179  on_thread_leaving<ref_worker>();
180 }
181 
182 arena::arena ( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
183  __TBB_ASSERT( !my_guard, "improperly allocated arena?" );
184  __TBB_ASSERT( sizeof(my_slots[0]) % NFS_GetLineSize()==0, "arena::slot size not multiple of cache line size" );
185  __TBB_ASSERT( (uintptr_t)this % NFS_GetLineSize()==0, "arena misaligned" );
186 #if __TBB_TASK_PRIORITY
187  __TBB_ASSERT( !my_reload_epoch && !my_orphaned_tasks && !my_skipped_fifo_priority, "New arena object is not zeroed" );
188 #endif /* __TBB_TASK_PRIORITY */
189  my_market = &m;
190  my_limit = 1;
191  // Two slots are mandatory: for the master, and for 1 worker (required to support starvation resistant tasks).
192  my_num_slots = num_arena_slots(num_slots);
193  my_num_reserved_slots = num_reserved_slots;
194  my_max_num_workers = num_slots-num_reserved_slots;
195  my_references = ref_external; // accounts for the master
196 #if __TBB_TASK_PRIORITY
197  my_bottom_priority = my_top_priority = normalized_normal_priority;
198 #endif /* __TBB_TASK_PRIORITY */
199  my_aba_epoch = m.my_arenas_aba_epoch;
200 #if __TBB_ARENA_OBSERVER
201  my_observers.my_arena = this;
202 #endif
203 #if __TBB_PREVIEW_RESUMABLE_TASKS
204  my_co_cache.init(4 * num_slots);
205 #endif
206  __TBB_ASSERT ( my_max_num_workers <= my_num_slots, NULL );
207  // Construct slots. Mark internal synchronization elements for the tools.
208  for( unsigned i = 0; i < my_num_slots; ++i ) {
209  __TBB_ASSERT( !my_slots[i].my_scheduler && !my_slots[i].task_pool, NULL );
210  __TBB_ASSERT( !my_slots[i].task_pool_ptr, NULL );
211  __TBB_ASSERT( !my_slots[i].my_task_pool_size, NULL );
212 #if __TBB_PREVIEW_RESUMABLE_TASKS
213  __TBB_ASSERT( !my_slots[i].my_scheduler_is_recalled, NULL );
214 #endif
215  ITT_SYNC_CREATE(my_slots + i, SyncType_Scheduler, SyncObj_WorkerTaskPool);
216  mailbox(i+1).construct();
217  ITT_SYNC_CREATE(&mailbox(i+1), SyncType_Scheduler, SyncObj_Mailbox);
218  my_slots[i].hint_for_pop = i;
219 #if __TBB_PREVIEW_CRITICAL_TASKS
220  my_slots[i].hint_for_critical = i;
221 #endif
222 #if __TBB_STATISTICS
223  my_slots[i].my_counters = new ( NFS_Allocate(1, sizeof(statistics_counters), NULL) ) statistics_counters;
224 #endif /* __TBB_STATISTICS */
225  }
226  my_task_stream.initialize(my_num_slots);
227  ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream);
228 #if __TBB_PREVIEW_CRITICAL_TASKS
229  my_critical_task_stream.initialize(my_num_slots);
230  ITT_SYNC_CREATE(&my_critical_task_stream, SyncType_Scheduler, SyncObj_CriticalTaskStream);
231 #endif
232 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
233  my_local_concurrency_mode = false;
234  my_global_concurrency_mode = false;
235 #endif
236 #if !__TBB_FP_CONTEXT
237  my_cpu_ctl_env.get_env();
238 #endif
239 }
240 
241 arena& arena::allocate_arena( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
242  __TBB_ASSERT( sizeof(base_type) + sizeof(arena_slot) == sizeof(arena), "All arena data fields must go to arena_base" );
243  __TBB_ASSERT( sizeof(base_type) % NFS_GetLineSize() == 0, "arena slots area misaligned: wrong padding" );
244  __TBB_ASSERT( sizeof(mail_outbox) == NFS_MaxLineSize, "Mailbox padding is wrong" );
245  size_t n = allocation_size(num_arena_slots(num_slots));
246  unsigned char* storage = (unsigned char*)NFS_Allocate( 1, n, NULL );
247  // Zero all slots to indicate that they are empty
248  memset( storage, 0, n );
249  return *new( storage + num_arena_slots(num_slots) * sizeof(mail_outbox) ) arena(m, num_slots, num_reserved_slots);
250 }
251 
252 void arena::free_arena () {
253  __TBB_ASSERT( is_alive(my_guard), NULL );
254  __TBB_ASSERT( !my_references, "There are threads in the dying arena" );
255  __TBB_ASSERT( !my_num_workers_requested && !my_num_workers_allotted, "Dying arena requests workers" );
256  __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, "Inconsistent state of a dying arena" );
257 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
258  __TBB_ASSERT( !my_global_concurrency_mode, NULL );
259 #endif
260 #if !__TBB_STATISTICS_EARLY_DUMP
261  GATHER_STATISTIC( dump_arena_statistics() );
262 #endif
263  poison_value( my_guard );
264  intptr_t drained = 0;
265  for ( unsigned i = 0; i < my_num_slots; ++i ) {
266  __TBB_ASSERT( !my_slots[i].my_scheduler, "arena slot is not empty" );
267  // TODO: understand the assertion and modify
268  // __TBB_ASSERT( my_slots[i].task_pool == EmptyTaskPool, NULL );
269  __TBB_ASSERT( my_slots[i].head == my_slots[i].tail, NULL ); // TODO: replace by is_quiescent_local_task_pool_empty
270  my_slots[i].free_task_pool();
271 #if __TBB_STATISTICS
272  NFS_Free( my_slots[i].my_counters );
273 #endif /* __TBB_STATISTICS */
274  drained += mailbox(i+1).drain();
275  }
276  __TBB_ASSERT( my_task_stream.drain()==0, "Not all enqueued tasks were executed");
277 #if __TBB_PREVIEW_RESUMABLE_TASKS
278  // Cleanup coroutines/schedulers cache
279  my_co_cache.cleanup();
280 #endif
281 #if __TBB_PREVIEW_CRITICAL_TASKS
282  __TBB_ASSERT( my_critical_task_stream.drain()==0, "Not all critical tasks were executed");
283 #endif
284 #if __TBB_COUNT_TASK_NODES
285  my_market->update_task_node_count( -drained );
286 #endif /* __TBB_COUNT_TASK_NODES */
287  // remove an internal reference
288  my_market->release( /*is_public=*/false, /*blocking_terminate=*/false );
289 #if __TBB_TASK_GROUP_CONTEXT
290  __TBB_ASSERT( my_default_ctx, "Master thread never entered the arena?" );
291  my_default_ctx->~task_group_context();
292  NFS_Free(my_default_ctx);
293 #endif /* __TBB_TASK_GROUP_CONTEXT */
294 #if __TBB_ARENA_OBSERVER
295  if ( !my_observers.empty() )
296  my_observers.clear();
297 #endif /* __TBB_ARENA_OBSERVER */
298  void* storage = &mailbox(my_num_slots);
299  __TBB_ASSERT( my_references == 0, NULL );
300  __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );
301  this->~arena();
302 #if TBB_USE_ASSERT > 1
303  memset( storage, 0, allocation_size(my_num_slots) );
304 #endif /* TBB_USE_ASSERT */
305  NFS_Free( storage );
306 }
307 
308 #if __TBB_STATISTICS
309 void arena::dump_arena_statistics () {
310  statistics_counters total;
311  for( unsigned i = 0; i < my_num_slots; ++i ) {
312 #if __TBB_STATISTICS_EARLY_DUMP
313  generic_scheduler* s = my_slots[i].my_scheduler;
314  if ( s )
315  *my_slots[i].my_counters += s->my_counters;
316 #else
317  __TBB_ASSERT( !my_slots[i].my_scheduler, NULL );
318 #endif
319  if ( i != 0 ) {
320  total += *my_slots[i].my_counters;
321  dump_statistics( *my_slots[i].my_counters, i );
322  }
323  }
324  dump_statistics( *my_slots[0].my_counters, 0 );
325 #if __TBB_STATISTICS_STDOUT
326 #if !__TBB_STATISTICS_TOTALS_ONLY
327  printf( "----------------------------------------------\n" );
328 #endif
329  dump_statistics( total, workers_counters_total );
330  total += *my_slots[0].my_counters;
331  dump_statistics( total, arena_counters_total );
332 #if !__TBB_STATISTICS_TOTALS_ONLY
333  printf( "==============================================\n" );
334 #endif
335 #endif /* __TBB_STATISTICS_STDOUT */
336 }
337 #endif /* __TBB_STATISTICS */
338 
339 #if __TBB_TASK_PRIORITY
340 // The method inspects a scheduler to determine:
341 // 1. if it has tasks that can be retrieved and executed (via the return value);
342 // 2. if it has any tasks at all, including those of lower priority (via tasks_present);
343 // 3. if it is able to work with enqueued tasks (via dequeuing_possible).
344 inline bool arena::may_have_tasks ( generic_scheduler* s, bool& tasks_present, bool& dequeuing_possible ) {
345  if ( !s || s->my_arena != this )
346  return false;
347  dequeuing_possible |= s->worker_outermost_level();
348  if ( s->my_pool_reshuffling_pending ) {
349  // This primary task pool is nonempty and may contain tasks at the current
350  // priority level. Its owner is winnowing lower priority tasks at the moment.
351  tasks_present = true;
352  return true;
353  }
354  if ( s->my_offloaded_tasks ) {
355  tasks_present = true;
356  if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {
357  // This scheduler's offload area is nonempty and may contain tasks at the
358  // current priority level.
359  return true;
360  }
361  }
362  return false;
363 }
364 
365 void arena::orphan_offloaded_tasks(generic_scheduler& s) {
366  __TBB_ASSERT( s.my_offloaded_tasks, NULL );
367  GATHER_STATISTIC( ++s.my_counters.prio_orphanings );
368  ++my_abandonment_epoch;
369  __TBB_ASSERT( s.my_offloaded_task_list_tail_link && !*s.my_offloaded_task_list_tail_link, NULL );
370  task* orphans;
371  do {
372  orphans = const_cast<task*>(my_orphaned_tasks);
373  *s.my_offloaded_task_list_tail_link = orphans;
374  } while ( as_atomic(my_orphaned_tasks).compare_and_swap(s.my_offloaded_tasks, orphans) != orphans );
375  s.my_offloaded_tasks = NULL;
376 #if TBB_USE_ASSERT
377  s.my_offloaded_task_list_tail_link = NULL;
378 #endif /* TBB_USE_ASSERT */
379 }
380 #endif /* __TBB_TASK_PRIORITY */
381 
382 bool arena::has_enqueued_tasks() {
383  // Look for enqueued tasks at all priority levels
384  for ( int p = 0; p < num_priority_levels; ++p )
385  if ( !my_task_stream.empty(p) )
386  return true;
387  return false;
388 }
389 
390 void arena::restore_priority_if_need() {
391  // Check for the presence of enqueued tasks "lost" on some of the
392  // priority levels because updating arena priority and switching
393  // arena into "populated" (FULL) state happen non-atomically.
394  // Imposing atomicity would require task::enqueue() to use a lock,
395  // which is unacceptable.
396  if ( has_enqueued_tasks() ) {
397  advertise_new_work<work_enqueued>();
398 #if __TBB_TASK_PRIORITY
399  // update_arena_priority() expects non-zero arena::my_num_workers_requested,
400  // so must be called after advertise_new_work<work_enqueued>()
401  for ( int p = 0; p < num_priority_levels; ++p )
402  if ( !my_task_stream.empty(p) ) {
403  if ( p < my_bottom_priority || p > my_top_priority )
404  my_market->update_arena_priority(*this, p);
405  }
406 #endif
407  }
408 }
409 
410 bool arena::is_out_of_work() {
411  // TODO: rework it to return at least a hint about where a task was found; better if the task itself.
412  for(;;) {
413  pool_state_t snapshot = my_pool_state;
414  switch( snapshot ) {
415  case SNAPSHOT_EMPTY:
416  return true;
417  case SNAPSHOT_FULL: {
418  // Use unique id for "busy" in order to avoid ABA problems.
419  const pool_state_t busy = pool_state_t(&busy);
420  // Request permission to take snapshot
421  if( my_pool_state.compare_and_swap( busy, SNAPSHOT_FULL )==SNAPSHOT_FULL ) {
422  // Got permission. Take the snapshot.
423  // NOTE: This is not a lock, as the state can be set to FULL at
424  // any moment by a thread that spawns/enqueues new task.
425  size_t n = my_limit;
426  // Make local copies of volatile parameters. Their change during
427  // snapshot taking procedure invalidates the attempt, and returns
428  // this thread into the dispatch loop.
429 #if __TBB_TASK_PRIORITY
430  uintptr_t reload_epoch = __TBB_load_with_acquire( my_reload_epoch );
431  intptr_t top_priority = my_top_priority;
432  // Inspect primary task pools first
433 #endif /* __TBB_TASK_PRIORITY */
434  size_t k;
435  for( k=0; k<n; ++k ) {
436  if( my_slots[k].task_pool != EmptyTaskPool &&
437  __TBB_load_relaxed(my_slots[k].head) < __TBB_load_relaxed(my_slots[k].tail) )
438  {
439  // k-th primary task pool is nonempty and does contain tasks.
440  break;
441  }
442  if( my_pool_state!=busy )
443  return false; // the work was published
444  }
445  __TBB_ASSERT( k <= n, NULL );
446  bool work_absent = k == n;
447 #if __TBB_PREVIEW_CRITICAL_TASKS
448  bool no_critical_tasks = my_critical_task_stream.empty(0);
449  work_absent &= no_critical_tasks;
450 #endif
451 #if __TBB_TASK_PRIORITY
452  // Variable tasks_present indicates presence of tasks at any priority
453  // level, while work_absent refers only to the current priority.
454  bool tasks_present = !work_absent || my_orphaned_tasks;
455  bool dequeuing_possible = false;
456  if ( work_absent ) {
457  // Check for the possibility that recent priority changes
458  // brought some tasks to the current priority level
459 
460  uintptr_t abandonment_epoch = my_abandonment_epoch;
461  // Master thread's scheduler needs special handling as it
462  // may be destroyed at any moment (workers' schedulers are
463  // guaranteed to be alive while at least one thread is in arena).
464  // The lock below excludes concurrency with task group state change
465  // propagation and guarantees lifetime of the master thread.
466  the_context_state_propagation_mutex.lock();
467  work_absent = !may_have_tasks( my_slots[0].my_scheduler, tasks_present, dequeuing_possible );
468  the_context_state_propagation_mutex.unlock();
469  // The following loop is subject to data races. While k-th slot's
470  // scheduler is being examined, corresponding worker can either
471  // leave to RML or migrate to another arena.
472  // But the races are not prevented because all of them are benign.
473  // First, the code relies on the fact that worker thread's scheduler
474  // object persists until the whole library is deinitialized.
475  // Second, in the worst case the races can only cause another
476  // round of stealing attempts to be undertaken. Introducing complex
477  // synchronization into this coldest part of the scheduler's control
478  // flow does not seem to make sense because it both is unlikely to
479  // ever have any observable performance effect, and will require
480  // additional synchronization code on the hotter paths.
481  for( k = 1; work_absent && k < n; ++k ) {
482  if( my_pool_state!=busy )
483  return false; // the work was published
484  work_absent = !may_have_tasks( my_slots[k].my_scheduler, tasks_present, dequeuing_possible );
485  }
486  // Preclude premature switching arena off because of a race in the previous loop.
487  work_absent = work_absent
488  && !__TBB_load_with_acquire(my_orphaned_tasks)
489  && abandonment_epoch == my_abandonment_epoch;
490  }
491 #endif /* __TBB_TASK_PRIORITY */
492  // Test and test-and-set.
493  if( my_pool_state==busy ) {
494 #if __TBB_TASK_PRIORITY
495  bool no_fifo_tasks = my_task_stream.empty(top_priority);
496  work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks)
497  && top_priority == my_top_priority && reload_epoch == my_reload_epoch;
498 #else
499  bool no_fifo_tasks = my_task_stream.empty(0);
500  work_absent = work_absent && no_fifo_tasks;
501 #endif /* __TBB_TASK_PRIORITY */
502  if( work_absent ) {
503 #if __TBB_TASK_PRIORITY
504  if ( top_priority > my_bottom_priority ) {
505  if ( my_market->lower_arena_priority(*this, top_priority - 1, reload_epoch)
506  && !my_task_stream.empty(top_priority) )
507  {
508  atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>());
509  }
510  }
511  else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {
512 #endif /* __TBB_TASK_PRIORITY */
513  // save current demand value before setting SNAPSHOT_EMPTY,
514  // to avoid race with advertise_new_work.
515  int current_demand = (int)my_max_num_workers;
516  if( my_pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy )==busy ) {
517  // This thread transitioned pool to empty state, and thus is
518  // responsible for telling the market that there is no work to do.
519  my_market->adjust_demand( *this, -current_demand );
520  restore_priority_if_need();
521  return true;
522  }
523  return false;
524 #if __TBB_TASK_PRIORITY
525  }
526 #endif /* __TBB_TASK_PRIORITY */
527  }
528  // Undo previous transition SNAPSHOT_FULL-->busy, unless another thread undid it.
529  my_pool_state.compare_and_swap( SNAPSHOT_FULL, busy );
530  }
531  }
532  return false;
533  }
534  default:
535  // Another thread is taking a snapshot.
536  return false;
537  }
538  }
539 }
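is_out_of_work implements a test-and-test-and-set protocol over my_pool_state: the caller CAS-es SNAPSHOT_FULL to a unique "busy" value (the address of a local, which avoids ABA), re-checks every task source, and only then publishes SNAPSHOT_EMPTY; any concurrently advertised work invalidates the attempt. A condensed sketch of that state machine, using std::atomic and illustrative names rather than the members above, is:

    #include <atomic>
    #include <cstdint>

    static std::atomic<std::uintptr_t> pool_state;  // illustrative stand-in for my_pool_state
    static const std::uintptr_t SNAPSHOT_EMPTY = 0;
    static const std::uintptr_t SNAPSHOT_FULL  = std::uintptr_t(-1);

    // Returns true if this thread proved the pool empty and published SNAPSHOT_EMPTY.
    static bool try_publish_empty( bool (*work_absent)() ) {
        std::uintptr_t expected = SNAPSHOT_FULL;
        const std::uintptr_t busy = reinterpret_cast<std::uintptr_t>(&expected); // unique per call, avoids ABA
        if ( !pool_state.compare_exchange_strong( expected, busy ) )
            return expected == SNAPSHOT_EMPTY;   // already empty, or another thread is snapshotting
        if ( work_absent() && pool_state.load() == busy ) {
            expected = busy;
            if ( pool_state.compare_exchange_strong( expected, SNAPSHOT_EMPTY ) )
                return true;                     // this thread switched the pool off
        }
        expected = busy;
        pool_state.compare_exchange_strong( expected, SNAPSHOT_FULL ); // undo, unless new work already undid it
        return false;
    }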
540 
541 #if __TBB_COUNT_TASK_NODES
542 intptr_t arena::workers_task_node_count() {
543  intptr_t result = 0;
544  for( unsigned i = 1; i < my_num_slots; ++i ) {
545  generic_scheduler* s = my_slots[i].my_scheduler;
546  if( s )
547  result += s->my_task_node_count;
548  }
549  return result;
550 }
551 #endif /* __TBB_COUNT_TASK_NODES */
552 
553 void arena::enqueue_task( task& t, intptr_t prio, FastRandom &random )
554 {
555 #if __TBB_RECYCLE_TO_ENQUEUE
556  __TBB_ASSERT( t.state()==task::allocated || t.state()==task::to_enqueue, "attempt to enqueue task with inappropriate state" );
557 #else
558  __TBB_ASSERT( t.state()==task::allocated, "attempt to enqueue task that is not in 'allocated' state" );
559 #endif
560  t.prefix().state = task::ready;
561  t.prefix().extra_state |= es_task_enqueued; // enqueued task marker
562 
563 #if TBB_USE_ASSERT
564  if( task* parent = t.parent() ) {
565  internal::reference_count ref_count = parent->prefix().ref_count;
566  __TBB_ASSERT( ref_count!=0, "attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
567  __TBB_ASSERT( ref_count>0, "attempt to enqueue task whose parent has a ref_count<0" );
568  parent->prefix().extra_state |= es_ref_count_active;
569  }
570  __TBB_ASSERT(t.prefix().affinity==affinity_id(0), "affinity is ignored for enqueued tasks");
571 #endif /* TBB_USE_ASSERT */
572 #if __TBB_PREVIEW_CRITICAL_TASKS
573  if( prio == internal::priority_critical || internal::is_critical( t ) ) {
574  // TODO: consider using of 'scheduler::handled_as_critical'
575  make_critical( t );
576  generic_scheduler* s = governor::local_scheduler_if_initialized();
577  ITT_NOTIFY(sync_releasing, &my_critical_task_stream);
578  if( s && s->my_arena_slot ) {
579  // Scheduler is initialized and it is attached to the arena,
580  // propagate isolation level to critical task
581 #if __TBB_TASK_ISOLATION
582  t.prefix().isolation = s->my_innermost_running_task->prefix().isolation;
583 #endif
584  unsigned& lane = s->my_arena_slot->hint_for_critical;
585  my_critical_task_stream.push( &t, 0, tbb::internal::subsequent_lane_selector(lane) );
586  } else {
587  // Either scheduler is not initialized or it is not attached to the arena
588  // use random lane for the task
589  my_critical_task_stream.push( &t, 0, internal::random_lane_selector(random) );
590  }
591  advertise_new_work<work_spawned>();
592  return;
593  }
594 #endif /* __TBB_PREVIEW_CRITICAL_TASKS */
595 
596  ITT_NOTIFY(sync_releasing, &my_task_stream);
597 #if __TBB_TASK_PRIORITY
598  intptr_t p = prio ? normalize_priority(priority_t(prio)) : normalized_normal_priority;
599  assert_priority_valid(p);
600 #if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
601  my_task_stream.push( &t, p, internal::random_lane_selector(random) );
602 #else
603  my_task_stream.push( &t, p, random );
604 #endif
605  if ( p != my_top_priority )
606  my_market->update_arena_priority( *this, p );
607 #else /* !__TBB_TASK_PRIORITY */
608  __TBB_ASSERT_EX(prio == 0, "the library is not configured to respect the task priority");
609 #if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
610  my_task_stream.push( &t, 0, internal::random_lane_selector(random) );
611 #else
612  my_task_stream.push( &t, 0, random );
613 #endif
614 #endif /* !__TBB_TASK_PRIORITY */
615  advertise_new_work<work_enqueued>();
616 #if __TBB_TASK_PRIORITY
617  if ( p != my_top_priority )
618  my_market->update_arena_priority( *this, p );
619 #endif /* __TBB_TASK_PRIORITY */
620 }
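arena::enqueue_task is what ultimately backs task_arena::enqueue: the task is pushed into the starvation-resistant FIFO stream (or the critical-task stream) and new work is advertised so that workers are requested if needed. A minimal usage sketch against the public interface declared in tbb/task_arena.h (the arena and lambda below are illustrative):

    #include "tbb/task_arena.h"

    void enqueue_example( tbb::task_arena& a ) {
        // Fire-and-forget submission: the work lands in the arena's FIFO task
        // stream rather than in the calling thread's local spawn pool.
        a.enqueue( []{ /* do some work inside the arena */ } );
    }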
621 
622 class nested_arena_context : no_copy {
623 public:
624  nested_arena_context(generic_scheduler *s, arena* a, size_t slot_index, bool type, bool same)
625  : my_scheduler(*s), my_orig_ctx(NULL), same_arena(same) {
626  if (same_arena) {
627  my_orig_state.my_properties = my_scheduler.my_properties;
628  my_orig_state.my_innermost_running_task = my_scheduler.my_innermost_running_task;
629  mimic_outermost_level(a, type);
630  } else {
631  my_orig_state = *s;
632 #if __TBB_PREVIEW_RESUMABLE_TASKS
633  my_scheduler.my_properties.genuine = true;
634  my_scheduler.my_current_is_recalled = NULL;
635 #endif
636  mimic_outermost_level(a, type);
637  s->nested_arena_entry(a, slot_index);
638  }
639  }
640  ~nested_arena_context () {
641 #if __TBB_TASK_GROUP_CONTEXT
642  my_scheduler.my_dummy_task->prefix().context = my_orig_ctx; // restore context of dummy task
643 #endif
644  if (same_arena) {
645  my_scheduler.my_properties = my_orig_state.my_properties;
646  my_scheduler.my_innermost_running_task = my_orig_state.my_innermost_running_task;
647  } else {
648  my_scheduler.nested_arena_exit();
649  static_cast<scheduler_state&>(my_scheduler) = my_orig_state; // restore arena settings
650 #if __TBB_TASK_PRIORITY
651  my_scheduler.my_local_reload_epoch = *my_orig_state.my_ref_reload_epoch;
652 #endif
653  governor::assume_scheduler(&my_scheduler);
654  }
655  }
656 
657 private:
658  generic_scheduler &my_scheduler;
659  scheduler_state my_orig_state;
660  task_group_context *my_orig_ctx;
661  const bool same_arena;
662 
663  void mimic_outermost_level(arena* a, bool type) {
664  my_scheduler.my_properties.outermost = true;
665  my_scheduler.my_properties.type = type;
666  my_scheduler.my_innermost_running_task = my_scheduler.my_dummy_task;
667 #if __TBB_PREVIEW_CRITICAL_TASKS
668  my_scheduler.my_properties.has_taken_critical_task = false;
669 #endif
670 #if __TBB_TASK_GROUP_CONTEXT
671  // Save dummy's context and replace it by arena's context
672  my_orig_ctx = my_scheduler.my_dummy_task->prefix().context;
673  my_scheduler.my_dummy_task->prefix().context = a->my_default_ctx;
674 #endif
675  }
676 };
677 
678 void generic_scheduler::nested_arena_entry(arena* a, size_t slot_index) {
679  __TBB_ASSERT( is_alive(a->my_guard), NULL );
680  __TBB_ASSERT( a!=my_arena, NULL);
681 
682  // overwrite arena settings
683 #if __TBB_TASK_PRIORITY
684  if ( my_offloaded_tasks )
685  my_arena->orphan_offloaded_tasks( *this );
686  my_offloaded_tasks = NULL;
687 #endif /* __TBB_TASK_PRIORITY */
688  attach_arena( a, slot_index, /*is_master*/true );
689  __TBB_ASSERT( my_arena == a, NULL );
691  // TODO? ITT_NOTIFY(sync_acquired, a->my_slots + index);
692  // TODO: it requires market to have P workers (not P-1)
693  // TODO: a preempted worker should be excluded from assignment to other arenas e.g. my_slack--
694  if( !is_worker() && slot_index >= my_arena->my_num_reserved_slots )
696 #if __TBB_ARENA_OBSERVER
697  my_last_local_observer = 0; // TODO: try optimize number of calls
698  my_arena->my_observers.notify_entry_observers( my_last_local_observer, /*worker=*/false );
699 #endif
700 #if __TBB_PREVIEW_RESUMABLE_TASKS
701  my_wait_task = NULL;
702 #endif
703 }
704 
705 void generic_scheduler::nested_arena_exit() {
706 #if __TBB_ARENA_OBSERVER
707  my_arena->my_observers.notify_exit_observers( my_last_local_observer, /*worker=*/false );
708 #endif /* __TBB_ARENA_OBSERVER */
709 #if __TBB_TASK_PRIORITY
710  if ( my_offloaded_tasks )
711  my_arena->orphan_offloaded_tasks( *this );
712 #endif
715  // Free the master slot.
716  __TBB_ASSERT(my_arena->my_slots[my_arena_index].my_scheduler, "A slot is already empty");
717  __TBB_store_with_release( my_arena->my_slots[my_arena_index].my_scheduler, (generic_scheduler*)NULL );
718  my_arena->my_exit_monitors.notify_one(); // do not relax!
719 }
720 
721 void generic_scheduler::wait_until_empty() {
722  my_dummy_task->prefix().ref_count++; // prevents exit from local_wait_for_all when local work is done enforcing the stealing
723  while( my_arena->my_pool_state != arena::SNAPSHOT_EMPTY )
724  local_wait_for_all( *my_dummy_task, NULL );
725  my_dummy_task->prefix().ref_count--;
726 }
727 
728 #if __TBB_PREVIEW_RESUMABLE_TASKS
729 class resume_task : public task {
730  generic_scheduler& my_target;
731 public:
732  resume_task(generic_scheduler& target) : my_target(target) {}
733  task* execute() __TBB_override {
735  __TBB_ASSERT(s, NULL);
736  if (s->prepare_resume(my_target)) {
737  s->resume(my_target);
738  } else {
739  __TBB_ASSERT(prefix().state == task::executing, NULL);
740  // Request the dispatch loop to exit (because we in a coroutine on the outermost level).
741  prefix().state = task::to_resume;
742  }
743  return NULL;
744  }
745 };
746 
748  // We may have some coroutines cached
749  generic_scheduler* co_sched = curr.my_arena->my_co_cache.pop();
750  if (!co_sched) {
751  // TODO: avoid setting/unsetting the scheduler.
753  co_sched = generic_scheduler::create_worker(*curr.my_market, curr.my_arena_index, /* genuine = */ false);
755  // Prepare newly created scheduler
756  co_sched->my_arena = curr.my_arena;
757  }
758  // Prepare scheduler (general)
759  co_sched->my_dummy_task->prefix().context = co_sched->my_arena->my_default_ctx;
760  // Prolong the arena's lifetime until all coroutines are alive
761  // (otherwise the arena can be destroyed while some tasks are suspended).
763  return *co_sched;
764 }
765 
766 void internal_suspend(void* suspend_callback, void* user_callback) {
768  __TBB_ASSERT(s.my_arena_slot->my_scheduler_is_recalled != NULL, NULL);
769  bool is_recalled = *s.my_arena_slot->my_scheduler_is_recalled;
770  generic_scheduler& target = is_recalled ? *s.my_arena_slot->my_scheduler : create_coroutine(s);
771 
772  generic_scheduler::callback_t callback = {
773  (generic_scheduler::suspend_callback_t)suspend_callback, user_callback, &s };
774  target.set_post_resume_action(generic_scheduler::PRA_CALLBACK, &callback);
775  s.resume(target);
776 }
777 
778 void internal_resume(task::suspend_point tag) {
779  generic_scheduler& s = *static_cast<generic_scheduler*>(tag);
780  task* t = new(&s.allocate_task(sizeof(resume_task), __TBB_CONTEXT_ARG(NULL, s.my_dummy_task->context()))) resume_task(s);
781  make_critical(*t);
782 
783  // TODO: remove this work-around
784  // Prolong the arena's lifetime until all coroutines are alive
785  // (otherwise the arena can be destroyed while some tasks are suspended).
786  arena& a = *s.my_arena;
788 
789  a.my_critical_task_stream.push(t, 0, tbb::internal::random_lane_selector(s.my_random));
790  // Do not access 's' after that point.
792 
793  // Release our reference to my_arena.
795 }
796 
797 task::suspend_point internal_current_suspend_point() {
798  return governor::local_scheduler();
799 }
800 #endif /* __TBB_PREVIEW_RESUMABLE_TASKS */
801 
802 } // namespace internal
803 } // namespace tbb
804 
805 #include "scheduler_utility.h"
806 #include "tbb/task_arena.h" // task_arena_base
807 
808 namespace tbb {
809 namespace interface7 {
810 namespace internal {
811 
812 void task_arena_base::internal_initialize( ) {
813  governor::one_time_init();
814  if( my_max_concurrency < 1 )
815  my_max_concurrency = (int)governor::default_num_threads();
816  __TBB_ASSERT( my_master_slots <= (unsigned)my_max_concurrency, "Number of slots reserved for master should not exceed arena concurrency");
817  arena* new_arena = market::create_arena( my_max_concurrency, my_master_slots, 0 );
818  // add an internal market reference; a public reference was added in create_arena
819  market &m = market::global_market( /*is_public=*/false );
820  // allocate default context for task_arena
821 #if __TBB_TASK_GROUP_CONTEXT
822  new_arena->my_default_ctx = new ( NFS_Allocate(1, sizeof(task_group_context), NULL) )
823  task_group_context( task_group_context::isolated, task_group_context::default_traits );
824 #if __TBB_FP_CONTEXT
825  new_arena->my_default_ctx->capture_fp_settings();
826 #endif
827 #endif /* __TBB_TASK_GROUP_CONTEXT */
828  // threads might race to initialize the arena
829  if(as_atomic(my_arena).compare_and_swap(new_arena, NULL) != NULL) {
830  __TBB_ASSERT(my_arena, NULL); // another thread won the race
831  // release public market reference
832  m.release( /*is_public=*/true, /*blocking_terminate=*/false );
833  new_arena->on_thread_leaving<arena::ref_external>(); // destroy unneeded arena
834 #if __TBB_TASK_GROUP_CONTEXT
835  spin_wait_while_eq(my_context, (task_group_context*)NULL);
836  } else {
837  new_arena->my_default_ctx->my_version_and_traits |= my_version_and_traits & exact_exception_flag;
838  as_atomic(my_context) = new_arena->my_default_ctx;
839 #endif
840  }
841  // TODO: should it trigger automatic initialization of this thread?
843 }
844 
845 void task_arena_base::internal_terminate( ) {
846  if( my_arena ) {// task_arena was initialized
847  my_arena->my_market->release( /*is_public=*/true, /*blocking_terminate=*/false );
848  my_arena->on_thread_leaving<arena::ref_external>();
849  my_arena = 0;
850 #if __TBB_TASK_GROUP_CONTEXT
851  my_context = 0;
852 #endif
853  }
854 }
855 
856 void task_arena_base::internal_attach( ) {
857  __TBB_ASSERT(!my_arena, NULL);
859  if( s && s->my_arena ) {
860  // There is an active arena to attach to.
861  // It's still used by s, so won't be destroyed right away.
862  my_arena = s->my_arena;
863  __TBB_ASSERT( my_arena->my_references > 0, NULL );
864  my_arena->my_references += arena::ref_external;
865 #if __TBB_TASK_GROUP_CONTEXT
866  my_context = my_arena->my_default_ctx;
867  my_version_and_traits |= my_context->my_version_and_traits & exact_exception_flag;
868 #endif
869  my_master_slots = my_arena->my_num_reserved_slots;
870  my_max_concurrency = my_master_slots + my_arena->my_max_num_workers;
871  __TBB_ASSERT(arena::num_arena_slots(my_max_concurrency)==my_arena->my_num_slots, NULL);
872  // increases market's ref count for task_arena
873  market::global_market( /*is_public=*/true );
874  }
875 }
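internal_attach binds a task_arena object to the arena the calling thread already occupies, adding an external reference instead of creating a new arena. A short usage sketch, assuming the task_arena::attach constructor available in this TBB generation:

    #include "tbb/task_arena.h"

    void attach_example() {
        // Reuses the arena the calling thread is currently in, if any;
        // otherwise the task_arena initializes as usual on first use.
        tbb::task_arena a( tbb::task_arena::attach() );
        a.execute( []{ /* runs in the attached (or newly created) arena */ } );
    }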
876 
877 void task_arena_base::internal_enqueue( task& t, intptr_t prio ) const {
878  __TBB_ASSERT(my_arena, NULL);
879  generic_scheduler* s = governor::local_scheduler_weak(); // scheduler is only needed for FastRandom instance
880  __TBB_ASSERT(s, "Scheduler is not initialized"); // we allocated a task so can expect the scheduler
881 #if __TBB_TASK_GROUP_CONTEXT
882  // Is there a better place for checking the state of my_default_ctx?
883  __TBB_ASSERT(!(my_arena->my_default_ctx == t.prefix().context && my_arena->my_default_ctx->is_group_execution_cancelled()),
884  "The task will not be executed because default task_group_context of task_arena is cancelled. Has previously enqueued task thrown an exception?");
885 #endif
886  my_arena->enqueue_task( t, prio, s->my_random );
887 }
888 
889 class delegated_task : public task {
890  internal::delegate_base & my_delegate;
891  concurrent_monitor & my_monitor;
892  task * my_root;
893  task* execute() __TBB_override {
894  generic_scheduler& s = *(generic_scheduler*)prefix().owner;
895  __TBB_ASSERT(s.outermost_level(), "expected to be enqueued and received on the outermost level");
896  struct outermost_context : internal::no_copy {
897  delegated_task * t;
898  generic_scheduler & s;
899  task * orig_dummy;
900  task_group_context * orig_ctx;
901  scheduler_properties orig_props;
902  outermost_context(delegated_task *_t, generic_scheduler &_s)
903  : t(_t), s(_s), orig_dummy(s.my_dummy_task), orig_props(s.my_properties) {
905 #if __TBB_TASK_GROUP_CONTEXT
906  orig_ctx = t->prefix().context;
907  t->prefix().context = s.my_arena->my_default_ctx;
908 #endif
909  // Mimics outermost master
910  s.my_dummy_task = t;
912  }
913  ~outermost_context() {
914 #if __TBB_TASK_GROUP_CONTEXT
915  // Restore context for sake of registering potential exception
916  t->prefix().context = orig_ctx;
917 #endif
918  // Restore scheduler state
919  s.my_properties = orig_props;
920  s.my_dummy_task = orig_dummy;
921  }
922  } scope(this, s);
923  my_delegate();
924  return NULL;
925  }
926  ~delegated_task() {
927  // potential exception was already registered. It must happen before the notification
928  __TBB_ASSERT(my_root->ref_count() == 2, NULL);
929  task_prefix& prefix = my_root->prefix();
930 #if __TBB_PREVIEW_RESUMABLE_TASKS
931  reference_count old_ref_count = __TBB_FetchAndStoreW(&prefix.ref_count, 1);
932  // Check if the scheduler was abandoned.
933  if (old_ref_count == internal::abandon_flag + 2) {
934  __TBB_ASSERT(prefix.abandoned_scheduler, NULL);
935  // The wait has been completed. Spawn a resume task.
936  tbb::task::resume(prefix.abandoned_scheduler);
937  }
938 #else
939  __TBB_store_with_release(prefix.ref_count, 1); // must precede the wakeup
940 #endif
941  my_monitor.notify(*this); // do not relax, it needs a fence!
942  }
943 public:
944  delegated_task(internal::delegate_base &d, concurrent_monitor &s, task *t)
945  : my_delegate(d), my_monitor(s), my_root(t) {}
946  // predicate for concurrent_monitor notification
947  bool operator()(uintptr_t ctx) const { return (void*)ctx == (void*)&my_delegate; }
948 };
949 
950 void task_arena_base::internal_execute(internal::delegate_base& d) const {
951  __TBB_ASSERT(my_arena, NULL);
953  __TBB_ASSERT(s, "Scheduler is not initialized");
954 
955  bool same_arena = s->my_arena == my_arena;
956  size_t index1 = s->my_arena_index;
957  if (!same_arena) {
958  index1 = my_arena->occupy_free_slot</* as_worker*/false>(*s);
959  if (index1 == arena::out_of_arena) {
960 
961 #if __TBB_USE_OPTIONAL_RTTI
962  // Workaround for a bug inside the flow graph. If the thread cannot occupy an arena slot during task_arena::execute()
963  // and all aggregator operations depend on this task's completion (all other threads are already inside the arena),
964  // a deadlock appears because the enqueued task will never enter the arena.
965  // Workaround: check if the task came from the flow graph via RTTI (casting to graph::spawn_functor)
966  // and enqueue this task with the non-blocking internal_enqueue method.
967  // TODO: have to change this behaviour later in the next GOLD release (maybe add a new library entry point - try_execute)
971 
972  if (deleg_funct) {
973  internal_enqueue(*new(task::allocate_root(__TBB_CONTEXT_ARG1(*my_context)))
975  (internal::forward< graph_funct >(deleg_funct->my_func)), 0);
976  return;
977  } else {
978 #endif /* __TBB_USE_OPTIONAL_RTTI */
980 #if __TBB_TASK_GROUP_CONTEXT
981  task_group_context exec_context(task_group_context::isolated, my_version_and_traits & exact_exception_flag);
982 #if __TBB_FP_CONTEXT
983  exec_context.copy_fp_settings(*my_context);
984 #endif
985 #endif
986  auto_empty_task root(__TBB_CONTEXT_ARG(s, &exec_context));
987  root.prefix().ref_count = 2;
990  0, s->my_random); // TODO: priority?
991  size_t index2 = arena::out_of_arena;
992  do {
993  my_arena->my_exit_monitors.prepare_wait(waiter, (uintptr_t)&d);
994  if (__TBB_load_with_acquire(root.prefix().ref_count) < 2) {
995  my_arena->my_exit_monitors.cancel_wait(waiter);
996  break;
997  }
998  index2 = my_arena->occupy_free_slot</*as_worker*/false>(*s);
999  if (index2 != arena::out_of_arena) {
1002  s->local_wait_for_all(root, NULL);
1003 #if TBB_USE_EXCEPTIONS
1004  __TBB_ASSERT(!exec_context.my_exception, NULL); // exception can be thrown above, not deferred
1005 #endif
1006  __TBB_ASSERT(root.prefix().ref_count == 0, NULL);
1007  break;
1008  }
1009  my_arena->my_exit_monitors.commit_wait(waiter);
1010  } while (__TBB_load_with_acquire(root.prefix().ref_count) == 2);
1011  if (index2 == arena::out_of_arena) {
1012  // notify a waiting thread even if this thread did not enter arena,
1013  // in case it was woken by a leaving thread but did not need to enter
1014  my_arena->my_exit_monitors.notify_one(); // do not relax!
1015  }
1016 #if TBB_USE_EXCEPTIONS
1017  // process possible exception
1018  if (task_group_context::exception_container_type* pe = exec_context.my_exception)
1019  TbbRethrowException(pe);
1020 #endif
1021  return;
1022 #if __TBB_USE_OPTIONAL_RTTI
1023  } // if task came from graph
1024 #endif
1025  } // if (index1 == arena::out_of_arena)
1026  } // if (!same_arena)
1027 
1028  context_guard_helper</*report_tasks=*/false> context_guard;
1029  context_guard.set_ctx(__TBB_CONTEXT_ARG1(my_context));
1030 #if TBB_USE_EXCEPTIONS
1031  try {
1032 #endif
1033  //TODO: replace dummy tasks for workers as well to avoid using of the_dummy_context
1035  d();
1036 #if TBB_USE_EXCEPTIONS
1037  }
1038  catch (...) {
1039  context_guard.restore_default(); // TODO: is it needed on Windows?
1040  if (my_version_and_traits & exact_exception_flag) throw;
1041  else {
1044  exception_container.register_pending_exception();
1045  __TBB_ASSERT(exception_container.my_exception, NULL);
1046  TbbRethrowException(exception_container.my_exception);
1047  }
1048  }
1049 #endif
1050 }
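internal_execute backs task_arena::execute: the calling thread tries to occupy a master slot and run the delegate itself; if the arena is saturated, the delegate is wrapped in a delegated_task, enqueued, and the caller blocks on the exit monitor until it completes, with exceptions propagated according to the exact_exception setting. A minimal usage sketch against the public API (the arena size and work below are illustrative):

    #include "tbb/task_arena.h"
    #include "tbb/parallel_for.h"

    int execute_example() {
        tbb::task_arena a( 4 );        // limit concurrency of this work to 4 threads
        int result = 0;
        a.execute( [&] {               // blocks until the functor finishes inside the arena
            tbb::parallel_for( 0, 1000, []( int ) { /* ... */ } );
            result = 42;
        } );
        return result;                 // exceptions thrown inside execute() are rethrown here
    }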
1051 
1052 // this wait task is a temporary approach to wait for arena emptiness for masters without slots
1053 // TODO: it will be rather reworked for one source of notification from is_out_of_work
1054 class wait_task : public task {
1055  binary_semaphore & my_signal;
1056  task* execute() __TBB_override {
1057  generic_scheduler* s = governor::local_scheduler_if_initialized();
1058  __TBB_ASSERT( s, NULL );
1059  __TBB_ASSERT( s->outermost_level(), "The enqueued task can be processed only on outermost level" );
1060  if ( s->is_worker() ) {
1061  __TBB_ASSERT( s->my_innermost_running_task == this, NULL );
1062  // Mimic worker on outermost level to run remaining tasks
1063  s->my_innermost_running_task = s->my_dummy_task;
1064  s->local_wait_for_all( *s->my_dummy_task, NULL );
1065  s->my_innermost_running_task = this;
1066  } else s->my_arena->is_out_of_work(); // avoids starvation of internal_wait: issuing this task makes arena full
1067  my_signal.V();
1068  return NULL;
1069  }
1070 public:
1071  wait_task ( binary_semaphore & sema ) : my_signal(sema) {}
1072 };
1073 
1074 void task_arena_base::internal_wait() const {
1075  __TBB_ASSERT(my_arena, NULL);
1077  __TBB_ASSERT(s, "Scheduler is not initialized");
1078  __TBB_ASSERT(s->my_arena != my_arena || s->my_arena_index == 0, "task_arena::wait_until_empty() is not supported within a worker context" );
1079  if( s->my_arena == my_arena ) {
1080  //unsupported, but try do something for outermost master
1081  __TBB_ASSERT(s->master_outermost_level(), "unsupported");
1082  if( !s->my_arena_index )
1083  while( my_arena->num_workers_active() )
1084  s->wait_until_empty();
1085  } else for(;;) {
1086  while( my_arena->my_pool_state != arena::SNAPSHOT_EMPTY ) {
1087  if( !__TBB_load_with_acquire(my_arena->my_slots[0].my_scheduler) // TODO TEMP: one master, make more masters
1088  && as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL) == NULL ) {
1090  s->wait_until_empty();
1091  } else {
1092  binary_semaphore waiter; // TODO: replace by a single event notification from is_out_of_work
1093  internal_enqueue( *new( task::allocate_root(__TBB_CONTEXT_ARG1(*my_context)) ) wait_task(waiter), 0 ); // TODO: priority?
1094  waiter.P(); // TODO: concurrent_monitor
1095  }
1096  }
1097  if( !my_arena->num_workers_active() && !my_arena->my_slots[0].my_scheduler) // no activity
1098  break; // spin until workers active but avoid spinning in a worker
1099  __TBB_Yield(); // wait until workers and master leave
1100  }
1101 }
1102 
1103 int task_arena_base::internal_current_slot() {
1104  generic_scheduler* s = governor::local_scheduler_if_initialized();
1105  return s? int(s->my_arena_index) : -1;
1106 }
1107 
1108 #if __TBB_TASK_ISOLATION
1109 class isolation_guard : tbb::internal::no_copy {
1110  isolation_tag &guarded;
1111  isolation_tag previous_value;
1112 public:
1113  isolation_guard( isolation_tag &isolation ) : guarded( isolation ), previous_value( isolation ) {}
1114  ~isolation_guard() {
1115  guarded = previous_value;
1116  }
1117 };
1118 
1119 void isolate_within_arena( delegate_base& d, intptr_t isolation ) {
1120  // TODO: Decide what to do if the scheduler is not initialized. Is there a use case for it?
1122  __TBB_ASSERT( s, "this_task_arena::isolate() needs an initialized scheduler" );
1123  // Theoretically, we can keep the current isolation in the scheduler; however, it makes sense to store it in innermost
1124  // running task because it can in principle be queried via task::self().
1125  isolation_tag& current_isolation = s->my_innermost_running_task->prefix().isolation;
1126  // We temporarily change the isolation tag of the currently running task. It will be restored in the destructor of the guard.
1127  isolation_guard guard( current_isolation );
1128  current_isolation = isolation? isolation : reinterpret_cast<isolation_tag>(&d);
1129  d();
1130 }
1131 #endif /* __TBB_TASK_ISOLATION */
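isolate_within_arena temporarily replaces the isolation tag of the innermost running task, so that any wait inside the delegate only picks up tasks spawned in the same isolated region. Its public wrapper is this_task_arena::isolate; a small usage sketch (loop bounds are illustrative):

    #include "tbb/task_arena.h"
    #include "tbb/parallel_for.h"

    void isolate_example() {
        tbb::parallel_for( 0, 8, []( int ) {
            tbb::this_task_arena::isolate( [] {
                // While waiting for this nested loop, the thread will not steal
                // unrelated tasks from outside the isolated region.
                tbb::parallel_for( 0, 8, []( int ) { /* nested work */ } );
            } );
        } );
    }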
1132 
1133 int task_arena_base::internal_max_concurrency(const task_arena *ta) {
1134  arena* a = NULL;
1135  if( ta ) // for special cases of ta->max_concurrency()
1136  a = ta->my_arena;
1137  else if( generic_scheduler* s = governor::local_scheduler_if_initialized() )
1138  a = s->my_arena; // the current arena if any
1139 
1140  if( a ) { // Get parameters from the arena
1141  __TBB_ASSERT( !ta || ta->my_max_concurrency==1, NULL );
1142  return a->my_num_reserved_slots + a->my_max_num_workers;
1143  } else {
1144  __TBB_ASSERT( !ta || ta->my_max_concurrency==automatic, NULL );
1145  return int(governor::default_num_threads());
1146  }
1147 }
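internal_max_concurrency reports the reserved master slots plus the worker count for an initialized arena, and the default thread count otherwise. It is surfaced as task_arena::max_concurrency and this_task_arena::max_concurrency; a brief usage sketch:

    #include "tbb/task_arena.h"

    int concurrency_example( const tbb::task_arena& a ) {
        int here  = tbb::this_task_arena::max_concurrency(); // arena of the calling thread
        int there = a.max_concurrency();                     // the queried arena (or the automatic default)
        return here < there ? here : there;
    }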
1148 } // tbb::interfaceX::internal
1149 } // tbb::interfaceX
1150 } // tbb
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available...
Definition: arena.cpp:86
uintptr_t pool_state_t
Definition: arena.h:306
Tag for enqueued tasks.
delegated_task(internal::delegate_base &d, concurrent_monitor &s, task *t)
Definition: arena.cpp:944
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
Definition: tbb_stddef.h:216
void __TBB_EXPORTED_METHOD internal_attach()
Definition: arena.cpp:856
static generic_scheduler * local_scheduler_weak()
Definition: governor.h:134
bool is_idle_state(bool value) const
Indicate whether thread that reads this mailbox is idle.
Definition: mailbox.h:218
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
Definition: arena.h:181
Base class for user-defined tasks.
Definition: task.h:604
A fast random number generator.
Definition: tbb_misc.h:130
Bit-field representing properties of a scheduler.
Definition: scheduler.h:50
task is in ready pool, or is going to be put there, or was just taken off.
Definition: task.h:630
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:105
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
Definition: task.h:263
bool outermost_level() const
True if the scheduler is on the outermost dispatch level.
Definition: scheduler.h:649
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
bool operator()(uintptr_t ctx) const
Definition: arena.cpp:947
#define ITT_SYNC_CREATE(obj, type, name)
Definition: itt_notify.h:119
static const intptr_t num_priority_levels
unsigned num_workers_active() const
The number of workers active in the arena.
Definition: arena.h:325
bool is_worker() const
True if running on a worker thread, false otherwise.
Definition: scheduler.h:673
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
Definition: market.cpp:307
void __TBB_EXPORTED_METHOD internal_terminate()
Definition: arena.cpp:845
unsigned char state
A task::state_type, stored as a byte for compactness.
Definition: task.h:272
void __TBB_EXPORTED_METHOD internal_wait() const
Definition: arena.cpp:1074
void __TBB_store_with_release(volatile T &location, V value)
Definition: tbb_machine.h:716
intptr_t reference_count
A reference count.
Definition: task.h:120
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t isolation=0)
void notify_one()
Notify one thread about the event.
generic_scheduler & my_scheduler
Definition: arena.cpp:658
bool is_critical(task &t)
Definition: task.h:1003
bool commit_wait(thread_context &thr)
Commit wait if event count has not changed; otherwise, cancel wait.
const isolation_tag no_isolation
Definition: task.h:133
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
Definition: scheduler.h:85
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
Definition: arena.h:191
Exception container that preserves the exact copy of the original exception.
arena(market &, unsigned max_num_workers, unsigned num_reserved_slots)
Constructor.
Definition: arena.cpp:182
void __TBB_EXPORTED_METHOD internal_initialize()
Definition: arena.cpp:812
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
Definition: market.h:143
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
Definition: governor.h:129
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:652
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
#define __TBB_ISOLATION_ARG(arg1, isolation)
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
Definition: arena.cpp:553
isolation_tag isolation
The tag used for task isolation.
Definition: task.h:209
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
Definition: task.h:438
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
Definition: scheduler.h:88
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
Definition: governor.cpp:120
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert ...
Definition: tbb_stddef.h:167
static bool occupy_slot(generic_scheduler *&slot, generic_scheduler &s)
Definition: arena.cpp:67
intptr_t isolation_tag
A tag for task isolation.
Definition: task.h:132
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
unsigned my_num_slots
The number of slots in the arena.
Definition: arena.h:241
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
Definition: scheduler.h:653
task_group_context * my_orig_ctx
Definition: arena.cpp:660
bool is_quiescent_local_task_pool_reset() const
Definition: scheduler.h:644
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
Definition: task.h:281
Class representing where mail is put.
Definition: mailbox.h:96
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: arena.cpp:893
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
Definition: scheduler.h:175
void free_arena()
Completes arena shutdown, destructs and deallocates it.
Definition: arena.cpp:252
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
Definition: market.cpp:96
bool is_out_of_work()
Check if there is job anywhere in arena.
Definition: arena.cpp:410
int my_max_concurrency
Concurrency level for deferred initialization.
Definition: task_arena.h:113
static unsigned num_arena_slots(unsigned num_slots)
Definition: arena.h:287
Work stealing task scheduler.
Definition: scheduler.h:137
Set if ref_count might be changed by another thread. Used for debugging.
#define __TBB_Yield()
Definition: ibm_aix51.h:44
void P()
wait/acquire
Definition: semaphore.h:235
#define GATHER_STATISTIC(x)
void attach_mailbox(affinity_id id)
Definition: scheduler.h:667
#define __TBB_CONTEXT_ARG1(context)
Memory prefix to a task object.
Definition: task.h:192
atomic< T > & as_atomic(T &t)
Definition: atomic.h:572
size_t __TBB_EXPORTED_FUNC NFS_GetLineSize()
Cache/sector line size.
state_type state() const
Current execution state.
Definition: task.h:901
task_group_context * context()
This method is deprecated and will be removed in the future.
Definition: task.h:867
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
Definition: task.h:991
static const int priority_critical
Definition: task.h:302
scheduler_properties my_properties
Definition: scheduler.h:101
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
Definition: arena.cpp:102
Smart holder for the empty task class with automatic destruction.
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
Definition: arena.cpp:71
T1 atomic_update(tbb::atomic< T1 > &dst, T2 newValue, Pred compare)
Atomically replaces value of dst with newValue if they satisfy condition of compare predicate...
Definition: tbb_misc.h:181
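The entry above describes a predicate-guarded atomic replacement. The sketch below shows the compare-and-swap loop such a helper typically reduces to, specialised here to an atomic running maximum; it illustrates the idiom and is not the exact body of tbb::internal::atomic_update:

#include "tbb/atomic.h"

// Install 'candidate' into 'dst' while the stored value is smaller,
// i.e. compute a running maximum without locks.
inline int update_max( tbb::atomic<int>& dst, int candidate ) {
    int snapshot = dst;
    while ( snapshot < candidate ) {
        // compare_and_swap succeeds only if dst still equals 'snapshot'.
        int prev = dst.compare_and_swap( candidate, snapshot );
        if ( prev == snapshot ) break;   // our value was installed
        snapshot = prev;                 // lost the race; re-evaluate
    }
    return snapshot;
}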
void on_thread_leaving()
Notification that worker or master leaves its arena.
Definition: arena.h:385
void adjust_demand(arena &, int delta)
Request that the arena's demand for workers be adjusted.
Definition: market.cpp:556
market * my_market
The market I am in.
Definition: scheduler.h:172
atomic< unsigned > my_references
Reference counter for the arena.
Definition: arena.h:149
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
Definition: arena.cpp:382
void prepare_wait(thread_context &thr, uintptr_t ctx=0)
Prepare a wait by inserting 'thr' into the wait queue.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
Used to form groups of tasks.
Definition: task.h:347
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:116
static unsigned default_num_threads()
Definition: governor.h:84
internal::delegate_base & my_delegate
Definition: arena.cpp:890
bool release(bool is_public, bool blocking_terminate)
Decrements the market's refcount and destroys the market once it drops to zero.
Definition: market.cpp:175
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
Definition: tbb_machine.h:394
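spin_wait_while_eq() busy-waits (with backoff) until the watched location stops being equal to the given value. A hedged sketch of a simple flag handoff; the variables are illustrative, and real code would publish 'payload' with proper release semantics rather than a plain store:

#include "tbb/tbb_machine.h"   // declares tbb::internal::spin_wait_while_eq

static volatile int ready = 0;
static int payload = 0;

void producer() {
    payload = 42;
    ready = 1;     // a release store would be used in production code
}

void consumer() {
    tbb::internal::spin_wait_while_eq( ready, 0 );   // spin while ready == 0
    // 'payload' can now be consumed.
}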
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
Definition: arena.cpp:241
int ref_count() const
The internal reference count.
Definition: task.h:904
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:330
void cancel_wait(thread_context &thr)
Cancel the wait. Removes the thread from the wait queue if not removed yet.
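Together with notify() (listed further below), prepare_wait() and cancel_wait() form a two-phase sleep protocol: register as a waiter, re-check the wake-up condition, then either commit to sleeping or cancel the registration. Because the commit step is not listed on this page, the analogous check-then-sleep pattern is sketched with std::condition_variable standing in for the internal concurrent_monitor:

#include <condition_variable>
#include <mutex>

std::mutex mtx;
std::condition_variable cv;
bool work_available = false;

void wait_for_work() {
    std::unique_lock<std::mutex> lock( mtx );
    // The predicate is re-checked under the lock, so a notification cannot
    // be lost between the check and the block; the lock-free monitor gets
    // the same guarantee from its prepare/commit/cancel handshake.
    cv.wait( lock, []{ return work_available; } );
}

void publish_work() {
    { std::lock_guard<std::mutex> lock( mtx ); work_available = true; }
    cv.notify_one();   // counterpart of concurrent_monitor::notify
}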
static generic_scheduler * create_worker(market &m, size_t index, bool genuine)
Initialize a scheduler for a worker thread.
Definition: scheduler.cpp:1269
task * my_dummy_task
Fake root task created by slave threads.
Definition: scheduler.h:186
void make_critical(task &t)
Definition: task.h:1002
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
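In debug builds __TBB_ASSERT() evaluates its predicate and reports the comment on failure; in release builds it expands to nothing, so the predicate must be free of side effects. A small illustrative use (the function is hypothetical):

#include "tbb/tbb_stddef.h"

void release_slot( void* slot ) {
    __TBB_ASSERT( slot != NULL, "releasing a slot that was never occupied" );
    // ... actual release logic would follow ...
}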
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
Definition: task.h:219
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
Definition: governor.cpp:116
#define __TBB_override
Definition: tbb_stddef.h:240
#define poison_value(g)
void nested_arena_entry(arena *, size_t)
Definition: arena.cpp:678
static const size_t out_of_arena
Definition: arena.h:373
nested_arena_context(generic_scheduler *s, arena *a, size_t slot_index, bool type, bool same)
Definition: arena.cpp:624
bool type
Indicates that a scheduler acts as a master or a worker.
Definition: scheduler.h:54
virtual task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation))=0
Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption)...
void create_coroutine(coroutine_type &c, size_t stack_size, void *arg)
Definition: co_context.h:154
void set_is_idle(bool value)
Indicates whether the thread that reads this mailbox is idle.
Definition: mailbox.h:211
T __TBB_load_relaxed(const volatile T &location)
Definition: tbb_machine.h:738
void advertise_new_work()
If necessary, raise a flag that there is new work in the arena.
Definition: arena.h:475
task object is freshly allocated or recycled.
Definition: task.h:632
wait_task(binary_semaphore &sema)
Definition: arena.cpp:1071
void detach()
Detach inbox from its outbox.
Definition: mailbox.h:197
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
Definition: arena.cpp:877
static int __TBB_EXPORTED_FUNC internal_max_concurrency(const task_arena *)
Definition: arena.cpp:1133
static void one_time_init()
Definition: governor.cpp:156
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
Definition: arena.h:254
affinity_id affinity
Definition: task.h:283
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
T __TBB_load_with_acquire(const volatile T &location)
Definition: tbb_machine.h:712
void attach_arena(arena *, size_t index, bool is_master)
Definition: arena.cpp:36
arena_slot my_slots[1]
Definition: arena.h:381
#define EmptyTaskPool
Definition: scheduler.h:46
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
Definition: scheduler.h:82
task is running, and will be destroyed after method execute() completes.
Definition: task.h:626
priority_t
Definition: task.h:306
binary_semaphore for concurrent monitor
Definition: semaphore.h:222
static int __TBB_EXPORTED_FUNC internal_current_slot()
Definition: arena.cpp:1103
bool worker_outermost_level() const
True if the scheduler is on the outermost dispatch level in a worker thread.
Definition: scheduler.h:657
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
Definition: task.h:854
bool outermost
Indicates that a scheduler is on outermost level.
Definition: scheduler.h:57
void mimic_outermost_level(arena *a, bool type)
Definition: arena.cpp:663
static const unsigned ref_external
Reference increment values for externals and workers.
Definition: arena.h:318
void V()
post/release
Definition: semaphore.h:240
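P() and V() are the classic wait/post pair on the internal semaphore types from semaphore.h. A sketch of a result handoff between two threads; the default construction of binary_semaphore, the thread wiring, and the compute()/use() helpers are assumptions made purely for the example:

#include "semaphore.h"   // internal src/tbb header declaring binary_semaphore

int compute();        // placeholder, not a TBB function
void use( int );      // placeholder, not a TBB function

static tbb::internal::binary_semaphore result_ready;
static int result;

void producer_thread() {
    result = compute();
    result_ready.V();     // post/release: wake the consumer
}

void consumer_thread() {
    result_ready.P();     // wait/acquire: blocks until the producer posts
    use( result );
}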
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
Definition: arena.h:309
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
Definition: arena.h:244
unsigned short affinity_id
An id as used for specifying affinity.
Definition: task.h:128
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
Definition: scheduler.h:79
#define __TBB_CONTEXT_ARG(arg1, context)
void notify(const P &predicate)
Notify waiting threads of the event that satisfies the given predicate.
unsigned short get()
Get a random number.
Definition: tbb_misc.h:141
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: arena.cpp:1056
virtual void local_wait_for_all(task &parent, task *child)=0
market * my_market
The market that owns this arena.
Definition: arena.h:223
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
Definition: scheduler.cpp:335
static generic_scheduler * local_scheduler_if_initialized()
Definition: governor.h:139
void __TBB_EXPORTED_METHOD internal_execute(delegate_base &) const
Definition: arena.cpp:950
The graph class.
void restore_priority_if_need()
If enqueued tasks are found, restore the arena's priority and task presence status.
Definition: arena.cpp:390

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.