Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
task_group_context.cpp
/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "scheduler.h"

#include "itt_notify.h"

namespace tbb {

#if __TBB_TASK_GROUP_CONTEXT

using namespace internal;

//------------------------------------------------------------------------
// captured_exception
//------------------------------------------------------------------------

inline char* duplicate_string ( const char* src ) {
    char* dst = NULL;
    if ( src ) {
        size_t len = strlen(src) + 1;
        dst = (char*)allocate_via_handler_v3(len);
        strncpy (dst, src, len);
    }
    return dst;
}

captured_exception::~captured_exception () throw() {
    clear();
}

void captured_exception::set ( const char* a_name, const char* info ) throw() {
    my_exception_name = duplicate_string( a_name );
    my_exception_info = duplicate_string( info );
}

void captured_exception::clear () throw() {
    deallocate_via_handler_v3 (const_cast<char*>(my_exception_name));
    deallocate_via_handler_v3 (const_cast<char*>(my_exception_info));
}

captured_exception* captured_exception::move () throw() {
    captured_exception *e = (captured_exception*)allocate_via_handler_v3(sizeof(captured_exception));
    if ( e ) {
        ::new (e) captured_exception();
        e->my_exception_name = my_exception_name;
        e->my_exception_info = my_exception_info;
        e->my_dynamic = true;
        my_exception_name = my_exception_info = NULL;
    }
    return e;
}

void captured_exception::destroy () throw() {
    __TBB_ASSERT ( my_dynamic, "Method destroy can be used only on objects created by clone or allocate" );
    if ( my_dynamic ) {
        this->captured_exception::~captured_exception();
        deallocate_via_handler_v3 (this);
    }
}

captured_exception* captured_exception::allocate ( const char* a_name, const char* info ) {
    captured_exception *e = (captured_exception*)allocate_via_handler_v3( sizeof(captured_exception) );
    if ( e ) {
        ::new (e) captured_exception(a_name, info);
        e->my_dynamic = true;
    }
    return e;
}

const char* captured_exception::name() const throw() {
    return my_exception_name;
}

const char* captured_exception::what() const throw() {
    return my_exception_info;
}

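/*
    A minimal lifecycle sketch, assuming the public tbb::captured_exception
    interface declared in tbb/tbb_exception.h (the string values below are just
    placeholders for an intercepted exception's RTTI name and what() text):

        tbb::captured_exception e("std::runtime_error", "worker failed");
        tbb::captured_exception* copy = e.move();   // heap copy now owns the strings
        // ... hand `copy` to the thread that waits for the failing task ...
        // copy->name() yields "std::runtime_error", copy->what() yields "worker failed"
        copy->destroy();                            // frees objects created by move()/allocate()
*/
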
//------------------------------------------------------------------------
// tbb_exception_ptr
//------------------------------------------------------------------------

#if !TBB_USE_CAPTURED_EXCEPTION

namespace internal {

template<typename T>
tbb_exception_ptr* AllocateExceptionContainer( const T& src ) {
    tbb_exception_ptr *eptr = (tbb_exception_ptr*)allocate_via_handler_v3( sizeof(tbb_exception_ptr) );
    if ( eptr )
        new (eptr) tbb_exception_ptr(src);
    return eptr;
}

tbb_exception_ptr* tbb_exception_ptr::allocate () {
    return AllocateExceptionContainer( std::current_exception() );
}

tbb_exception_ptr* tbb_exception_ptr::allocate ( const tbb_exception& ) {
    return AllocateExceptionContainer( std::current_exception() );
}

tbb_exception_ptr* tbb_exception_ptr::allocate ( captured_exception& src ) {
    tbb_exception_ptr *res = AllocateExceptionContainer( src );
    src.destroy();
    return res;
}

void tbb_exception_ptr::destroy () throw() {
    this->tbb_exception_ptr::~tbb_exception_ptr();
    deallocate_via_handler_v3 (this);
}

} // namespace internal
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
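
/*
    When !TBB_USE_CAPTURED_EXCEPTION, tbb_exception_ptr is a thin container
    around a std::exception_ptr captured via std::current_exception(), as the
    allocate overloads above show. A minimal sketch of that underlying standard
    mechanism, using only the C++ standard library (no TBB types):

        #include <exception>
        #include <stdexcept>
        #include <thread>

        int main() {
            std::exception_ptr eptr;
            std::thread worker([&eptr] {
                try {
                    throw std::runtime_error("boom");
                } catch (...) {
                    eptr = std::current_exception();   // capture on the throwing thread
                }
            });
            worker.join();
            try {
                if (eptr)
                    std::rethrow_exception(eptr);      // rethrow on the waiting thread
            } catch (const std::runtime_error&) {
                // the "boom" failure is observed here
            }
        }
*/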


//------------------------------------------------------------------------
// task_group_context
//------------------------------------------------------------------------

task_group_context::~task_group_context () {
    if ( __TBB_load_relaxed(my_kind) == binding_completed ) {
        if ( governor::is_set(my_owner) ) {
            // Local update of the context list
            uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
            my_owner->my_local_ctx_list_update.store<relaxed>(1);
            // Prevent the load of the nonlocal update flag from being hoisted before the
            // store to the local update flag.
            atomic_fence();
            if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {
                spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_local_ctx_list_update.store<relaxed>(0);
            }
            else {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                // A release fence is necessary so that the update of our neighbors in
                // the context list is committed before a possible concurrent destroyer
                // proceeds after the local update flag is reset by the following store.
                my_owner->my_local_ctx_list_update.store<release>(0);
                if ( local_count_snapshot != the_context_state_propagation_epoch ) {
                    // Another thread was propagating a cancellation request when we removed
                    // ourselves from the list. We must ensure that it is not accessing us
                    // when this destructor finishes. We'll be able to acquire the lock
                    // below only after the other thread finishes with us.
                    spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
                }
            }
        }
        else {
            // Nonlocal update of the context list
            // Synchronizes with generic_scheduler::cleanup_local_context_list()
            // TODO: evaluate and perhaps relax, or add some lock instead
            if ( internal::as_atomic(my_kind).fetch_and_store(dying) == detached ) {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
            }
            else {
                //TODO: evaluate and perhaps relax
                my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();
                //TODO: evaluate and perhaps remove
                spin_wait_until_eq( my_owner->my_local_ctx_list_update, 0u );
                my_owner->my_context_list_mutex.lock();
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_context_list_mutex.unlock();
                //TODO: evaluate and perhaps relax
                my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();
            }
        }
    }
#if __TBB_FP_CONTEXT
    internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();
#endif
    poison_value(my_version_and_traits);
    if ( my_exception )
        my_exception->destroy();
    ITT_STACK(itt_caller != ITT_CALLER_NULL, caller_destroy, itt_caller);
}

void task_group_context::init () {
#if DO_ITT_NOTIFY
    // Check the version of the task group context to avoid reporting a misleading identifier.
    if( ( my_version_and_traits & version_mask ) < 3 )
        my_name = internal::CUSTOM_CTX;
#endif
    ITT_TASK_GROUP(this, my_name, NULL);
    __TBB_STATIC_ASSERT ( sizeof(my_version_and_traits) >= 4, "Layout of my_version_and_traits must be reconsidered on this platform" );
    __TBB_STATIC_ASSERT ( sizeof(task_group_context) == 2 * NFS_MaxLineSize, "Context class has wrong size - check padding and members alignment" );
    __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" );
    __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == isolated || __TBB_load_relaxed(my_kind) == bound, "Context can be created only as isolated or bound" );
    my_parent = NULL;
    my_cancellation_requested = 0;
    my_exception = NULL;
    my_owner = NULL;
    my_state = 0;
    itt_caller = ITT_CALLER_NULL;
#if __TBB_TASK_PRIORITY
    my_priority = normalized_normal_priority;
#endif /* __TBB_TASK_PRIORITY */
#if __TBB_FP_CONTEXT
    __TBB_STATIC_ASSERT( sizeof(my_cpu_ctl_env) == sizeof(internal::uint64_t), "The space reserved for FPU settings does not equal sizeof(uint64_t)" );
    __TBB_STATIC_ASSERT( sizeof(cpu_ctl_env) <= sizeof(my_cpu_ctl_env), "FPU settings storage does not fit into uint64_t" );
    suppress_unused_warning( my_cpu_ctl_env.space );

    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    new ( &ctl ) cpu_ctl_env;
    if ( my_version_and_traits & fp_settings )
        ctl.get_env();
#endif
}

void task_group_context::register_with ( generic_scheduler *local_sched ) {
    __TBB_ASSERT( local_sched, NULL );
    my_owner = local_sched;
    // State propagation logic assumes new contexts are bound to the head of the list.
    my_node.my_prev = &local_sched->my_context_list_head;
    // Notify threads that may be concurrently destroying contexts registered
    // in this scheduler's list that a local list update is underway.
    local_sched->my_local_ctx_list_update.store<relaxed>(1);
    // Prevent the load of the global propagation epoch counter from being hoisted before
    // the speculative stores above, as well as the load of the nonlocal update flag from
    // being hoisted before the store to the local update flag.
    atomic_fence();
    // Finalize the local context list update.
    if ( local_sched->my_nonlocal_ctx_list_update.load<relaxed>() ) {
        spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
        local_sched->my_context_list_head.my_next->my_prev = &my_node;
        my_node.my_next = local_sched->my_context_list_head.my_next;
        my_owner->my_local_ctx_list_update.store<relaxed>(0);
        local_sched->my_context_list_head.my_next = &my_node;
    }
    else {
        local_sched->my_context_list_head.my_next->my_prev = &my_node;
        my_node.my_next = local_sched->my_context_list_head.my_next;
        my_owner->my_local_ctx_list_update.store<release>(0);
        // The thread-local list of contexts allows concurrent traversal by another thread
        // while a state change is being propagated. To ensure visibility of my_node's members
        // to the concurrently traversing thread, the list's head is updated by means
        // of a store-with-release.
        __TBB_store_with_release(local_sched->my_context_list_head.my_next, &my_node);
    }
}
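
/*
    The store<relaxed>(1) / atomic_fence() / load<relaxed>() sequence above is a
    Dekker-style handshake: the full fence keeps the load of the peer's flag from
    being hoisted above the store to our own flag. A standalone sketch of the same
    pattern with std::atomic (the flag names here are illustrative, not TBB's):

        #include <atomic>

        std::atomic<int> local_update{0};      // "I am changing the list"
        std::atomic<int> nonlocal_update{0};   // "someone else is changing the list"

        void announce_then_check() {
            local_update.store(1, std::memory_order_relaxed);     // announce first
            std::atomic_thread_fence(std::memory_order_seq_cst);  // store-load barrier
            if (nonlocal_update.load(std::memory_order_relaxed)) {
                // Concurrent updater detected: fall back to the mutex-protected path.
            } else {
                // Fast path: update the list and publish it with a release store.
            }
        }
*/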

void task_group_context::bind_to ( generic_scheduler *local_sched ) {
    __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == binding_required, "Already bound or isolated?" );
    __TBB_ASSERT ( !my_parent, "Parent is set before initial binding" );
    my_parent = local_sched->my_innermost_running_task->prefix().context;
#if __TBB_FP_CONTEXT
    // Inherit FPU settings only if the context has not captured FPU settings yet.
    if ( !(my_version_and_traits & fp_settings) )
        copy_fp_settings(*my_parent);
#endif

    // The condition below prevents unnecessary thrashing of the parent context's cache line.
    if ( !(my_parent->my_state & may_have_children) )
        my_parent->my_state |= may_have_children; // full fence is below
    if ( my_parent->my_parent ) {
        // Even if this context were made accessible for state change propagation
        // (by placing __TBB_store_with_release(s->my_context_list_head.my_next, &my_node)
        // above), a state change could still be missed if propagation from a grand-ancestor
        // was underway concurrently with binding.
        // Speculative propagation from the parent, together with epoch counters that
        // detect the possibility of such a race, makes it possible to avoid taking locks
        // when there is no contention.

        // An acquire fence is necessary to prevent reordering of subsequent speculative
        // loads of parent state data out of the scope where the epoch counters comparison
        // can reliably validate them.
        uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch );
        // Speculative propagation of the parent's state. The speculation will be
        // validated by the epoch counters check further on.
        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
        register_with( local_sched ); // Issues full fence

        // If no state propagation was detected by the following condition, the above
        // full fence guarantees that the parent had correct state during the speculative
        // propagation before the fence. Otherwise the propagation from the parent is
        // repeated under the lock.
        if ( local_count_snapshot != the_context_state_propagation_epoch ) {
            // Another thread may be propagating a state change right now. So resort to the lock.
            context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
            my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
            my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
        }
    }
    else {
        register_with( local_sched ); // Issues full fence
        // As we do not have grand-ancestors, concurrent state propagation (if any)
        // may originate only from the parent context, and thus it is safe to directly
        // copy the state from it.
        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
    }
    __TBB_store_relaxed(my_kind, binding_completed);
}
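
/*
    bind_to uses an optimistic, seqlock-like protocol: read the propagation epoch,
    copy the parent's state speculatively, issue a full fence (inside register_with),
    and redo the copy under the lock only if the epoch moved. A simplified standalone
    sketch of that protocol (all names here are illustrative, not TBB's):

        #include <atomic>
        #include <cstdint>
        #include <mutex>

        std::atomic<std::uintptr_t> global_epoch{0};   // bumped by every propagation pass
        std::mutex propagation_mutex;

        void copy_parent_state(std::atomic<std::uintptr_t>& dst,
                               const std::atomic<std::uintptr_t>& parent_state) {
            std::uintptr_t snapshot = global_epoch.load(std::memory_order_acquire);
            dst.store(parent_state.load(std::memory_order_relaxed),
                      std::memory_order_relaxed);                   // speculative copy
            std::atomic_thread_fence(std::memory_order_seq_cst);    // "register" step
            if (snapshot != global_epoch.load(std::memory_order_relaxed)) {
                // A propagation pass ran concurrently; repeat the copy under the lock.
                std::lock_guard<std::mutex> lock(propagation_mutex);
                dst.store(parent_state.load(std::memory_order_relaxed),
                          std::memory_order_relaxed);
            }
        }
*/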

template <typename T>
void task_group_context::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    if (this->*mptr_state == new_state) {
        // Nothing to do, whether descending from "src" or not, so no need to scan.
        // Hopefully this happens often thanks to earlier invocations.
        // This optimization is enabled by LIFO order in the context lists:
        // - new contexts are bound to the beginning of lists;
        // - descendants are newer than ancestors;
        // - earlier invocations are therefore likely to "paint" long chains.
    }
    else if (this == &src) {
        // This clause is disjoint from the traversal below, which skips src entirely.
        // Note that src.*mptr_state is not necessarily still equal to new_state (another thread may have changed it again).
        // Such interference is probably not frequent enough to justify writing new_state again (to make the other thread back down).
        // Letting the other thread prevail may also be fairer.
    }
    else {
        for ( task_group_context *ancestor = my_parent; ancestor != NULL; ancestor = ancestor->my_parent ) {
            __TBB_ASSERT(internal::is_alive(ancestor->my_version_and_traits), "context tree was corrupted");
            if ( ancestor == &src ) {
                for ( task_group_context *ctx = this; ctx != ancestor; ctx = ctx->my_parent )
                    ctx->*mptr_state = new_state;
                break;
            }
        }
    }
}
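
/*
    A simplified, single-threaded sketch of the ancestor walk above, on a plain
    parent-pointer tree (ctx_node and its fields are hypothetical stand-ins for
    task_group_context, my_parent, and the member selected by mptr_state):

        #include <cstdint>

        struct ctx_node {
            ctx_node*      parent;
            std::uintptr_t state;
        };

        void propagate_if_descendant(ctx_node& self, ctx_node& src, std::uintptr_t new_state) {
            if (self.state == new_state || &self == &src)
                return;                                 // already painted, or the origin itself
            for (ctx_node* ancestor = self.parent; ancestor; ancestor = ancestor->parent) {
                if (ancestor == &src) {
                    // src is an ancestor: paint self and the whole chain below src.
                    for (ctx_node* c = &self; c != ancestor; c = c->parent)
                        c->state = new_state;
                    break;
                }
            }
        }
*/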

template <typename T>
void generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    spin_mutex::scoped_lock lock(my_context_list_mutex);
    // An acquire fence is necessary to ensure that the subsequent node->my_next load
    // returns the correct value in case the node was just inserted by another thread.
    // The fence also ensures visibility of the correct my_parent value.
    context_list_node_t *node = __TBB_load_with_acquire(my_context_list_head.my_next);
    while ( node != &my_context_list_head ) {
        task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);
        if ( ctx.*mptr_state != new_state )
            ctx.propagate_task_group_state( mptr_state, src, new_state );
        node = node->my_next;
        __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Local context list contains destroyed object" );
    }
    // Sync up the local propagation epoch with the global one. The release fence prevents
    // reordering of a possible store to *mptr_state after the sync point.
    __TBB_store_with_release(my_context_state_propagation_epoch, the_context_state_propagation_epoch);
}

template <typename T>
bool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    if ( !(src.my_state & task_group_context::may_have_children) )
        return true;
    // The whole propagation algorithm is under the lock in order to ensure correctness
    // in case of concurrent state changes at different levels of the context tree.
    // See the comment at the bottom of scheduler.cpp.
    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
    if ( src.*mptr_state != new_state )
        // Another thread has concurrently changed the state. Back down.
        return false;
    // Advance the global state propagation epoch.
    __TBB_FetchAndAddWrelease(&the_context_state_propagation_epoch, 1);
    // Propagate to all workers and masters, and sync up their local epochs with the global one.
    unsigned num_workers = my_first_unused_worker_idx;
    for ( unsigned i = 0; i < num_workers; ++i ) {
        generic_scheduler *s = my_workers[i];
        // If the worker is only about to be registered, skip it.
        if ( s )
            s->propagate_task_group_state( mptr_state, src, new_state );
    }
    // Propagate to all master threads.
    // The whole propagation sequence is locked, thus no contention is expected.
    for( scheduler_list_type::iterator it = my_masters.begin(); it != my_masters.end(); it++ )
        it->propagate_task_group_state( mptr_state, src, new_state );
    return true;
}

bool task_group_context::cancel_group_execution () {
    __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state");
    if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) {
        // This task group and any descendants have already been canceled.
        // (A newly added descendant would inherit its parent's my_cancellation_requested,
        // not missing out on any cancellation still being propagated, and a context cannot be uncanceled.)
        return false;
    }
    governor::local_scheduler_weak()->my_market->propagate_task_group_state( &task_group_context::my_cancellation_requested, *this, (uintptr_t)1 );
    return true;
}
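
/*
    A minimal user-level sketch of group cancellation through the public API
    (the range bound and the "failure" condition below are placeholders):

        #include <tbb/task.h>            // tbb::task_group_context
        #include <tbb/parallel_for.h>
        #include <tbb/blocked_range.h>

        void cancel_sketch() {
            tbb::task_group_context ctx(tbb::task_group_context::isolated);
            tbb::parallel_for(tbb::blocked_range<int>(0, 1000000),
                [&](const tbb::blocked_range<int>& r) {
                    for (int i = r.begin(); i != r.end(); ++i)
                        if (i == 42)                         // some failure condition
                            ctx.cancel_group_execution();    // first successful request returns true
                },
                ctx);                                        // bind the algorithm to ctx
            // ctx.is_group_execution_cancelled() now reports whether cancellation was requested.
        }
*/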

bool task_group_context::is_group_execution_cancelled () const {
    return my_cancellation_requested != 0;
}

// IMPORTANT: It is assumed that this method is not used concurrently!
void task_group_context::reset () {
    // No fences are necessary since this context can be accessed from another thread
    // only after stealing happened (which means the necessary fences were already used).
    if ( my_exception ) {
        my_exception->destroy();
        my_exception = NULL;
    }
    my_cancellation_requested = 0;
}

#if __TBB_FP_CONTEXT
// IMPORTANT: It is assumed that this method is not used concurrently!
void task_group_context::capture_fp_settings () {
    // No fences are necessary since this context can be accessed from another thread
    // only after stealing happened (which means the necessary fences were already used).
    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    if ( !(my_version_and_traits & fp_settings) ) {
        new ( &ctl ) cpu_ctl_env;
        my_version_and_traits |= fp_settings;
    }
    ctl.get_env();
}

void task_group_context::copy_fp_settings( const task_group_context &src ) {
    __TBB_ASSERT( !(my_version_and_traits & fp_settings), "The context already has FPU settings." );
    __TBB_ASSERT( src.my_version_and_traits & fp_settings, "The source context does not have FPU settings." );

    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    cpu_ctl_env &src_ctl = *internal::punned_cast<cpu_ctl_env*>(&src.my_cpu_ctl_env);
    new (&ctl) cpu_ctl_env( src_ctl );
    my_version_and_traits |= fp_settings;
}
#endif /* __TBB_FP_CONTEXT */
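
/*
    A minimal sketch of capturing FP settings through the public API, assuming the
    library was built with __TBB_FP_CONTEXT support (FE_TOWARDZERO is just an example
    of a non-default floating-point mode):

        #include <tbb/task.h>            // tbb::task_group_context
        #include <tbb/parallel_for.h>
        #include <tbb/blocked_range.h>
        #include <cfenv>

        void fp_sketch() {
            std::fesetround(FE_TOWARDZERO);              // set the desired FP mode on this thread
            tbb::task_group_context ctx(tbb::task_group_context::isolated);
            ctx.capture_fp_settings();                   // snapshot it into the context
            tbb::parallel_for(tbb::blocked_range<int>(0, 1000),
                [](const tbb::blocked_range<int>&) {
                    // worker threads execute with the captured FP settings
                },
                ctx);
        }
*/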

void task_group_context::register_pending_exception () {
    if ( my_cancellation_requested )
        return;
#if TBB_USE_EXCEPTIONS
    try {
        throw;
    } TbbCatchAll( this );
#endif /* TBB_USE_EXCEPTIONS */
}

#if __TBB_TASK_PRIORITY
void task_group_context::set_priority ( priority_t prio ) {
    __TBB_ASSERT( prio == priority_low || prio == priority_normal || prio == priority_high, "Invalid priority level value" );
    intptr_t p = normalize_priority(prio);
    if ( my_priority == p && !(my_state & task_group_context::may_have_children))
        return;
    my_priority = p;
    internal::generic_scheduler* s = governor::local_scheduler_if_initialized();
    if ( !s || !s->my_arena || !s->my_market->propagate_task_group_state(&task_group_context::my_priority, *this, p) )
        return;

    // TODO: we need to find out the right arena for priority update.
    // The executing status check only guarantees being inside some working arena.
    if ( s->my_innermost_running_task->state() == task::executing )
        // Updating the arena priority here does not eliminate the necessity of checking each
        // task priority and updating the arena priority if necessary before the task execution.
        // These checks will be necessary because:
        // a) set_priority() may be invoked before any tasks from this task group are spawned;
        // b) all spawned tasks from this task group are retrieved from the task pools.
        // These cases create a time window when arena priority may be lowered.
        s->my_market->update_arena_priority( *s->my_arena, p );
}

priority_t task_group_context::priority () const {
    return static_cast<priority_t>(priority_from_normalized_rep[my_priority]);
}
#endif /* __TBB_TASK_PRIORITY */

#endif /* __TBB_TASK_GROUP_CONTEXT */

} // namespace tbb