Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
tbb::interface7::internal::task_arena_base Class Reference

#include <task_arena.h>

Inheritance diagram for tbb::interface7::internal::task_arena_base:
Collaboration diagram for tbb::interface7::internal::task_arena_base:

Static Public Attributes

static const int automatic = -1
 Typedef for number of threads that is automatic. More...
 
static const int not_initialized = -2
 

Protected Types

enum  { default_flags, exact_exception_flag = task_group_context::exact_exception }
 

Protected Member Functions

 task_arena_base (int max_concurrency, unsigned reserved_for_masters)
 
void __TBB_EXPORTED_METHOD internal_initialize ()
 
void __TBB_EXPORTED_METHOD internal_terminate ()
 
void __TBB_EXPORTED_METHOD internal_attach ()
 
void __TBB_EXPORTED_METHOD internal_enqueue (task &, intptr_t) const
 
void __TBB_EXPORTED_METHOD internal_execute (delegate_base &) const
 
void __TBB_EXPORTED_METHOD internal_wait () const
 

Static Protected Member Functions

static int __TBB_EXPORTED_FUNC internal_current_slot ()
 
static int __TBB_EXPORTED_FUNC internal_max_concurrency (const task_arena *)
 

Protected Attributes

internal::arena * my_arena
 NULL if not currently initialized. More...
 
task_group_context * my_context
 default context of the arena More...
 
int my_max_concurrency
 Concurrency level for deferred initialization. More...
 
unsigned my_master_slots
 Reserved master slots. More...
 
intptr_t my_version_and_traits
 Special settings. More...
 

Detailed Description

Definition at line 102 of file task_arena.h.

Member Enumeration Documentation

◆ anonymous enum

anonymous enum
protected
Enumerator
default_flags 
exact_exception_flag 

Definition at line 121 of file task_arena.h.

Constructor & Destructor Documentation

◆ task_arena_base()

tbb::interface7::internal::task_arena_base::task_arena_base ( int  max_concurrency,
unsigned  reserved_for_masters 
)
inlineprotected

Definition at line 129 of file task_arena.h.

References __TBB_EXPORTED_FUNC, and __TBB_EXPORTED_METHOD.

130  : my_arena(0)
131 #if __TBB_TASK_GROUP_CONTEXT
132  , my_context(0)
133 #endif
134  , my_max_concurrency(max_concurrency)
135  , my_master_slots(reserved_for_masters)
136  , my_version_and_traits(default_flags)
137  {}
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:105
int max_concurrency()
Returns the maximal number of threads that can work inside the arena.
Definition: task_arena.h:413
task_group_context * my_context
default context of the arena
Definition: task_arena.h:109
intptr_t my_version_and_traits
Special settings.
Definition: task_arena.h:119
int my_max_concurrency
Concurrency level for deferred initialization.
Definition: task_arena.h:113
unsigned my_master_slots
Reserved master slots.
Definition: task_arena.h:116

Member Function Documentation

◆ internal_attach()

void tbb::interface7::internal::task_arena_base::internal_attach ( )
protected

Definition at line 856 of file arena.cpp.

References __TBB_ASSERT, tbb::internal::market::global_market(), tbb::internal::governor::local_scheduler_if_initialized(), tbb::internal::scheduler_state::my_arena, tbb::internal::arena_base::my_max_num_workers, tbb::internal::arena_base::my_num_reserved_slots, tbb::internal::arena_base::my_num_slots, tbb::internal::arena_base::my_references, tbb::internal::arena::num_arena_slots(), tbb::internal::arena::ref_external, and s.

856  {
857  __TBB_ASSERT(!my_arena, NULL);
858  generic_scheduler* s = governor::local_scheduler_if_initialized();
859  if( s && s->my_arena ) {
860  // There is an active arena to attach to.
861  // It's still used by s, so won't be destroyed right away.
862  my_arena = s->my_arena;
863  __TBB_ASSERT( my_arena->my_references > 0, NULL );
865 #if __TBB_TASK_GROUP_CONTEXT
866  my_context = my_arena->my_default_ctx;
868 #endif
872  // increases market's ref count for task_arena
873  market::global_market( /*is_public=*/true );
874  }
875 }
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
Definition: arena.h:181
void const char const char int ITT_FORMAT __itt_group_sync s
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:105
unsigned my_num_slots
The number of slots in the arena.
Definition: arena.h:241
task_group_context * my_context
default context of the arena
Definition: task_arena.h:109
intptr_t my_version_and_traits
Special settings.
Definition: task_arena.h:119
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
Definition: market.cpp:96
int my_max_concurrency
Concurrency level for deferred initialization.
Definition: task_arena.h:113
static unsigned num_arena_slots(unsigned num_slots)
Definition: arena.h:287
atomic< unsigned > my_references
Reference counter for the arena.
Definition: arena.h:149
unsigned my_master_slots
Reserved master slots.
Definition: task_arena.h:116
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
Definition: task.h:435
static const unsigned ref_external
Reference increment values for externals and workers.
Definition: arena.h:318
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
Definition: arena.h:244
static generic_scheduler * local_scheduler_if_initialized()
Definition: governor.h:139
Here is the call graph for this function:

◆ internal_current_slot()

int tbb::interface7::internal::task_arena_base::internal_current_slot ( )
staticprotected

Definition at line 1103 of file arena.cpp.

References __TBB_ASSERT, d, int, tbb::interface7::internal::isolate_within_arena(), tbb::internal::task_prefix::isolation, tbb::internal::governor::local_scheduler_if_initialized(), tbb::internal::governor::local_scheduler_weak(), tbb::internal::scheduler_state::my_arena_index, tbb::internal::scheduler_state::my_innermost_running_task, tbb::task::prefix(), and s.

1103  {
1104  generic_scheduler* s = governor::local_scheduler_if_initialized();
1105  return s? int(s->my_arena_index) : -1;
1106 }
void const char const char int ITT_FORMAT __itt_group_sync s
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
static generic_scheduler * local_scheduler_if_initialized()
Definition: governor.h:139
Here is the call graph for this function:

◆ internal_enqueue()

void tbb::interface7::internal::task_arena_base::internal_enqueue ( task &  t,
intptr_t  prio 
) const
protected

Definition at line 877 of file arena.cpp.

References __TBB_ASSERT, tbb::internal::task_prefix::context, tbb::internal::arena::enqueue_task(), tbb::internal::governor::local_scheduler_weak(), tbb::internal::scheduler_state::my_arena, tbb::internal::generic_scheduler::my_random, tbb::task::prefix(), and s.

Referenced by tbb::task::enqueue().

877  {
878  __TBB_ASSERT(my_arena, NULL);
879  generic_scheduler* s = governor::local_scheduler_weak(); // scheduler is only needed for FastRandom instance
880  __TBB_ASSERT(s, "Scheduler is not initialized"); // we allocated a task so can expect the scheduler
881 #if __TBB_TASK_GROUP_CONTEXT
882  // Is there a better place for checking the state of my_default_ctx?
883  __TBB_ASSERT(!(my_arena->my_default_ctx == t.prefix().context && my_arena->my_default_ctx->is_group_execution_cancelled()),
884  "The task will not be executed because default task_group_context of task_arena is cancelled. Has previously enqueued task thrown an exception?");
885 #endif
886  my_arena->enqueue_task( t, prio, s->my_random );
887 }
static generic_scheduler * local_scheduler_weak()
Definition: governor.h:134
void const char const char int ITT_FORMAT __itt_group_sync s
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:105
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
Definition: arena.cpp:553
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_execute()

void tbb::interface7::internal::task_arena_base::internal_execute ( internal::delegate_base &  d ) const
protected

Definition at line 950 of file arena.cpp.

References __TBB_ASSERT, __TBB_CONTEXT_ARG, __TBB_CONTEXT_ARG1, tbb::internal::__TBB_load_with_acquire(), tbb::task::allocate_root(), tbb::internal::concurrent_monitor::cancel_wait(), tbb::internal::concurrent_monitor::commit_wait(), tbb::task_group_context::copy_fp_settings(), d, tbb::task_group_context::default_traits, tbb::internal::arena::enqueue_task(), tbb::task_group_context::exact_exception, tbb::task_group_context::isolated, tbb::internal::governor::local_scheduler_weak(), tbb::internal::generic_scheduler::local_wait_for_all(), tbb::internal::scheduler_properties::master, tbb::internal::scheduler_state::my_arena, tbb::internal::scheduler_state::my_arena_index, tbb::task_group_context::my_exception, tbb::internal::arena_base::my_exit_monitors, tbb::interface7::internal::delegated_function< F, R >::my_func, tbb::internal::generic_scheduler::my_random, tbb::internal::concurrent_monitor::notify_one(), tbb::internal::arena::occupy_free_slot(), tbb::internal::arena::out_of_arena, tbb::internal::auto_empty_task::prefix(), tbb::internal::concurrent_monitor::prepare_wait(), tbb::internal::task_prefix::ref_count, tbb::task_group_context::register_pending_exception(), tbb::internal::context_guard_helper< T >::restore_default(), s, scope, and tbb::internal::context_guard_helper< T >::set_ctx().

950  {
951  __TBB_ASSERT(my_arena, NULL);
952  generic_scheduler* s = governor::local_scheduler_weak();
953  __TBB_ASSERT(s, "Scheduler is not initialized");
954 
955  bool same_arena = s->my_arena == my_arena;
956  size_t index1 = s->my_arena_index;
957  if (!same_arena) {
958  index1 = my_arena->occupy_free_slot</* as_worker*/false>(*s);
959  if (index1 == arena::out_of_arena) {
960 
961 #if __TBB_USE_OPTIONAL_RTTI
962  // Workaround for the bug inside graph. If the thread can not occupy arena slot during task_arena::execute()
963  // and all aggregator operations depend on this task completion (all other threads are inside arena already)
964  // deadlock appears, because enqueued task will never enter arena.
965  // Workaround: check if the task came from graph via RTTI (casting to graph::spawn_functor)
966  // and enqueue this task with non-blocking internal_enqueue method.
967  // TODO: have to change behaviour later in next GOLD release (maybe to add new library entry point - try_execute)
969  internal::delegated_function< graph_funct, void >* deleg_funct =
970  dynamic_cast< internal::delegated_function< graph_funct, void>* >(&d);
971 
972  if (deleg_funct) {
974  internal::function_task< internal::strip< graph_funct >::type >
975  (internal::forward< graph_funct >(deleg_funct->my_func)), 0);
976  return;
977  } else {
978 #endif /* __TBB_USE_OPTIONAL_RTTI */
979  concurrent_monitor::thread_context waiter;
980 #if __TBB_TASK_GROUP_CONTEXT
981  task_group_context exec_context(task_group_context::isolated, my_version_and_traits & exact_exception_flag);
982 #if __TBB_FP_CONTEXT
983  exec_context.copy_fp_settings(*my_context);
984 #endif
985 #endif
986  auto_empty_task root(__TBB_CONTEXT_ARG(s, &exec_context));
987  root.prefix().ref_count = 2;
989  delegated_task(d, my_arena->my_exit_monitors, &root),
990  0, s->my_random); // TODO: priority?
991  size_t index2 = arena::out_of_arena;
992  do {
993  my_arena->my_exit_monitors.prepare_wait(waiter, (uintptr_t)&d);
 994  if (__TBB_load_with_acquire(root.prefix().ref_count) < 2) {
 995  my_arena->my_exit_monitors.cancel_wait(waiter);
 996  break;
 997  }
998  index2 = my_arena->occupy_free_slot</*as_worker*/false>(*s);
999  if (index2 != arena::out_of_arena) {
1001  nested_arena_context scope(s, my_arena, index2, scheduler_properties::master, same_arena);
1002  s->local_wait_for_all(root, NULL);
1003 #if TBB_USE_EXCEPTIONS
1004  __TBB_ASSERT(!exec_context.my_exception, NULL); // exception can be thrown above, not deferred
1005 #endif
1006  __TBB_ASSERT(root.prefix().ref_count == 0, NULL);
1007  break;
1008  }
1010  } while (__TBB_load_with_acquire(root.prefix().ref_count) == 2);
1011  if (index2 == arena::out_of_arena) {
1012  // notify a waiting thread even if this thread did not enter arena,
1013  // in case it was woken by a leaving thread but did not need to enter
1014  my_arena->my_exit_monitors.notify_one(); // do not relax!
1015  }
1016 #if TBB_USE_EXCEPTIONS
1017  // process possible exception
1018  if (task_group_context::exception_container_type *pe = exec_context.my_exception)
1019  TbbRethrowException(pe);
1020 #endif
1021  return;
1022 #if __TBB_USE_OPTIONAL_RTTI
1023  } // if task came from graph
1024 #endif
1025  } // if (index1 == arena::out_of_arena)
1026  } // if (!same_arena)
1027 
1028  context_guard_helper</*report_tasks=*/false> context_guard;
1029  context_guard.set_ctx(__TBB_CONTEXT_ARG1(my_context));
1030 #if TBB_USE_EXCEPTIONS
1031  try {
1032 #endif
1033  //TODO: replace dummy tasks for workers as well to avoid using of the_dummy_context
1034  nested_arena_context scope(s, my_arena, index1, scheduler_properties::master, same_arena);
1035  d();
1036 #if TBB_USE_EXCEPTIONS
1037  }
1038  catch (...) {
1039  context_guard.restore_default(); // TODO: is it needed on Windows?
1040  if (my_version_and_traits & exact_exception_flag) throw;
1041  else {
1042  task_group_context exception_container(task_group_context::isolated,
1043  task_group_context::default_traits & ~task_group_context::exact_exception);
1044  exception_container.register_pending_exception();
1045  __TBB_ASSERT(exception_container.my_exception, NULL);
1046  TbbRethrowException(exception_container.my_exception);
1047  }
1048  }
1049 #endif
1050 }
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available...
Definition: arena.cpp:86
static generic_scheduler * local_scheduler_weak()
Definition: governor.h:134
void const char const char int ITT_FORMAT __itt_group_sync s
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:105
void notify_one()
Notify one thread about the event.
bool commit_wait(thread_context &thr)
Commit wait if event count has not changed; otherwise, cancel wait.
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:652
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
Definition: arena.cpp:553
task_group_context * my_context
default context of the arena
Definition: task_arena.h:109
intptr_t my_version_and_traits
Special settings.
Definition: task_arena.h:119
#define __TBB_CONTEXT_ARG1(context)
void prepare_wait(thread_context &thr, uintptr_t ctx=0)
prepare wait by inserting 'thr' into the wait queue
void cancel_wait(thread_context &thr)
Cancel the wait. Removes the thread from the wait queue if not removed yet.
internal::tbb_exception_ptr exception_container_type
Definition: task.h:356
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
static const size_t out_of_arena
Definition: arena.h:373
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
Definition: arena.cpp:877
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
Definition: arena.h:254
T __TBB_load_with_acquire(const volatile T &location)
Definition: tbb_machine.h:712
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d __itt_event ITT_FORMAT __itt_group_mark d void const wchar_t const wchar_t int ITT_FORMAT __itt_group_sync __itt_group_fsync x void const wchar_t int const wchar_t int int ITT_FORMAT __itt_group_sync __itt_group_fsync x void ITT_FORMAT __itt_group_sync __itt_group_fsync p void ITT_FORMAT __itt_group_sync __itt_group_fsync p void size_t ITT_FORMAT lu no args __itt_obj_prop_t __itt_obj_state_t ITT_FORMAT d const char ITT_FORMAT s __itt_frame ITT_FORMAT p 
const char const char ITT_FORMAT s __itt_counter ITT_FORMAT p __itt_counter unsigned long long ITT_FORMAT lu const wchar_t ITT_FORMAT S __itt_mark_type const wchar_t ITT_FORMAT S __itt_mark_type const char ITT_FORMAT s __itt_mark_type ITT_FORMAT d __itt_caller ITT_FORMAT p __itt_caller ITT_FORMAT p no args const __itt_domain __itt_clock_domain unsigned long long __itt_id ITT_FORMAT lu const __itt_domain __itt_clock_domain unsigned long long __itt_id __itt_id void ITT_FORMAT p const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_clock_domain unsigned long long __itt_id __itt_string_handle __itt_scope scope
#define __TBB_CONTEXT_ARG(arg1, context)
Here is the call graph for this function:

◆ internal_initialize()

void tbb::interface7::internal::task_arena_base::internal_initialize ( )
protected

Definition at line 812 of file arena.cpp.

References __TBB_ASSERT, tbb::internal::as_atomic(), tbb::internal::market::create_arena(), tbb::internal::governor::default_num_threads(), tbb::task_group_context::default_traits, tbb::internal::market::global_market(), int, tbb::task_group_context::isolated, tbb::internal::governor::local_scheduler_weak(), tbb::internal::scheduler_state::my_arena, tbb::internal::NFS_Allocate(), tbb::internal::arena::on_thread_leaving(), tbb::internal::governor::one_time_init(), tbb::internal::arena::ref_external, tbb::internal::market::release(), and tbb::internal::spin_wait_while_eq().

812  {
814  if( my_max_concurrency < 1 )
816  __TBB_ASSERT( my_master_slots <= (unsigned)my_max_concurrency, "Number of slots reserved for master should not exceed arena concurrency");
817  arena* new_arena = market::create_arena( my_max_concurrency, my_master_slots, 0 );
818  // add an internal market reference; a public reference was added in create_arena
819  market &m = market::global_market( /*is_public=*/false );
820  // allocate default context for task_arena
821 #if __TBB_TASK_GROUP_CONTEXT
822  new_arena->my_default_ctx = new ( NFS_Allocate(1, sizeof(task_group_context), NULL) )
824 #if __TBB_FP_CONTEXT
825  new_arena->my_default_ctx->capture_fp_settings();
826 #endif
827 #endif /* __TBB_TASK_GROUP_CONTEXT */
828  // threads might race to initialize the arena
829  if(as_atomic(my_arena).compare_and_swap(new_arena, NULL) != NULL) {
830  __TBB_ASSERT(my_arena, NULL); // another thread won the race
831  // release public market reference
832  m.release( /*is_public=*/true, /*blocking_terminate=*/false );
833  new_arena->on_thread_leaving<arena::ref_external>(); // destroy unneeded arena
834 #if __TBB_TASK_GROUP_CONTEXT
835  spin_wait_while_eq(my_context, (task_group_context*)NULL);
836  } else {
837  new_arena->my_default_ctx->my_version_and_traits |= my_version_and_traits & exact_exception_flag;
838  as_atomic(my_context) = new_arena->my_default_ctx;
839 #endif
840  }
841  // TODO: should it trigger automatic initialization of this thread?
843 }
static generic_scheduler * local_scheduler_weak()
Definition: governor.h:134
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:105
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
Definition: market.cpp:307
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
task_group_context * my_context
default context of the arena
Definition: task_arena.h:109
intptr_t my_version_and_traits
Special settings.
Definition: task_arena.h:119
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
Definition: market.cpp:96
int my_max_concurrency
Concurrency level for deferred initialization.
Definition: task_arena.h:113
atomic< T > & as_atomic(T &t)
Definition: atomic.h:572
static unsigned default_num_threads()
Definition: governor.h:84
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
Definition: tbb_machine.h:394
unsigned my_master_slots
Reserved master slots.
Definition: task_arena.h:116
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
static void one_time_init()
Definition: governor.cpp:156
static const unsigned ref_external
Reference increment values for externals and workers.
Definition: arena.h:318
Here is the call graph for this function:

◆ internal_max_concurrency()

int tbb::interface7::internal::task_arena_base::internal_max_concurrency ( const task_arena *  ta )
staticprotected

Definition at line 1133 of file arena.cpp.

References __TBB_ASSERT, tbb::internal::governor::default_num_threads(), int, tbb::internal::governor::local_scheduler_if_initialized(), my_arena, my_max_concurrency, tbb::internal::arena_base::my_max_num_workers, tbb::internal::arena_base::my_num_reserved_slots, and s.

Referenced by tbb::this_task_arena::max_concurrency().

1133  {
1134  arena* a = NULL;
1135  if( ta ) // for special cases of ta->max_concurrency()
1136  a = ta->my_arena;
1137  else if( generic_scheduler* s = governor::local_scheduler_if_initialized() )
1138  a = s->my_arena; // the current arena if any
1139 
1140  if( a ) { // Get parameters from the arena
1141  __TBB_ASSERT( !ta || ta->my_max_concurrency==1, NULL );
1142  return a->my_num_reserved_slots + a->my_max_num_workers;
1143  } else {
1144  __TBB_ASSERT( !ta || ta->my_max_concurrency==automatic, NULL );
1146  }
1147 }
void const char const char int ITT_FORMAT __itt_group_sync s
static const int automatic
Typedef for number of threads that is automatic.
Definition: task_arena.h:149
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
static unsigned default_num_threads()
Definition: governor.h:84
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
static generic_scheduler * local_scheduler_if_initialized()
Definition: governor.h:139
Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_terminate()

void tbb::interface7::internal::task_arena_base::internal_terminate ( )
protected

Definition at line 845 of file arena.cpp.

References tbb::internal::scheduler_state::my_arena, tbb::internal::arena_base::my_market, tbb::internal::arena::on_thread_leaving(), tbb::internal::arena::ref_external, and tbb::internal::market::release().

845  {
846  if( my_arena ) {// task_arena was initialized
847  my_arena->my_market->release( /*is_public=*/true, /*blocking_terminate=*/false );
849  my_arena = 0;
850 #if __TBB_TASK_GROUP_CONTEXT
851  my_context = 0;
852 #endif
853  }
854 }
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:105
task_group_context * my_context
default context of the arena
Definition: task_arena.h:109
void on_thread_leaving()
Notification that worker or master leaves its arena.
Definition: arena.h:385
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
Definition: market.cpp:175
static const unsigned ref_external
Reference increment values for externals and workers.
Definition: arena.h:318
market * my_market
The market that owns this arena.
Definition: arena.h:223
Here is the call graph for this function:

◆ internal_wait()

void tbb::interface7::internal::task_arena_base::internal_wait ( ) const
protected

Definition at line 1074 of file arena.cpp.

References __TBB_ASSERT, __TBB_CONTEXT_ARG1, tbb::internal::__TBB_load_with_acquire(), __TBB_Yield, tbb::task::allocate_root(), tbb::internal::as_atomic(), tbb::internal::governor::local_scheduler_weak(), tbb::internal::generic_scheduler::master_outermost_level(), tbb::internal::scheduler_state::my_arena, tbb::internal::scheduler_state::my_arena_index, tbb::internal::arena_base::my_pool_state, tbb::internal::arena_slot_line1::my_scheduler, tbb::internal::arena::my_slots, tbb::internal::arena::num_workers_active(), tbb::internal::binary_semaphore::P(), s, tbb::internal::arena::SNAPSHOT_EMPTY, tbb::internal::generic_scheduler::wait_until_empty(), and tbb::internal::scheduler_properties::worker.

1074  {
1075  __TBB_ASSERT(my_arena, NULL);
1076  generic_scheduler* s = governor::local_scheduler_weak();
1077  __TBB_ASSERT(s, "Scheduler is not initialized");
1078  __TBB_ASSERT(s->my_arena != my_arena || s->my_arena_index == 0, "task_arena::wait_until_empty() is not supported within a worker context" );
1079  if( s->my_arena == my_arena ) {
1080  //unsupported, but try do something for outermost master
1081  __TBB_ASSERT(s->master_outermost_level(), "unsupported");
1082  if( !s->my_arena_index )
1083  while( my_arena->num_workers_active() )
1084  s->wait_until_empty();
1085  } else for(;;) {
1087  if( !__TBB_load_with_acquire(my_arena->my_slots[0].my_scheduler) // TODO TEMP: one master, make more masters
1088  && as_atomic(my_arena->my_slots[0].my_scheduler).compare_and_swap(s, NULL) == NULL ) {
1089  nested_arena_context a(s, my_arena, 0, scheduler_properties::worker, false);
1090  s->wait_until_empty();
1091  } else {
1092  binary_semaphore waiter; // TODO: replace by a single event notification from is_out_of_work
1093  internal_enqueue( *new( task::allocate_root(__TBB_CONTEXT_ARG1(*my_context)) ) wait_task(waiter), 0 ); // TODO: priority?
1094  waiter.P(); // TODO: concurrent_monitor
1095  }
1096  }
1097  if( !my_arena->num_workers_active() && !my_arena->my_slots[0].my_scheduler) // no activity
1098  break; // spin until workers active but avoid spinning in a worker
1099  __TBB_Yield(); // wait until workers and master leave
1100  }
1101 }
static generic_scheduler * local_scheduler_weak()
Definition: governor.h:134
void const char const char int ITT_FORMAT __itt_group_sync s
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:105
unsigned num_workers_active() const
The number of workers active in the arena.
Definition: arena.h:325
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
Definition: arena.h:191
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:652
task_group_context * my_context
default context of the arena
Definition: task_arena.h:109
#define __TBB_Yield()
Definition: ibm_aix51.h:44
#define __TBB_CONTEXT_ARG1(context)
atomic< T > & as_atomic(T &t)
Definition: atomic.h:572
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
Definition: arena.cpp:877
T __TBB_load_with_acquire(const volatile T &location)
Definition: tbb_machine.h:712
arena_slot my_slots[1]
Definition: arena.h:381
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
Definition: arena.h:309
Here is the call graph for this function:

Member Data Documentation

◆ automatic

const int tbb::interface7::internal::task_arena_base::automatic = -1
static

Typedef for number of threads that is automatic.

Definition at line 149 of file task_arena.h.

◆ my_arena

internal::arena* tbb::interface7::internal::task_arena_base::my_arena
protected

NULL if not currently initialized.

Definition at line 105 of file task_arena.h.

Referenced by internal_max_concurrency().

◆ my_context

task_group_context* tbb::interface7::internal::task_arena_base::my_context
protected

default context of the arena

Definition at line 109 of file task_arena.h.

◆ my_master_slots

unsigned tbb::interface7::internal::task_arena_base::my_master_slots
protected

Reserved master slots.

Definition at line 116 of file task_arena.h.

◆ my_max_concurrency

int tbb::interface7::internal::task_arena_base::my_max_concurrency
protected

Concurrency level for deferred initialization.

Definition at line 113 of file task_arena.h.

Referenced by internal_max_concurrency().

◆ my_version_and_traits

intptr_t tbb::interface7::internal::task_arena_base::my_version_and_traits
protected

Special settings.

Definition at line 119 of file task_arena.h.

◆ not_initialized

const int tbb::interface7::internal::task_arena_base::not_initialized = -2
static

The documentation for this class was generated from the following files:

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.