Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
tbb::internal::market Class Reference

#include <market.h>


Public Member Functions

void try_destroy_arena (arena *, uintptr_t aba_epoch)
 Removes the arena from the market's list. More...
 
void detach_arena (arena &)
 Removes the arena from the market's list. More...
 
bool release (bool is_public, bool blocking_terminate)
 Decrements the market's refcount and destroys it when the count reaches zero. More...
 
void adjust_demand (arena &, int delta)
 Request that the arena's demand for workers be adjusted. More...
 
bool must_join_workers () const
 Used when RML asks for the join mode during worker termination. More...
 
size_t worker_stack_size () const
 Returns the requested stack size of worker threads. More...
 

Static Public Member Functions

static arena * create_arena (int num_slots, int num_reserved_slots, size_t stack_size)
 Creates an arena object. More...
 
static void set_active_num_workers (unsigned w)
 Set number of active workers. More...
 
static unsigned app_parallelism_limit ()
 Reports active parallelism level according to user's settings. More...
 
static unsigned max_num_workers ()
 

Private Types

typedef intrusive_list< arena > arena_list_type
 
typedef intrusive_list< generic_scheduler > scheduler_list_type
 
typedef scheduler_mutex_type global_market_mutex_type
 
typedef spin_rw_mutex arenas_list_mutex_type
 Lightweight mutex guarding accounting operations on the arenas list. More...
 

Private Member Functions

 market (unsigned workers_soft_limit, unsigned workers_hard_limit, size_t stack_size)
 Constructor. More...
 
void destroy ()
 Destroys and deallocates market object created by market::create() More...
 
void update_allotment ()
 Recalculates the number of workers assigned to each arena in the list. More...
 
arena * arena_in_need (arena *)
 Returns next arena that needs more workers, or NULL. More...
 
void assert_market_valid () const
 
void insert_arena_into_list (arena &a)
 
void remove_arena_from_list (arena &a)
 
arena * arena_in_need (arena_list_type &arenas, arena *hint)
 
int update_allotment (arena_list_type &arenas, int total_demand, int max_workers)
 
bool is_arena_in_list (arena_list_type &arenas, arena *a)
 
version_type version () const __TBB_override
 
unsigned max_job_count () const __TBB_override
 
size_t min_stack_size () const __TBB_override
 
policy_type policy () const __TBB_override
 
job * create_one_job () __TBB_override
 
void cleanup (job &j) __TBB_override
 
void acknowledge_close_connection () __TBB_override
 
void process (job &j) __TBB_override
 
- Private Member Functions inherited from tbb::internal::no_copy
 no_copy (const no_copy &)=delete
 
 no_copy ()=default
 

Static Private Member Functions

static market & global_market (bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
 Factory method creating new market object. More...
 

Private Attributes

arenas_list_mutex_type my_arenas_list_mutex
 
rml::tbb_server * my_server
 Pointer to the RML server object that services this TBB instance. More...
 
unsigned my_num_workers_hard_limit
 Maximal number of workers allowed for use by the underlying resource manager. More...
 
unsigned my_num_workers_soft_limit
 Current application-imposed limit on the number of workers (see set_active_num_workers()) More...
 
int my_num_workers_requested
 Number of workers currently requested from RML. More...
 
atomic< unsigned > my_first_unused_worker_idx
 First unused worker index. More...
 
int my_total_demand
 Number of workers that were requested by all arenas. More...
 
arena_list_type my_arenas
 List of registered arenas. More...
 
arena * my_next_arena
 The first arena to be checked when an idle worker looks for an arena to enter. More...
 
uintptr_t my_arenas_aba_epoch
 ABA prevention marker to assign to newly created arenas. More...
 
unsigned my_ref_count
 Reference count controlling market object lifetime. More...
 
unsigned my_public_ref_count
 Count of master threads attached. More...
 
size_t my_stack_size
 Stack size of worker threads. More...
 
bool my_join_workers
 Shutdown mode. More...
 
unsigned my_workers_soft_limit_to_report
 Either workers soft limit to be reported via runtime_warning() or skip_soft_limit_warning. More...
 

Static Private Attributes

static market * theMarket
 Currently active global market. More...
 
static global_market_mutex_type theMarketMutex
 Mutex guarding creation/destruction of theMarket, insertions/deletions in my_arenas, and cancellation propagation. More...
 
static const unsigned skip_soft_limit_warning = ~0U
 The value indicating that the soft limit warning is unnecessary. More...
 

Friends

class generic_scheduler
 
class arena
 
class tbb::interface7::internal::task_arena_base
 
template<typename SchedulerTraits >
class custom_scheduler
 
class tbb::task_group_context
 
void ITT_DoUnsafeOneTimeInitialization ()
 

Detailed Description

Definition at line 45 of file market.h.
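
The market is the internal broker of worker threads shared by all arenas in the process; application code never constructs it directly. The following is a minimal, hedged sketch of how the market is reached indirectly through the public API (it assumes the standard TBB headers; the task_arena constructor arguments mirror the num_slots/num_reserved_slots parameters of create_arena() documented below):

#include <tbb/task_arena.h>
#include <tbb/parallel_for.h>

int main() {
    // Constructing and using a task_arena eventually calls market::create_arena(),
    // which in turn calls market::global_market() to create or reuse the singleton.
    tbb::task_arena a(4 /*max concurrency*/, 1 /*slots reserved for masters*/);
    a.execute([] {
        tbb::parallel_for(0, 100, [](int) { /* user work */ });
    });
    return 0;
}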

Member Typedef Documentation

◆ arena_list_type

Definition at line 54 of file market.h.

◆ arenas_list_mutex_type

Lightweight mutex guarding accounting operations on the arenas list.

Definition at line 66 of file market.h.

◆ global_market_mutex_type

◆ scheduler_list_type

Constructor & Destructor Documentation

◆ market()

tbb::internal::market::market ( unsigned  workers_soft_limit,
unsigned  workers_hard_limit,
size_t  stack_size 
)
private

Constructor.

Definition at line 64 of file market.cpp.

65  : my_num_workers_hard_limit(workers_hard_limit)
66  , my_num_workers_soft_limit(workers_soft_limit)
67 #if __TBB_TASK_PRIORITY
68  , my_global_top_priority(normalized_normal_priority)
69  , my_global_bottom_priority(normalized_normal_priority)
70 #endif /* __TBB_TASK_PRIORITY */
71  , my_ref_count(1)
72  , my_stack_size(stack_size)
73  , my_workers_soft_limit_to_report(workers_soft_limit)
74 {
75 #if __TBB_TASK_PRIORITY
76  __TBB_ASSERT( my_global_reload_epoch == 0, NULL );
77  my_priority_levels[normalized_normal_priority].workers_available = my_num_workers_soft_limit;
78 #endif /* __TBB_TASK_PRIORITY */
79 
80  // Once created RML server will start initializing workers that will need
 81  // global market instance to get worker stack size
 82  my_server = governor::create_rml_server( *this );
 83  __TBB_ASSERT( my_server, "Failed to create RML server" );
84 }

References __TBB_ASSERT, tbb::internal::governor::create_rml_server(), my_num_workers_soft_limit, and my_server.

Referenced by global_market().


Member Function Documentation

◆ acknowledge_close_connection()

void tbb::internal::market::acknowledge_close_connection ( )
private

Definition at line 695 of file market.cpp.

695  {
696  destroy();
697 }

References destroy().


◆ adjust_demand()

void tbb::internal::market::adjust_demand ( arena &  a,
int  delta 
)

Request that the arena's demand for workers be adjusted.

Concurrent invocations are possible only on behalf of different arenas.

Definition at line 556 of file market.cpp.

556  {
557  __TBB_ASSERT( theMarket, "market instance was destroyed prematurely?" );
558  if ( !delta )
559  return;
561  int prev_req = a.my_num_workers_requested;
562  a.my_num_workers_requested += delta;
563  if ( a.my_num_workers_requested <= 0 ) {
564  a.my_num_workers_allotted = 0;
565  if ( prev_req <= 0 ) {
567  return;
568  }
569  delta = -prev_req;
570  }
571  else if ( prev_req < 0 ) {
572  delta = a.my_num_workers_requested;
573  }
574  my_total_demand += delta;
575 #if !__TBB_TASK_PRIORITY
577 #else /* !__TBB_TASK_PRIORITY */
578  intptr_t p = a.my_top_priority;
579  priority_level_info &pl = my_priority_levels[p];
580  pl.workers_requested += delta;
581  __TBB_ASSERT( pl.workers_requested >= 0, NULL );
582  if ( a.my_num_workers_requested <= 0 ) {
583  if ( a.my_top_priority != normalized_normal_priority ) {
584  GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.arena_prio_resets );
585  update_arena_top_priority( a, normalized_normal_priority );
586  }
587  a.my_bottom_priority = normalized_normal_priority;
588  }
589  unsigned effective_soft_limit = my_num_workers_soft_limit;
590  if (my_mandatory_num_requested > 0) {
591  __TBB_ASSERT(effective_soft_limit == 0, NULL);
592  effective_soft_limit = 1;
593 
594  }
595  if ( p == my_global_top_priority ) {
596  if ( !pl.workers_requested ) {
597  while ( --p >= my_global_bottom_priority && !my_priority_levels[p].workers_requested )
598  continue;
599  if ( p < my_global_bottom_priority )
600  reset_global_priority();
601  else
602  update_global_top_priority(p);
603  }
604  my_priority_levels[my_global_top_priority].workers_available = effective_soft_limit;
605  update_allotment( my_global_top_priority );
606  }
607  else if ( p > my_global_top_priority ) {
608  __TBB_ASSERT( pl.workers_requested > 0, NULL );
609  // TODO: investigate if the following invariant is always valid
610  __TBB_ASSERT( a.my_num_workers_requested >= 0, NULL );
611  update_global_top_priority(p);
612  a.my_num_workers_allotted = min( (int)effective_soft_limit, a.my_num_workers_requested );
613  my_priority_levels[p - 1].workers_available = effective_soft_limit - a.my_num_workers_allotted;
614  update_allotment( p - 1 );
615  }
616  else if ( p == my_global_bottom_priority ) {
617  if ( !pl.workers_requested ) {
618  while ( ++p <= my_global_top_priority && !my_priority_levels[p].workers_requested )
619  continue;
620  if ( p > my_global_top_priority )
621  reset_global_priority();
622  else
623  my_global_bottom_priority = p;
624  }
625  else
626  update_allotment( p );
627  }
628  else if ( p < my_global_bottom_priority ) {
629  int prev_bottom = my_global_bottom_priority;
630  my_global_bottom_priority = p;
631  update_allotment( prev_bottom );
632  }
633  else {
634  __TBB_ASSERT( my_global_bottom_priority < p && p < my_global_top_priority, NULL );
635  update_allotment( p );
636  }
637  __TBB_ASSERT( my_global_top_priority >= a.my_top_priority || a.my_num_workers_requested<=0, NULL );
639 #endif /* !__TBB_TASK_PRIORITY */
640  if ( delta > 0 ) {
641  // can't overflow soft_limit, but remember values request by arenas in
642  // my_total_demand to not prematurely release workers to RML
643  if ( my_num_workers_requested+delta > (int)effective_soft_limit)
644  delta = effective_soft_limit - my_num_workers_requested;
645  } else {
646  // the number of workers should not be decreased below my_total_demand
648  delta = min(my_total_demand, (int)effective_soft_limit) - my_num_workers_requested;
649  }
650  my_num_workers_requested += delta;
651  __TBB_ASSERT( my_num_workers_requested <= (int)effective_soft_limit, NULL );
652 
654  // Must be called outside of any locks
655  my_server->adjust_job_count_estimate( delta );
657 }

References __TBB_ASSERT, assert_market_valid(), GATHER_STATISTIC, tbb::internal::governor::local_scheduler_if_initialized(), tbb::spin_rw_mutex_v3::lock(), tbb::internal::min(), my_arenas_list_mutex, tbb::internal::arena_base::my_num_workers_allotted, my_num_workers_requested, tbb::internal::arena_base::my_num_workers_requested, my_num_workers_soft_limit, my_server, my_total_demand, p, theMarket, tbb::spin_rw_mutex_v3::unlock(), and update_allotment().

Referenced by tbb::internal::arena::advertise_new_work(), tbb::internal::arena::is_out_of_work(), tbb::internal::generic_scheduler::nested_arena_entry(), and tbb::internal::generic_scheduler::nested_arena_exit().
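
A standalone model (not TBB code; task priorities and the per-arena bookkeeping are omitted) of the clamping performed above: the market accumulates the total demand of all arenas, but never requests more workers from RML than the soft limit allows. The returned correction is what the real code passes to my_server->adjust_job_count_estimate():

#include <algorithm>

struct demand_model {
    int total_demand = 0;       // models my_total_demand
    int workers_requested = 0;  // models my_num_workers_requested
    int soft_limit = 4;         // models my_num_workers_soft_limit

    // Fold a demand change into the totals; return the correction for RML.
    int adjust(int delta) {
        total_demand += delta;
        if (delta > 0) {
            if (workers_requested + delta > soft_limit)
                delta = soft_limit - workers_requested;
        } else {
            delta = std::min(total_demand, soft_limit) - workers_requested;
        }
        workers_requested += delta;
        return delta;           // adjust_job_count_estimate(delta) in TBB
    }
};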


◆ app_parallelism_limit()

unsigned tbb::internal::market::app_parallelism_limit ( )
static

Reports active parallelism level according to user's settings.

Definition at line 513 of file tbb_main.cpp.

513  {
514  return allowed_parallelism_ctl.active_value_if_present();
515 }

References tbb::internal::allowed_parallelism_control::active_value_if_present(), and tbb::internal::allowed_parallelism_ctl.

Referenced by tbb::internal::calc_workers_soft_limit(), and global_market().


◆ arena_in_need() [1/2]

arena* tbb::internal::market::arena_in_need ( arena * )
inlineprivate

Returns next arena that needs more workers, or NULL.

Definition at line 221 of file market.h.

221  {
223  return NULL;
224  arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex, /*is_writer=*/false);
226  }

References tbb::internal::__TBB_load_with_acquire(), lock, my_arenas, my_arenas_list_mutex, my_next_arena, and my_total_demand.

Referenced by process().


◆ arena_in_need() [2/2]

arena * tbb::internal::market::arena_in_need ( arena_list_type &  arenas,
arena *  hint 
)
private

This method must be invoked under my_arenas_list_mutex.

Definition at line 373 of file market.cpp.

373  {
374  if ( arenas.empty() )
375  return NULL;
376  arena_list_type::iterator it = hint;
377  __TBB_ASSERT( it != arenas.end(), NULL );
378  do {
379  arena& a = *it;
380  if ( ++it == arenas.end() )
381  it = arenas.begin();
382  if( a.num_workers_active() < a.my_num_workers_allotted ) {
383  a.my_references += arena::ref_worker;
384  return &a;
385  }
386  } while ( it != hint );
387  return NULL;
388 }

References __TBB_ASSERT, tbb::internal::intrusive_list_base< List, T >::begin(), tbb::internal::intrusive_list_base< List, T >::empty(), tbb::internal::intrusive_list_base< List, T >::end(), tbb::internal::arena_base::my_num_workers_allotted, tbb::internal::arena_base::my_references, tbb::internal::arena::num_workers_active(), and tbb::internal::arena::ref_worker.
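
A standalone sketch (not TBB code) of the same round-robin scan: starting from a hint, walk the list circularly and return the first arena whose worker allotment is not yet filled, or nullptr if every arena is satisfied:

#include <list>

struct arena_model { int workers_active = 0; int workers_allotted = 0; };

arena_model* next_arena_in_need(std::list<arena_model>& arenas,
                                std::list<arena_model>::iterator hint) {
    if (arenas.empty()) return nullptr;
    auto it = hint;
    do {
        arena_model& a = *it;
        if (++it == arenas.end()) it = arenas.begin();   // wrap around
        if (a.workers_active < a.workers_allotted)
            return &a;                                   // still has room for a worker
    } while (it != hint);
    return nullptr;
}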


◆ assert_market_valid()

void tbb::internal::market::assert_market_valid ( ) const
inlineprivate

Definition at line 227 of file market.h.

227 {}

Referenced by adjust_demand(), and try_destroy_arena().


◆ cleanup()

void tbb::internal::market::cleanup ( job &  j)
private

Definition at line 681 of file market.cpp.

681  {
682  __TBB_ASSERT( theMarket != this, NULL );
683  generic_scheduler& s = static_cast<generic_scheduler&>(j);
684  generic_scheduler* mine = governor::local_scheduler_if_initialized();
685  __TBB_ASSERT( !mine || mine->is_worker(), NULL );
686  if( mine!=&s ) {
687  governor::assume_scheduler( &s );
688  generic_scheduler::cleanup_worker( &s, mine!=NULL );
689  governor::assume_scheduler( mine );
690  } else {
691  generic_scheduler::cleanup_worker( &s, true );
692  }
693 }

References __TBB_ASSERT, tbb::internal::governor::assume_scheduler(), tbb::internal::generic_scheduler::cleanup_worker(), tbb::internal::generic_scheduler::is_worker(), tbb::internal::governor::local_scheduler_if_initialized(), s, and theMarket.


◆ create_arena()

arena * tbb::internal::market::create_arena ( int  num_slots,
int  num_reserved_slots,
size_t  stack_size 
)
static

Creates an arena object.

If necessary, also creates global market instance, and boosts its ref count. Each call to create_arena() must be matched by the call to arena::free_arena().

Definition at line 307 of file market.cpp.

307  {
308  __TBB_ASSERT( num_slots > 0, NULL );
309  __TBB_ASSERT( num_reserved_slots <= num_slots, NULL );
310  // Add public market reference for master thread/task_arena (that adds an internal reference in exchange).
311  market &m = global_market( /*is_public=*/true, num_slots-num_reserved_slots, stack_size );
312 
313  arena& a = arena::allocate_arena( m, num_slots, num_reserved_slots );
314  // Add newly created arena into the existing market's list.
315  arenas_list_mutex_type::scoped_lock lock(m.my_arenas_list_mutex);
316  m.insert_arena_into_list(a);
317  return &a;
318 }

References __TBB_ASSERT, tbb::internal::arena::allocate_arena(), global_market(), insert_arena_into_list(), lock, and my_arenas_list_mutex.

Referenced by tbb::internal::governor::init_scheduler().


◆ create_one_job()

rml::job * tbb::internal::market::create_one_job ( )
private

Definition at line 699 of file market.cpp.

699  {
700  unsigned index = ++my_first_unused_worker_idx;
701  __TBB_ASSERT( index > 0, NULL );
702  ITT_THREAD_SET_NAME(_T("TBB Worker Thread"));
703  // index serves as a hint decreasing conflicts between workers when they migrate between arenas
704  generic_scheduler* s = generic_scheduler::create_worker( *this, index, /* genuine = */ true );
705 #if __TBB_TASK_GROUP_CONTEXT
706  __TBB_ASSERT( index <= my_num_workers_hard_limit, NULL );
707  __TBB_ASSERT( !my_workers[index - 1], NULL );
708  my_workers[index - 1] = s;
709 #endif /* __TBB_TASK_GROUP_CONTEXT */
710  return s;
711 }

References __TBB_ASSERT, _T, tbb::internal::generic_scheduler::create_worker(), ITT_THREAD_SET_NAME, my_first_unused_worker_idx, my_num_workers_hard_limit, and s.


◆ destroy()

void tbb::internal::market::destroy ( )
private

Destroys and deallocates market object created by market::create()

Definition at line 165 of file market.cpp.

165  {
166 #if __TBB_COUNT_TASK_NODES
167  if ( my_task_node_count )
168  runtime_warning( "Leaked %ld task objects\n", (long)my_task_node_count );
169 #endif /* __TBB_COUNT_TASK_NODES */
170  this->market::~market(); // qualified to suppress warning
171  NFS_Free( this );
172  __TBB_InitOnce::remove_ref();
173 }

References tbb::internal::NFS_Free(), tbb::internal::__TBB_InitOnce::remove_ref(), and tbb::internal::runtime_warning().

Referenced by acknowledge_close_connection().


◆ detach_arena()

void tbb::internal::market::detach_arena ( arena &  a)

Removes the arena from the market's list.

This method must be invoked under my_arenas_list_mutex.

Definition at line 321 of file market.cpp.

321  {
322  __TBB_ASSERT( theMarket == this, "Global market instance was destroyed prematurely?" );
323  __TBB_ASSERT( !a.my_slots[0].my_scheduler, NULL );
324  if (a.my_global_concurrency_mode)
325  disable_mandatory_concurrency_impl(&a);
326 
327  remove_arena_from_list(a);
328  if ( a.my_aba_epoch == my_arenas_aba_epoch )
329  ++my_arenas_aba_epoch;
330 }

References __TBB_ASSERT, tbb::internal::arena_base::my_aba_epoch, my_arenas_aba_epoch, tbb::internal::arena_slot_line1::my_scheduler, tbb::internal::arena::my_slots, remove_arena_from_list(), and theMarket.

Referenced by try_destroy_arena().


◆ global_market()

market & tbb::internal::market::global_market ( bool  is_public,
unsigned  max_num_workers = 0,
size_t  stack_size = 0 
)
staticprivate

Factory method creating new market object.

Definition at line 96 of file market.cpp.

96  {
97  global_market_mutex_type::scoped_lock lock( theMarketMutex );
98  market *m = theMarket;
99  if( m ) {
100  ++m->my_ref_count;
101  const unsigned old_public_count = is_public? m->my_public_ref_count++ : /*any non-zero value*/1;
102  lock.release();
103  if( old_public_count==0 )
104  set_active_num_workers( calc_workers_soft_limit(workers_requested, m->my_num_workers_hard_limit) );
105 
106  // do not warn if default number of workers is requested
107  if( workers_requested != governor::default_num_threads()-1 ) {
108  __TBB_ASSERT( skip_soft_limit_warning > workers_requested,
109  "skip_soft_limit_warning must be larger than any valid workers_requested" );
110  unsigned soft_limit_to_report = m->my_workers_soft_limit_to_report;
111  if( soft_limit_to_report < workers_requested ) {
112  runtime_warning( "The number of workers is currently limited to %u. "
113  "The request for %u workers is ignored. Further requests for more workers "
114  "will be silently ignored until the limit changes.\n",
115  soft_limit_to_report, workers_requested );
116  // The race is possible when multiple threads report warnings.
117  // We are OK with that, as there are just multiple warnings.
118  internal::as_atomic(m->my_workers_soft_limit_to_report).
119  compare_and_swap(skip_soft_limit_warning, soft_limit_to_report);
120  }
121 
122  }
123  if( m->my_stack_size < stack_size )
124  runtime_warning( "Thread stack size has been already set to %u. "
125  "The request for larger stack (%u) cannot be satisfied.\n",
126  m->my_stack_size, stack_size );
127  }
128  else {
129  // TODO: A lot is done under theMarketMutex locked. Can anything be moved out?
130  if( stack_size == 0 )
131  stack_size = global_control::active_value(global_control::thread_stack_size);
132  // Expecting that 4P is suitable for most applications.
133  // Limit to 2P for large thread number.
134  // TODO: ask RML for max concurrency and possibly correct hard_limit
135  const unsigned factor = governor::default_num_threads()<=128? 4 : 2;
136  // The requested number of threads is intentionally not considered in
137  // computation of the hard limit, in order to separate responsibilities
138  // and avoid complicated interactions between global_control and task_scheduler_init.
139  // The market guarantees that at least 256 threads might be created.
140  const unsigned workers_hard_limit = max(max(factor*governor::default_num_threads(), 256u), app_parallelism_limit());
141  const unsigned workers_soft_limit = calc_workers_soft_limit(workers_requested, workers_hard_limit);
142  // Create the global market instance
143  size_t size = sizeof(market);
144 #if __TBB_TASK_GROUP_CONTEXT
145  __TBB_ASSERT( __TBB_offsetof(market, my_workers) + sizeof(generic_scheduler*) == sizeof(market),
146  "my_workers must be the last data field of the market class");
147  size += sizeof(generic_scheduler*) * (workers_hard_limit - 1);
148 #endif /* __TBB_TASK_GROUP_CONTEXT */
149  __TBB_InitOnce::add_ref();
150  void* storage = NFS_Allocate(1, size, NULL);
151  memset( storage, 0, size );
152  // Initialize and publish global market
153  m = new (storage) market( workers_soft_limit, workers_hard_limit, stack_size );
154  if( is_public )
155  m->my_public_ref_count = 1;
156  theMarket = m;
157  // This check relies on the fact that for shared RML default_concurrency==max_concurrency
158  if ( !governor::UsePrivateRML && m->my_server->default_concurrency() < workers_soft_limit )
159  runtime_warning( "RML might limit the number of workers to %u while %u is requested.\n"
160  , m->my_server->default_concurrency(), workers_soft_limit );
161  }
162  return *m;
163 }

References __TBB_ASSERT, __TBB_offsetof, tbb::interface9::global_control::active_value(), tbb::internal::__TBB_InitOnce::add_ref(), app_parallelism_limit(), tbb::internal::as_atomic(), tbb::internal::calc_workers_soft_limit(), tbb::internal::governor::default_num_threads(), lock, market(), tbb::internal::max(), my_num_workers_hard_limit, my_public_ref_count, my_ref_count, my_server, my_stack_size, my_workers_soft_limit_to_report, tbb::internal::NFS_Allocate(), tbb::internal::runtime_warning(), set_active_num_workers(), size, skip_soft_limit_warning, theMarket, theMarketMutex, tbb::interface9::global_control::thread_stack_size, and tbb::internal::governor::UsePrivateRML.

Referenced by create_arena(), and tbb::internal::generic_scheduler::create_master().
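
A simplified model (not TBB code) of the publication pattern used here: under a global mutex, an existing instance is reused and its reference counts are bumped; otherwise a new instance is constructed and published through the static pointer, once per process lifetime:

#include <mutex>

struct market_model {
    unsigned my_ref_count = 1;         // internal references
    unsigned my_public_ref_count = 0;  // references held by masters
};

static market_model* the_market_model = nullptr;  // models theMarket
static std::mutex the_market_mutex_model;         // models theMarketMutex

market_model& global_market_model(bool is_public) {
    std::lock_guard<std::mutex> lock(the_market_mutex_model);
    if (market_model* m = the_market_model) {
        ++m->my_ref_count;                         // reuse the existing instance
        if (is_public) ++m->my_public_ref_count;
        return *m;
    }
    market_model* m = new market_model();          // TBB uses NFS_Allocate + placement new
    if (is_public) m->my_public_ref_count = 1;
    the_market_model = m;                          // publish the singleton
    return *m;
}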


◆ insert_arena_into_list()

void tbb::internal::market::insert_arena_into_list ( arena &  a)
private

Definition at line 29 of file market.cpp.

29  {
30 #if __TBB_TASK_PRIORITY
31  arena_list_type &arenas = my_priority_levels[a.my_top_priority].arenas;
32  arena *&next = my_priority_levels[a.my_top_priority].next_arena;
33 #else /* !__TBB_TASK_PRIORITY */
34  arena_list_type &arenas = my_arenas;
35  arena *&next = my_next_arena;
36 #endif /* !__TBB_TASK_PRIORITY */
37  arenas.push_front( a );
38  if ( arenas.size() == 1 )
39  next = &*arenas.begin();
40 }

References tbb::internal::intrusive_list_base< List, T >::begin(), my_arenas, my_next_arena, tbb::internal::intrusive_list_base< List, T >::push_front(), and tbb::internal::intrusive_list_base< List, T >::size().

Referenced by create_arena().


◆ is_arena_in_list()

bool tbb::internal::market::is_arena_in_list ( arena_list_type &  arenas,
arena *  a 
)
private

This method must be invoked under my_arenas_list_mutex.

Definition at line 423 of file market.cpp.

423  {
424  if ( a ) {
425  for ( arena_list_type::iterator it = arenas.begin(); it != arenas.end(); ++it )
426  if ( a == &*it )
427  return true;
428  }
429  return false;
430 }

References tbb::internal::intrusive_list_base< List, T >::begin(), and tbb::internal::intrusive_list_base< List, T >::end().


◆ max_job_count()

unsigned tbb::internal::market::max_job_count ( ) const
inlineprivate

Definition at line 249 of file market.h.

249 { return my_num_workers_hard_limit; }

References my_num_workers_hard_limit.

◆ max_num_workers()

static unsigned tbb::internal::market::max_num_workers ( )
inlinestatic

Definition at line 363 of file market.h.

363  {
364  global_market_mutex_type::scoped_lock lock( theMarketMutex );
365  return theMarket? theMarket->my_num_workers_hard_limit : 0;
366  }

References lock, my_num_workers_hard_limit, theMarket, and theMarketMutex.

Referenced by tbb::internal::allowed_parallelism_control::active_value().


◆ min_stack_size()

size_t tbb::internal::market::min_stack_size ( ) const
inlineprivate

Definition at line 251 of file market.h.

251 { return worker_stack_size(); }

References worker_stack_size().


◆ must_join_workers()

bool tbb::internal::market::must_join_workers ( ) const
inline

Used when RML asks for the join mode during worker termination.

Definition at line 297 of file market.h.

297 { return my_join_workers; }

References my_join_workers.

◆ policy()

policy_type tbb::internal::market::policy ( ) const
inlineprivate

Definition at line 253 of file market.h.

253 { return throughput; }

◆ process()

void tbb::internal::market::process ( job &  j)
private

Definition at line 659 of file market.cpp.

659  {
660  generic_scheduler& s = static_cast<generic_scheduler&>(j);
661  // s.my_arena can be dead. Don't access it until arena_in_need is called
662  arena *a = s.my_arena;
663  __TBB_ASSERT( governor::is_set(&s), NULL );
664 
665  for (int i = 0; i < 2; ++i) {
666  while ( (a = arena_in_need(a)) ) {
667  a->process(s);
668  a = NULL; // to avoid double checks in arena_in_need(arena*) for the same priority level
669  }
670  // Workers leave market because there is no arena in need. It can happen earlier than
671  // adjust_job_count_estimate() decreases my_slack and RML can put this thread to sleep.
672  // It might result in a busy-loop checking for my_slack<0 and calling this method instantly.
673  // the yield refines this spinning.
674  if ( !i )
675  __TBB_Yield();
676  }
677 
678  GATHER_STATISTIC( ++s.my_counters.market_roundtrips );
679 }

References __TBB_ASSERT, __TBB_Yield, arena_in_need(), GATHER_STATISTIC, tbb::internal::governor::is_set(), tbb::internal::arena::process(), and s.


◆ release()

bool tbb::internal::market::release ( bool  is_public,
bool  blocking_terminate 
)

Decrements the market's refcount and destroys it when the count reaches zero.

Definition at line 175 of file market.cpp.

175  {
176  __TBB_ASSERT( theMarket == this, "Global market instance was destroyed prematurely?" );
177  bool do_release = false;
178  {
179  global_market_mutex_type::scoped_lock lock( theMarketMutex );
180  if ( blocking_terminate ) {
181  __TBB_ASSERT( is_public, "Only an object with a public reference can request the blocking terminate" );
182  while ( my_public_ref_count == 1 && my_ref_count > 1 ) {
183  lock.release();
184  // To guarantee that request_close_connection() is called by the last master, we need to wait till all
185  // references are released. Re-read my_public_ref_count to limit waiting if new masters are created.
186  // Theoretically, new private references to the market can be added during waiting making it potentially
187  // endless.
188  // TODO: revise why the weak scheduler needs market's pointer and try to remove this wait.
189  // Note that the market should know about its schedulers for cancellation/exception/priority propagation,
190  // see e.g. task_group_context::cancel_group_execution()
192  __TBB_Yield();
193  lock.acquire( theMarketMutex );
194  }
195  }
196  if ( is_public ) {
197  __TBB_ASSERT( theMarket == this, "Global market instance was destroyed prematurely?" );
200  }
201  if ( --my_ref_count == 0 ) {
203  do_release = true;
204  theMarket = NULL;
205  }
206  }
207  if( do_release ) {
208  __TBB_ASSERT( !__TBB_load_with_acquire(my_public_ref_count), "No public references remain if we remove the market." );
209  // inform RML that blocking termination is required
210  my_join_workers = blocking_terminate;
211  my_server->request_close_connection();
212  return blocking_terminate;
213  }
214  return false;
215 }

References __TBB_ASSERT, tbb::internal::__TBB_load_with_acquire(), __TBB_Yield, lock, my_join_workers, my_public_ref_count, my_ref_count, my_server, theMarket, and theMarketMutex.

Referenced by tbb::internal::generic_scheduler::cleanup_master(), tbb::internal::arena::free_arena(), and set_active_num_workers().
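
A simplified model (not TBB code) of the two-level reference counting shown above: each attached master holds a public reference, the internal count controls the object lifetime, and only when it drops to zero is the RML connection closed, with the return value telling the caller whether workers must be joined:

#include <cassert>

struct market_refcount_model {
    unsigned my_ref_count = 1;
    unsigned my_public_ref_count = 1;
    bool my_join_workers = false;

    bool release(bool is_public, bool blocking_terminate) {
        if (is_public) {
            assert(my_public_ref_count > 0);
            --my_public_ref_count;
        }
        if (--my_ref_count == 0) {
            // Last reference: remember the join mode (see must_join_workers())
            // and, in the real code, ask the RML server to close the connection.
            my_join_workers = blocking_terminate;
            return blocking_terminate;   // true => caller waits for the workers
        }
        return false;
    }
};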


◆ remove_arena_from_list()

void tbb::internal::market::remove_arena_from_list ( arena &  a)
private

Definition at line 42 of file market.cpp.

42  {
43 #if __TBB_TASK_PRIORITY
44  arena_list_type &arenas = my_priority_levels[a.my_top_priority].arenas;
45  arena *&next = my_priority_levels[a.my_top_priority].next_arena;
46 #else /* !__TBB_TASK_PRIORITY */
47  arena_list_type &arenas = my_arenas;
48  arena *&next = my_next_arena;
49 #endif /* !__TBB_TASK_PRIORITY */
50  arena_list_type::iterator it = next;
51  __TBB_ASSERT( it != arenas.end(), NULL );
52  if ( next == &a ) {
53  if ( ++it == arenas.end() && arenas.size() > 1 )
54  it = arenas.begin();
55  next = &*it;
56  }
57  arenas.remove( a );
58 }

References __TBB_ASSERT, tbb::internal::intrusive_list_base< List, T >::begin(), tbb::internal::intrusive_list_base< List, T >::end(), my_arenas, my_next_arena, tbb::internal::intrusive_list_base< List, T >::remove(), and tbb::internal::intrusive_list_base< List, T >::size().

Referenced by detach_arena().


◆ set_active_num_workers()

void tbb::internal::market::set_active_num_workers ( unsigned  w)
static

Set number of active workers.

Definition at line 235 of file market.cpp.

235  {
236  market *m;
237 
238  {
239  global_market_mutex_type::scoped_lock lock( theMarketMutex );
240  if ( !theMarket )
241  return; // actual value will be used at market creation
242  m = theMarket;
243  if (m->my_num_workers_soft_limit == soft_limit)
244  return;
245  ++m->my_ref_count;
246  }
247  // have my_ref_count for market, use it safely
248 
249  int delta = 0;
250  {
251  arenas_list_mutex_type::scoped_lock lock( m->my_arenas_list_mutex );
252  __TBB_ASSERT(soft_limit <= m->my_num_workers_hard_limit, NULL);
253 
254 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
255 #if __TBB_TASK_PRIORITY
256 #define FOR_EACH_PRIORITY_LEVEL_BEGIN { \
257  for (int p = m->my_global_top_priority; p >= m->my_global_bottom_priority; --p) { \
258  priority_level_info& pl = m->my_priority_levels[p]; \
259  arena_list_type& arenas = pl.arenas;
260 #else
261 #define FOR_EACH_PRIORITY_LEVEL_BEGIN { { \
262  const int p = 0; \
263  arena_list_type& arenas = m->my_arenas;
264 #endif
265 #define FOR_EACH_PRIORITY_LEVEL_END } }
266 
267  if (m->my_num_workers_soft_limit == 0 && m->my_mandatory_num_requested > 0) {
268  FOR_EACH_PRIORITY_LEVEL_BEGIN
269  for (arena_list_type::iterator it = arenas.begin(); it != arenas.end(); ++it)
270  if (it->my_global_concurrency_mode)
271  m->disable_mandatory_concurrency_impl(&*it);
272  FOR_EACH_PRIORITY_LEVEL_END
273  }
274  __TBB_ASSERT(m->my_mandatory_num_requested == 0, NULL);
275 #endif
276 
277  as_atomic(m->my_num_workers_soft_limit) = soft_limit;
278  // report only once after new soft limit value is set
279  m->my_workers_soft_limit_to_report = soft_limit;
280 
281 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
282  if (m->my_num_workers_soft_limit == 0) {
283  FOR_EACH_PRIORITY_LEVEL_BEGIN
284  for (arena_list_type::iterator it = arenas.begin(); it != arenas.end(); ++it) {
285  if (!it->my_task_stream.empty(p))
286  m->enable_mandatory_concurrency_impl(&*it);
287  }
288  FOR_EACH_PRIORITY_LEVEL_END
289  }
290 #undef FOR_EACH_PRIORITY_LEVEL_BEGIN
291 #undef FOR_EACH_PRIORITY_LEVEL_END
292 #endif
293 
294  delta = m->update_workers_request();
295  }
296  // adjust_job_count_estimate must be called outside of any locks
297  if( delta!=0 )
298  m->my_server->adjust_job_count_estimate( delta );
299  // release internal market reference to match ++m->my_ref_count above
300  m->release( /*is_public=*/false, /*blocking_terminate=*/false );
301 }

References __TBB_ASSERT, tbb::internal::as_atomic(), lock, my_arenas_list_mutex, my_num_workers_hard_limit, my_num_workers_soft_limit, my_ref_count, my_server, my_workers_soft_limit_to_report, p, release(), theMarket, and theMarketMutex.

Referenced by tbb::internal::allowed_parallelism_control::apply_active(), and global_market().
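
User code does not call this method directly; as the caller list above shows, the run-time limit arrives through the public global_control API via allowed_parallelism_control::apply_active(). A hedged example of the supported path:

#include <tbb/global_control.h>
#include <tbb/parallel_for.h>

int main() {
    // While gc is alive, at most 4 threads (1 master + up to 3 workers) run TBB
    // work; the new soft limit is forwarded to market::set_active_num_workers().
    tbb::global_control gc(tbb::global_control::max_allowed_parallelism, 4);
    tbb::parallel_for(0, 1000, [](int) { /* user work */ });
    return 0;
}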


◆ try_destroy_arena()

void tbb::internal::market::try_destroy_arena ( arena *  a,
uintptr_t  aba_epoch 
)

Removes the arena from the market's list.

Definition at line 332 of file market.cpp.

332  {
333  bool locked = true;
334  __TBB_ASSERT( a, NULL );
335  // we hold reference to the market, so it cannot be destroyed at any moment here
336  __TBB_ASSERT( this == theMarket, NULL );
337  __TBB_ASSERT( my_ref_count!=0, NULL );
340 #if __TBB_TASK_PRIORITY
341  // scan all priority levels, not only in [my_global_bottom_priority;my_global_top_priority]
342  // range, because arena to be destroyed can have no outstanding request for workers
343  for ( int p = num_priority_levels-1; p >= 0; --p ) {
344  priority_level_info &pl = my_priority_levels[p];
345  arena_list_type &my_arenas = pl.arenas;
346 #endif /* __TBB_TASK_PRIORITY */
347  arena_list_type::iterator it = my_arenas.begin();
348  for ( ; it != my_arenas.end(); ++it ) {
349  if ( a == &*it ) {
350  if ( it->my_aba_epoch == aba_epoch ) {
351  // Arena is alive
352  if ( !a->my_num_workers_requested && !a->my_references ) {
353  __TBB_ASSERT( !a->my_num_workers_allotted && (a->my_pool_state == arena::SNAPSHOT_EMPTY || !a->my_max_num_workers), "Inconsistent arena state" );
354  // Arena is abandoned. Destroy it.
355  detach_arena( *a );
357  locked = false;
358  a->free_arena();
359  }
360  }
361  if (locked)
363  return;
364  }
365  }
366 #if __TBB_TASK_PRIORITY
367  }
368 #endif /* __TBB_TASK_PRIORITY */
370 }

References __TBB_ASSERT, assert_market_valid(), tbb::internal::intrusive_list_base< List, T >::begin(), detach_arena(), tbb::internal::intrusive_list_base< List, T >::end(), tbb::internal::arena::free_arena(), tbb::spin_rw_mutex_v3::lock(), my_arenas, my_arenas_list_mutex, tbb::internal::arena_base::my_max_num_workers, tbb::internal::arena_base::my_num_workers_allotted, tbb::internal::arena_base::my_num_workers_requested, tbb::internal::arena_base::my_pool_state, my_ref_count, tbb::internal::arena_base::my_references, tbb::internal::num_priority_levels, p, tbb::internal::arena::SNAPSHOT_EMPTY, theMarket, and tbb::spin_rw_mutex_v3::unlock().

Referenced by tbb::internal::arena::on_thread_leaving().


◆ update_allotment() [1/2]

void tbb::internal::market::update_allotment ( )
inlineprivate

Recalculates the number of workers assigned to each arena in the list.

The actual number of workers servicing a particular arena may temporarily deviate from the calculated value.

Definition at line 214 of file market.h.

References my_arenas, my_num_workers_soft_limit, and my_total_demand.

Referenced by adjust_demand().


◆ update_allotment() [2/2]

int tbb::internal::market::update_allotment ( arena_list_type &  arenas,
int  total_demand,
int  max_workers 
)
private

Definition at line 390 of file market.cpp.

390  {
391  __TBB_ASSERT( workers_demand > 0, NULL );
392  max_workers = min(workers_demand, max_workers);
393  int assigned = 0;
394  int carry = 0;
395  for (arena_list_type::iterator it = arenas.begin(); it != arenas.end(); ++it) {
396  arena& a = *it;
397  if (a.my_num_workers_requested <= 0) {
398  __TBB_ASSERT(!a.my_num_workers_allotted, NULL);
399  continue;
400  }
401  int allotted = 0;
402 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
403  if (my_num_workers_soft_limit == 0) {
404  __TBB_ASSERT(max_workers == 0 || max_workers == 1, NULL);
405  allotted = a.my_global_concurrency_mode && assigned < max_workers ? 1 : 0;
406  } else
407 #endif
408  {
409  int tmp = a.my_num_workers_requested * max_workers + carry;
410  allotted = tmp / workers_demand;
411  carry = tmp % workers_demand;
412  // a.my_num_workers_requested may temporarily exceed a.my_max_num_workers
413  allotted = min(allotted, (int)a.my_max_num_workers);
414  }
415  a.my_num_workers_allotted = allotted;
416  assigned += allotted;
417  }
418  __TBB_ASSERT( 0 <= assigned && assigned <= max_workers, NULL );
419  return assigned;
420 }

References __TBB_ASSERT, tbb::internal::intrusive_list_base< List, T >::begin(), tbb::internal::intrusive_list_base< List, T >::end(), tbb::internal::min(), tbb::internal::arena_base::my_max_num_workers, tbb::internal::arena_base::my_num_workers_allotted, tbb::internal::arena_base::my_num_workers_requested, and my_num_workers_soft_limit.
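
A standalone sketch (not TBB code; the per-arena my_max_num_workers clamp and mandatory-concurrency branch are omitted) of the proportional split computed above: each arena gets a share of max_workers proportional to its request, and the integer-division remainder is carried to the next arena so no worker is lost to rounding:

#include <vector>
#include <algorithm>
#include <cassert>

// allotted must be pre-sized to requested.size(); total_demand must be > 0.
int distribute_workers(const std::vector<int>& requested, std::vector<int>& allotted,
                       int total_demand, int max_workers) {
    max_workers = std::min(total_demand, max_workers);
    int assigned = 0, carry = 0;
    for (std::size_t i = 0; i < requested.size(); ++i) {
        if (requested[i] <= 0) { allotted[i] = 0; continue; }
        int tmp = requested[i] * max_workers + carry;
        allotted[i] = tmp / total_demand;   // proportional share
        carry = tmp % total_demand;         // remainder carried to the next arena
        assigned += allotted[i];
    }
    assert(assigned >= 0 && assigned <= max_workers);
    return assigned;
}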


◆ version()

version_type tbb::internal::market::version ( ) const
inlineprivate

Definition at line 247 of file market.h.

247 { return 0; }

◆ worker_stack_size()

size_t tbb::internal::market::worker_stack_size ( ) const
inline

Returns the requested stack size of worker threads.

Definition at line 300 of file market.h.

300 { return my_stack_size; }

References my_stack_size.

Referenced by tbb::internal::generic_scheduler::init_stack_info(), and min_stack_size().
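
The value is fixed when the market is created (see global_market() above). A hedged example of how user code can influence it through the public API before the first scheduler is initialized, using the global_control::thread_stack_size parameter listed among global_market()'s references:

#include <tbb/global_control.h>
#include <tbb/parallel_for.h>

int main() {
    // Request 8 MB worker stacks before any TBB scheduler exists; the value
    // reaches market::global_market() and is then reported by worker_stack_size().
    tbb::global_control stack(tbb::global_control::thread_stack_size, 8u * 1024 * 1024);
    tbb::parallel_for(0, 100, [](int) { /* user work */ });
    return 0;
}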


Friends And Related Function Documentation

◆ arena

friend class arena
friend

Definition at line 47 of file market.h.

◆ custom_scheduler

template<typename SchedulerTraits >
friend class custom_scheduler
friend

Definition at line 49 of file market.h.

◆ generic_scheduler

friend class generic_scheduler
friend

Definition at line 46 of file market.h.

◆ ITT_DoUnsafeOneTimeInitialization

void ITT_DoUnsafeOneTimeInitialization ( )
friend

◆ tbb::interface7::internal::task_arena_base

friend class tbb::interface7::internal::task_arena_base
friend

Definition at line 48 of file market.h.

◆ tbb::task_group_context

friend class tbb::task_group_context
friend

Definition at line 50 of file market.h.

Member Data Documentation

◆ my_arenas

arena_list_type tbb::internal::market::my_arenas
private

List of registered arenas.

Definition at line 135 of file market.h.

Referenced by arena_in_need(), insert_arena_into_list(), remove_arena_from_list(), try_destroy_arena(), and update_allotment().

◆ my_arenas_aba_epoch

uintptr_t tbb::internal::market::my_arenas_aba_epoch
private

ABA prevention marker to assign to newly created arenas.

Definition at line 143 of file market.h.

Referenced by tbb::internal::arena::arena(), and detach_arena().

◆ my_arenas_list_mutex

arenas_list_mutex_type tbb::internal::market::my_arenas_list_mutex
private

Definition at line 67 of file market.h.
◆ my_first_unused_worker_idx

atomic<unsigned> tbb::internal::market::my_first_unused_worker_idx
private

First unused worker index.

Used to assign indices to new workers coming from RML; it also delimits the busy part of the my_workers array.

Definition at line 86 of file market.h.

Referenced by create_one_job().

◆ my_join_workers

bool tbb::internal::market::my_join_workers
private

Shutdown mode.

Definition at line 155 of file market.h.

Referenced by must_join_workers(), and release().

◆ my_next_arena

arena* tbb::internal::market::my_next_arena
private

The first arena to be checked when an idle worker looks for an arena to enter.

The check happens in round-robin fashion.

Definition at line 139 of file market.h.

Referenced by arena_in_need(), insert_arena_into_list(), and remove_arena_from_list().

◆ my_num_workers_hard_limit

unsigned tbb::internal::market::my_num_workers_hard_limit
private

Maximal number of workers allowed for use by the underlying resource manager.

It can't be changed after market creation.

Definition at line 74 of file market.h.

Referenced by create_one_job(), global_market(), max_job_count(), max_num_workers(), and set_active_num_workers().

◆ my_num_workers_requested

int tbb::internal::market::my_num_workers_requested
private

Number of workers currently requested from RML.

Definition at line 81 of file market.h.

Referenced by adjust_demand().

◆ my_num_workers_soft_limit

unsigned tbb::internal::market::my_num_workers_soft_limit
private

Current application-imposed limit on the number of workers (see set_active_num_workers())

It can't be more than my_num_workers_hard_limit.

Definition at line 78 of file market.h.

Referenced by adjust_demand(), tbb::internal::arena::advertise_new_work(), market(), tbb::internal::arena::on_thread_leaving(), set_active_num_workers(), and update_allotment().

◆ my_public_ref_count

unsigned tbb::internal::market::my_public_ref_count
private

Count of master threads attached.

Definition at line 149 of file market.h.

Referenced by global_market(), and release().

◆ my_ref_count

unsigned tbb::internal::market::my_ref_count
private

Reference count controlling market object lifetime.

Definition at line 146 of file market.h.

Referenced by global_market(), release(), set_active_num_workers(), and try_destroy_arena().

◆ my_server

rml::tbb_server* tbb::internal::market::my_server
private

Pointer to the RML server object that services this TBB instance.

Definition at line 70 of file market.h.

Referenced by adjust_demand(), global_market(), market(), release(), and set_active_num_workers().

◆ my_stack_size

size_t tbb::internal::market::my_stack_size
private

Stack size of worker threads.

Definition at line 152 of file market.h.

Referenced by global_market(), and worker_stack_size().

◆ my_total_demand

int tbb::internal::market::my_total_demand
private

Number of workers that were requested by all arenas.

Definition at line 89 of file market.h.

Referenced by adjust_demand(), arena_in_need(), and update_allotment().

◆ my_workers_soft_limit_to_report

unsigned tbb::internal::market::my_workers_soft_limit_to_report
private

Either workers soft limit to be reported via runtime_warning() or skip_soft_limit_warning.

Definition at line 161 of file market.h.

Referenced by global_market(), and set_active_num_workers().

◆ skip_soft_limit_warning

const unsigned tbb::internal::market::skip_soft_limit_warning = ~0U
staticprivate

The value indicating that the soft limit warning is unnecessary.

Definition at line 158 of file market.h.

Referenced by global_market().

◆ theMarket

market * tbb::internal::market::theMarket
staticprivate

Currently active global market.

Definition at line 58 of file market.h.

Referenced by adjust_demand(), cleanup(), detach_arena(), global_market(), max_num_workers(), release(), set_active_num_workers(), and try_destroy_arena().

◆ theMarketMutex

market::global_market_mutex_type tbb::internal::market::theMarketMutex
staticprivate

Mutex guarding creation/destruction of theMarket, insertions/deletions in my_arenas, and cancellation propagation.

Definition at line 63 of file market.h.

Referenced by global_market(), max_num_workers(), release(), and set_active_num_workers().


The documentation for this class was generated from the following files:
market.h
market.cpp
tbb_main.cpp

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.