Intel(R) Threading Building Blocks Doxygen Documentation
version 4.2.3
#define _SCL_SECURE_NO_WARNINGS

#if defined(_MSC_VER) && defined(_Wp64)
    // Workaround for overzealous compiler warnings in /Wp64 mode.
    #pragma warning (disable: 4267)
#endif
template<typename argument_type>
static void publish_segment( segment_t& s, argument_type rhs );

// ...

__TBB_ASSERT( new_segment_table,
    "NFS_Allocate should throw an exception if it cannot allocate the requested storage, not return a null pointer" );
static size_type enable_segment( concurrent_vector_base_v3& v, size_type k, size_type element_size,
                                 bool mark_as_not_used_on_failure ) {

    // RAII guard: unless dismiss()ed after successful publication, the destructor
    // publishes a failure marker into the segment slot so waiting threads do not hang.
    struct segment_scope_guard : no_copy {
        segment_t* my_segment_ptr;
        bool my_mark_as_not_used;
        segment_scope_guard( segment_t& segment, bool mark_as_not_used )
            : my_segment_ptr(&segment), my_mark_as_not_used(mark_as_not_used) {}
        void dismiss() { my_segment_ptr = 0; }
        ~segment_scope_guard() {
            if( my_segment_ptr ) {
                if( !my_mark_as_not_used ) {
                    // ... publish an "allocation failed" marker
                } else {
                    // ... publish a "segment not used" marker
                }
            }
        }
    };

    segment_t* s = v.my_segment;
    size_type size_of_enabled_segment = segment_size( k );
    size_type size_to_allocate = size_of_enabled_segment;
    if( !k ) { // the first block merges several segments; its logical size stays 2
        // ...
        size_of_enabled_segment = 2;
        // ...
    }
    // ...
    if( k && k < v.my_first_block ) { // no allocation needed: alias into the first block
        // ... wait until the first block's storage is published
        segment_scope_guard k_segment_guard( s[k], false );
        // ... verify that the first block was actually allocated
        k_segment_guard.dismiss();
        publish_segment( s[k], /* pointer into the first block's storage */ );
    } else {
        segment_scope_guard k_segment_guard( s[k], mark_as_not_used_on_failure );
        publish_segment( s[k], allocate_segment( v, size_to_allocate ) );
        k_segment_guard.dismiss();
    }
    return size_of_enabled_segment;
}
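The segment_scope_guard above follows the classic scope-guard idiom: unless dismiss() is called after the slot has been published successfully, the destructor marks the slot as failed so waiting threads are released instead of spinning forever. A minimal self-contained sketch of the same idiom (Slot, mark_failed and allocate_payload are illustrative stand-ins, not TBB names):

#include <new>

struct Slot { void* ptr; bool failed; };                 // illustrative stand-in for segment_t

inline void mark_failed( Slot& s ) { s.failed = true; }  // stand-in for publishing a failure marker

class slot_scope_guard {
    Slot* my_slot;                                       // null once dismissed
    slot_scope_guard( const slot_scope_guard& );         // not copyable (declared, never defined)
public:
    explicit slot_scope_guard( Slot& s ) : my_slot(&s) {}
    void dismiss() { my_slot = 0; }                      // call only after successful publication
    ~slot_scope_guard() { if( my_slot ) mark_failed(*my_slot); }
};

void* allocate_payload() { return ::operator new(64); }  // may throw std::bad_alloc

void publish_slot( Slot& s ) {
    slot_scope_guard guard(s);  // if allocate_payload() throws, the destructor marks the slot failed
    s.ptr = allocate_payload();
    guard.dismiss();            // success: the destructor becomes a no-op
}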
// ... publish or alias all segments in the range [k_start, k_end]
get_segment_value( k_start++, true ); // wait until the first segment is published
if( k_start < first_block ) {
    // ... segments below first_block alias into the first block's single allocation
    if( /* ... */ )
        for( ; k_start < first_block && k_start <= k_end; ++k_start )
            /* ... */;
    else
        for( ; k_start < first_block && k_start <= k_end; ++k_start )
            publish_segment( table[k_start], static_cast<void*>(
                /* first block base + segment_base(k_start)*element_size */ ) );
}
// ...
for( ; k_start <= k_end; ++k_start )
    /* ... enable the remaining segments ... */;
while( sz <= finish ) {
    // ...
    std::memset( array.pointer<char>() + element_size*start, 0,
                 ((sz < finish ? sz : finish) - start) * element_size );
    // ...
}
if( m > n-b ) m = n-b;
// ...
if( m > n-b ) m = n-b;
// ...
if( dst_initialized_size > b ) {
    a = dst_initialized_size - b;
    // ...
}
// ...
__TBB_ASSERT( src.my_early_size == n,
    "detected use of concurrent_vector::operator= with right side that was concurrently modified" );
// ...
return (void*)( s.load<relaxed>().pointer<char>() + element_size*j_begin );
for( i = 0; i <= k_old; ++i ) {
    // ...
}
// ...
internal_grow( result, result+delta, element_size, init, src );
// ...
__TBB_ASSERT( start < finish, "start must be less than finish" );
// ...
for( ; k_end > k_start && k_end >= range.first_block; --k_end )
    /* ... */;
// ...
for( ; k_start <= k_end; ++k_start )
    /* ... */;
// ...
return j < i ? i : j+1;
if( k_stop < first_block )
    k = k_stop;
// ...
if( k_stop == k_end && k == first_block )
    return NULL; // nothing needs to move
// ...
if( k != first_block && k ) { // merge the leading segments into one first block
    // ... allocate the merged block 'seg', then copy items into it
    for( segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size ) {
        void* s = static_cast<void*>(
            static_cast<char*>(seg) + segment_base(i)*element_size );
        if( j + my_segment_size >= my_size ) my_segment_size = my_size - j;
        copy( s, segment_table[i].load<relaxed>().pointer<void>(), my_segment_size );
        // ... advance i and my_segment_size to the next segment
    }
    // commit: save the old pointers, then alias the table into the new block
    std::copy( segment_table, segment_table + k, old.table );
    for( segment_index_t i = 0; i < k; ++i )
        segment_table[i].store<relaxed>( static_cast<void*>(
            static_cast<char*>(seg) + segment_base(i)*element_size ) );
    // destroy the original copies
    for( segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size ) {
        if( j + my_segment_size >= my_size ) my_segment_size = my_size - j;
        // ...
    }
}
// free unnecessary segments allocated by reserve() calls
if( k_stop < k_end ) {
    // ...
    std::copy( segment_table + k_stop, segment_table + k_end, old.table + k_stop );
    std::fill_n( segment_table + k_stop, (k_end - k_stop), segment_t() );
}
// ...

// from internal_swap(): nothing to do when both vectors are empty
if( !my_sz && !v_sz )
    return;
segment_value_t get_segment_value(size_type index, bool wait)
static void extend_segment_table(concurrent_vector_base_v3 &v, size_type start)
static void * allocate_segment(concurrent_vector_base_v3 &v, size_type n)
void(__TBB_EXPORTED_FUNC * internal_array_op1)(void *begin, size_type n)
An operation on an n-element array starting at begin.
void(__TBB_EXPORTED_FUNC * internal_array_op2)(void *dst, const void *src, size_type n)
An operation on an n-element destination array and an n-element source array.
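These two typedefs let the type-agnostic base class apply element-wise operations without knowing the element type; the derived template passes in functions bound to a concrete T. A hedged sketch of what a matching pair could look like (destroy_array and copy_array are illustrative names, not necessarily TBB's):

#include <cstddef>
#include <new>

typedef std::size_t size_type;

// shape of internal_array_op1: operate on n elements starting at begin
template<typename T>
void destroy_array( void* begin, size_type n ) {
    T* array = static_cast<T*>(begin);
    for( size_type i = n; i > 0; --i )
        array[i-1].~T();                      // destroy elements in reverse order
}

// shape of internal_array_op2: n-element destination and n-element source
template<typename T>
void copy_array( void* dst, const void* src, size_type n ) {
    T* d = static_cast<T*>(dst);
    const T* s = static_cast<const T*>(src);
    for( size_type i = 0; i < n; ++i )
        new( &d[i] ) T( s[i] );               // copy-construct into raw storage
}

// the base class would then receive e.g. &destroy_array<MyType> and &copy_array<MyType>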
size_type apply(const F &func)
void __TBB_EXPORTED_METHOD internal_reserve(size_type n, size_type element_size, size_type max_size)
void __TBB_EXPORTED_METHOD internal_grow_to_at_least(size_type new_size, size_type element_size, internal_array_op2 init, const void *src)
Deprecated entry point kept for backward compatibility with TBB 2.1.
Internal structure for compact()
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
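For reference, a minimal sketch of the usual two-sided definition, assuming an assertion-enabled build selected by a TBB_USE_ASSERT-style switch; report_assertion_failure is a hypothetical handler name standing in for the real one in tbb_stddef.h:

#if TBB_USE_ASSERT
    // debug version: report and halt when the predicate is false
    #define __TBB_ASSERT(predicate, comment) \
        ((predicate) ? (void)0 : report_assertion_failure(__FILE__, __LINE__, #predicate, comment))
#else
    // no-op version: the whole expression compiles away
    #define __TBB_ASSERT(predicate, comment) ((void)0)
#endif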
init_body(internal_array_op2 init, const void *src)
#define ITT_NOTIFY(name, obj)
void __TBB_EXPORTED_METHOD internal_copy(const concurrent_vector_base_v3 &src, size_type element_size, internal_array_op2 copy)
size_type __TBB_EXPORTED_METHOD internal_grow_by(size_type delta, size_type element_size, internal_array_op2 init, const void *src)
TODO: turn into lambda functions when available.
Number of slots for segment pointers inside the class.
static const size_type page_size
memory page size
static size_type segment_size(segment_index_t k)
Class that implements exponential backoff.
helper(segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f)
static void publish_segment(segment_t &s, argument_type rhs)
Publish segment so other threads can see it.
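Publication is a release store paired with acquire loads on the consumer side: everything the producer wrote into the segment before the store is guaranteed visible to a thread that observes the non-null pointer. A standalone sketch of that pattern with std::atomic (TBB's segment_t uses its own atomics plus an ITT_NOTIFY(sync_releasing, ...) hook):

#include <atomic>

std::atomic<void*> slot(nullptr);   // one segment slot; null means "not yet published"

void publish( void* segment ) {
    // release: all prior writes to the segment happen-before this store
    slot.store( segment, std::memory_order_release );
}

void* wait_for_segment() {
    void* s;
    // acquire: once the pointer is seen, the segment's contents are visible too
    while( (s = slot.load( std::memory_order_acquire )) == nullptr ) { /* spin */ }
    return s;
}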
Base class of concurrent vector implementation.
destroy_body(internal_array_op1 destroy)
void operator()(segment_t &, void *begin, size_type n) const
atomic< size_type > my_first_block
count of segments in the first block
void operator()(segment_t &s, void *begin, size_type n) const
__TBB_EXPORTED_METHOD ~concurrent_vector_base_v3()
segment_value_t load() const
static size_type enable_segment(concurrent_vector_base_v3 &v, size_type k, size_type element_size, bool mark_as_not_used_on_failure=false)
void __TBB_EXPORTED_METHOD internal_resize(size_type n, size_type element_size, size_type max_size, const void *src, internal_array_op1 destroy, internal_array_op2 init)
#define __TBB_STATIC_ASSERT(condition, msg)
Base class for types that should not be assigned.
segment_not_used_predicate(segment_t &segment)
void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3 &v)
atomic< size_type > my_early_size
Requested size of vector.
void *__TBB_EXPORTED_METHOD internal_push_back(size_type element_size, size_type &index)
static void extend_table_if_necessary(concurrent_vector_base_v3 &v, size_type k, size_type start)
size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result(size_type new_size, size_type element_size, internal_array_op2 init, const void *src)
segment_index_t __TBB_EXPORTED_METHOD internal_clear(internal_array_op1 destroy)
static bool incompact_predicate(size_type size)
static size_type find_segment_end(const concurrent_vector_base_v3 &v)
void spin_wait_while(predicate_type condition)
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
void pause()
Pause for a while.
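A sketch of how these helpers fit together, assuming an atomic_backoff-style exponential pause; the cap of 16 mirrors typical spin-then-yield designs, and the exact constants are implementation details:

#include <atomic>
#include <thread>

// simplified stand-in for the exponential-backoff class documented above
class backoff {
    int count;
public:
    backoff() : count(1) {}
    void pause() {
        if( count <= 16 ) {
            for( int i = 0; i < count; ++i ) { /* cpu pause */ }
            count *= 2;                 // spin a little longer next time
        } else {
            std::this_thread::yield();  // spinning too long: let another thread run
        }
    }
};

// spin WHILE location == value, as spin_wait_while_eq does
template<typename T, typename U>
void spin_wait_while_eq( const std::atomic<T>& location, U value ) {
    backoff b;
    while( location.load( std::memory_order_acquire ) == value )
        b.pause();
}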
static void assign_first_segment_if_necessary(concurrent_vector_base_v3 &v, segment_index_t k)
Assign the first segment size. k is the index of the last segment to be allocated, not a count of segments.
segment_index_t first_block
void store(segment_not_used)
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()
size_type __TBB_EXPORTED_METHOD internal_capacity() const
atomic< segment_t * > my_segment
Pointer to the segments table.
static segment_index_t segment_base(segment_index_t k)
void internal_grow(size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src)
void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const
Obsolete.
void operator()(segment_t &s, void *begin, size_type n) const
void __TBB_EXPORTED_METHOD internal_assign(const concurrent_vector_base_v3 &src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy)
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
static segment_t & acquire_segment(concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner)
friend void swap(segment_t &, segment_t &) __TBB_NOEXCEPT(true)
void *__TBB_EXPORTED_METHOD internal_compact(size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy)
static segment_index_t segment_index_of(size_type index)
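Elements are stored in segments whose capacities grow as powers of two (2, 2, 4, 8, ...), so mapping an index to its segment is pure bit arithmetic. A sketch of the three helpers above under that layout (the special handling of the merged first block is elided):

#include <cstddef>

typedef std::size_t size_type;
typedef std::size_t segment_index_t;

// log2 of (index|1): indices 0,1 -> segment 0; 2,3 -> 1; 4..7 -> 2; ...
segment_index_t segment_index_of( size_type index ) {
    segment_index_t k = 0;
    for( size_type i = index | 1; i > 1; i >>= 1 ) ++k;
    return k;
}

// first element index covered by segment k: 0, 2, 4, 8, 16, ...
segment_index_t segment_base( segment_index_t k ) {
    return (segment_index_t(1) << k) & ~segment_index_t(1);
}

// capacity of segment k: 2, 2, 4, 8, 16, ...
size_type segment_size( segment_index_t k ) {
    return k ? size_type(1) << k : 2;
}

// element i lives at offset i - segment_base(k) within segment k = segment_index_of(i);
// e.g. index 10 -> segment 3 (base 8, size 8), offset 2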
Base class for types that should not be copied or assigned.
void *(* vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t)
allocator function pointer
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
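A hedged usage sketch of the documented pair, assuming the declarations in tbb/cache_aligned_allocator.h; note that, per the assertion earlier in this file, NFS_Allocate reports failure by throwing rather than by returning a null pointer:

#include "tbb/cache_aligned_allocator.h"   // declares tbb::internal::NFS_Allocate / NFS_Free

struct Node { long payload[6]; };

void example() {
    // storage for 8 Nodes, aligned to a cache/sector line to avoid false sharing
    void* raw = tbb::internal::NFS_Allocate( 8, sizeof(Node), /*hint=*/ NULL );
    Node* nodes = static_cast<Node*>( raw );
    // ... construct and use nodes[0..7] ...
    tbb::internal::NFS_Free( raw );        // every NFS_Allocate must be paired with NFS_Free
}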
safe_init_body(internal_array_op2 init, const void *src)
friend void enforce_segment_allocated(segment_value_t const &s, internal::exception_id exception=eid_bad_last_alloc)
segment_t my_storage[pointers_per_short_table]
embedded storage of segment pointers
void cleanup()
Out-of-line code that assists the destructor in infrequent cases.
segment_t table[pointers_per_long_table]
Copyright © 2005-2019 Intel Corporation. All Rights Reserved.
Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
registered trademarks or trademarks of Intel Corporation or its
subsidiaries in the United States and other countries.
* Other names and brands may be claimed as the property of others.