#include "kmp_debug.h"

#define KMP_PAD(type, sz)                                                      \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
#define KMP_GTID_DNE (-2)

#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
#define OMP_LOCK_T_SIZE sizeof(int)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#else
#define OMP_LOCK_T_SIZE sizeof(void *)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#endif

#define OMP_CRITICAL_SIZE sizeof(void *)
#define INTEL_CRITICAL_SIZE 32

typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1

typedef kmp_uint32 kmp_lock_index_t;

// When memory is allocated per lock, free locks are threaded through the
// `next' pointer; when a lock table is used, locks are referenced by index.
struct kmp_lock_pool {
  union kmp_user_lock *next;
  kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;

extern void __kmp_validate_locks(void);
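/* Illustrative sketch (assumption, not part of the original header): KMP_PAD
   rounds sizeof(type) up to the next multiple of `sz', so padded lock unions
   fill whole cache lines.  The struct and the 64-byte line size below are
   hypothetical, chosen only for the arithmetic. */
#if 0
struct example_lk { int a, b, c; }; // hypothetical 12-byte payload
// 12 + (64 - ((12 - 1) % 64) - 1) == 64
static_assert(KMP_PAD(example_lk, 64) == 64, "rounded up to one cache line");
// a payload that is already a multiple of the line size is left unchanged
static_assert(KMP_PAD(char[64], 64) == 64, "no extra padding needed");
#endif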
// Test and set locks.

struct kmp_base_tas_lock {
  // KMP_LOCK_FREE(tas) => unlocked; locked: (gtid+1) of owning thread
  volatile kmp_int32 poll;
  kmp_int32 depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;

union kmp_tas_lock {
  kmp_base_tas_lock_t lk;
  kmp_lock_pool_t pool; // make certain struct is large enough
};

typedef union kmp_tas_lock kmp_tas_lock_t;
#define KMP_TAS_LOCK_INITIALIZER(lock)                                         \
  {                                                                            \
    { KMP_LOCK_FREE(tas), 0 }                                                  \
  }

extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck);

extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck);
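/* Illustrative sketch (assumption, not the runtime's implementation): the
   core of a test-and-set acquire is a compare-and-swap of `poll' from "free"
   to (gtid + 1), retried until it succeeds.  C++11 atomics stand in for the
   KMP_COMPARE_AND_STORE_* primitives used by the runtime. */
#if 0
#include <atomic>
static void tas_acquire_sketch(std::atomic<int> *poll, int gtid) {
  int expected = 0; // 0 plays the role of KMP_LOCK_FREE(tas) here
  while (!poll->compare_exchange_weak(expected, gtid + 1,
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
    expected = 0; // a failed CAS wrote the current owner into `expected'
  }
}
#endif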
#define KMP_LOCK_RELEASED 1
#define KMP_LOCK_STILL_HELD 0
#define KMP_LOCK_ACQUIRED_FIRST 1
#define KMP_LOCK_ACQUIRED_NEXT 0

#define KMP_USE_FUTEX                                                          \
  (KMP_OS_LINUX && !KMP_OS_CNK &&                                              \
   (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))

#if KMP_USE_FUTEX

// futex locks.  futex locks are only available on Linux* OS.

struct kmp_base_futex_lock {
  volatile kmp_int32 poll; // KMP_LOCK_FREE(futex) => unlocked
  // 2*(gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;

union kmp_futex_lock {
  kmp_base_futex_lock_t lk;
  kmp_lock_pool_t pool; // make certain struct is large enough
};

typedef union kmp_futex_lock kmp_futex_lock_t;
#define KMP_FUTEX_LOCK_INITIALIZER(lock)                                       \
  {                                                                            \
    { KMP_LOCK_FREE(futex), 0 }                                                \
  }

extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck);

extern int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck);
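/* Illustrative sketch (assumption, not the runtime's code): a futex lock
   spins briefly with CAS and then parks the thread in the kernel with
   FUTEX_WAIT until the holder issues FUTEX_WAKE on release.  Raw syscalls
   are shown because glibc exposes no futex wrapper; Linux-only. */
#if 0
#include <atomic>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
static void futex_lock_sketch(std::atomic<int> *poll, int gtid) {
  int expected = 0;
  while (!poll->compare_exchange_strong(expected, 2 * (gtid + 1))) {
    // Sleep until *poll changes from the value we just observed.
    syscall(SYS_futex, poll, FUTEX_WAIT_PRIVATE, expected, NULL, NULL, 0);
    expected = 0;
  }
}
static void futex_unlock_sketch(std::atomic<int> *poll) {
  poll->store(0, std::memory_order_release);
  syscall(SYS_futex, poll, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
}
#endif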
#endif // KMP_USE_FUTEX

// Ticket locks.

#ifdef __cplusplus

#ifdef _MSC_VER
// MSVC won't allow use of std::atomic<> in a union since it has non-trivial
// copy constructor.

struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic_bool initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic_uint
      next_ticket; // ticket number to give to next thread which acquires
  std::atomic_uint
      now_serving; // ticket number for thread which holds the lock
  std::atomic_int owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic_int depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

#else

struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic<bool> initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic<unsigned>
      next_ticket; // ticket number to give to next thread which acquires
  std::atomic<unsigned>
      now_serving; // ticket number for thread which holds the lock
  std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic<int> depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

#endif // _MSC_VER

#else // __cplusplus

struct kmp_base_ticket_lock;

#endif // !__cplusplus

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;
union KMP_ALIGN_CACHE kmp_ticket_lock {
  kmp_base_ticket_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_ticket_lock_t, CACHE_LINE)];
};

typedef union kmp_ticket_lock kmp_ticket_lock_t;
#define KMP_TICKET_LOCK_INITIALIZER(lock)                                      \
  {                                                                            \
    {                                                                          \
      ATOMIC_VAR_INIT(true)                                                    \
      , &(lock), NULL, ATOMIC_VAR_INIT(0U), ATOMIC_VAR_INIT(0U),               \
          ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(-1)                              \
    }                                                                          \
  }

extern int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock_with_cheks(kmp_ticket_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck);

extern int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                         kmp_int32 gtid);
extern int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck);
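/* Illustrative sketch (assumption, not the runtime's implementation): a
   ticket lock hands out monotonically increasing tickets with fetch-add and
   serves them in order, which is what gives it FIFO fairness.  Plain C++11
   atomics stand in for the std::atomic members of kmp_base_ticket_lock. */
#if 0
#include <atomic>
struct ticket_lock_sketch {
  std::atomic<unsigned> next_ticket{0};
  std::atomic<unsigned> now_serving{0};
  void acquire() {
    unsigned my_ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);
    while (now_serving.load(std::memory_order_acquire) != my_ticket)
      ; // spin until it is this thread's turn
  }
  void release() { now_serving.fetch_add(1, std::memory_order_release); }
};
#endif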
#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info;

typedef struct kmp_adaptive_lock_info kmp_adaptive_lock_info_t;

#if KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_statistics {
  /* So we can get stats from locks that haven't been destroyed. */
  kmp_adaptive_lock_info_t *next;
  kmp_adaptive_lock_info_t *prev;

  /* Other statistics. */
  kmp_uint32 successfulSpeculations;
  kmp_uint32 hardFailedSpeculations;
  kmp_uint32 softFailedSpeculations;
  kmp_uint32 nonSpeculativeAcquires;
  kmp_uint32 nonSpeculativeAcquireAttempts;
  kmp_uint32 lemmingYields;
};

typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;

extern void __kmp_print_speculative_stats();
extern void __kmp_init_speculative_stats();

#endif // KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info {
  /* Values used for adaptivity.  These are accessed from multiple threads
     without atomics; missed updates only perturb the speculation heuristic. */
  kmp_uint32 volatile badness;
  kmp_uint32 volatile acquire_attempts;
  /* Parameters of the lock. */
  kmp_uint32 max_badness;
  kmp_uint32 max_soft_retries;

#if KMP_DEBUG_ADAPTIVE_LOCKS
  kmp_adaptive_lock_statistics_t volatile stats;
#endif
};

#endif // KMP_USE_ADAPTIVE_LOCKS

// Queuing locks.

struct kmp_base_queuing_lock {
  // `initialized' must be the first entry in the lock data structure!
  volatile union kmp_queuing_lock
      *initialized; // Points to the lock union if in initialized state.

  ident_t const *location; // Source code location of omp_init_lock().

  KMP_ALIGN(8) // tail_id must be 8-byte aligned!

  volatile kmp_int32
      tail_id; // (gtid+1) of thread at tail of wait queue, 0 if empty
  // Must be no padding here since head/tail used in 8-byte CAS
  volatile kmp_int32
      head_id; // (gtid+1) of thread at head of wait queue, 0 if empty

  volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only

  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;

KMP_BUILD_ASSERT(offsetof(kmp_base_queuing_lock_t, tail_id) % 8 == 0);

union KMP_ALIGN_CACHE kmp_queuing_lock {
  kmp_base_queuing_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_queuing_lock_t, CACHE_LINE)];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;
extern int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck);

extern int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                          kmp_int32 gtid);
extern int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck);
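/* Illustrative sketch (assumption): the idea behind a queuing lock is that
   each waiter spins on its own flag and the lock is handed to the head of a
   FIFO queue, avoiding the cache-line storm of a shared spin word.  The
   classic MCS lock below shows the same idea with explicit per-thread nodes;
   the runtime's queuing lock instead records waiters by gtid (head_id /
   tail_id above). */
#if 0
#include <atomic>
struct mcs_node {
  std::atomic<mcs_node *> next{nullptr};
  std::atomic<bool> locked{false};
};
struct mcs_lock_sketch {
  std::atomic<mcs_node *> tail{nullptr};
  void acquire(mcs_node *me) {
    me->next.store(nullptr, std::memory_order_relaxed);
    mcs_node *prev = tail.exchange(me, std::memory_order_acq_rel);
    if (prev != nullptr) {
      me->locked.store(true, std::memory_order_relaxed);
      prev->next.store(me, std::memory_order_release);
      while (me->locked.load(std::memory_order_acquire))
        ; // spin only on our own node
    }
  }
  void release(mcs_node *me) {
    mcs_node *succ = me->next.load(std::memory_order_acquire);
    if (succ == nullptr) {
      mcs_node *expected = me;
      if (tail.compare_exchange_strong(expected, nullptr,
                                       std::memory_order_acq_rel))
        return; // no waiter queued behind us
      while ((succ = me->next.load(std::memory_order_acquire)) == nullptr)
        ; // a waiter is enqueueing; wait for the link to appear
    }
    succ->locked.store(false, std::memory_order_release); // hand off the lock
  }
};
#endif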
#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_base_adaptive_lock {
  kmp_base_queuing_lock qlk; // base queuing lock, used as the fallback
  KMP_ALIGN(CACHE_LINE)
  kmp_adaptive_lock_info_t
      adaptive; // Information for the speculative adaptive lock
};

typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;

union KMP_ALIGN_CACHE kmp_adaptive_lock {
  kmp_base_adaptive_lock_t lk;
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE)];
};

typedef union kmp_adaptive_lock kmp_adaptive_lock_t;
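/* Illustrative sketch (assumption): an adaptive lock first tries to run the
   critical section speculatively as an Intel TSX (RTM) transaction and only
   takes the underlying queuing lock when speculation keeps failing; the
   badness/max_badness counters above drive that decision.  Requires a CPU
   with RTM and compilation with -mrtm; the helper name is hypothetical. */
#if 0
#include <atomic>
#include <immintrin.h>
static bool speculative_acquire_sketch(std::atomic<int> &lock_busy) {
  unsigned status = _xbegin();
  if (status == _XBEGIN_STARTED) {
    if (lock_busy.load(std::memory_order_relaxed))
      _xabort(0xff); // the real lock is held; give up on speculation
    return true;     // running transactionally; _xend() at release commits
  }
  return false; // speculation failed; caller should acquire the real lock
}
#endif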
#define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)

#endif // KMP_USE_ADAPTIVE_LOCKS

// drdpa ticket locks (distributed polling).

struct kmp_base_drdpa_lock {
  // Fields on the first cache line are only written when initializing or
  // reconfiguring the lock, so the common acquire/release path leaves them
  // untouched.
  volatile union kmp_drdpa_lock
      *initialized; // points to the lock union if in initialized state
  ident_t const *location; // Source code location of omp_init_lock().
  volatile struct kmp_lock_poll { kmp_uint64 poll; } *volatile polls;
  volatile kmp_uint64 mask; // is 2**num_polls-1 for mod op
  kmp_uint64 cleanup_ticket; // thread with cleanup ticket
  volatile struct kmp_lock_poll *old_polls; // will deallocate old_polls
  kmp_uint32 num_polls; // must be power of 2

  volatile kmp_uint64 next_ticket;

  kmp_uint64 now_serving; // doesn't have to be volatile
  volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;

union KMP_ALIGN_CACHE kmp_drdpa_lock {
  kmp_base_drdpa_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_drdpa_lock_t, CACHE_LINE)];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;
extern int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck);

extern int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
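/* Illustrative sketch (assumption, not the runtime's code): a distributed-
   polling ticket lock lets each waiter spin on its own slot of a power-of-two
   `polls' array indexed by (ticket & mask), so a release touches only one
   waiter's cache line.  Growing/reconfiguring the polling area (old_polls,
   cleanup_ticket above) is omitted; NPOLLS is a hypothetical fixed size. */
#if 0
#include <atomic>
struct drdpa_sketch {
  static const unsigned NPOLLS = 8; // must be a power of 2
  std::atomic<unsigned long long> polls[NPOLLS];
  std::atomic<unsigned long long> next_ticket{0};
  unsigned long long now_serving = 0; // only touched by the current owner
  drdpa_sketch() {
    for (unsigned i = 0; i < NPOLLS; ++i)
      polls[i].store(0, std::memory_order_relaxed);
  }
  void acquire() {
    unsigned long long ticket = next_ticket.fetch_add(1);
    while (polls[ticket & (NPOLLS - 1)].load(std::memory_order_acquire) <
           ticket)
      ; // spin on our own poll slot
    now_serving = ticket;
  }
  void release() {
    unsigned long long next = now_serving + 1;
    polls[next & (NPOLLS - 1)].store(next, std::memory_order_release);
  }
};
#endif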
// Bootstrap locks -- internal locks used at library initialization time,
// before gtids are available.  They are implemented as ticket locks and can
// be statically initialized.

typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER(lock) KMP_TICKET_LOCK_INITIALIZER((lock))

static inline int __kmp_acquire_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_acquire_ticket_lock(lck, KMP_GTID_DNE);
}

static inline int __kmp_test_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_test_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_release_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_release_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_init_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}
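/* Illustrative usage sketch (assumption, mirroring the double-checked
   pattern in KMP_CHECK_USER_LOCK_INIT further below): a statically
   initialized bootstrap lock guards one-time setup before thread gtids
   exist.  `my_lock', `my_flag' and `one_time_setup' are hypothetical. */
#if 0
static kmp_bootstrap_lock_t my_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER(my_lock);
static int my_flag = 0;
static void one_time_setup(void) {
  if (!my_flag) {
    __kmp_acquire_bootstrap_lock(&my_lock);
    if (!my_flag) { // re-check under the lock
      /* ... perform the one-time initialization ... */
      my_flag = 1;
    }
    __kmp_release_bootstrap_lock(&my_lock);
  }
}
#endif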
// Internal RTL locks -- used everywhere else in the runtime; like bootstrap
// locks, they are ticket locks.

typedef kmp_ticket_lock_t kmp_lock_t;

static inline int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_acquire_ticket_lock(lck, gtid);
}

static inline int __kmp_test_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_test_ticket_lock(lck, gtid);
}

static inline void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  __kmp_release_ticket_lock(lck, gtid);
}

static inline void __kmp_init_lock(kmp_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_lock(kmp_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}
// User locks.

enum kmp_lock_kind {
  lk_default = 0,
  lk_tas,
#if KMP_USE_FUTEX
  lk_futex,
#endif
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
  lk_hle,
  lk_rtm,
#endif
  lk_ticket,
  lk_queuing,
  lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
  lk_adaptive
#endif // KMP_USE_ADAPTIVE_LOCKS
};

typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;

union kmp_user_lock {
  kmp_tas_lock_t tas;
#if KMP_USE_FUTEX
  kmp_futex_lock_t futex;
#endif
  kmp_ticket_lock_t ticket;
  kmp_queuing_lock_t queuing;
  kmp_drdpa_lock_t drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
  kmp_adaptive_lock_t adaptive;
#endif // KMP_USE_ADAPTIVE_LOCKS
  kmp_lock_pool_t pool;
};

typedef union kmp_user_lock *kmp_user_lock_p;
#if !KMP_USE_DYNAMIC_LOCK

extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;

extern kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck);

static inline kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_get_user_lock_owner_ != NULL);
  return (*__kmp_get_user_lock_owner_)(lck);
}
extern int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#define __kmp_acquire_user_lock_with_checks(lck, gtid)                         \
  if (__kmp_user_lock_kind == lk_tas) {                                        \
    if (__kmp_env_consistency_check) {                                         \
      char const *const func = "omp_set_lock";                                 \
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&                       \
          lck->tas.lk.depth_locked != -1) {                                    \
        KMP_FATAL(LockNestableUsedAsSimple, func);                             \
      }                                                                        \
      if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) {                     \
        KMP_FATAL(LockIsAlreadyOwned, func);                                   \
      }                                                                        \
    }                                                                          \
    if ((lck->tas.lk.poll != 0) ||                                             \
        (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1))) {    \
      kmp_uint32 spins;                                                        \
      KMP_FSYNC_PREPARE(lck);                                                  \
      KMP_INIT_YIELD(spins);                                                   \
      if (TCR_4(__kmp_nth) >                                                   \
          (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {               \
        KMP_YIELD(TRUE);                                                       \
      } else {                                                                 \
        KMP_YIELD_SPIN(spins);                                                 \
      }                                                                        \
      while (                                                                  \
          (lck->tas.lk.poll != 0) ||                                           \
          (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1))) {  \
        if (TCR_4(__kmp_nth) >                                                 \
            (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {             \
          KMP_YIELD(TRUE);                                                     \
        } else {                                                               \
          KMP_YIELD_SPIN(spins);                                               \
        }                                                                      \
      }                                                                        \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(lck);                                                   \
  } else {                                                                     \
    KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);            \
    (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);                        \
  }

#else
static inline int __kmp_acquire_user_lock_with_checks(kmp_user_lock_p lck,
                                                      kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);
  return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#include "kmp_i18n.h" /* KMP_FATAL definition */
extern int __kmp_env_consistency_check; /* copy from kmp.h */
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked != -1) {
        KMP_FATAL(LockNestableUsedAsSimple, func);
      }
    }
    return ((lck->tas.lk.poll == 0) &&
            KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1));
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
    return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
  return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

static inline void __kmp_release_user_lock_with_checks(kmp_user_lock_p lck,
                                                       kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_user_lock_with_checks_ != NULL);
  (*__kmp_release_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_init_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_user_lock_with_checks_ != NULL);
  (*__kmp_init_user_lock_with_checks_)(lck);
}

// We need a non-checking version of destroy lock for when the RTL is doing
// the cleanup, as it cannot always tell whether the lock is nested or not.
extern void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_ != NULL);
  (*__kmp_destroy_user_lock_)(lck);
}

extern void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_user_lock_with_checks_)(lck);
}
extern int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_nested_user_lock_with_checks(lck, gtid, depth)          \
  if (__kmp_user_lock_kind == lk_tas) {                                       \
    if (__kmp_env_consistency_check) {                                        \
      char const *const func = "omp_set_nest_lock";                           \
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&                 \
          lck->tas.lk.depth_locked == -1) {                                   \
        KMP_FATAL(LockSimpleUsedAsNestable, func);                            \
      }                                                                       \
    }                                                                         \
    if (lck->tas.lk.poll - 1 == gtid) {                                       \
      lck->tas.lk.depth_locked += 1;                                          \
      *depth = KMP_LOCK_ACQUIRED_NEXT;                                        \
    } else {                                                                  \
      if ((lck->tas.lk.poll != 0) ||                                          \
          (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1))) { \
        kmp_uint32 spins;                                                     \
        KMP_FSYNC_PREPARE(lck);                                               \
        KMP_INIT_YIELD(spins);                                                \
        if (TCR_4(__kmp_nth) >                                                \
            (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {            \
          KMP_YIELD(TRUE);                                                    \
        } else {                                                              \
          KMP_YIELD_SPIN(spins);                                              \
        }                                                                     \
        while ((lck->tas.lk.poll != 0) ||                                     \
               (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0,          \
                                             gtid + 1))) {                    \
          if (TCR_4(__kmp_nth) >                                              \
              (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {          \
            KMP_YIELD(TRUE);                                                  \
          } else {                                                            \
            KMP_YIELD_SPIN(spins);                                            \
          }                                                                   \
        }                                                                     \
      }                                                                       \
      lck->tas.lk.depth_locked = 1;                                           \
      *depth = KMP_LOCK_ACQUIRED_FIRST;                                       \
    }                                                                         \
    KMP_FSYNC_ACQUIRED(lck);                                                  \
  } else {                                                                    \
    KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);    \
    *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);       \
  }

#else
static inline void
__kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck, kmp_int32 gtid,
                                           int *depth) {
  KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);
  *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                       kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    int retval;
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_nest_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked == -1) {
        KMP_FATAL(LockSimpleUsedAsNestable, func);
      }
    }
    KMP_DEBUG_ASSERT(gtid >= 0);
    if (lck->tas.lk.poll - 1 ==
        gtid) { /* __kmp_get_tas_lock_owner(lck) == gtid */
      return ++lck->tas.lk.depth_locked; /* same owner, depth increased */
    }
    retval = ((lck->tas.lk.poll == 0) &&
              KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1));
    if (retval) {
      KMP_MB();
      lck->tas.lk.depth_locked = 1;
    }
    return retval;
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
    return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

static inline int
__kmp_release_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                           kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_release_nested_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_init_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_nested_user_lock_with_checks_ != NULL);
  (*__kmp_init_nested_user_lock_with_checks_)(lck);
}

extern void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_destroy_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_nested_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_nested_user_lock_with_checks_)(lck);
}
// User lock functions which do not necessarily exist for all lock kinds.

extern int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck);

extern const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck);

static inline const ident_t *__kmp_get_user_lock_location(kmp_user_lock_p lck) {
  if (__kmp_get_user_lock_location_ != NULL) {
    return (*__kmp_get_user_lock_location_)(lck);
  } else {
    return NULL;
  }
}

extern void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
                                             const ident_t *loc);

static inline void __kmp_set_user_lock_location(kmp_user_lock_p lck,
                                                const ident_t *loc) {
  if (__kmp_set_user_lock_location_ != NULL) {
    (*__kmp_set_user_lock_location_)(lck, loc);
  }
}

extern kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck);

extern void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
                                          kmp_lock_flags_t flags);

static inline void __kmp_set_user_lock_flags(kmp_user_lock_p lck,
                                             kmp_lock_flags_t flags) {
  if (__kmp_set_user_lock_flags_ != NULL) {
    (*__kmp_set_user_lock_flags_)(lck, flags);
  }
}

// The function which sets up all of the vtbl pointers for kmp_user_lock_t.
extern void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind);
#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix)                        \
  {                                                                            \
    __kmp_acquire##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_acquire##nest##kind##_##suffix;      \
    __kmp_release##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_release##nest##kind##_##suffix;      \
    __kmp_test##nest##user_lock_with_checks_ = (int (*)(                       \
        kmp_user_lock_p, kmp_int32))__kmp_test##nest##kind##_##suffix;         \
    __kmp_init##nest##user_lock_with_checks_ =                                 \
        (void (*)(kmp_user_lock_p))__kmp_init##nest##kind##_##suffix;          \
    __kmp_destroy##nest##user_lock_with_checks_ =                              \
        (void (*)(kmp_user_lock_p))__kmp_destroy##nest##kind##_##suffix;       \
  }

#define KMP_BIND_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind)                                   \
  KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
#define KMP_BIND_NESTED_USER_LOCK(kind)                                        \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind)                            \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)

// Lock table used when user locks are referenced by an index into the table
// rather than by a direct pointer.
struct kmp_lock_table {
  kmp_lock_index_t used; // Number of used elements
  kmp_lock_index_t allocated; // Number of allocated elements
  kmp_user_lock_p *table; // Table of pointers to user locks
};

typedef struct kmp_lock_table kmp_lock_table_t;
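/* Illustrative sketch (assumption): binding the ticket lock as the user-lock
   implementation via the template above amounts to a block of assignments
   that fill the function-pointer "vtable" with the ticket entry points,
   cast to the generic kmp_user_lock_p signatures. */
#if 0
static void bind_ticket_example(void) {
  KMP_BIND_USER_LOCK(ticket); // expands roughly to:
  /* __kmp_acquire_user_lock_with_checks_ =
         (int (*)(kmp_user_lock_p, kmp_int32))__kmp_acquire_ticket_lock;
     __kmp_release_user_lock_with_checks_ = ...__kmp_release_ticket_lock;
     ... and so on for test/init/destroy; the _WITH_CHECKS variants bind
     the *_with_checks entry points instead. */
}
#endif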
extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;

struct kmp_block_of_locks {
  struct kmp_block_of_locks *next_block;
  void *locks; // pointer to the actual block of locks
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;

extern kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock,
                                                kmp_int32 gtid,
                                                kmp_lock_flags_t flags);
extern void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
                                 kmp_user_lock_p lck);
extern kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock,
                                              char const *func);
extern void __kmp_cleanup_user_locks();
#define KMP_CHECK_USER_LOCK_INIT()                                             \
  {                                                                            \
    if (!TCR_4(__kmp_init_user_locks)) {                                       \
      __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);                         \
      if (!TCR_4(__kmp_init_user_locks)) {                                     \
        TCW_4(__kmp_init_user_locks, TRUE);                                    \
      }                                                                        \
      __kmp_release_bootstrap_lock(&__kmp_initz_lock);                         \
    }                                                                          \
  }

#endif // KMP_USE_DYNAMIC_LOCK

#if KMP_USE_DYNAMIC_LOCK

// KMP_USE_INLINED_TAS: enables the inlined fast path of the TAS lock.
// KMP_USE_INLINED_FUTEX: enables the inlined fast path of the futex lock.
#define KMP_USE_INLINED_TAS                                                    \
  (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
#define KMP_USE_INLINED_FUTEX KMP_USE_FUTEX && 0

// List of lock definitions; all nested locks are indirect lock types.
#if KMP_USE_TSX
#if KMP_USE_FUTEX
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a)              \
      m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a)                  \
          m(nested_queuing, a) m(nested_drdpa, a)
#else
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a)              \
      m(nested_tas, a) m(nested_ticket, a) m(nested_queuing, a)                \
          m(nested_drdpa, a)
#endif // KMP_USE_FUTEX
#define KMP_LAST_D_LOCK lockseq_hle
#else
#if KMP_USE_FUTEX
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_futex, a)   \
      m(nested_ticket, a) m(nested_queuing, a) m(nested_drdpa, a)
#define KMP_LAST_D_LOCK lockseq_futex
#else
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_ticket, a)  \
      m(nested_queuing, a) m(nested_drdpa, a)
#define KMP_LAST_D_LOCK lockseq_tas
#endif // KMP_USE_FUTEX
#endif // KMP_USE_TSX

// Information used in dynamic dispatch.
#define KMP_LOCK_SHIFT                                                         \
  8 // number of low bits to be used as tag for direct locks
#define KMP_FIRST_D_LOCK lockseq_tas
#define KMP_FIRST_I_LOCK lockseq_ticket
#define KMP_LAST_I_LOCK lockseq_nested_drdpa
#define KMP_NUM_I_LOCKS                                                        \
  (locktag_nested_drdpa + 1) // number of indirect lock types

// Lock type.
typedef kmp_uint32 kmp_dyna_lock_t;
// Lock sequence that enumerates all lock kinds.
typedef enum {
  lockseq_indirect = 0,
#define expand_seq(l, a) lockseq_##l,
  KMP_FOREACH_D_LOCK(expand_seq, 0) KMP_FOREACH_I_LOCK(expand_seq, 0)
#undef expand_seq
} kmp_dyna_lockseq_t;

// Enumerates indirect lock tags.
typedef enum {
#define expand_tag(l, a) locktag_##l,
  KMP_FOREACH_I_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_indirect_locktag_t;

// Utility macros that extract information from lock sequences.
#define KMP_IS_D_LOCK(seq)                                                     \
  ((seq) >= KMP_FIRST_D_LOCK && (seq) <= KMP_LAST_D_LOCK)
#define KMP_IS_I_LOCK(seq)                                                     \
  ((seq) >= KMP_FIRST_I_LOCK && (seq) <= KMP_LAST_I_LOCK)
#define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq)-KMP_FIRST_I_LOCK)
#define KMP_GET_D_TAG(seq) ((seq) << 1 | 1)

// Enumerates direct lock tags, derived from the lock sequence.
typedef enum {
#define expand_tag(l, a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
  KMP_FOREACH_D_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_direct_locktag_t;

// Indirect lock type.
typedef struct {
  kmp_user_lock_p lock;
  kmp_indirect_locktag_t type;
} kmp_indirect_lock_t;
// Function tables for direct locks, indexed by the direct lock tag.
extern void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t);
extern void (*__kmp_direct_destroy[])(kmp_dyna_lock_t *);
extern int (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32);
extern int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32);
extern int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32);

// Function tables for indirect locks, indexed by the indirect lock tag.
extern void (*__kmp_indirect_init[])(kmp_user_lock_p);
extern void (*__kmp_indirect_destroy[])(kmp_user_lock_p);
extern int (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32);
extern int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32);
extern int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32);

// Extracts the direct lock tag from a user lock pointer.
#define KMP_EXTRACT_D_TAG(l)                                                   \
  (*((kmp_dyna_lock_t *)(l)) & ((1 << KMP_LOCK_SHIFT) - 1) &                   \
   -(*((kmp_dyna_lock_t *)(l)) & 1))

// Extracts the indirect lock index from a user lock pointer.
#define KMP_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)

// Returns the function pointer for the direct lock operation `op' on lock `l'
// (kmp_dyna_lock_t *).
#define KMP_D_LOCK_FUNC(l, op) __kmp_direct_##op[KMP_EXTRACT_D_TAG(l)]

// Returns the function pointer for the indirect lock operation `op' on lock
// `l' (kmp_indirect_lock_t *).
#define KMP_I_LOCK_FUNC(l, op)                                                 \
  __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]

// Initializes a direct lock with the given lock pointer and lock sequence.
#define KMP_INIT_D_LOCK(l, seq)                                                \
  __kmp_direct_init[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)

// Initializes an indirect lock with the given lock pointer and lock sequence.
#define KMP_INIT_I_LOCK(l, seq)                                                \
  __kmp_direct_init[0]((kmp_dyna_lock_t *)(l), seq)

// Returns the "free" lock value for the given lock type.
#define KMP_LOCK_FREE(type) (locktag_##type)

// Returns the "busy" lock value for the given value and lock type.
#define KMP_LOCK_BUSY(v, type) ((v) << KMP_LOCK_SHIFT | locktag_##type)

// Returns the lock value after shifting out the lock tag.
#define KMP_LOCK_STRIP(v) ((v) >> KMP_LOCK_SHIFT)

// Initializes global states and data structures for managing dynamic user
// locks.
extern void __kmp_init_dynamic_user_locks();
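/* Illustrative sketch (assumption): a direct lock stores its tag in the low
   bits of the lock word itself (odd values, since KMP_GET_D_TAG sets bit 0),
   while an indirect lock stores (table index << 1) with bit 0 clear; that is
   what KMP_EXTRACT_D_TAG and KMP_EXTRACT_I_INDEX rely on.  The index 42 is
   hypothetical. */
#if 0
static void tag_encoding_example(void) {
  // A direct TAS lock word: tag = (lockseq_tas << 1) | 1, an odd value.
  kmp_dyna_lock_t d_word = KMP_GET_D_TAG(lockseq_tas);
  KMP_DEBUG_ASSERT(KMP_EXTRACT_D_TAG(&d_word) == locktag_tas);

  // An indirect lock word: an even value holding (lock table index << 1),
  // so KMP_EXTRACT_D_TAG yields 0 and the index can be recovered.
  kmp_dyna_lock_t i_word = (kmp_dyna_lock_t)(42 << 1);
  KMP_DEBUG_ASSERT(KMP_EXTRACT_D_TAG(&i_word) == 0);
  KMP_DEBUG_ASSERT(KMP_EXTRACT_I_INDEX(&i_word) == 42);
}
#endif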
// Manages memory space for indirect user locks.
extern kmp_indirect_lock_t *
__kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);
extern void __kmp_cleanup_indirect_user_locks();

extern kmp_dyna_lockseq_t __kmp_user_lock_seq;

// Jump table for "set lock location", available only for indirect locks.
extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                            const ident_t *);
#define KMP_SET_I_LOCK_LOCATION(lck, loc)                                      \
  {                                                                            \
    if (__kmp_indirect_set_location[(lck)->type] != NULL)                      \
      __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc);              \
  }

// Jump table for "set lock flags", available only for indirect locks.
extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                         kmp_lock_flags_t);
#define KMP_SET_I_LOCK_FLAGS(lck, flag)                                        \
  {                                                                            \
    if (__kmp_indirect_set_flags[(lck)->type] != NULL)                         \
      __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag);                \
  }

// Jump table for "get lock location", available only for indirect locks.
extern const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_LOCATION(lck)                                           \
  (__kmp_indirect_get_location[(lck)->type] != NULL                            \
       ? __kmp_indirect_get_location[(lck)->type]((lck)->lock)                 \
       : NULL)

// Jump table for "get lock flags", available only for indirect locks.
extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_FLAGS(lck)                                              \
  (__kmp_indirect_get_flags[(lck)->type] != NULL                               \
       ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock)                    \
       : NULL)

#define KMP_I_LOCK_CHUNK                                                       \
  1024 // number of kmp_indirect_lock_t objects to be allocated together

// Table of indirect locks, allocated in chunks of KMP_I_LOCK_CHUNK entries.
typedef struct kmp_indirect_lock_table {
  kmp_indirect_lock_t **table; // blocks of indirect locks allocated
  kmp_lock_index_t size; // size of the indirect lock table
  kmp_lock_index_t next; // index of the next lock to be allocated
} kmp_indirect_lock_table_t;

extern kmp_indirect_lock_table_t __kmp_i_lock_table;

// Returns the indirect lock associated with the given index.
#define KMP_GET_I_LOCK(index)                                                  \
  (*(__kmp_i_lock_table.table + (index) / KMP_I_LOCK_CHUNK) +                  \
   (index) % KMP_I_LOCK_CHUNK)

// Number of locks in a lock block.
extern int __kmp_num_locks_in_block;

// Fast lock table lookup without consistency checking.
#define KMP_LOOKUP_I_LOCK(l)                                                   \
  ((OMP_LOCK_T_SIZE < sizeof(void *)) ? KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(l)) \
                                      : *((kmp_indirect_lock_t **)(l)))

// Returns the owner of the given user lock.
extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);
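/* Illustrative sketch (assumption): an indirect user lock is found by
   chasing the chunked table above -- chunk = index / KMP_I_LOCK_CHUNK,
   slot = index % KMP_I_LOCK_CHUNK -- which is exactly what KMP_GET_I_LOCK
   computes with pointer arithmetic. */
#if 0
static kmp_indirect_lock_t *get_i_lock_example(kmp_lock_index_t index) {
  kmp_indirect_lock_t *chunk =
      __kmp_i_lock_table.table[index / KMP_I_LOCK_CHUNK];
  return &chunk[index % KMP_I_LOCK_CHUNK]; // same as KMP_GET_I_LOCK(index)
}
#endif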
#else // KMP_USE_DYNAMIC_LOCK

#define KMP_LOCK_BUSY(v, type) (v)
#define KMP_LOCK_FREE(type) 0
#define KMP_LOCK_STRIP(v) (v)

#endif // KMP_USE_DYNAMIC_LOCK

// Data structure for spin-backoff parameters.
typedef struct kmp_backoff {
  kmp_uint32 step; // current step
  kmp_uint32 max_backoff; // upper bound of outer delay loop
  kmp_uint32 min_tick; // size of inner delay loop in ticks (machine-dependent)
} kmp_backoff_t;

extern kmp_backoff_t __kmp_spin_backoff_params;
extern void __kmp_spin_backoff(kmp_backoff_t *);
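/* Illustrative sketch (assumption, not the runtime's implementation): a
   truncated exponential backoff pauses for a growing number of iterations
   after each failed acquisition attempt, capped by max_backoff, which is
   what the parameters above control.  _mm_pause is x86-only and used here
   purely for illustration. */
#if 0
#include <immintrin.h>
static void spin_backoff_sketch(kmp_backoff_t *b) {
  for (kmp_uint32 i = 0; i < b->step * b->min_tick; ++i)
    _mm_pause(); // let the sibling hyperthread / memory system make progress
  if (b->step < b->max_backoff)
    b->step <<= 1; // double the delay, up to the cap
}
#endif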
#endif // __cplusplus