#include <stddef.h>
#include <atomic>
#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_FUTEX
#include <sys/syscall.h>
#include <unistd.h>
#ifndef FUTEX_WAIT
#define FUTEX_WAIT …
#endif
#ifndef FUTEX_WAKE
#define FUTEX_WAKE …
#endif
#endif
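// One-time sanity checks of lock-related assumptions (e.g. unsigned
// wrap-around arithmetic and field alignment that the lock code relies on).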
void __kmp_validate_locks(void) { … }
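/* ------------------------------------------------------------------------ */
/* test and set locks */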
static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck) { … }
static inline bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck) { … }
__forceinline static int
__kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) { … }
int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_tas_lock(kmp_tas_lock_t *lck) { … }
void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { … }
static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) { … }
int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) { … }
void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) { … }
static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) { … }
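/* ------------------------------------------------------------------------ */
/* futex locks: contended waiters block in the kernel via the futex syscall
   instead of spinning in user space. */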
#if KMP_USE_FUTEX
static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) { … }
static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) { … }
__forceinline static int
__kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) { … }
int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_futex_lock(kmp_futex_lock_t *lck) { … }
void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { … }
static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) { … }
int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) { … }
void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) { … }
static void __kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) { … }
#endif
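/* ------------------------------------------------------------------------ */
/* ticket (bakery) locks */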
static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck) { … }
static inline bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck) { … }
static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket) { … }
__forceinline static int
__kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) { … }
void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) { … }
static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck) { … }
int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) { … }
void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) { … }
static void
__kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) { … }
static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) { … }
static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck,
const ident_t *loc) { … }
static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck) { … }
static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck,
kmp_lock_flags_t flags) { … }
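/* ------------------------------------------------------------------------ */
/* queuing locks */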
#ifdef DEBUG_QUEUING_LOCKS
#define TRACE_BUF_ELE …
static char traces[TRACE_BUF_ELE][128] = {0};
static int tc = 0;
#define TRACE_LOCK …
#define TRACE_LOCK_T …
#define TRACE_LOCK_HT …
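// Dump the trace ring buffer and the current wait queue of a queuing lock
// (debug builds only).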
static void __kmp_dump_queuing_lock(kmp_info_t *this_thr, kmp_int32 gtid,
kmp_queuing_lock_t *lck, kmp_int32 head_id,
kmp_int32 tail_id) {
kmp_int32 t, i;
__kmp_printf_no_lock("\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n");
i = tc % TRACE_BUF_ELE;
__kmp_printf_no_lock("%s\n", traces[i]);
i = (i + 1) % TRACE_BUF_ELE;
while (i != (tc % TRACE_BUF_ELE)) {
__kmp_printf_no_lock("%s", traces[i]);
i = (i + 1) % TRACE_BUF_ELE;
}
__kmp_printf_no_lock("\n");
__kmp_printf_no_lock("\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, "
"next_wait:%d, head_id:%d, tail_id:%d\n",
gtid + 1, this_thr->th.th_spin_here,
this_thr->th.th_next_waiting, head_id, tail_id);
__kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);
if (lck->lk.head_id >= 1) {
t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
while (t > 0) {
__kmp_printf_no_lock("-> %d ", t);
t = __kmp_threads[t - 1]->th.th_next_waiting;
}
}
__kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id);
__kmp_printf_no_lock("\n\n");
}
#endif
static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck) { … }
static inline bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck) { … }
template <bool takeTime>
__forceinline static int
__kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) { … }
void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) { … }
static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck) { … }
int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { … }
static int
__kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { … }
static int
__kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) { … }
void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) { … }
static void
__kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) { … }
static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) { … }
static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck,
const ident_t *loc) { … }
static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck) { … }
static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck,
kmp_lock_flags_t flags) { … }
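/* ------------------------------------------------------------------------ */
/* RTM-based adaptive locks: speculate using hardware transactional memory and
   fall back to the underlying queuing lock when speculation keeps failing. */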
#if KMP_USE_ADAPTIVE_LOCKS
#if KMP_HAVE_RTM_INTRINSICS
#include <immintrin.h>
#define SOFT_ABORT_MASK …
#else
#define _XBEGIN_STARTED …
#define _XABORT_EXPLICIT …
#define _XABORT_RETRY …
#define _XABORT_CONFLICT …
#define _XABORT_CAPACITY …
#define _XABORT_DEBUG …
#define _XABORT_NESTED …
#define _XABORT_CODE …
#define SOFT_ABORT_MASK …
#define STRINGIZE_INTERNAL …
#define STRINGIZE …
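// Fallback when the RTM intrinsics are unavailable: emit the XBEGIN / XEND /
// XABORT instructions as raw bytes. _xbegin() returns _XBEGIN_STARTED inside a
// successfully started transaction, otherwise the abort status (from EAX).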
static __inline int _xbegin() {
int res = -1;
#if KMP_OS_WINDOWS
#if KMP_ARCH_X86_64
_asm {
_emit 0xC7
_emit 0xF8
_emit 2
_emit 0
_emit 0
_emit 0
jmp L2
mov res, eax
L2:
}
#else
_asm {
_emit 0xC7
_emit 0xF8
_emit 2
_emit 0
_emit 0
_emit 0
jmp L2
mov res, eax
L2:
}
#endif
#else
__asm__ volatile("1: .byte 0xC7; .byte 0xF8;\n"
" .long 1f-1b-6\n"
" jmp 2f\n"
"1: movl %%eax,%0\n"
"2:"
: "+r"(res)::"memory", "%eax");
#endif
return res;
}
static __inline void _xend() {
#if KMP_OS_WINDOWS
__asm {
_emit 0x0f
_emit 0x01
_emit 0xd5
}
#else
__asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory");
#endif
}
#if KMP_OS_WINDOWS
#define _xabort …
#else
#define _xabort …
#endif
#endif
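// Speculation statistics (debug builds): live adaptive locks are kept on a
// doubly linked list guarded by chain_lock, and the stats of destroyed locks
// are folded into destroyedStats.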
#if KMP_DEBUG_ADAPTIVE_LOCKS
static kmp_adaptive_lock_statistics_t destroyedStats;
static kmp_adaptive_lock_info_t liveLocks;
static kmp_bootstrap_lock_t chain_lock =
KMP_BOOTSTRAP_LOCK_INITIALIZER(chain_lock);
void __kmp_init_speculative_stats() {
kmp_adaptive_lock_info_t *lck = &liveLocks;
memset(CCAST(kmp_adaptive_lock_statistics_t *, &(lck->stats)), 0,
sizeof(lck->stats));
lck->stats.next = lck;
lck->stats.prev = lck;
KMP_ASSERT(lck->stats.next->stats.prev == lck);
KMP_ASSERT(lck->stats.prev->stats.next == lck);
__kmp_init_bootstrap_lock(&chain_lock);
}
static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) {
__kmp_acquire_bootstrap_lock(&chain_lock);
lck->stats.next = liveLocks.stats.next;
lck->stats.prev = &liveLocks;
liveLocks.stats.next = lck;
lck->stats.next->stats.prev = lck;
KMP_ASSERT(lck->stats.next->stats.prev == lck);
KMP_ASSERT(lck->stats.prev->stats.next == lck);
__kmp_release_bootstrap_lock(&chain_lock);
}
static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) {
KMP_ASSERT(lck->stats.next->stats.prev == lck);
KMP_ASSERT(lck->stats.prev->stats.next == lck);
kmp_adaptive_lock_info_t *n = lck->stats.next;
kmp_adaptive_lock_info_t *p = lck->stats.prev;
n->stats.prev = p;
p->stats.next = n;
}
static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) {
memset(CCAST(kmp_adaptive_lock_statistics_t *, &lck->stats), 0,
sizeof(lck->stats));
__kmp_remember_lock(lck);
}
static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t,
kmp_adaptive_lock_info_t *lck) {
kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;
t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
t->successfulSpeculations += s->successfulSpeculations;
t->hardFailedSpeculations += s->hardFailedSpeculations;
t->softFailedSpeculations += s->softFailedSpeculations;
t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
t->lemmingYields += s->lemmingYields;
}
static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) {
__kmp_acquire_bootstrap_lock(&chain_lock);
__kmp_add_stats(&destroyedStats, lck);
__kmp_forget_lock(lck);
__kmp_release_bootstrap_lock(&chain_lock);
}
static float percent(kmp_uint32 count, kmp_uint32 total) {
return (total == 0) ? 0.0 : (100.0 * count) / total;
}
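// Print the accumulated speculation statistics. __kmp_speculative_statsfile is
// either "-" (stdout) or a printf-style format string that receives the pid.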
void __kmp_print_speculative_stats() {
kmp_adaptive_lock_statistics_t total = destroyedStats;
kmp_adaptive_lock_info_t *lck;
for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
__kmp_add_stats(&total, lck);
}
kmp_adaptive_lock_statistics_t *t = &total;
kmp_uint32 totalSections =
t->nonSpeculativeAcquires + t->successfulSpeculations;
kmp_uint32 totalSpeculations = t->successfulSpeculations +
t->hardFailedSpeculations +
t->softFailedSpeculations;
if (totalSections <= 0)
return;
kmp_safe_raii_file_t statsFile;
if (strcmp(__kmp_speculative_statsfile, "-") == 0) {
statsFile.set_stdout();
} else {
size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20;
char buffer[buffLen];
KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile,
(kmp_int32)getpid());
statsFile.open(buffer, "w");
}
fprintf(statsFile, "Speculative lock statistics (all approximate!)\n");
fprintf(statsFile,
" Lock parameters: \n"
" max_soft_retries : %10d\n"
" max_badness : %10d\n",
__kmp_adaptive_backoff_params.max_soft_retries,
__kmp_adaptive_backoff_params.max_badness);
fprintf(statsFile, " Non-speculative acquire attempts : %10d\n",
t->nonSpeculativeAcquireAttempts);
fprintf(statsFile, " Total critical sections : %10d\n",
totalSections);
fprintf(statsFile, " Successful speculations : %10d (%5.1f%%)\n",
t->successfulSpeculations,
percent(t->successfulSpeculations, totalSections));
fprintf(statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n",
t->nonSpeculativeAcquires,
percent(t->nonSpeculativeAcquires, totalSections));
fprintf(statsFile, " Lemming yields : %10d\n\n",
t->lemmingYields);
fprintf(statsFile, " Speculative acquire attempts : %10d\n",
totalSpeculations);
fprintf(statsFile, " Successes : %10d (%5.1f%%)\n",
t->successfulSpeculations,
percent(t->successfulSpeculations, totalSpeculations));
fprintf(statsFile, " Soft failures : %10d (%5.1f%%)\n",
t->softFailedSpeculations,
percent(t->softFailedSpeculations, totalSpeculations));
fprintf(statsFile, " Hard failures : %10d (%5.1f%%)\n",
t->hardFailedSpeculations,
percent(t->hardFailedSpeculations, totalSpeculations));
}
#define KMP_INC_STAT …
#else
#define KMP_INC_STAT(lck, stat) …
#endif
static inline bool __kmp_is_unlocked_queuing_lock(kmp_queuing_lock_t *lck) { … }
static __inline void
__kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) { … }
static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) { … }
KMP_ATTRIBUTE_TARGET_RTM
static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck,
kmp_int32 gtid) { … }
KMP_ATTRIBUTE_TARGET_RTM
static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck,
kmp_int32 gtid) { … }
static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
kmp_int32 gtid) { … }
static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
kmp_int32 gtid) { … }
static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
kmp_int32 gtid) { … }
KMP_ATTRIBUTE_TARGET_RTM
static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
kmp_int32 gtid) { … }
static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
kmp_int32 gtid) { … }
static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) { … }
static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) { … }
static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) { … }
#endif
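/* ------------------------------------------------------------------------ */
/* DRDPA ticket locks ("Dynamically Reconfigurable Distributed Polling Area") */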
static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck) { … }
static inline bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck) { … }
__forceinline static int
__kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { … }
int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) { … }
void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) { … }
static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) { … }
int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
kmp_int32 gtid) { … }
int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
kmp_int32 gtid) { … }
void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) { … }
void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) { … }
static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) { … }
static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) { … }
static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck,
const ident_t *loc) { … }
static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck) { … }
static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck,
kmp_lock_flags_t flags) { … }
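// Exponential backoff for contended spin locks, using the hardware timestamp
// counter (or a nanosecond clock) as the time base.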
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define __kmp_tsc() …
kmp_backoff_t __kmp_spin_backoff_params = …;
#else
extern kmp_uint64 __kmp_now_nsec();
kmp_backoff_t __kmp_spin_backoff_params = {1, 256, 100};
#define __kmp_tsc …
#endif
static inline bool before(kmp_uint64 a, kmp_uint64 b) { … }
void __kmp_spin_backoff(kmp_backoff_t *boff) { … }
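/* ------------------------------------------------------------------------ */
/* Dynamic lock API: direct locks keep their state (and a tag in the low bits)
   in the lock word itself, while indirect locks are allocated separately and
   reached through a lock table. */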
#if KMP_USE_DYNAMIC_LOCK
static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
kmp_dyna_lockseq_t seq) { … }
#if KMP_USE_TSX
#define HLE_ACQUIRE …
#define HLE_RELEASE …
static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) { … }
static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { … }
static void __kmp_destroy_hle_lock_with_checks(kmp_dyna_lock_t *lck) { … }
static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) { … }
static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck,
kmp_int32 gtid) { … }
static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck,
kmp_int32 gtid) { … }
static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck,
kmp_int32 gtid) { … }
static void __kmp_init_rtm_queuing_lock(kmp_queuing_lock_t *lck) { … }
static void __kmp_destroy_rtm_queuing_lock(kmp_queuing_lock_t *lck) { … }
static void
__kmp_destroy_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck) { … }
KMP_ATTRIBUTE_TARGET_RTM
static void __kmp_acquire_rtm_queuing_lock(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
static void __kmp_acquire_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
KMP_ATTRIBUTE_TARGET_RTM
static int __kmp_release_rtm_queuing_lock(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
static int __kmp_release_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
KMP_ATTRIBUTE_TARGET_RTM
static int __kmp_test_rtm_queuing_lock(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
static int __kmp_test_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
kmp_int32 gtid) { … }
// Assumption: the RTM spinning lock reuses the TAS lock layout (the base type
// of this typedef is inferred, not confirmed by the surrounding code).
typedef kmp_tas_lock_t kmp_rtm_spin_lock_t;
static void __kmp_destroy_rtm_spin_lock(kmp_rtm_spin_lock_t *lck) { … }
static void __kmp_destroy_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck) { … }
KMP_ATTRIBUTE_TARGET_RTM
static int __kmp_acquire_rtm_spin_lock(kmp_rtm_spin_lock_t *lck,
kmp_int32 gtid) { … }
static int __kmp_acquire_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck,
kmp_int32 gtid) { … }
KMP_ATTRIBUTE_TARGET_RTM
static int __kmp_release_rtm_spin_lock(kmp_rtm_spin_lock_t *lck,
kmp_int32 gtid) { … }
static int __kmp_release_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck,
kmp_int32 gtid) { … }
KMP_ATTRIBUTE_TARGET_RTM
static int __kmp_test_rtm_spin_lock(kmp_rtm_spin_lock_t *lck, kmp_int32 gtid) { … }
static int __kmp_test_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck,
kmp_int32 gtid) { … }
#endif
static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
kmp_dyna_lockseq_t tag);
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
kmp_int32);
static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
kmp_int32);
static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
kmp_int32);
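// Generate kmp_user_lock_p wrapper functions for each lock kind via the
// expand macros below.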
#define KMP_FOREACH_LOCK_KIND(m, a) …
#define expand1 …
#define expand2 …
#define expand3 …
#define expand4 …
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
KMP_FOREACH_LOCK_KIND(…)
#undef expand1
#undef expand2
#undef expand3
#undef expand4
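// Jump tables of lock operations (init/destroy/set/unset/test) indexed by
// direct lock tag; the *_check variants are used when consistency checking is
// enabled.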
#define expand …
void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = …;
#undef expand
#define expand …
static void (*direct_destroy[])(kmp_dyna_lock_t *) = …;
#undef expand
#define expand …
static void (*direct_destroy_check[])(kmp_dyna_lock_t *) = …;
#undef expand
#define expand …
static int (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = …;
#undef expand
#define expand …
static int (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = …;
#undef expand
#define expand …
static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = …;
static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = …;
#undef expand
#define expand …
static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = …;
static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = …;
#undef expand
void (**__kmp_direct_destroy)(kmp_dyna_lock_t *) = …;
int (**__kmp_direct_set)(kmp_dyna_lock_t *, kmp_int32) = …;
int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32) = …;
int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32) = …;
#define expand …
void (*__kmp_indirect_init[])(kmp_user_lock_p) = …;
#undef expand
#define expand …
static void (*indirect_destroy[])(kmp_user_lock_p) = …;
#undef expand
#define expand …
static void (*indirect_destroy_check[])(kmp_user_lock_p) = …;
#undef expand
#define expand …
static int (*indirect_set[])(kmp_user_lock_p,
kmp_int32) = …;
#undef expand
#define expand …
static int (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = …;
#undef expand
#define expand …
static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = …;
static int (*indirect_test[])(kmp_user_lock_p,
kmp_int32) = …;
#undef expand
#define expand …
static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = …;
static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = …;
#undef expand
void (**__kmp_indirect_destroy)(kmp_user_lock_p) = …;
int (**__kmp_indirect_set)(kmp_user_lock_p, kmp_int32) = …;
int (**__kmp_indirect_unset)(kmp_user_lock_p, kmp_int32) = …;
int (**__kmp_indirect_test)(kmp_user_lock_p, kmp_int32) = …;
kmp_indirect_lock_table_t __kmp_i_lock_table;
static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = …;
void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
const ident_t *) = …;
void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
kmp_lock_flags_t) = …;
const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
kmp_user_lock_p) = …;
kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
kmp_user_lock_p) = …;
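// Per-tag free lists of released indirect locks, reused by
// __kmp_allocate_indirect_lock().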
static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = …;
kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
kmp_int32 gtid,
kmp_indirect_locktag_t tag) { … }
static __forceinline kmp_indirect_lock_t *
__kmp_lookup_indirect_lock(void **user_lock, const char *func) { … }
static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock,
kmp_dyna_lockseq_t seq) { … }
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) { … }
static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) { … }
static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) { … }
static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) { … }
static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
kmp_int32 gtid) { … }
static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
kmp_int32 gtid) { … }
static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
kmp_int32 gtid) { … }
kmp_dyna_lockseq_t __kmp_user_lock_seq = …;
kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) { … }
void __kmp_init_dynamic_user_locks() { … }
void __kmp_cleanup_indirect_user_locks() { … }
enum kmp_lock_kind __kmp_user_lock_kind = …;
int __kmp_num_locks_in_block = …;
#else
static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
__kmp_init_tas_lock(lck);
}
static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
__kmp_init_nested_tas_lock(lck);
}
#if KMP_USE_FUTEX
static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
__kmp_init_futex_lock(lck);
}
static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
__kmp_init_nested_futex_lock(lck);
}
#endif
static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
return lck == lck->lk.self;
}
static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
__kmp_init_ticket_lock(lck);
}
static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
__kmp_init_nested_ticket_lock(lck);
}
static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
return lck == lck->lk.initialized;
}
static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
__kmp_init_queuing_lock(lck);
}
static void
__kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
__kmp_init_nested_queuing_lock(lck);
}
#if KMP_USE_ADAPTIVE_LOCKS
static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
__kmp_init_adaptive_lock(lck);
}
#endif
static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
return lck == lck->lk.initialized;
}
static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
__kmp_init_drdpa_lock(lck);
}
static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
__kmp_init_nested_drdpa_lock(lck);
}
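/* user locks
 * They are implemented as a table of function pointers which are set to the
 * lock functions of the appropriate kind, once that has been determined. */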
enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
size_t __kmp_base_user_lock_size = 0;
size_t __kmp_user_lock_size = 0;
kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck) = NULL;
int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
kmp_int32 gtid) = NULL;
int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
kmp_int32 gtid) = NULL;
int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
kmp_int32 gtid) = NULL;
void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
kmp_int32 gtid) = NULL;
int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
kmp_int32 gtid) = NULL;
int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
kmp_int32 gtid) = NULL;
void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck) = NULL;
const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
const ident_t *loc) = NULL;
kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
kmp_lock_flags_t flags) = NULL;
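// Bind the generic user-lock entry points above to the implementations for the
// selected lock kind.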
void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind) {
switch (user_lock_kind) {
case lk_default:
default:
KMP_ASSERT(0);
case lk_tas: {
__kmp_base_user_lock_size = sizeof(kmp_base_tas_lock_t);
__kmp_user_lock_size = sizeof(kmp_tas_lock_t);
__kmp_get_user_lock_owner_ =
(kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_tas_lock_owner);
if (__kmp_env_consistency_check) {
KMP_BIND_USER_LOCK_WITH_CHECKS(tas);
KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas);
} else {
KMP_BIND_USER_LOCK(tas);
KMP_BIND_NESTED_USER_LOCK(tas);
}
__kmp_destroy_user_lock_ =
(void (*)(kmp_user_lock_p))(&__kmp_destroy_tas_lock);
__kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
__kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
__kmp_set_user_lock_location_ =
(void (*)(kmp_user_lock_p, const ident_t *))NULL;
__kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
__kmp_set_user_lock_flags_ =
(void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
} break;
#if KMP_USE_FUTEX
case lk_futex: {
__kmp_base_user_lock_size = sizeof(kmp_base_futex_lock_t);
__kmp_user_lock_size = sizeof(kmp_futex_lock_t);
__kmp_get_user_lock_owner_ =
(kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_futex_lock_owner);
if (__kmp_env_consistency_check) {
KMP_BIND_USER_LOCK_WITH_CHECKS(futex);
KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex);
} else {
KMP_BIND_USER_LOCK(futex);
KMP_BIND_NESTED_USER_LOCK(futex);
}
__kmp_destroy_user_lock_ =
(void (*)(kmp_user_lock_p))(&__kmp_destroy_futex_lock);
__kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
__kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
__kmp_set_user_lock_location_ =
(void (*)(kmp_user_lock_p, const ident_t *))NULL;
__kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
__kmp_set_user_lock_flags_ =
(void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
} break;
#endif
case lk_ticket: {
__kmp_base_user_lock_size = sizeof(kmp_base_ticket_lock_t);
__kmp_user_lock_size = sizeof(kmp_ticket_lock_t);
__kmp_get_user_lock_owner_ =
(kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_owner);
if (__kmp_env_consistency_check) {
KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
} else {
KMP_BIND_USER_LOCK(ticket);
KMP_BIND_NESTED_USER_LOCK(ticket);
}
__kmp_destroy_user_lock_ =
(void (*)(kmp_user_lock_p))(&__kmp_destroy_ticket_lock);
__kmp_is_user_lock_initialized_ =
(int (*)(kmp_user_lock_p))(&__kmp_is_ticket_lock_initialized);
__kmp_get_user_lock_location_ =
(const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_location);
__kmp_set_user_lock_location_ = (void (*)(
kmp_user_lock_p, const ident_t *))(&__kmp_set_ticket_lock_location);
__kmp_get_user_lock_flags_ =
(kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_flags);
__kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
&__kmp_set_ticket_lock_flags);
} break;
case lk_queuing: {
__kmp_base_user_lock_size = sizeof(kmp_base_queuing_lock_t);
__kmp_user_lock_size = sizeof(kmp_queuing_lock_t);
__kmp_get_user_lock_owner_ =
(kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
if (__kmp_env_consistency_check) {
KMP_BIND_USER_LOCK_WITH_CHECKS(queuing);
KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing);
} else {
KMP_BIND_USER_LOCK(queuing);
KMP_BIND_NESTED_USER_LOCK(queuing);
}
__kmp_destroy_user_lock_ =
(void (*)(kmp_user_lock_p))(&__kmp_destroy_queuing_lock);
__kmp_is_user_lock_initialized_ =
(int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
__kmp_get_user_lock_location_ =
(const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
__kmp_set_user_lock_location_ = (void (*)(
kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
__kmp_get_user_lock_flags_ =
(kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
__kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
&__kmp_set_queuing_lock_flags);
} break;
#if KMP_USE_ADAPTIVE_LOCKS
case lk_adaptive: {
__kmp_base_user_lock_size = sizeof(kmp_base_adaptive_lock_t);
__kmp_user_lock_size = sizeof(kmp_adaptive_lock_t);
__kmp_get_user_lock_owner_ =
(kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
if (__kmp_env_consistency_check) {
KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive);
} else {
KMP_BIND_USER_LOCK(adaptive);
}
__kmp_destroy_user_lock_ =
(void (*)(kmp_user_lock_p))(&__kmp_destroy_adaptive_lock);
__kmp_is_user_lock_initialized_ =
(int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
__kmp_get_user_lock_location_ =
(const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
__kmp_set_user_lock_location_ = (void (*)(
kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
__kmp_get_user_lock_flags_ =
(kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
__kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
&__kmp_set_queuing_lock_flags);
} break;
#endif
case lk_drdpa: {
__kmp_base_user_lock_size = sizeof(kmp_base_drdpa_lock_t);
__kmp_user_lock_size = sizeof(kmp_drdpa_lock_t);
__kmp_get_user_lock_owner_ =
(kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_owner);
if (__kmp_env_consistency_check) {
KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa);
KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa);
} else {
KMP_BIND_USER_LOCK(drdpa);
KMP_BIND_NESTED_USER_LOCK(drdpa);
}
__kmp_destroy_user_lock_ =
(void (*)(kmp_user_lock_p))(&__kmp_destroy_drdpa_lock);
__kmp_is_user_lock_initialized_ =
(int (*)(kmp_user_lock_p))(&__kmp_is_drdpa_lock_initialized);
__kmp_get_user_lock_location_ =
(const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_location);
__kmp_set_user_lock_location_ = (void (*)(
kmp_user_lock_p, const ident_t *))(&__kmp_set_drdpa_lock_location);
__kmp_get_user_lock_flags_ =
(kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_flags);
__kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
&__kmp_set_drdpa_lock_flags);
} break;
}
}
kmp_lock_table_t __kmp_user_lock_table = {1, 0, NULL};
kmp_user_lock_p __kmp_lock_pool = NULL;
kmp_block_of_locks *__kmp_lock_blocks = NULL;
int __kmp_num_locks_in_block = 1;
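// Insert lck into the global user lock table, doubling the table when full;
// entry 0 of each new table points at the previous table so it can be freed
// during cleanup.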
static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck) {
kmp_lock_index_t index;
if (__kmp_user_lock_table.used >= __kmp_user_lock_table.allocated) {
kmp_lock_index_t size;
kmp_user_lock_p *table;
if (__kmp_user_lock_table.allocated == 0) {
size = 1024;
} else {
size = __kmp_user_lock_table.allocated * 2;
}
table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size);
KMP_MEMCPY(table + 1, __kmp_user_lock_table.table + 1,
sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1));
table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table;
__kmp_user_lock_table.table = table;
__kmp_user_lock_table.allocated = size;
}
KMP_DEBUG_ASSERT(__kmp_user_lock_table.used <
__kmp_user_lock_table.allocated);
index = __kmp_user_lock_table.used;
__kmp_user_lock_table.table[index] = lck;
++__kmp_user_lock_table.used;
return index;
}
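// Carve the next lock out of the current block of locks, allocating a fresh
// block of __kmp_num_locks_in_block locks when the current one is exhausted.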
static kmp_user_lock_p __kmp_lock_block_allocate() {
static int last_index = 0;
if ((last_index >= __kmp_num_locks_in_block) || (__kmp_lock_blocks == NULL)) {
last_index = 0;
KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block;
char *buffer =
(char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks));
kmp_block_of_locks *new_block =
(kmp_block_of_locks *)(&buffer[space_for_locks]);
new_block->next_block = __kmp_lock_blocks;
new_block->locks = (void *)buffer;
KMP_MB();
__kmp_lock_blocks = new_block;
}
kmp_user_lock_p ret = (kmp_user_lock_p)(&(
((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size]));
last_index++;
return ret;
}
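// Allocate a user lock (reusing the free pool when possible) and publish it to
// the user: either its table index or its pointer is stored in *user_lock,
// depending on whether an index fits in an omp_lock_t.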
kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid,
kmp_lock_flags_t flags) {
kmp_user_lock_p lck;
kmp_lock_index_t index;
KMP_DEBUG_ASSERT(user_lock);
__kmp_acquire_lock(&__kmp_global_lock, gtid);
if (__kmp_lock_pool == NULL) {
if (__kmp_num_locks_in_block <= 1) {
lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size);
} else {
lck = __kmp_lock_block_allocate();
}
index = __kmp_lock_table_insert(lck);
} else {
lck = __kmp_lock_pool;
index = __kmp_lock_pool->pool.index;
__kmp_lock_pool = __kmp_lock_pool->pool.next;
}
if (OMP_LOCK_T_SIZE < sizeof(void *)) {
*((kmp_lock_index_t *)user_lock) = index;
} else {
*((kmp_user_lock_p *)user_lock) = lck;
}
__kmp_set_user_lock_flags(lck, flags);
__kmp_release_lock(&__kmp_global_lock, gtid);
return lck;
}
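// Return a user lock to the free pool; the memory itself is not released until
// __kmp_cleanup_user_locks().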
void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
kmp_user_lock_p lck) {
KMP_DEBUG_ASSERT(user_lock != NULL);
KMP_DEBUG_ASSERT(lck != NULL);
__kmp_acquire_lock(&__kmp_global_lock, gtid);
lck->pool.next = __kmp_lock_pool;
__kmp_lock_pool = lck;
if (OMP_LOCK_T_SIZE < sizeof(void *)) {
kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
KMP_DEBUG_ASSERT(0 < index && index <= __kmp_user_lock_table.used);
lck->pool.index = index;
}
__kmp_release_lock(&__kmp_global_lock, gtid);
}
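// Translate the user's lock handle (table index or pointer) back into a
// kmp_user_lock_p, with consistency checks when enabled.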
kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func) {
kmp_user_lock_p lck = NULL;
if (__kmp_env_consistency_check) {
if (user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, func);
}
}
if (OMP_LOCK_T_SIZE < sizeof(void *)) {
kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
if (__kmp_env_consistency_check) {
if (!(0 < index && index < __kmp_user_lock_table.used)) {
KMP_FATAL(LockIsUninitialized, func);
}
}
KMP_DEBUG_ASSERT(0 < index && index < __kmp_user_lock_table.used);
KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
lck = __kmp_user_lock_table.table[index];
} else {
lck = *((kmp_user_lock_p *)user_lock);
}
if (__kmp_env_consistency_check) {
if (lck == NULL) {
KMP_FATAL(LockIsUninitialized, func);
}
}
return lck;
}
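// At library shutdown, destroy all remaining user locks and free the lock
// table chain and any blocks of locks.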
void __kmp_cleanup_user_locks(void) {
__kmp_lock_pool = NULL;
#define IS_CRITICAL …
while (__kmp_user_lock_table.used > 1) {
const ident *loc;
kmp_user_lock_p lck =
__kmp_user_lock_table.table[--__kmp_user_lock_table.used];
if ((__kmp_is_user_lock_initialized_ != NULL) &&
(*__kmp_is_user_lock_initialized_)(lck)) {
if (__kmp_env_consistency_check && (!IS_CRITICAL(lck)) &&
((loc = __kmp_get_user_lock_location(lck)) != NULL) &&
(loc->psource != NULL)) {
kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, false);
KMP_WARNING(CnsLockNotDestroyed, str_loc.file, str_loc.line);
__kmp_str_loc_free(&str_loc);
}
#ifdef KMP_DEBUG
if (IS_CRITICAL(lck)) {
KA_TRACE(
20,
("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n",
lck, *(void **)lck));
} else {
KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck,
*(void **)lck));
}
#endif
__kmp_destroy_user_lock(lck);
}
if (__kmp_lock_blocks == NULL) {
__kmp_free(lck);
}
}
#undef IS_CRITICAL
kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table;
__kmp_user_lock_table.table = NULL;
__kmp_user_lock_table.allocated = 0;
while (table_ptr != NULL) {
kmp_user_lock_p *next = (kmp_user_lock_p *)(table_ptr[0]);
__kmp_free(table_ptr);
table_ptr = next;
}
kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks;
__kmp_lock_blocks = NULL;
while (block_ptr != NULL) {
kmp_block_of_locks_t *next = block_ptr->next_block;
__kmp_free(block_ptr->locks);
block_ptr = next;
}
TCW_4(__kmp_init_user_locks, FALSE);
}
#endif