#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_atomic.h"
#include "kmp_environment.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_itt.h"
#include "kmp_settings.h"
#include "kmp_stats.h"
#include "kmp_str.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"
#include "kmp_dispatch.h"
#include "kmp_utils.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif
#if OMPD_SUPPORT
#include "ompd-specific.h"
#endif
#if OMP_PROFILING_SUPPORT
#include "llvm/Support/TimeProfiler.h"
static char *ProfileTraceFile = nullptr;
#endif
#define KMP_USE_PRCTL …
#if KMP_OS_WINDOWS
#include <process.h>
#endif
#ifndef KMP_USE_SHM
#else
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#define SHM_SIZE …
#endif
#if defined(KMP_GOMP_COMPAT)
char const __kmp_version_alt_comp[] = …;
#endif
char const __kmp_version_omp_api[] = …;
#ifdef KMP_DEBUG
char const __kmp_version_lock[] =
KMP_VERSION_PREFIX "lock type: run time selectable";
#endif
#define KMP_MIN(x, y) …
#if KMP_USE_MONITOR
kmp_info_t __kmp_monitor;
#endif
void __kmp_cleanup(void);
static void __kmp_initialize_info(kmp_info_t *, kmp_team_t *, int tid,
int gtid);
static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
kmp_internal_control_t *new_icvs,
ident_t *loc);
#if KMP_AFFINITY_SUPPORTED
static void __kmp_partition_places(kmp_team_t *team,
int update_master_only = 0);
#endif
static void __kmp_do_serial_initialize(void);
void __kmp_fork_barrier(int gtid, int tid);
void __kmp_join_barrier(int gtid);
void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
kmp_internal_control_t *new_icvs, ident_t *loc);
#ifdef USE_LOAD_BALANCE
static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc);
#endif
static int __kmp_expand_threads(int nNeed);
#if KMP_OS_WINDOWS
static int __kmp_unregister_root_other_thread(int gtid);
#endif
static void __kmp_reap_thread(kmp_info_t *thread, int is_root);
kmp_info_t *__kmp_thread_pool_insert_pt = …;
void __kmp_resize_dist_barrier(kmp_team_t *team, int old_nthreads,
int new_nthreads);
void __kmp_add_threads_to_team(kmp_team_t *team, int new_nthreads);
static kmp_nested_nthreads_t *__kmp_override_nested_nth(kmp_info_t *thr,
int level) { … }
int __kmp_get_global_thread_id() { … }
int __kmp_get_global_thread_id_reg() { … }
void __kmp_check_stack_overlap(kmp_info_t *th) { … }
void __kmp_infinite_loop(void) { … }
#define MAX_MESSAGE …
void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size,
char const *format, ...) { … }
void __kmp_warn(char const *format, ...) { … }
void __kmp_abort_process() { … }
void __kmp_abort_thread(void) { … }
static void __kmp_print_thread_storage_map(kmp_info_t *thr, int gtid) { … }
static void __kmp_print_team_storage_map(const char *header, kmp_team_t *team,
int team_id, int num_thr) { … }
static void __kmp_init_allocator() { … }
static void __kmp_fini_allocator() { … }
#if ENABLE_LIBOMPTARGET
static void __kmp_init_omptarget() { … }
#endif
#if KMP_DYNAMIC_LIB
#if KMP_OS_WINDOWS
// DLL entry point for the Windows dynamic-library build of the runtime.
// Attach notifications only emit trace output; the matching detach
// notifications tear down the whole library or just the exiting thread.
BOOL WINAPI DllMain(HINSTANCE hInstDLL, DWORD fdwReason, LPVOID lpReserved) {
  if (fdwReason == DLL_PROCESS_ATTACH) {
    KA_TRACE(10, ("DllMain: PROCESS_ATTACH\n"));
  } else if (fdwReason == DLL_PROCESS_DETACH) {
    KA_TRACE(10, ("DllMain: PROCESS_DETACH T#%d\n", __kmp_gtid_get_specific()));
    // Per the DllMain contract, lpReserved is NULL when the DLL is being
    // unloaded dynamically (FreeLibrary) and non-NULL at process
    // termination; only the dynamic-unload case runs a full shutdown here.
    if (lpReserved == NULL)
      __kmp_internal_end_library(__kmp_gtid_get_specific());
  } else if (fdwReason == DLL_THREAD_ATTACH) {
    KA_TRACE(10, ("DllMain: THREAD_ATTACH\n"));
  } else if (fdwReason == DLL_THREAD_DETACH) {
    KA_TRACE(10, ("DllMain: THREAD_DETACH T#%d\n", __kmp_gtid_get_specific()));
    __kmp_internal_end_thread(__kmp_gtid_get_specific());
  }
  return TRUE;
}
#endif
#endif
void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) { … }
void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) { … }
int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws) { … }
void __kmp_exit_single(int gtid) { … }
static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
int master_tid, int set_nthreads,
int enter_teams) { … }
static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
kmp_info_t *master_th, int master_gtid,
int fork_teams_workers) { … }
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
inline static void propagateFPControl(kmp_team_t *team) { … }
inline static void updateHWFPControl(kmp_team_t *team) { … }
#else
#define propagateFPControl …
#define updateHWFPControl …
#endif
static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team,
int realloc);
void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { … }
static inline bool __kmp_is_fork_in_teams(kmp_info_t *master_th,
microtask_t microtask, int level,
int teams_level, kmp_va_list ap) { … }
static inline bool __kmp_is_entering_teams(int active_level, int level,
int teams_level, kmp_va_list ap) { … }
static inline int
__kmp_fork_in_teams(ident_t *loc, int gtid, kmp_team_t *parent_team,
kmp_int32 argc, kmp_info_t *master_th, kmp_root_t *root,
enum fork_context_e call_context, microtask_t microtask,
launch_t invoker, int master_set_numthreads, int level,
#if OMPT_SUPPORT
ompt_data_t ompt_parallel_data, void *return_address,
#endif
kmp_va_list ap) { … }
static inline int
__kmp_serial_fork_call(ident_t *loc, int gtid, enum fork_context_e call_context,
kmp_int32 argc, microtask_t microtask, launch_t invoker,
kmp_info_t *master_th, kmp_team_t *parent_team,
#if OMPT_SUPPORT
ompt_data_t *ompt_parallel_data, void **return_address,
ompt_data_t **parent_task_data,
#endif
kmp_va_list ap) { … }
int __kmp_fork_call(ident_t *loc, int gtid,
enum fork_context_e call_context,
kmp_int32 argc, microtask_t microtask, launch_t invoker,
kmp_va_list ap) { … }
#if OMPT_SUPPORT
static inline void __kmp_join_restore_state(kmp_info_t *thread,
kmp_team_t *team) { … }
static inline void __kmp_join_ompt(int gtid, kmp_info_t *thread,
kmp_team_t *team, ompt_data_t *parallel_data,
int flags, void *codeptr) { … }
#endif
void __kmp_join_call(ident_t *loc, int gtid
#if OMPT_SUPPORT
,
enum fork_context_e fork_context
#endif
,
int exit_teams) { … }
void __kmp_save_internal_controls(kmp_info_t *thread) { … }
void __kmp_set_num_threads(int new_nth, int gtid) { … }
void __kmp_set_max_active_levels(int gtid, int max_active_levels) { … }
int __kmp_get_max_active_levels(int gtid) { … }
void __kmp_set_num_teams(int num_teams) { … }
int __kmp_get_max_teams(void) { … }
void __kmp_set_teams_thread_limit(int limit) { … }
int __kmp_get_teams_thread_limit(void) { … }
KMP_BUILD_ASSERT(…);
KMP_BUILD_ASSERT(…);
void __kmp_set_schedule(int gtid, kmp_sched_t kind, int chunk) { … }
void __kmp_get_schedule(int gtid, kmp_sched_t *kind, int *chunk) { … }
int __kmp_get_ancestor_thread_num(int gtid, int level) { … }
int __kmp_get_team_size(int gtid, int level) { … }
kmp_r_sched_t __kmp_get_schedule_global() { … }
static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team, int realloc) { … }
static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) { … }
static void __kmp_free_team_arrays(kmp_team_t *team) { … }
static void __kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) { … }
static kmp_internal_control_t __kmp_get_global_icvs(void) { … }
static kmp_internal_control_t __kmp_get_x_global_icvs(const kmp_team_t *team) { … }
static void __kmp_initialize_root(kmp_root_t *root) { … }
#ifdef KMP_DEBUG
// Debug-only helpers: a singly-linked list of unique teams, kept sorted by
// team id and terminated by a sentinel item whose entry is NULL. Used by
// __kmp_print_structure() to accumulate every team reachable from the
// thread/root tables before printing them.
typedef struct kmp_team_list_item {
kmp_team_p const *entry;
struct kmp_team_list_item *next;
} kmp_team_list_item_t;
typedef kmp_team_list_item_t *kmp_team_list_t;
// Recursively add `team` (plus its parent and pool-successor teams) to
// `list`, skipping duplicates and keeping the list sorted by t_id. The
// trailing NULL-entry sentinel means insertion never has to rewrite the
// caller's head pointer.
static void __kmp_print_structure_team_accum(
kmp_team_list_t list,
kmp_team_p const *team
) {
kmp_team_list_t l;
KMP_DEBUG_ASSERT(list != NULL);
if (team == NULL) {
return;
}
// First record the teams this one links to.
__kmp_print_structure_team_accum(list, team->t.t_parent);
__kmp_print_structure_team_accum(list, team->t.t_next_pool);
// Search for the team in the list; if found before the sentinel, it is a
// duplicate — nothing to do.
l = list;
while (l->next != NULL && l->entry != team) {
l = l->next;
}
if (l->next != NULL) {
return;
}
// Find the sorted insertion point: first item with a larger t_id, or the
// trailing sentinel.
l = list;
while (l->next != NULL && l->entry->t.t_id <= team->t.t_id) {
l = l->next;
}
// Insert without a back-pointer: copy the current node into a fresh item,
// then overwrite the current node in place with the new team.
{
kmp_team_list_item_t *item = (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(
sizeof(kmp_team_list_item_t));
*item = *l;
l->entry = team;
l->next = item;
}
}
// Debug helper: print `title` followed by the team's id and address, or a
// "(nil)" placeholder when no team is given.
static void __kmp_print_structure_team(char const *title, kmp_team_p const *team
) {
  __kmp_printf("%s", title);
  if (team == NULL) {
    __kmp_printf(" - (nil)\n");
    return;
  }
  __kmp_printf("%2x %p\n", team->t.t_id, team);
}
// Debug helper: print `title` followed by the thread's gtid and address, or
// a "(nil)" placeholder when no thread is given.
static void __kmp_print_structure_thread(char const *title,
                                         kmp_info_p const *thread) {
  __kmp_printf("%s", title);
  if (thread == NULL) {
    __kmp_printf(" - (nil)\n");
    return;
  }
  __kmp_printf("%2d %p\n", thread->th.th_info.ds.ds_gtid, thread);
}
void __kmp_print_structure(void) {
kmp_team_list_t list;
list =
(kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(sizeof(kmp_team_list_item_t));
list->entry = NULL;
list->next = NULL;
__kmp_printf("\n------------------------------\nGlobal Thread "
"Table\n------------------------------\n");
{
int gtid;
for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
__kmp_printf("%2d", gtid);
if (__kmp_threads != NULL) {
__kmp_printf(" %p", __kmp_threads[gtid]);
}
if (__kmp_root != NULL) {
__kmp_printf(" %p", __kmp_root[gtid]);
}
__kmp_printf("\n");
}
}
__kmp_printf("\n------------------------------\nThreads\n--------------------"
"----------\n");
if (__kmp_threads != NULL) {
int gtid;
for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
kmp_info_t const *thread = __kmp_threads[gtid];
if (thread != NULL) {
__kmp_printf("GTID %2d %p:\n", gtid, thread);
__kmp_printf(" Our Root: %p\n", thread->th.th_root);
__kmp_print_structure_team(" Our Team: ", thread->th.th_team);
__kmp_print_structure_team(" Serial Team: ",
thread->th.th_serial_team);
__kmp_printf(" Threads: %2d\n", thread->th.th_team_nproc);
__kmp_print_structure_thread(" Primary: ",
thread->th.th_team_master);
__kmp_printf(" Serialized?: %2d\n", thread->th.th_team_serialized);
__kmp_printf(" Set NProc: %2d\n", thread->th.th_set_nproc);
__kmp_printf(" Set Proc Bind: %2d\n", thread->th.th_set_proc_bind);
__kmp_print_structure_thread(" Next in pool: ",
thread->th.th_next_pool);
__kmp_printf("\n");
__kmp_print_structure_team_accum(list, thread->th.th_team);
__kmp_print_structure_team_accum(list, thread->th.th_serial_team);
}
}
} else {
__kmp_printf("Threads array is not allocated.\n");
}
__kmp_printf("\n------------------------------\nUbers\n----------------------"
"--------\n");
if (__kmp_root != NULL) {
int gtid;
for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
kmp_root_t const *root = __kmp_root[gtid];
if (root != NULL) {
__kmp_printf("GTID %2d %p:\n", gtid, root);
__kmp_print_structure_team(" Root Team: ", root->r.r_root_team);
__kmp_print_structure_team(" Hot Team: ", root->r.r_hot_team);
__kmp_print_structure_thread(" Uber Thread: ",
root->r.r_uber_thread);
__kmp_printf(" Active?: %2d\n", root->r.r_active);
__kmp_printf(" In Parallel: %2d\n",
KMP_ATOMIC_LD_RLX(&root->r.r_in_parallel));
__kmp_printf("\n");
__kmp_print_structure_team_accum(list, root->r.r_root_team);
__kmp_print_structure_team_accum(list, root->r.r_hot_team);
}
}
} else {
__kmp_printf("Ubers array is not allocated.\n");
}
__kmp_printf("\n------------------------------\nTeams\n----------------------"
"--------\n");
while (list->next != NULL) {
kmp_team_p const *team = list->entry;
int i;
__kmp_printf("Team %2x %p:\n", team->t.t_id, team);
__kmp_print_structure_team(" Parent Team: ", team->t.t_parent);
__kmp_printf(" Primary TID: %2d\n", team->t.t_master_tid);
__kmp_printf(" Max threads: %2d\n", team->t.t_max_nproc);
__kmp_printf(" Levels of serial: %2d\n", team->t.t_serialized);
__kmp_printf(" Number threads: %2d\n", team->t.t_nproc);
for (i = 0; i < team->t.t_nproc; ++i) {
__kmp_printf(" Thread %2d: ", i);
__kmp_print_structure_thread("", team->t.t_threads[i]);
}
__kmp_print_structure_team(" Next in pool: ", team->t.t_next_pool);
__kmp_printf("\n");
list = list->next;
}
__kmp_printf("\n------------------------------\nPools\n----------------------"
"--------\n");
__kmp_print_structure_thread("Thread pool: ",
CCAST(kmp_info_t *, __kmp_thread_pool));
__kmp_print_structure_team("Team pool: ",
CCAST(kmp_team_t *, __kmp_team_pool));
__kmp_printf("\n");
while (list != NULL) {
kmp_team_list_item_t *item = list;
list = list->next;
KMP_INTERNAL_FREE(item);
}
}
#endif
static const unsigned __kmp_primes[] = …;
unsigned short __kmp_get_random(kmp_info_t *thread) { … }
void __kmp_init_random(kmp_info_t *thread) { … }
#if KMP_OS_WINDOWS
// Scan the thread table for uber (root) threads whose underlying OS thread
// has exited without unregistering, and reclaim their slots. Returns the
// number of roots reclaimed. Windows-only (relies on being able to probe
// whether another thread is still running).
static int __kmp_reclaim_dead_roots(void) {
  int reclaimed = 0;
  for (int i = 0; i < __kmp_threads_capacity; ++i) {
    if (!KMP_UBER_GTID(i))
      continue;
    if (__kmp_still_running((kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[i])))
      continue;
    if (__kmp_root[i]->r.r_active)
      continue;
    reclaimed += __kmp_unregister_root_other_thread(i);
  }
  return reclaimed;
}
#endif
static int __kmp_expand_threads(int nNeed) { … }
int __kmp_register_root(int initial_thread) { … }
#if KMP_NESTED_HOT_TEAMS
static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr, int level,
const int max_level) { … }
#endif
static int __kmp_reset_root(int gtid, kmp_root_t *root) { … }
void __kmp_unregister_root_current_thread(int gtid) { … }
#if KMP_OS_WINDOWS
// Unregister the root at `gtid` on behalf of a different thread (the owner
// is gone). The caller guarantees the root exists and is inactive. Returns
// whatever __kmp_reset_root() reports. Windows-only.
static int __kmp_unregister_root_other_thread(int gtid) {
  kmp_root_t *root = __kmp_root[gtid];
  KA_TRACE(1, ("__kmp_unregister_root_other_thread: enter T#%d\n", gtid));
  KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
  KMP_ASSERT(KMP_UBER_GTID(gtid));
  KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
  KMP_ASSERT(root->r.r_active == FALSE);
  int rc = __kmp_reset_root(gtid, root);
  KC_TRACE(10,
           ("__kmp_unregister_root_other_thread: T#%d unregistered\n", gtid));
  return rc;
}
#endif
#if KMP_DEBUG
void __kmp_task_info() {
kmp_int32 gtid = __kmp_entry_gtid();
kmp_int32 tid = __kmp_tid_from_gtid(gtid);
kmp_info_t *this_thr = __kmp_threads[gtid];
kmp_team_t *steam = this_thr->th.th_serial_team;
kmp_team_t *team = this_thr->th.th_team;
__kmp_printf(
"__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p steam=%p curtask=%p "
"ptask=%p\n",
gtid, tid, this_thr, team, steam, this_thr->th.th_current_task,
team->t.t_implicit_task_taskdata[tid].td_parent);
}
#endif
static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
int tid, int gtid) { … }
kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
int new_tid) { … }
static void __kmp_reinitialize_team(kmp_team_t *team,
kmp_internal_control_t *new_icvs,
ident_t *loc) { … }
static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
kmp_internal_control_t *new_icvs,
ident_t *loc) { … }
#if KMP_AFFINITY_SUPPORTED
static inline void __kmp_set_thread_place(kmp_team_t *team, kmp_info_t *th,
int first, int last, int newp) { … }
static void __kmp_partition_places(kmp_team_t *team, int update_master_only) { … }
#endif
kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#if OMPT_SUPPORT
ompt_data_t ompt_parallel_data,
#endif
kmp_proc_bind_t new_proc_bind,
kmp_internal_control_t *new_icvs,
int argc USE_NESTED_HOT_ARG(kmp_info_t *master)) { … }
void __kmp_free_team(kmp_root_t *root,
kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) { … }
kmp_team_t *__kmp_reap_team(kmp_team_t *team) { … }
void __kmp_free_thread(kmp_info_t *this_th) { … }
void *__kmp_launch_thread(kmp_info_t *this_thr) { … }
void __kmp_internal_end_dest(void *specific_gtid) { … }
#if KMP_OS_UNIX && KMP_DYNAMIC_LIB
__attribute__((destructor)) void __kmp_internal_end_dtor(void) { … }
#endif
void __kmp_internal_end_atexit(void) { … }
static void __kmp_reap_thread(kmp_info_t *thread, int is_root) { … }
static void __kmp_itthash_clean(kmp_info_t *th) { … }
static void __kmp_internal_end(void) { … }
void __kmp_internal_end_library(int gtid_req) { … }
void __kmp_internal_end_thread(int gtid_req) { … }
static long __kmp_registration_flag = …;
static char *__kmp_registration_str = …;
static inline char *__kmp_reg_status_name() { … }
#if defined(KMP_USE_SHM)
bool __kmp_shm_available = …;
bool __kmp_tmp_available = …;
char *temp_reg_status_file_name = …;
#endif
void __kmp_register_library_startup(void) { … }
void __kmp_unregister_library(void) { … }
#if KMP_MIC_SUPPORTED
static void __kmp_check_mic_type() { … }
#endif
#if KMP_HAVE_UMWAIT
static void __kmp_user_level_mwait_init() { … }
#elif KMP_HAVE_MWAIT
#ifndef AT_INTELPHIUSERMWAIT
#define AT_INTELPHIUSERMWAIT …
#endif
// Weak fallback for getauxval() so the runtime still links on C libraries
// that do not provide it; always reports "no aux vector value" (0).
unsigned long getauxval(unsigned long) KMP_WEAK_ATTRIBUTE_EXTERNAL;
unsigned long getauxval(unsigned long) { return 0; }
// Decide whether user-level MONITOR/MWAIT may be used. Only acts on mic3
// hardware: there, bit 0 of the AT_INTELPHIUSERMWAIT auxval advertises
// kernel support, and a set __kmp_user_level_mwait flag (presumably from
// the environment — confirm against kmp_settings) force-enables it with an
// informational message. On other CPU types the setting is left untouched.
static void __kmp_user_level_mwait_init() {
  if (__kmp_mic_type == mic3) {
    unsigned long aux = getauxval(AT_INTELPHIUSERMWAIT);
    bool hw_supported = (aux & 0x1) != 0;
    if (!hw_supported && !__kmp_user_level_mwait) {
      __kmp_mwait_enabled = FALSE;
    } else {
      __kmp_mwait_enabled = TRUE;
      if (__kmp_user_level_mwait) {
        // The user forced it on; note that this overrides the HW probe.
        KMP_INFORM(EnvMwaitWarn);
      }
    }
  }
  KF_TRACE(30, ("__kmp_user_level_mwait_init: __kmp_mic_type = %d, "
                "__kmp_mwait_enabled = %d\n",
                __kmp_mic_type, __kmp_mwait_enabled));
}
#endif
static void __kmp_do_serial_initialize(void) { … }
void __kmp_serial_initialize(void) { … }
static void __kmp_do_middle_initialize(void) { … }
void __kmp_middle_initialize(void) { … }
void __kmp_parallel_initialize(void) { … }
void __kmp_hidden_helper_initialize() { … }
void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
kmp_team_t *team) { … }
void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
kmp_team_t *team) { … }
int __kmp_invoke_task_func(int gtid) { … }
void __kmp_teams_master(int gtid) { … }
int __kmp_invoke_teams_master(int gtid) { … }
void __kmp_push_num_threads(ident_t *id, int gtid, int num_threads) { … }
void __kmp_push_num_threads_list(ident_t *id, int gtid, kmp_uint32 list_length,
int *num_threads_list) { … }
void __kmp_set_strict_num_threads(ident_t *loc, int gtid, int sev,
const char *msg) { … }
static void __kmp_push_thread_limit(kmp_info_t *thr, int num_teams,
int num_threads) { … }
void __kmp_push_num_teams(ident_t *id, int gtid, int num_teams,
int num_threads) { … }
void __kmp_push_num_teams_51(ident_t *id, int gtid, int num_teams_lb,
int num_teams_ub, int num_threads) { … }
void __kmp_push_proc_bind(ident_t *id, int gtid, kmp_proc_bind_t proc_bind) { … }
void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) { … }
void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) { … }
#ifdef USE_LOAD_BALANCE
static int __kmp_active_hot_team_nproc(kmp_root_t *root) { … }
static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc) { … }
#endif
void __kmp_cleanup(void) { … }
int __kmp_ignore_mppbeg(void) { … }
int __kmp_ignore_mppend(void) { … }
void __kmp_internal_begin(void) { … }
void __kmp_user_set_library(enum library_type arg) { … }
void __kmp_aux_set_stacksize(size_t arg) { … }
void __kmp_aux_set_library(enum library_type arg) { … }
static kmp_team_t *__kmp_aux_get_team_info(int &teams_serialized) { … }
int __kmp_aux_get_team_num() { … }
int __kmp_aux_get_num_teams() { … }
kmp_affinity_format_field_t;
static const kmp_affinity_format_field_t __kmp_affinity_format_table[] = …;
static int __kmp_aux_capture_affinity_field(int gtid, const kmp_info_t *th,
const char **ptr,
kmp_str_buf_t *field_buffer) { … }
size_t __kmp_aux_capture_affinity(int gtid, const char *format,
kmp_str_buf_t *buffer) { … }
void __kmp_aux_display_affinity(int gtid, const char *format) { … }
void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid) { … }
void __kmp_aux_set_defaults(char const *str, size_t len) { … }
PACKED_REDUCTION_METHOD_T
__kmp_determine_reduction_method(
ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck) { … }
kmp_int32 __kmp_get_reduce_method(void) { … }
void __kmp_soft_pause() { … }
void __kmp_hard_pause() { … }
void __kmp_resume_if_soft_paused() { … }
int __kmp_pause_resource(kmp_pause_status_t level) { … }
void __kmp_omp_display_env(int verbose) { … }
void __kmp_resize_dist_barrier(kmp_team_t *team, int old_nthreads,
int new_nthreads) { … }
void __kmp_add_threads_to_team(kmp_team_t *team, int new_nthreads) { … }
kmp_info_t **__kmp_hidden_helper_threads;
kmp_info_t *__kmp_hidden_helper_main_thread;
std::atomic<kmp_int32> __kmp_unexecuted_hidden_helper_tasks;
#if KMP_OS_LINUX
kmp_int32 __kmp_hidden_helper_threads_num = …;
kmp_int32 __kmp_enable_hidden_helper = …;
#else
kmp_int32 __kmp_hidden_helper_threads_num = 0;
kmp_int32 __kmp_enable_hidden_helper = FALSE;
#endif
namespace {
std::atomic<kmp_int32> __kmp_hit_hidden_helper_threads_num;
void __kmp_hidden_helper_wrapper_fn(int *gtid, int *, ...) { … }
}
void __kmp_hidden_helper_threads_initz_routine() { … }
void __kmp_init_nesting_mode() { … }
void __kmp_set_nesting_mode_threads() { … }
extern "C" {
#if !KMP_STATS_ENABLED
void __kmp_reset_stats() { … }
#endif
#if !USE_DEBUGGER
int __kmp_omp_debug_struct_info = …;
int __kmp_debugging = …;
#endif
#if !USE_ITT_BUILD || !USE_ITT_NOTIFY
// No-op ITT notify init/fini stubs for builds without ittnotify support.
void __kmp_itt_fini_ittlib() {}
void __kmp_itt_init_ittlib() {}
#endif
}