#include "kmp.h"
#include "kmp_affinity.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
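// Thread-specific-data key used to look up a thread's global thread id (gtid),
// plus cached CPU information on x86/x86_64.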
kmp_key_t __kmp_gtid_threadprivate_key;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
kmp_cpuinfo_t __kmp_cpuinfo = …;
#endif
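// Statistics collection (KMP_STATS_ENABLED builds only): the lock guarding the
// per-thread stats list, the list head, a thread-local pointer to the current
// thread's node, and the collection start timestamp.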
#if KMP_STATS_ENABLED
#include "kmp_stats.h"
kmp_tas_lock_t __kmp_stats_lock;
kmp_stats_list *__kmp_stats_list;
KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr = NULL;
tsc_tick_count __kmp_stats_start_time;
#endif
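// Runtime initialization state flags, set as the library completes its serial,
// middle, and parallel initialization phases, hidden-helper thread startup,
// and user-lock initialization.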
volatile int __kmp_init_serial = …;
volatile int __kmp_init_gtid = …;
volatile int __kmp_init_common = …;
volatile int __kmp_need_register_serial = …;
volatile int __kmp_init_middle = …;
volatile int __kmp_init_parallel = …;
volatile int __kmp_init_hidden_helper = …;
volatile int __kmp_init_hidden_helper_threads = …;
volatile int __kmp_hidden_helper_team_done = …;
#if KMP_USE_MONITOR
volatile int __kmp_init_monitor = 0;
#endif
volatile int __kmp_init_user_locks = …;
kmp_cached_addr_t *__kmp_threadpriv_cache_list = …;
int __kmp_init_counter = …;
int __kmp_root_counter = …;
int __kmp_version = …;
std::atomic<kmp_int32> __kmp_team_counter = …;
std::atomic<kmp_int32> __kmp_task_counter = …;
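// Stack and heap sizing: worker stack size, per-thread stack offset and
// padding, and the increment used when growing the internal malloc pool.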
size_t __kmp_stksize = …;
#if KMP_USE_MONITOR
size_t __kmp_monitor_stksize = 0;
#endif
size_t __kmp_stkoffset = …;
int __kmp_stkpadding = …;
size_t __kmp_malloc_pool_incr = …;
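// Barrier configuration: default gather/release branching factors ("branch
// bits") and patterns, per-barrier-type overrides, and the environment
// variable and display names used when parsing and reporting them.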
kmp_uint32 __kmp_barrier_gather_bb_dflt = …;
kmp_uint32 __kmp_barrier_release_bb_dflt = …;
kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = …;
kmp_bar_pat_e __kmp_barrier_release_pat_dflt = …;
kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier] = …;
kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier] = …;
kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier] = …;
kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier] = …;
char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier] = …;
char const *__kmp_barrier_pattern_env_name[bs_last_barrier] = …;
char const *__kmp_barrier_type_name[bs_last_barrier] = …;
char const *__kmp_barrier_pattern_name[bp_last_bar] = …;
int __kmp_allThreadsSpecified = …;
size_t __kmp_align_alloc = …;
int __kmp_generate_warnings = …;
int __kmp_reserve_warn = …;
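// Processor counts and thread limits: detected and usable processors, system
// and user thread caps, contention-group and teams limits, default team sizes,
// threadprivate cache capacity, dispatch buffer count, and max active levels.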
int __kmp_xproc = …;
int __kmp_avail_proc = …;
size_t __kmp_sys_min_stksize = …;
int __kmp_sys_max_nth = …;
int __kmp_max_nth = …;
int __kmp_cg_max_nth = …;
int __kmp_task_max_nth = …;
int __kmp_teams_max_nth = …;
int __kmp_threads_capacity = …;
int __kmp_dflt_team_nth = …;
int __kmp_dflt_team_nth_ub = …;
int __kmp_tp_capacity = …;
int __kmp_tp_cached = …;
int __kmp_dispatch_num_buffers = …;
int __kmp_dflt_max_active_levels = …;
bool __kmp_dflt_max_active_levels_set = …;
#if KMP_NESTED_HOT_TEAMS
int __kmp_hot_teams_mode = …;
int __kmp_hot_teams_max_level = …;
#endif
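// Library mode (serial / turnaround / throughput) and the schedule kinds the
// runtime substitutes for the static, guided, and auto schedules.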
enum library_type __kmp_library = …;
enum sched_type __kmp_sched = …;
enum sched_type __kmp_static = …;
enum sched_type __kmp_guided = …;
enum sched_type __kmp_auto = …;
#if KMP_USE_HIER_SCHED
int __kmp_dispatch_hand_threading = 0;
int __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LAST + 1];
int __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LAST + 1];
kmp_hier_sched_env_t __kmp_hier_scheds = {0, 0, NULL, NULL, NULL};
#endif
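// Blocktime (how long threads spin before sleeping) and wait-policy settings;
// with the monitor thread enabled, its wakeup rate and the derived number of
// blocktime intervals.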
int __kmp_dflt_blocktime = …;
char __kmp_blocktime_units = …;
bool __kmp_wpolicy_passive = …;
#if KMP_USE_MONITOR
int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS;
int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(KMP_DEFAULT_BLOCKTIME,
KMP_MIN_MONITOR_WAKEUPS);
#endif
#ifdef KMP_ADJUST_BLOCKTIME
int __kmp_zero_bt = …;
#endif
#ifdef KMP_DFLT_NTH_CORES
int __kmp_ncores = 0;
#endif
int __kmp_chunk = …;
int __kmp_force_monotonic = …;
int __kmp_abort_delay = …;
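// How a thread obtains its global thread id (gtid): TLS, thread-specific-data
// key, or stack-address lookup, selected per platform; followed by x87/MXCSR
// floating-point control propagation settings on x86.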
#if (KMP_OS_LINUX || KMP_OS_AIX) && defined(KMP_TDATA_GTID)
int __kmp_gtid_mode = …;
int __kmp_adjust_gtid_mode = …;
#elif KMP_OS_WINDOWS
int __kmp_gtid_mode = 2;
int __kmp_adjust_gtid_mode = FALSE;
#else
int __kmp_gtid_mode = 0;
int __kmp_adjust_gtid_mode = TRUE;
#endif
#ifdef KMP_TDATA_GTID
KMP_THREAD_LOCAL int __kmp_gtid = …;
#endif
int __kmp_tls_gtid_min = …;
int __kmp_foreign_tp = …;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
int __kmp_inherit_fp_control = …;
kmp_int16 __kmp_init_x87_fpu_control_word = …;
kmp_uint32 __kmp_init_mxcsr = …;
#endif
#ifdef USE_LOAD_BALANCE
double __kmp_load_balance_interval = …;
#endif
kmp_nested_nthreads_t __kmp_nested_nth = …;
#if KMP_USE_ADAPTIVE_LOCKS
kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params = …;
#if KMP_DEBUG_ADAPTIVE_LOCKS
const char *__kmp_speculative_statsfile = "-";
#endif
#endif
int __kmp_display_env = …;
int __kmp_display_env_verbose = …;
int __kmp_omp_cancellation = …;
int __kmp_nteams = …;
int __kmp_teams_thread_limit = …;
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
int __kmp_user_level_mwait = …;
int __kmp_umwait_enabled = …;
int __kmp_mwait_enabled = …;
int __kmp_mwait_hints = …;
#endif
#if KMP_HAVE_UMWAIT
int __kmp_waitpkg_enabled = …;
int __kmp_tpause_state = …;
int __kmp_tpause_hint = …;
int __kmp_tpause_enabled = …;
#endif
enum sched_type __kmp_sch_map[kmp_sched_upper - kmp_sched_lower_ext +
kmp_sched_upper_std - kmp_sched_lower - 2] = …;
#if KMP_OS_LINUX
enum clock_function_type __kmp_clock_function;
int __kmp_clock_function_param;
#endif
#if KMP_MIC_SUPPORTED
enum mic_type __kmp_mic_type = …;
#endif
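// Affinity support: the platform dispatch object, optional hwloc topology,
// Windows processor-group entry points, affinity mask size, and the parsed
// affinity settings for regular and hidden-helper threads.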
#if KMP_AFFINITY_SUPPORTED
KMPAffinity *__kmp_affinity_dispatch = …;
#if KMP_USE_HWLOC
int __kmp_hwloc_error = FALSE;
hwloc_topology_t __kmp_hwloc_topology = NULL;
#endif
#if KMP_OS_WINDOWS
#if KMP_GROUP_AFFINITY
int __kmp_num_proc_groups = 1;
#endif
kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount = NULL;
kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount = NULL;
kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity = NULL;
kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity = NULL;
#endif
size_t __kmp_affin_mask_size = …;
enum affinity_top_method __kmp_affinity_top_method = …;
kmp_affinity_t __kmp_affinity = …;
kmp_affinity_t __kmp_hh_affinity = …;
kmp_affinity_t *__kmp_affinities[] = …;
char *__kmp_cpuinfo_file = …;
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
int __kmp_first_osid_with_ecore = …;
#endif
#endif
kmp_nested_proc_bind_t __kmp_nested_proc_bind = …;
kmp_proc_bind_t __kmp_teams_proc_bind = …;
int __kmp_affinity_num_places = …;
int __kmp_display_affinity = …;
char *__kmp_affinity_format = …;
kmp_int32 __kmp_default_device = …;
kmp_tasking_mode_t __kmp_tasking_mode = …;
kmp_int32 __kmp_max_task_priority = …;
kmp_uint64 __kmp_taskloop_min_tasks = …;
int __kmp_memkind_available = …;
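// Predefined OpenMP memory-management handles: allocators, memory spaces, and
// the current default allocator.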
omp_allocator_handle_t const omp_null_allocator = …;
omp_allocator_handle_t const omp_default_mem_alloc = …;
omp_allocator_handle_t const omp_large_cap_mem_alloc = …;
omp_allocator_handle_t const omp_const_mem_alloc = …;
omp_allocator_handle_t const omp_high_bw_mem_alloc = …;
omp_allocator_handle_t const omp_low_lat_mem_alloc = …;
omp_allocator_handle_t const omp_cgroup_mem_alloc = …;
omp_allocator_handle_t const omp_pteam_mem_alloc = …;
omp_allocator_handle_t const omp_thread_mem_alloc = …;
omp_allocator_handle_t const llvm_omp_target_host_mem_alloc = …;
omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc = …;
omp_allocator_handle_t const llvm_omp_target_device_mem_alloc = …;
omp_allocator_handle_t const kmp_max_mem_alloc = …;
omp_allocator_handle_t __kmp_def_allocator = …;
omp_memspace_handle_t const omp_default_mem_space = …;
omp_memspace_handle_t const omp_large_cap_mem_space = …;
omp_memspace_handle_t const omp_const_mem_space = …;
omp_memspace_handle_t const omp_high_bw_mem_space = …;
omp_memspace_handle_t const omp_low_lat_mem_space = …;
omp_memspace_handle_t const llvm_omp_target_host_mem_space = …;
omp_memspace_handle_t const llvm_omp_target_shared_mem_space = …;
omp_memspace_handle_t const llvm_omp_target_device_mem_space = …;
KMP_BUILD_ASSERT(…);
int __kmp_task_stealing_constraint = …;
int __kmp_enable_task_throttling = …;
#ifdef DEBUG_SUSPEND
int __kmp_suspend_count = 0;
#endif
int __kmp_settings = …;
int __kmp_duplicate_library_ok = …;
#if USE_ITT_BUILD
int __kmp_forkjoin_frames = …;
int __kmp_forkjoin_frames_mode = …;
#endif
PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method = …;
int __kmp_determ_red = …;
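// Debugging support: per-category debug levels, the circular debug buffer and
// its sizing, and the KMP_PAR_RANGE filter for restricting which parallel
// regions run in parallel in debug builds.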
#ifdef KMP_DEBUG
int kmp_a_debug = 0;
int kmp_b_debug = 0;
int kmp_c_debug = 0;
int kmp_d_debug = 0;
int kmp_e_debug = 0;
int kmp_f_debug = 0;
int kmp_diag = 0;
#endif
int __kmp_debug_buf = …;
int __kmp_debug_buf_lines = …;
int __kmp_debug_buf_chars = …;
int __kmp_debug_buf_atomic = …;
char *__kmp_debug_buffer = …;
std::atomic<int> __kmp_debug_count = …;
int __kmp_debug_buf_warn_chars = …;
#ifdef KMP_DEBUG
int __kmp_par_range;
char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN] = {'\0'};
char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN] = {'\0'};
int __kmp_par_range_lb = 0;
int __kmp_par_range_ub = INT_MAX;
#endif
int __kmp_storage_map = …;
int __kmp_storage_map_verbose = …;
int __kmp_storage_map_verbose_specified = …;
int __kmp_need_register_atfork = …;
int __kmp_need_register_atfork_specified = …;
int __kmp_env_stksize = …;
int __kmp_env_blocktime = …;
int __kmp_env_checks = …;
int __kmp_env_consistency_check = …;
#if KMP_OS_DARWIN && KMP_ARCH_AARCH64
kmp_int32 __kmp_use_yield = 0;
#else
kmp_int32 __kmp_use_yield = …;
#endif
kmp_int32 __kmp_use_yield_exp_set = …;
kmp_uint32 __kmp_yield_init = …;
kmp_uint32 __kmp_yield_next = …;
kmp_uint64 __kmp_pause_init = …;
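// Core bookkeeping (cache-aligned): the global thread and root arrays, the
// retired-threads list, live thread counts, and the thread/team free pools.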
KMP_ALIGN_CACHE
kmp_info_t **__kmp_threads = …;
kmp_root_t **__kmp_root = …;
kmp_old_threads_list_t *__kmp_old_threads_list = …;
KMP_ALIGN_CACHE
volatile int __kmp_nth = …;
volatile int __kmp_all_nth = …;
volatile kmp_info_t *__kmp_thread_pool = …;
volatile kmp_team_t *__kmp_team_pool = …;
KMP_ALIGN_CACHE
std::atomic<int> __kmp_thread_pool_active_nth = …;
KMP_ALIGN_CACHE
kmp_global_t __kmp_global;
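// Bootstrap and global locks serializing initialization, fork/join, exit,
// threadprivate-cache updates, dispatch, and debug output; placed with
// internode alignment when KMP_USE_INTERNODE_ALIGNMENT is enabled.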
#if KMP_USE_INTERNODE_ALIGNMENT
KMP_ALIGN_CACHE_INTERNODE
KMP_BOOTSTRAP_LOCK_INIT(__kmp_initz_lock);
KMP_ALIGN_CACHE_INTERNODE
KMP_BOOTSTRAP_LOCK_INIT(__kmp_forkjoin_lock);
KMP_ALIGN_CACHE_INTERNODE
KMP_BOOTSTRAP_LOCK_INIT(__kmp_exit_lock);
#if KMP_USE_MONITOR
KMP_ALIGN_CACHE_INTERNODE
KMP_BOOTSTRAP_LOCK_INIT(__kmp_monitor_lock);
#endif
KMP_ALIGN_CACHE_INTERNODE
KMP_BOOTSTRAP_LOCK_INIT(__kmp_tp_cached_lock);
KMP_ALIGN_CACHE_INTERNODE
KMP_LOCK_INIT(__kmp_global_lock);
KMP_ALIGN_CACHE_INTERNODE
kmp_queuing_lock_t __kmp_dispatch_lock;
KMP_ALIGN_CACHE_INTERNODE
KMP_LOCK_INIT(__kmp_debug_lock);
#else
KMP_ALIGN_CACHE
KMP_BOOTSTRAP_LOCK_INIT(…);
KMP_BOOTSTRAP_LOCK_INIT(…);
KMP_BOOTSTRAP_LOCK_INIT(…);
#if KMP_USE_MONITOR
KMP_BOOTSTRAP_LOCK_INIT(__kmp_monitor_lock);
#endif
KMP_BOOTSTRAP_LOCK_INIT(…);
KMP_ALIGN(…)
KMP_LOCK_INIT(…);
KMP_ALIGN(128)
kmp_queuing_lock_t __kmp_dispatch_lock;
KMP_ALIGN(…)
KMP_LOCK_INIT(…);
#endif
#if KMP_HANDLE_SIGNALS
int __kmp_handle_signals = …;
#endif
#ifdef DEBUG_SUSPEND
int get_suspend_count_(void) {
int count = __kmp_suspend_count;
__kmp_suspend_count = 0;
return count;
}
void set_suspend_count_(int *value) { __kmp_suspend_count = *value; }
#endif
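// Target offload policy, runtime pause state, and nesting-mode configuration.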
kmp_target_offload_kind_t __kmp_target_offload = …;
kmp_pause_status_t __kmp_pause_status = …;
int __kmp_nesting_mode = …;
int __kmp_nesting_mode_nlevels = …;
int *__kmp_nesting_nth_level;
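// Task-graph (TDG) recording globals, available only with OMPX_TASKGRAPH:
// dot-file dumping, the global TDG table, and its bookkeeping counters.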
#if OMPX_TASKGRAPH
int __kmp_tdg_dot = 0;
kmp_int32 __kmp_max_tdgs = 100;
kmp_tdg_info_t **__kmp_global_tdgs = NULL;
kmp_int32 __kmp_curr_tdg_idx = 0;
kmp_int32 __kmp_num_tdg = 0;
kmp_int32 __kmp_successors_size = 10;
std::atomic<kmp_int32> __kmp_tdg_task_id = 0;
#endif