#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_stats.h"
#include "kmp_str.h"
#if KMP_USE_X87CONTROL
#include <float.h>
#endif
#include "kmp_lock.h"
#include "kmp_dispatch.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif
// Error-path hooks installed as the dispatch enter-/exit-ordered callbacks
// when an "ordered" region is used without a matching ordered clause.
// NOTE(review): bodies are elided in this view; presumably they emit the
// corresponding kmp_error diagnostics — confirm against the full source.
void __kmp_dispatch_deo_error(int *gtid_ref, int *cid_ref, ident_t *loc_ref) { … }
void __kmp_dispatch_dxo_error(int *gtid_ref, int *cid_ref, ident_t *loc_ref) { … }
// Decide whether the given schedule kind should be treated as monotonic for
// the loop identified by loc; use_hier indicates hierarchical scheduling is
// in effect. NOTE(review): body elided here — the exact handling of the
// monotonic/nonmonotonic schedule modifier bits must be confirmed in the
// full source.
static inline int __kmp_get_monotonicity(ident_t *loc, enum sched_type schedule,
bool use_hier = false) { … }
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
// Float helpers for the weighted-iterations scheduling path: round a float
// to two decimal places, and round to the nearest integer, respectively.
// (Bodies elided in this view.)
static inline float __kmp_round_2decimal_val(float num) { … }
static inline int __kmp_get_round_val(float num) { … }
#endif
// Initialize a thread's own dispatch buffer for the static-steal schedule.
// Outputs are returned through the reference parameters: init (this thread's
// starting chunk index), small_chunk/extras/p_extra (chunk split between
// threads for nchunks chunks over nproc threads).
// NOTE(review): body elided in this view — output semantics inferred from
// parameter names; confirm against the full source.
template <typename T>
inline void
__kmp_initialize_self_buffer(kmp_team_t *team, T id,
dispatch_private_info_template<T> *pr,
typename traits_t<T>::unsigned_t nchunks, T nproc,
typename traits_t<T>::unsigned_t &init,
T &small_chunk, T &extras, T &p_extra) { … }
#if KMP_STATIC_STEAL_ENABLED
enum { … };
#endif
// Set up the private dispatch descriptor pr for one thread (tid of nproc)
// according to the requested schedule, loop bounds [lb, ub], stride st and
// chunk size. cur_chunk is only present in ITT-instrumented builds.
// NOTE(review): body elided in this view; per-schedule initialization
// details must be read from the full source.
template <typename T>
void __kmp_dispatch_init_algorithm(ident_t *loc, int gtid,
dispatch_private_info_template<T> *pr,
enum sched_type schedule, T lb, T ub,
typename traits_t<T>::signed_t st,
#if USE_ITT_BUILD
kmp_uint64 *cur_chunk,
#endif
typename traits_t<T>::signed_t chunk,
T nproc, T tid) { … }
#if KMP_USE_HIER_SCHED
// Primary template: initialize hierarchical scheduling for a loop with the
// given bounds and stride. Only the four explicit specializations below are
// ever instantiated (one per supported iteration-variable type).
template <typename T>
inline void __kmp_dispatch_init_hier_runtime(ident_t *loc, T lb, T ub,
typename traits_t<T>::signed_t st);
// Specialization for signed 32-bit loop variables: forwards the bounds to
// the hierarchy initializer together with the globally configured layer
// schedules; 32-bit iteration types use the small-chunk table.
template <>
inline void __kmp_dispatch_init_hier_runtime<kmp_int32>(ident_t *loc,
                                                        kmp_int32 lb,
                                                        kmp_int32 ub,
                                                        kmp_int32 st) {
  auto &hs = __kmp_hier_scheds;
  __kmp_dispatch_init_hierarchy<kmp_int32>(loc, hs.size, hs.layers, hs.scheds,
                                           hs.small_chunks, lb, ub, st);
}
// Specialization for unsigned 32-bit loop variables; like the kmp_int32
// variant, 32-bit iteration types use the small-chunk table.
template <>
inline void __kmp_dispatch_init_hier_runtime<kmp_uint32>(ident_t *loc,
                                                         kmp_uint32 lb,
                                                         kmp_uint32 ub,
                                                         kmp_int32 st) {
  auto &hs = __kmp_hier_scheds;
  __kmp_dispatch_init_hierarchy<kmp_uint32>(loc, hs.size, hs.layers, hs.scheds,
                                            hs.small_chunks, lb, ub, st);
}
// Specialization for signed 64-bit loop variables; 64-bit iteration types
// use the large-chunk table instead of the small one.
template <>
inline void __kmp_dispatch_init_hier_runtime<kmp_int64>(ident_t *loc,
                                                        kmp_int64 lb,
                                                        kmp_int64 ub,
                                                        kmp_int64 st) {
  auto &hs = __kmp_hier_scheds;
  __kmp_dispatch_init_hierarchy<kmp_int64>(loc, hs.size, hs.layers, hs.scheds,
                                           hs.large_chunks, lb, ub, st);
}
// Specialization for unsigned 64-bit loop variables; like the kmp_int64
// variant, 64-bit iteration types use the large-chunk table.
template <>
inline void __kmp_dispatch_init_hier_runtime<kmp_uint64>(ident_t *loc,
                                                         kmp_uint64 lb,
                                                         kmp_uint64 ub,
                                                         kmp_int64 st) {
  auto &hs = __kmp_hier_scheds;
  __kmp_dispatch_init_hierarchy<kmp_uint64>(loc, hs.size, hs.layers, hs.scheds,
                                            hs.large_chunks, lb, ub, st);
}
// Release the hierarchical-scheduling state attached to each of a team's
// dispatch buffers. Each buffer is viewed through the kmp_int32 shared-info
// template solely to reach the type-independent hier pointer.
// Fix: the original left sh->hier dangling after __kmp_free; a second call
// over the same buffers (or any later read of hier) would double-free or
// dereference freed memory. The pointer is now cleared after release.
void __kmp_dispatch_free_hierarchies(kmp_team_t *team) {
  // Teams of one thread only ever use 2 dispatch buffers; larger teams use
  // the full set.
  int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
  for (int i = 0; i < num_disp_buff; ++i) {
    auto sh =
        reinterpret_cast<dispatch_shared_info_template<kmp_int32> volatile *>(
            &team->t.t_disp_buffer[i]);
    if (sh->hier) {
      sh->hier->deallocate();
      __kmp_free(sh->hier);
      sh->hier = NULL; // prevent double-free / stale use on a repeat pass
    }
  }
}
#endif
// Top-level initialization for a dynamically scheduled loop: selects buffers
// and delegates per-schedule setup. push_ws controls workshare bookkeeping
// (used by the GOMP-compat wrappers below).
// NOTE(review): body elided in this view.
template <typename T>
static void
__kmp_dispatch_init(ident_t *loc, int gtid, enum sched_type schedule, T lb,
T ub, typename traits_t<T>::signed_t st,
typename traits_t<T>::signed_t chunk, int push_ws) { … }
// Completion of a dynamically scheduled loop for one thread (UT is the
// unsigned iteration type). NOTE(review): body elided in this view.
template <typename UT>
static void __kmp_dispatch_finish(int gtid, ident_t *loc) { … }
#ifdef KMP_GOMP_COMPAT
// Per-chunk completion variant used only for GOMP compatibility (see the
// __kmp_aux_dispatch_fini_chunk_* wrappers at the end of the file).
// NOTE(review): body elided in this view.
template <typename UT>
static void __kmp_dispatch_finish_chunk(int gtid, ident_t *loc) { … }
#endif
// Core chunk-claiming algorithm: given the thread's private descriptor pr
// and the team's shared descriptor sh, compute the next chunk into
// [*p_lb, *p_ub] with stride *p_st; *p_last flags the sequentially-last
// chunk. NOTE(review): body elided — presumably returns nonzero while work
// remains, matching __kmpc_dispatch_next_*; confirm in the full source.
template <typename T>
int __kmp_dispatch_next_algorithm(int gtid,
dispatch_private_info_template<T> *pr,
dispatch_shared_info_template<T> volatile *sh,
kmp_int32 *p_last, T *p_lb, T *p_ub,
typename traits_t<T>::signed_t *p_st, T nproc,
T tid) { … }
// Instrumentation hooks used by __kmp_dispatch_next: OMPT loop-end/dispatch
// callbacks and per-loop statistics. Each expands to a no-op when the
// corresponding support is compiled out. (Macro bodies elided in this view.)
#if OMPT_SUPPORT && OMPT_OPTIONAL
#define OMPT_LOOP_END …
#define OMPT_LOOP_DISPATCH(lb, ub, st, status) …
#else
#define OMPT_LOOP_END …
#define OMPT_LOOP_DISPATCH …
#endif
#if KMP_STATS_ENABLED
#define KMP_STATS_LOOP_END …
#else
#define KMP_STATS_LOOP_END …
#endif
// Driver around __kmp_dispatch_next_algorithm: handles buffer selection and
// instrumentation, then reports the claimed chunk through p_lb/p_ub/p_st.
// codeptr exists only in OMPT-instrumented builds.
// NOTE(review): body elided in this view.
template <typename T>
static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
T *p_lb, T *p_ub,
typename traits_t<T>::signed_t *p_st
#if OMPT_SUPPORT && OMPT_OPTIONAL
,
void *codeptr
#endif
) { … }
kmp_int32 __kmpc_sections_init(ident_t *loc, kmp_int32 gtid) { … }
// Compiler-facing entry: claim the next section out of numberOfSections for
// the calling thread. NOTE(review): body elided in this view.
kmp_int32 __kmpc_next_section(ident_t *loc, kmp_int32 gtid,
kmp_int32 numberOfSections) { … }
void __kmpc_end_sections(ident_t *loc, kmp_int32 gtid) { … }
// Compute a team's per-thread slice of a distribute construct: adjusts
// *plower/*pupper for the calling thread and sets *plastiter if it owns the
// last iteration. NOTE(review): body elided in this view.
template <typename T>
static void __kmp_dist_get_bounds(ident_t *loc, kmp_int32 gtid,
kmp_int32 *plastiter, T *plower, T *pupper,
typename traits_t<T>::signed_t incr) { … }
// C-linkage ABI surface invoked by compiler-generated code for dynamically
// scheduled loops. Suffixes _4/_4u/_8/_8u select iteration-variable width
// and signedness. All bodies are elided in this view; comments state only
// what the signatures establish and hedge everything else.
extern "C" {
// Begin dynamic scheduling of a loop over [lb, ub] with stride st and the
// given chunk size under the requested schedule kind.
void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int32 lb,
kmp_int32 ub, kmp_int32 st, kmp_int32 chunk) { … }
void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_uint32 lb,
kmp_uint32 ub, kmp_int32 st, kmp_int32 chunk) { … }
void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int64 lb,
kmp_int64 ub, kmp_int64 st, kmp_int64 chunk) { … }
void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_uint64 lb,
kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk) { … }
// Distribute-construct variants: additionally report through p_last whether
// the caller owns the sequentially-last iteration (cf. __kmp_dist_get_bounds).
void __kmpc_dist_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int32 *p_last,
kmp_int32 lb, kmp_int32 ub, kmp_int32 st,
kmp_int32 chunk) { … }
void __kmpc_dist_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int32 *p_last,
kmp_uint32 lb, kmp_uint32 ub, kmp_int32 st,
kmp_int32 chunk) { … }
void __kmpc_dist_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int32 *p_last,
kmp_int64 lb, kmp_int64 ub, kmp_int64 st,
kmp_int64 chunk) { … }
void __kmpc_dist_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int32 *p_last,
kmp_uint64 lb, kmp_uint64 ub, kmp_int64 st,
kmp_int64 chunk) { … }
// Claim the next chunk into [*p_lb, *p_ub] with stride *p_st; *p_last marks
// the final chunk. NOTE(review): presumably returns nonzero while iterations
// remain — confirm against the full source.
int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st) { … }
int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
kmp_uint32 *p_lb, kmp_uint32 *p_ub,
kmp_int32 *p_st) { … }
int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st) { … }
int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
kmp_uint64 *p_lb, kmp_uint64 *p_ub,
kmp_int64 *p_st) { … }
// Per-width completion hooks, plus a full deinit of the dispatch state.
void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid) { … }
void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid) { … }
void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid) { … }
void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid) { … }
void __kmpc_dispatch_deinit(ident_t *loc, kmp_int32 gtid) { … }
// 32-bit comparison predicates intended for use with __kmp_wait_4 below.
kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker) { … }
kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker) { … }
kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker) { … }
kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker) { … }
kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker) { … }
// Spin-wait on *spinner until pred(*spinner, checker) holds. obj is an
// opaque handle; NOTE(review): likely instrumentation-related — confirm.
kmp_uint32
__kmp_wait_4(volatile kmp_uint32 *spinner, kmp_uint32 checker,
kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
void *obj
) { … }
// Variant whose predicate receives the spinner address itself rather than
// the loaded value.
void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker,
kmp_uint32 (*pred)(void *, kmp_uint32),
void *obj
) { … }
}
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility wrappers: same shape as the __kmpc_dispatch_init_*
// entry points above, but with an explicit push_ws flag forwarded to
// __kmp_dispatch_init to control workshare bookkeeping.
// (Bodies elided in this view.)
void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int32 lb,
kmp_int32 ub, kmp_int32 st, kmp_int32 chunk,
int push_ws) { … }
void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_uint32 lb,
kmp_uint32 ub, kmp_int32 st, kmp_int32 chunk,
int push_ws) { … }
void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int64 lb,
kmp_int64 ub, kmp_int64 st, kmp_int64 chunk,
int push_ws) { … }
void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_uint64 lb,
kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk,
int push_ws) { … }
// Per-width chunked finish hooks (cf. __kmp_dispatch_finish_chunk above).
void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid) { … }
void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid) { … }
void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid) { … }
void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid) { … }
#endif