llvm/openmp/runtime/src/kmp_alloc.cpp

/*
 * kmp_alloc.cpp -- private/shared dynamic memory allocation and management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wrapper_malloc.h"

// Disable bget when it is not used
#if KMP_USE_BGET

/* Thread private buffer management code */

bget_compact_t;
bget_acquire_t;
bget_release_t;

/* NOTE: bufsize must be a signed datatype */

#if KMP_OS_WINDOWS
#if KMP_ARCH_X86 || KMP_ARCH_ARM
typedef kmp_int32 bufsize;
#else
typedef kmp_int64 bufsize;
#endif
#else
bufsize;
#endif // KMP_OS_WINDOWS

/* The three modes of operation are FIFO search, LIFO search, and best fit. */

bget_mode_t;

static void bpool(kmp_info_t *th, void *buffer, bufsize len);
static void *bget(kmp_info_t *th, bufsize size);
static void *bgetz(kmp_info_t *th, bufsize size);
static void *bgetr(kmp_info_t *th, void *buffer, bufsize newsize);
static void brel(kmp_info_t *th, void *buf);
static void bectl(kmp_info_t *th, bget_compact_t compact,
                  bget_acquire_t acquire, bget_release_t release,
                  bufsize pool_incr);

/* BGET CONFIGURATION */
/* Buffer allocation size quantum: all buffers allocated are a
   multiple of this size.  This MUST be a power of two. */

/* On the IA-32 architecture with Linux* OS, malloc() does not
   ensure 16-byte alignment. */

#if KMP_ARCH_X86 || !KMP_HAVE_QUAD

#define SizeQuant
#define AlignType

#else

#define SizeQuant
#define AlignType

#endif
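
/* Illustrative sketch (not part of the original source): because SizeQuant is
   a power of two, a request can be rounded up to the quantum with a simple
   mask, which is what the "MUST be a power of two" requirement buys.  The
   quantum value below is assumed purely for illustration. */
#if 0
static size_t example_round_to_quant(size_t request) {
  const size_t quant = 16; // assumed quantum; must be a power of two
  return (request + quant - 1) & ~(quant - 1);
}
#endif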

// Define this symbol to enable the bstats() function which calculates the
// total free space in the buffer pool, the largest available buffer, and the
// total space currently allocated.
#define BufStats

#ifdef KMP_DEBUG

// Define this symbol to enable the bpoold() function which dumps the buffers
// in a buffer pool.
#define BufDump

// Define this symbol to enable the bpoolv() function for validating a buffer
// pool.
#define BufValid

// Define this symbol to enable the bufdump() function which allows dumping the
// contents of an allocated or free buffer.
#define DumpData

#ifdef NOT_USED_NOW

// Wipe free buffers to a guaranteed pattern of garbage to trip up miscreants
// who attempt to use pointers into released buffers.
#define FreeWipe

// Use a best fit algorithm when searching for space for an allocation request.
// This uses memory more efficiently, but allocation will be much slower.
#define BestFit

#endif /* NOT_USED_NOW */
#endif /* KMP_DEBUG */

static bufsize bget_bin_size[] =;

#define MAX_BGET_BINS

struct bfhead;

//  Declare the interface, including the requested buffer size type, bufsize.

/* Queue links */
qlinks_t;

/* Header in allocated and free buffers */
bhead2_t;

/* Make sure the bhead structure is a multiple of SizeQuant in size. */
bhead_t;
#define BH(p)

/*  Header in directly allocated buffers (by acqfcn) */
bdhead_t;
#define BDH(p)

/* Header in free buffers */
bfhead_t;
#define BFH(p)

thr_data_t;

/*  Minimum allocation quantum: */
#define QLSize
#define SizeQ
// Maximum for the requested size.
#define MaxSize

/* End sentinel: value placed in the bsize field of the dummy block delimiting
   the end of a pool block.  The most negative number which will fit in a
   bufsize, defined in a way that the compiler will accept. */

#define ESent
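
/* Illustrative sketch (assumed, not the actual ESent definition): the usual
   "most negative value the compiler will accept" trick builds the minimum of
   a signed type from smaller pieces, because writing it as a single literal
   would overflow the positive intermediate value. */
#if 0
typedef long long example_bufsize; // stand-in for bufsize
static const example_bufsize example_esent = (example_bufsize)(
    -((((example_bufsize)1 << (sizeof(example_bufsize) * 8 - 2)) - 1) * 2) -
    2); // equals the minimum value of example_bufsize
#endif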

/* Thread Data management routines */
static int bget_get_bin(bufsize size) {}
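
/* Illustrative sketch (assumed; the real body is elided above): mapping a
   request size to a free-list bin is typically a binary search over the
   ascending bget_bin_size[] table for the first bin that can hold the
   request.  Names below are hypothetical. */
#if 0
static int example_get_bin(const long long *bins, int nbins, long long size) {
  int lo = 0, hi = nbins - 1;
  while (lo < hi) {
    int mid = lo + (hi - lo) / 2;
    if (bins[mid] < size)
      lo = mid + 1; // bin too small, search the upper half
    else
      hi = mid; // bin big enough, but a smaller bin might also fit
  }
  return lo; // index of the first bin whose size is >= the request
}
#endif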

static void set_thr_data(kmp_info_t *th) {}

static thr_data_t *get_thr_data(kmp_info_t *th) {}

/* Walk the free list and release the enqueued buffers */
static void __kmp_bget_dequeue(kmp_info_t *th) {}

/* Chain together the free buffers by using the thread owner field */
static void __kmp_bget_enqueue(kmp_info_t *th, void *buf
#ifdef USE_QUEUING_LOCK_FOR_BGET
                               ,
                               kmp_int32 rel_gtid
#endif
) {}
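
/* Illustrative sketch (assumed): returning a buffer to another thread's
   queue without taking that thread's lock is typically a compare-and-swap
   push onto a singly linked list, which the owning thread later drains in
   __kmp_bget_dequeue.  Types and names below are hypothetical. */
#if 0
#include <atomic>
struct example_node {
  example_node *next;
};
static void example_enqueue(std::atomic<example_node *> &head,
                            example_node *buf) {
  example_node *old_head = head.load(std::memory_order_relaxed);
  do {
    buf->next = old_head; // link on top of the current head
  } while (!head.compare_exchange_weak(old_head, buf,
                                       std::memory_order_release,
                                       std::memory_order_relaxed));
}
#endif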

/* insert buffer back onto a new freelist */
static void __kmp_bget_insert_into_freelist(thr_data_t *thr, bfhead_t *b) {}

/* unlink the buffer from the old freelist */
static void __kmp_bget_remove_from_freelist(bfhead_t *b) {}

/*  GET STATS -- check info on free list */
static void bcheck(kmp_info_t *th, bufsize *max_free, bufsize *total_free) {}

/*  BGET  --  Allocate a buffer.  */
static void *bget(kmp_info_t *th, bufsize requested_size) {}

/*  BGETZ  --  Allocate a buffer and clear its contents to zero.  We clear
               the entire contents of the buffer to zero, not just the
               region requested by the caller. */

static void *bgetz(kmp_info_t *th, bufsize size) {}
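
/* Illustrative sketch (assumed; the real body is elided above): bgetz() is
   essentially bget() followed by clearing the buffer.  The real routine
   clears the full (quantum-rounded) buffer, as the comment above notes; this
   sketch only clears the requested size because the rounded size lives in
   the elided buffer header. */
#if 0
static void *example_getz(kmp_info_t *th, bufsize size) {
  void *buf = bget(th, size);
  if (buf != NULL)
    memset(buf, 0, (size_t)size); // requires <cstring>
  return buf;
}
#endif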

/*  BGETR  --  Reallocate a buffer.  This is a minimal implementation,
               simply in terms of brel() and bget().  It could be
               enhanced to allow the buffer to grow into adjacent free
               blocks and to avoid moving data unnecessarily. */

static void *bgetr(kmp_info_t *th, void *buf, bufsize size) {}
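
/* Illustrative sketch (assumed): the minimal realloc described above, written
   purely in terms of bget() and brel(): allocate a new buffer, copy the
   smaller of the two sizes, release the old buffer.  The old size is passed
   explicitly here because looking it up requires the elided buffer header. */
#if 0
static void *example_getr(kmp_info_t *th, void *old_buf, bufsize old_size,
                          bufsize new_size) {
  void *new_buf = bget(th, new_size);
  if (new_buf == NULL)
    return NULL;
  if (old_buf != NULL) {
    memcpy(new_buf, old_buf,
           (size_t)(old_size < new_size ? old_size : new_size));
    brel(th, old_buf);
  }
  return new_buf;
}
#endif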

/*  BREL  --  Release a buffer.  */
static void brel(kmp_info_t *th, void *buf) {}

/*  BECTL  --  Establish automatic pool expansion control  */
static void bectl(kmp_info_t *th, bget_compact_t compact,
                  bget_acquire_t acquire, bget_release_t release,
                  bufsize pool_incr) {}

/*  BPOOL  --  Add a region of memory to the buffer pool.  */
static void bpool(kmp_info_t *th, void *buf, bufsize len) {}

/*  BFREED  --  Dump the free lists for this thread. */
static void bfreed(kmp_info_t *th) {}

void __kmp_initialize_bget(kmp_info_t *th) {}

void __kmp_finalize_bget(kmp_info_t *th) {}

void kmpc_set_poolsize(size_t size) {}

size_t kmpc_get_poolsize(void) {}

void kmpc_set_poolmode(int mode) {}

int kmpc_get_poolmode(void) {}

void kmpc_get_poolstat(size_t *maxmem, size_t *allmem) {}

void kmpc_poolprint(void) {}

#endif // #if KMP_USE_BGET

void *kmpc_malloc(size_t size) {}

#define IS_POWER_OF_TWO(n)
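
/* Illustrative note (the macro body above is elided): a power of two has a
   single bit set, so the usual check clears the lowest set bit and tests for
   zero. */
#if 0
static bool example_is_power_of_two(size_t n) {
  return n != 0 && (n & (n - 1)) == 0;
}
#endif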

void *kmpc_aligned_malloc(size_t size, size_t alignment) {}

void *kmpc_calloc(size_t nelem, size_t elsize) {}

void *kmpc_realloc(void *ptr, size_t size) {}

// NOTE: the library must have already been initialized by a previous allocate
void kmpc_free(void *ptr) {}

void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL) {}

void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
                           size_t elsize KMP_SRC_LOC_DECL) {}

void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
                            size_t size KMP_SRC_LOC_DECL) {}

void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL) {}

/* OMP 5.0 Memory Management support */
static const char *kmp_mk_lib_name;
static void *h_memkind;
/* memkind experimental API: */
// memkind_alloc
static void *(*kmp_mk_alloc)(void *k, size_t sz);
// memkind_free
static void (*kmp_mk_free)(void *kind, void *ptr);
// memkind_check_available
static int (*kmp_mk_check)(void *kind);
// kinds we are going to use
static void **mk_default;
static void **mk_interleave;
static void **mk_hbw;
static void **mk_hbw_interleave;
static void **mk_hbw_preferred;
static void **mk_hugetlb;
static void **mk_hbw_hugetlb;
static void **mk_hbw_preferred_hugetlb;
static void **mk_dax_kmem;
static void **mk_dax_kmem_all;
static void **mk_dax_kmem_preferred;
static void *(*kmp_target_alloc_host)(size_t size, int device);
static void *(*kmp_target_alloc_shared)(size_t size, int device);
static void *(*kmp_target_alloc_device)(size_t size, int device);
static void *(*kmp_target_lock_mem)(void *ptr, size_t size, int device);
static void *(*kmp_target_unlock_mem)(void *ptr, int device);
static void *(*kmp_target_free_host)(void *ptr, int device);
static void *(*kmp_target_free_shared)(void *ptr, int device);
static void *(*kmp_target_free_device)(void *ptr, int device);
static bool __kmp_target_mem_available;
#define KMP_IS_TARGET_MEM_SPACE(MS)
#define KMP_IS_TARGET_MEM_ALLOC(MA)
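
/* Illustrative sketch (assumed, not the actual loader): the memkind function
   pointers above are typically resolved at runtime with dlopen()/dlsym() so
   the OpenMP runtime has no link-time dependency on libmemkind.  Symbol names
   below are the public memkind API; error handling is trimmed. */
#if 0
#include <dlfcn.h>
static void example_load_memkind(void) {
  void *h = dlopen("libmemkind.so", RTLD_LAZY);
  if (h == NULL)
    return; // memkind not available; keep using the default malloc paths
  void *(*mk_malloc)(void *, size_t) =
      (void *(*)(void *, size_t))dlsym(h, "memkind_malloc");
  void (*mk_free)(void *, void *) =
      (void (*)(void *, void *))dlsym(h, "memkind_free");
  (void)mk_malloc;
  (void)mk_free;
}
#endif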

#if KMP_OS_UNIX && KMP_DYNAMIC_LIB && !KMP_OS_DARWIN
static inline void chk_kind(void ***pkind) {}
#endif

void __kmp_init_memkind() {}

void __kmp_fini_memkind() {}

void __kmp_init_target_mem() {}

omp_allocator_handle_t __kmpc_init_allocator(int gtid, omp_memspace_handle_t ms,
                                             int ntraits,
                                             omp_alloctrait_t traits[]) {}

void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t allocator) {}

void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t allocator) {}

omp_allocator_handle_t __kmpc_get_default_allocator(int gtid) {}
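
/* Illustrative usage sketch (standard OpenMP 5.0 API, not part of this file):
   the __kmpc_* allocator entry points above back user-level code such as the
   following, which builds an allocator with an alignment trait, allocates
   through it, and destroys it. */
#if 0
#include <omp.h>
static void example_allocator_usage(void) {
  omp_alloctrait_t traits[1];
  traits[0].key = omp_atk_alignment;
  traits[0].value = 64;
  omp_allocator_handle_t al =
      omp_init_allocator(omp_default_mem_space, 1, traits);
  double *p = (double *)omp_alloc(1024 * sizeof(double), al);
  omp_free(p, al);
  omp_destroy_allocator(al);
}
#endif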

kmp_mem_desc_t;
static int alignment = sizeof(void *); // align to pointer size by default

// external interfaces are wrappers over internal implementation
void *__kmpc_alloc(int gtid, size_t size, omp_allocator_handle_t allocator) {}

void *__kmpc_aligned_alloc(int gtid, size_t algn, size_t size,
                           omp_allocator_handle_t allocator) {}

void *__kmpc_calloc(int gtid, size_t nmemb, size_t size,
                    omp_allocator_handle_t allocator) {}

void *__kmpc_realloc(int gtid, void *ptr, size_t size,
                     omp_allocator_handle_t allocator,
                     omp_allocator_handle_t free_allocator) {}

void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t allocator) {}

// internal implementation, called from inside the library
void *__kmp_alloc(int gtid, size_t algn, size_t size,
                  omp_allocator_handle_t allocator) {}

void *__kmp_calloc(int gtid, size_t algn, size_t nmemb, size_t size,
                   omp_allocator_handle_t allocator) {}

void *__kmp_realloc(int gtid, void *ptr, size_t size,
                    omp_allocator_handle_t allocator,
                    omp_allocator_handle_t free_allocator) {}

void ___kmpc_free(int gtid, void *ptr, omp_allocator_handle_t allocator) {}

/* If LEAK_MEMORY is defined, __kmp_free() will *not* free memory. It causes
   memory leaks, but it may be useful for debugging memory corruption, use of
   freed pointers, etc. */
/* #define LEAK_MEMORY */
struct kmp_mem_descr {};
kmp_mem_descr_t;

/* Allocate memory on requested boundary, fill allocated memory with 0x00.
   NULL is NEVER returned, __kmp_abort() is called in case of memory allocation
   error. Must use __kmp_free when freeing memory allocated by this routine! */
static void *___kmp_allocate_align(size_t size,
                                   size_t alignment KMP_SRC_LOC_DECL) {} // func ___kmp_allocate_align
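
/* Illustrative sketch (assumed; the real descriptor layout is elided above):
   aligned allocation with a hidden descriptor typically over-allocates,
   rounds the user pointer up to the requested alignment (a power of two), and
   stores bookkeeping just below that pointer so the matching free can recover
   the original allocation.  Names below are hypothetical, and unlike the real
   routine this sketch returns NULL instead of aborting on failure. */
#if 0
struct example_descr {
  void *ptr_allocated;   // what malloc() actually returned
  size_t size_allocated; // total bytes requested from malloc()
};
static void *example_allocate_align(size_t size, size_t align) {
  size_t total = size + sizeof(example_descr) + align;
  char *raw = (char *)malloc(total);
  if (raw == NULL)
    return NULL;
  uintptr_t user = ((uintptr_t)raw + sizeof(example_descr) + align - 1) &
                   ~(uintptr_t)(align - 1);
  example_descr *d = (example_descr *)user - 1;
  d->ptr_allocated = raw;
  d->size_allocated = total;
  memset((void *)user, 0x00, size); // fill with 0x00, as documented above
  return (void *)user;
}
#endif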

/* Allocate memory on cache line boundary, fill allocated memory with 0x00.
   Do not call this func directly! Use __kmp_allocate macro instead.
   NULL is NEVER returned, __kmp_abort() is called in case of memory allocation
   error. Must use __kmp_free when freeing memory allocated by this routine! */
void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL) {} // func ___kmp_allocate

/* Allocate memory on page boundary, fill allocated memory with 0x00.
   Do not call this func directly! Use __kmp_page_allocate macro instead.
   NULL is NEVER returned, __kmp_abort() is called in case of memory allocation
   error. Must use __kmp_free when freeing memory allocated by this routine! */
void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL) {} // ___kmp_page_allocate

/* Free memory allocated by __kmp_allocate() and __kmp_page_allocate().
   In debug mode, fill the memory block with 0xEF before call to free(). */
void ___kmp_free(void *ptr KMP_SRC_LOC_DECL) {} // func ___kmp_free

#if USE_FAST_MEMORY == 3
// Allocate fast memory by first scanning the thread's free lists.
// If a chunk of the right size exists, grab it off the free list.
// Otherwise, allocate normally using kmp_thread_malloc.

// AC: How to choose the limit? Just get 16 for now...
#define KMP_FREE_LIST_LIMIT

// Always use 128 bytes for determining buckets for caching memory blocks
#define DCACHE_LINE
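
// Illustrative sketch (assumed bucket limits; the real constants are elided
// above): the fast path keeps a few per-thread free lists bucketed by block
// size in multiples of DCACHE_LINE, so small blocks can be recycled without
// going through the general allocator.
#if 0
static int example_fast_bucket(size_t size) {
  if (size <= 64)
    return 0;
  if (size <= 128)
    return 1;
  if (size <= 256)
    return 2;
  if (size <= 512)
    return 3;
  return -1; // too big for the fast free lists; use the normal allocator
}
#endif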

void *___kmp_fast_allocate(kmp_info_t *this_thr, size_t size KMP_SRC_LOC_DECL) {} // func __kmp_fast_allocate

// Free fast memory and place it on the thread's free list if it is of
// the correct size.
void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL) {} // func __kmp_fast_free

// Initialize the thread free lists related to fast memory
// Only do this when a thread is initially created.
void __kmp_initialize_fast_memory(kmp_info_t *this_thr) {}

// Free the memory in the thread free lists related to fast memory
// Only do this when a thread is being reaped (destroyed).
void __kmp_free_fast_memory(kmp_info_t *th) {}

#endif // USE_FAST_MEMORY