#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"
#include <string.h>
#if defined(_MSC_VER) && (_MSC_VER < 1920)
#pragma warning(disable:4204)
#endif
// Callback type used by mi_heap_visit_pages for per-page visitors.
// NOTE(review): the typedef itself is elided in this view -- judging from the
// visitors below (e.g. mi_heap_page_is_valid) it presumably takes
// (heap, page-queue, page, arg1, arg2) and returns false to stop the walk;
// TODO confirm against the full source.
heap_page_visitor_fun;
// Walk the heap's pages and invoke `fn(heap, pq, page, arg1, arg2)` on each
// (see the visitor callbacks below that are passed here). Body elided in this
// view; return-value semantics cannot be confirmed from here.
static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
{ … }
#if MI_DEBUG>=2
// Debug visitor: assert that `page` is owned by `heap`, that its segment
// belongs to the heap's thread, and that the page is internally consistent.
// Always returns true so the page walk continues over every page.
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(pq); MI_UNUSED(arg1); MI_UNUSED(arg2);
  // ownership: the page's heap and its segment's thread must match ours
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_assert_internal(_mi_page_segment(page)->thread_id == heap->thread_id);
  // full (expensive) structural check of the page
  mi_assert_expensive(_mi_page_is_valid(page));
  return true;
}
#endif
#if MI_DEBUG>=3
// Debug check: validate every page of `heap` via mi_heap_page_is_valid.
// Any inconsistency fires an assert inside the visitor; the function itself
// always reports success so it can be used inside mi_assert_internal(...).
static bool mi_heap_is_valid(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  const bool ok = mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
  MI_UNUSED(ok);  // asserts fire inside the visitor; the result is not needed
  return true;
}
#endif
// Collection mode for mi_heap_collect_ex (the enum body is elided in this
// view; presumably ordered levels such as normal/force/abandon -- TODO confirm).
mi_collect_t;
// Per-page visitor used during collection; `arg_collect` presumably carries
// the mi_collect_t mode. Body elided in this view.
static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) { … }
// Per-page visitor; from the name, presumably clears any delayed-free state
// on the page. Body elided -- verify against the full source.
static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { … }
// Core collection routine parameterized by the mi_collect_t mode. Body elided.
static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{ … }
// Collection entry point used when abandoning a heap (name-derived; body elided).
void _mi_heap_collect_abandon(mi_heap_t* heap) { … }
// Public API: collect a specific heap; `force` presumably selects a more
// aggressive mi_collect_t mode. Body elided.
void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept { … }
// Public API: collect the default heap (name-derived; body elided).
void mi_collect(bool force) mi_attr_noexcept { … }
// Return the calling thread's default heap. Body elided.
mi_heap_t* mi_heap_get_default(void) { … }
// True if `heap` is the current default heap (name-derived; body elided).
static bool mi_heap_is_default(const mi_heap_t* heap) { … }
// Return the thread's backing heap (name-derived; body elided).
mi_heap_t* mi_heap_get_backing(void) { … }
// Initialize a heap structure with its thread-local data, arena binding,
// reclaim policy, and tag. Body elided in this view.
void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag)
{ … }
// Public API: create a new heap that allocates from a specific arena. Body elided.
mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) { … }
// Public API: create a new heap (name-derived; body elided).
mi_decl_nodiscard mi_heap_t* mi_heap_new(void) { … }
// True if memory with `memid` may be used by `heap` (name-derived; body elided).
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) { … }
// Advance and return the heap's random state (name-derived; body elided).
uintptr_t _mi_heap_random_next(mi_heap_t* heap) { … }
// Reset the heap's page queues/state (name-derived; body elided).
static void mi_heap_reset_pages(mi_heap_t* heap) { … }
// Release the heap structure itself (name-derived; body elided).
static void mi_heap_free(mi_heap_t* heap) { … }
// Per-page visitor used by _mi_heap_destroy_pages to tear a page down
// without freeing individual blocks (name-derived; body elided).
static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { … }
// Destroy all pages of the heap (name-derived; body elided).
void _mi_heap_destroy_pages(mi_heap_t* heap) { … }
#if MI_TRACK_HEAP_DESTROY
// Block visitor used when destroying a heap with heap-destroy tracking
// enabled: report each live block to the memory tracker as freed, using the
// block's actual usable size. Returns true to keep visiting.
static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
  MI_UNUSED(heap);
  MI_UNUSED(area);
  MI_UNUSED(block_size);
  MI_UNUSED(arg);
  // use mi_usable_size rather than block_size: it reflects the real
  // allocation size of this particular block
  mi_track_free_size(block, mi_usable_size(block));
  return true;
}
#endif
// Public API: destroy a heap, releasing its pages outright without freeing
// individual blocks (name-derived; body elided in this view).
void mi_heap_destroy(mi_heap_t* heap) { … }
// Destroy all heaps of the process/thread unconditionally (name-derived; body elided).
void _mi_heap_unsafe_destroy_all(void) { … }
// Transfer all pages from `from` into `heap` (name-derived; body elided).
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) { … }
// Public API: delete a heap, presumably migrating its live pages to the
// default/backing heap first (contrast with mi_heap_destroy). Body elided.
void mi_heap_delete(mi_heap_t* heap)
{ … }
// Make `heap` the thread's default heap and return the previous default. Body elided.
mi_heap_t* mi_heap_set_default(mi_heap_t* heap) { … }
// Return the heap that owns the block `p` points into (name-derived; body elided).
static mi_heap_t* mi_heap_of_block(const void* p) { … }
// Public API: true if `p` lies in memory owned by `heap`. Body elided.
bool mi_heap_contains_block(mi_heap_t* heap, const void* p) { … }
// Per-page visitor for mi_heap_check_owned; `vfound` presumably receives the
// result (body elided -- verify against the full source).
static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) { … }
// Public API: true if `p` is a valid block owned by `heap`. Body elided.
bool mi_heap_check_owned(mi_heap_t* heap, const void* p) { … }
// Public API: mi_heap_check_owned against the default heap (name-derived; body elided).
bool mi_check_owned(const void* p) { … }
// Extended heap-area record used internally by the block visitors
// (typedef body elided in this view).
mi_heap_area_ex_t;
// Precompute a magic/shift pair for fast division by `divisor`
// (name-derived; body elided).
static void mi_fast_divisor(size_t divisor, size_t* magic, size_t* shift) { … }
// Divide `n` using a magic/shift pair from mi_fast_divisor (body elided).
static size_t mi_fast_divide(size_t n, size_t magic, size_t shift) { … }
// Invoke `visitor` on every live block in the given heap area/page. Body elided.
bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_block_visit_fun* visitor, void* arg) { … }
// Callback type for mi_heap_visit_areas (typedef body elided in this view).
mi_heap_area_visit_fun;
// Fill a mi_heap_area_t from a page's fields (name-derived; body elided).
void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) { … }
// Per-page adapter that builds an area and forwards to the area visitor
// in `vfun` (name-derived; body elided).
static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) { … }
// Visit every area (page) of the heap with `visitor` (body elided).
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) { … }
// Argument bundle threaded through mi_heap_visit_blocks
// (typedef body elided in this view).
mi_visit_blocks_args_t;
// Area visitor that optionally descends into per-block visiting
// (name-derived; body elided).
static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) { … }
// Public API: visit all areas of `heap`, and all blocks within them when
// `visit_blocks` is set. Body elided in this view.
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) { … }