/*
 * jmemmgr.c
 *
 * This file was part of the Independent JPEG Group's software:
 * Copyright (C) 1991-1997, Thomas G. Lane.
 * libjpeg-turbo Modifications:
 * Copyright (C) 2016, 2021-2022, D. R. Commander.
 * For conditions of distribution and use, see the accompanying README.ijg
 * file.
 *
 * This file contains the JPEG system-independent memory management
 * routines.  This code is usable across a wide variety of machines; most
 * of the system dependencies have been isolated in a separate file.
 * The major functions provided here are:
 *   * pool-based allocation and freeing of memory;
 *   * policy decisions about how to divide available memory among the
 *     virtual arrays;
 *   * control logic for swapping virtual arrays between main memory and
 *     backing storage.
 * The separate system-dependent file provides the actual backing-storage
 * access code, and it contains the policy decision about how much total
 * main memory to use.
 * This file is system-dependent in the sense that some of its functions
 * are unnecessary in some systems.  For example, if there is enough virtual
 * memory so that backing storage will never be used, much of the virtual
 * array control logic could be removed.  (Of course, if you have that much
 * memory then you shouldn't care about a little bit of unused code...)
 */

#define JPEG_INTERNALS
#define AM_MEMORY_MANAGER …
#include "jinclude.h"
#include "jpeglib.h"
#include "jmemsys.h"            /* import the system-dependent declarations */

#if !defined(_MSC_VER) || _MSC_VER > 1600
#include <stdint.h>
#endif

#include <limits.h>


LOCAL(size_t)
round_up_pow2(size_t a, size_t b)
/* a rounded up to the next multiple of b, i.e. ceil(a/b)*b */
/* Assumes a >= 0, b > 0, and b is a power of 2 */
{ … }


/*
 * Some important notes:
 *   The allocation routines provided here must never return NULL.
 *   They should exit to error_exit if unsuccessful.
 *
 *   It's not a good idea to try to merge the sarray and barray routines,
 *   even though they are textually almost the same, because samples are
 *   usually stored as bytes while coefficients are shorts or ints.  Thus,
 *   in machines where byte pointers have a different representation from
 *   word pointers, the resulting machine code could not be the same.
 */


/*
 * Many machines require storage alignment: longs must start on 4-byte
 * boundaries, doubles on 8-byte boundaries, etc.  On such machines, malloc()
 * always returns pointers that are multiples of the worst-case alignment
 * requirement, and we had better do so too.
 * There isn't any really portable way to determine the worst-case alignment
 * requirement.  This module assumes that the alignment requirement is
 * multiples of ALIGN_SIZE.
 * By default, we define ALIGN_SIZE as the maximum of sizeof(double) and
 * sizeof(void *).  This is necessary on some workstations (where doubles
 * really do need 8-byte alignment) and will work fine on nearly everything.
 * We use the maximum of sizeof(double) and sizeof(void *), since
 * sizeof(double) may be insufficient, for example, on CHERI-enabled platforms
 * with 16-byte pointers and a 16-byte alignment requirement.  If your machine
 * has lesser alignment needs, you can save a few bytes by making ALIGN_SIZE
 * smaller.
 * The only place I know of where this will NOT work is certain Macintosh
 * 680x0 compilers that define double as a 10-byte IEEE extended float.
 * Doing 10-byte alignment is counterproductive because longwords won't be
 * aligned well.  Put "#define ALIGN_SIZE 4" in jconfig.h if you have
 * such a compiler.
 */

#ifndef ALIGN_SIZE              /* so can override from jconfig.h */
#ifndef WITH_SIMD
#define ALIGN_SIZE …
#else
#define ALIGN_SIZE …
#endif
#endif
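
/*
 * Illustrative sketch (not part of the library, kept out of the build with
 * #if 0): one common way to round a request up to the next multiple of a
 * power-of-two alignment, which is the arithmetic that round_up_pow2() above
 * is documented to perform.  The example_round_up_pow2() name and the
 * standalone main() are hypothetical and exist only to demonstrate the idiom.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static size_t
example_round_up_pow2(size_t a, size_t b)
/* Round a up to the next multiple of b; b must be a nonzero power of 2. */
{
  return (a + b - 1) & ~(b - 1);
}

int
main(void)
{
  /* With an 8-byte alignment, a 13-byte request occupies 16 bytes. */
  printf("%lu\n", (unsigned long)example_round_up_pow2(13, 8));   /* 16 */
  /* A request that is already a multiple of the alignment is unchanged. */
  printf("%lu\n", (unsigned long)example_round_up_pow2(32, 8));   /* 32 */
  return EXIT_SUCCESS;
}
#endif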

/*
 * We allocate objects from "pools", where each pool is gotten with a single
 * request to jpeg_get_small() or jpeg_get_large().  There is no per-object
 * overhead within a pool, except for alignment padding.  Each pool has a
 * header with a link to the next pool of the same class.
 * Small and large pool headers are identical.
 */

typedef struct small_pool_struct *small_pool_ptr;

typedef struct small_pool_struct { … } small_pool_hdr;

typedef struct large_pool_struct *large_pool_ptr;

typedef struct large_pool_struct { … } large_pool_hdr;


/*
 * Here is the full definition of a memory manager object.
 */

typedef struct { … } my_memory_mgr;

typedef my_memory_mgr *my_mem_ptr;


/*
 * The control blocks for virtual arrays.
 * Note that these blocks are allocated in the "small" pool area.
 * System-dependent info for the associated backing store (if any) is hidden
 * inside the backing_store_info struct.
 */

struct jvirt_sarray_control { … };

struct jvirt_barray_control { … };


#ifdef MEM_STATS                /* optional extra stuff for statistics */

LOCAL(void)
print_mem_stats(j_common_ptr cinfo, int pool_id)
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  small_pool_ptr shdr_ptr;
  large_pool_ptr lhdr_ptr;

  /* Since this is only a debugging stub, we can cheat a little by using
   * fprintf directly rather than going through the trace message code.
   * This is helpful because the message parm array can't handle longs.
   */
  fprintf(stderr, "Freeing pool %d, total space = %ld\n",
          pool_id, mem->total_space_allocated);

  for (lhdr_ptr = mem->large_list[pool_id]; lhdr_ptr != NULL;
       lhdr_ptr = lhdr_ptr->next) {
    fprintf(stderr, "  Large chunk used %ld\n", (long)lhdr_ptr->bytes_used);
  }

  for (shdr_ptr = mem->small_list[pool_id]; shdr_ptr != NULL;
       shdr_ptr = shdr_ptr->next) {
    fprintf(stderr, "  Small chunk used %ld free %ld\n",
            (long)shdr_ptr->bytes_used, (long)shdr_ptr->bytes_left);
  }
}

#endif /* MEM_STATS */


LOCAL(void)
out_of_memory(j_common_ptr cinfo, int which)
/* Report an out-of-memory error and stop execution */
/* If we compiled MEM_STATS support, report alloc requests before dying */
{ … }


/*
 * Allocation of "small" objects.
 *
 * For these, we use pooled storage.  When a new pool must be created,
 * we try to get enough space for the current request plus a "slop" factor,
 * where the slop will be the amount of leftover space in the new pool.
 * The speed vs. space tradeoff is largely determined by the slop values.
 * A different slop value is provided for each pool class (lifetime),
 * and we also distinguish the first pool of a class from later ones.
 * NOTE: the values given work fairly well on both 16- and 32-bit-int
 * machines, but may be too small if longs are 64 bits or more.
 *
 * Since we do not know what alignment malloc() gives us, we have to
 * allocate ALIGN_SIZE-1 extra space per pool to have room for alignment
 * adjustment.
 */

static const size_t first_pool_slop[JPOOL_NUMPOOLS] = …;

static const size_t extra_pool_slop[JPOOL_NUMPOOLS] = …;

#define MIN_SLOP …


METHODDEF(void *)
alloc_small(j_common_ptr cinfo, int pool_id, size_t sizeofobject)
/* Allocate a "small" object */
{ … }
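
/*
 * Illustrative sketch (not part of the library, kept out of the build with
 * #if 0): a stripped-down version of the pool-carving idea described above.
 * A pool is one malloc()ed chunk; an "allocation" rounds the request up to a
 * multiple of the alignment and bumps a cursor, and the whole pool is
 * released with a single free().  All names here (example_pool,
 * example_pool_create, EXAMPLE_ALIGN, ...) are hypothetical and simplified;
 * the real alloc_small() additionally chains pools per lifetime class,
 * applies the slop heuristics above, and error-exits instead of returning
 * NULL.
 */
#if 0
#include <stdlib.h>

#define EXAMPLE_ALIGN  16       /* stand-in for ALIGN_SIZE */

typedef struct {
  char *base;                   /* start of the pool's usable space */
  size_t bytes_used;            /* cursor: bytes already handed out */
  size_t bytes_left;            /* bytes still available in this pool */
} example_pool;

static example_pool *
example_pool_create(size_t capacity)
{
  example_pool *pool = (example_pool *)malloc(sizeof(example_pool));
  if (pool == NULL) return NULL;
  pool->base = (char *)malloc(capacity);
  if (pool->base == NULL) { free(pool); return NULL; }
  pool->bytes_used = 0;
  pool->bytes_left = capacity;
  return pool;
}

static void *
example_pool_alloc(example_pool *pool, size_t sizeofobject)
{
  /* Round the request up so that the next object stays aligned (malloc()
   * already aligns the pool base at least this strictly on most platforms).
   */
  size_t rounded =
    (sizeofobject + EXAMPLE_ALIGN - 1) & ~(size_t)(EXAMPLE_ALIGN - 1);
  void *obj;

  if (rounded > pool->bytes_left)
    return NULL;                /* the real code would start a new pool here */
  obj = pool->base + pool->bytes_used;
  pool->bytes_used += rounded;
  pool->bytes_left -= rounded;
  return obj;
}

static void
example_pool_destroy(example_pool *pool)
{
  /* One free() releases every object carved from the pool. */
  free(pool->base);
  free(pool);
}
#endif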

/*
 * Allocation of "large" objects.
 *
 * The external semantics of these are the same as "small" objects.  However,
 * the pool management heuristics are quite different.  We assume that each
 * request is large enough that it may as well be passed directly to
 * jpeg_get_large; the pool management just links everything together
 * so that we can free it all on demand.
 * Note: the major use of "large" objects is in JSAMPARRAY and JBLOCKARRAY
 * structures.  The routines that create these structures (see below)
 * deliberately bunch rows together to ensure a large request size.
 */

METHODDEF(void *)
alloc_large(j_common_ptr cinfo, int pool_id, size_t sizeofobject)
/* Allocate a "large" object */
{ … }


/*
 * Creation of 2-D sample arrays.
 *
 * To minimize allocation overhead and to allow I/O of large contiguous
 * blocks, we allocate the sample rows in groups of as many rows as possible
 * without exceeding MAX_ALLOC_CHUNK total bytes per allocation request.
 * NB: the virtual array control routines, later in this file, know about
 * this chunking of rows.  The rowsperchunk value is left in the mem manager
 * object so that it can be saved away if this sarray is the workspace for
 * a virtual array.
 *
 * Since we are often upsampling with a factor of 2, we align the size (not
 * the start) to 2 * ALIGN_SIZE so that the upsampling routines don't have
 * to be as careful about size.
 */

METHODDEF(JSAMPARRAY)
alloc_sarray(j_common_ptr cinfo, int pool_id, JDIMENSION samplesperrow,
             JDIMENSION numrows)
/* Allocate a 2-D sample array */
{ … }


/*
 * Creation of 2-D coefficient-block arrays.
 * This is essentially the same as the code for sample arrays, above.
 */

METHODDEF(JBLOCKARRAY)
alloc_barray(j_common_ptr cinfo, int pool_id, JDIMENSION blocksperrow,
             JDIMENSION numrows)
/* Allocate a 2-D coefficient-block array */
{ … }


/*
 * About virtual array management:
 *
 * The above "normal" array routines are only used to allocate strip buffers
 * (as wide as the image, but just a few rows high).  Full-image-sized buffers
 * are handled as "virtual" arrays.  The array is still accessed a strip at a
 * time, but the memory manager must save the whole array for repeated
 * accesses.  The intended implementation is that there is a strip buffer in
 * memory (as high as is possible given the desired memory limit), plus a
 * backing file that holds the rest of the array.
 *
 * The request_virt_array routines are told the total size of the image and
 * the maximum number of rows that will be accessed at once.  The in-memory
 * buffer must be at least as large as the maxaccess value.
 *
 * The request routines create control blocks but not the in-memory buffers.
 * That is postponed until realize_virt_arrays is called.  At that time the
 * total amount of space needed is known (approximately, anyway), so free
 * memory can be divided up fairly.
 *
 * The access_virt_array routines are responsible for making a specific strip
 * area accessible (after reading or writing the backing file, if necessary).
 * Note that the access routines are told whether the caller intends to modify
 * the accessed strip; during a read-only pass this saves having to rewrite
 * data to disk.  The access routines are also responsible for pre-zeroing
 * any newly accessed rows, if pre-zeroing was requested.
 *
 * In current usage, the access requests are usually for nonoverlapping
 * strips; that is, successive access start_row numbers differ by exactly
 * num_rows = maxaccess.  This means we can get good performance with simple
 * buffer dump/reload logic, by making the in-memory buffer be a multiple
 * of the access height; then there will never be accesses across bufferload
 * boundaries.  The code will still work with overlapping access requests,
 * but it doesn't handle bufferload overlaps very efficiently.
 */
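
/*
 * Illustrative sketch (not part of the library, kept out of the build with
 * #if 0): how a client module walks a virtual sample array in the
 * nonoverlapping strips described above, with start_row advancing by exactly
 * maxaccess each time.  The example_walk_virt_sarray() wrapper and the
 * image_width / image_height / max_rows values are hypothetical; only the
 * mem-manager method calls follow the interface implemented below, and a
 * real client would normally let the compression/decompression master code
 * call realize_virt_arrays on its behalf.
 */
#if 0
static void
example_walk_virt_sarray(j_decompress_ptr cinfo, JDIMENSION image_width,
                         JDIMENSION image_height)
{
  JDIMENSION max_rows = 16;     /* hypothetical strip height (maxaccess) */
  jvirt_sarray_ptr varray;
  JSAMPARRAY strip;
  JDIMENSION row, nrows;

  /* Phase 1: request the array; only a control block exists at this point. */
  varray = (*cinfo->mem->request_virt_sarray)((j_common_ptr)cinfo,
                                              JPOOL_IMAGE,
                                              TRUE /* pre_zero */,
                                              image_width, image_height,
                                              max_rows);

  /* Phase 2: once every virtual array has been requested, divide the
   * available memory among them and allocate the in-memory buffers.
   */
  (*cinfo->mem->realize_virt_arrays)((j_common_ptr)cinfo);

  /* Phase 3: touch the array one strip at a time.  start_row advances by
   * exactly maxaccess, so accesses never straddle a bufferload boundary;
   * only the final (possibly short) strip is clamped to the image height.
   */
  for (row = 0; row < image_height; row += max_rows) {
    nrows = (image_height - row < max_rows) ? image_height - row : max_rows;
    strip = (*cinfo->mem->access_virt_sarray)((j_common_ptr)cinfo, varray,
                                              row, nrows,
                                              TRUE /* writable */);
    /* ... read or fill strip[0 .. nrows-1][0 .. image_width-1] here ... */
    (void)strip;
  }
}
#endif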

METHODDEF(jvirt_sarray_ptr)
request_virt_sarray(j_common_ptr cinfo, int pool_id, boolean pre_zero,
                    JDIMENSION samplesperrow, JDIMENSION numrows,
                    JDIMENSION maxaccess)
/* Request a virtual 2-D sample array */
{ … }


METHODDEF(jvirt_barray_ptr)
request_virt_barray(j_common_ptr cinfo, int pool_id, boolean pre_zero,
                    JDIMENSION blocksperrow, JDIMENSION numrows,
                    JDIMENSION maxaccess)
/* Request a virtual 2-D coefficient-block array */
{ … }


METHODDEF(void)
realize_virt_arrays(j_common_ptr cinfo)
/* Allocate the in-memory buffers for any unrealized virtual arrays */
{ … }


LOCAL(void)
do_sarray_io(j_common_ptr cinfo, jvirt_sarray_ptr ptr, boolean writing)
/* Do backing store read or write of a virtual sample array */
{ … }


LOCAL(void)
do_barray_io(j_common_ptr cinfo, jvirt_barray_ptr ptr, boolean writing)
/* Do backing store read or write of a virtual coefficient-block array */
{ … }


METHODDEF(JSAMPARRAY)
access_virt_sarray(j_common_ptr cinfo, jvirt_sarray_ptr ptr,
                   JDIMENSION start_row, JDIMENSION num_rows,
                   boolean writable)
/* Access the part of a virtual sample array starting at start_row */
/* and extending for num_rows rows.  writable is true if */
/* caller intends to modify the accessed area. */
{ … }


METHODDEF(JBLOCKARRAY)
access_virt_barray(j_common_ptr cinfo, jvirt_barray_ptr ptr,
                   JDIMENSION start_row, JDIMENSION num_rows,
                   boolean writable)
/* Access the part of a virtual block array starting at start_row */
/* and extending for num_rows rows.  writable is true if */
/* caller intends to modify the accessed area. */
{ … }


/*
 * Release all objects belonging to a specified pool.
 */

METHODDEF(void)
free_pool(j_common_ptr cinfo, int pool_id)
{ … }


/*
 * Close up shop entirely.
 * Note that this cannot be called unless cinfo->mem is non-NULL.
 */

METHODDEF(void)
self_destruct(j_common_ptr cinfo)
{ … }


/*
 * Memory manager initialization.
 * When this is called, only the error manager pointer is valid in cinfo!
 */

GLOBAL(void)
jinit_memory_mgr(j_common_ptr cinfo)
{ … }
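
/*
 * Illustrative sketch (not part of the library, kept out of the build with
 * #if 0): how client code typically reaches the methods installed by
 * jinit_memory_mgr() through cinfo->mem.  The my_module_setup() wrapper and
 * the buffer sizes are hypothetical; the method names, the JPOOL_PERMANENT /
 * JPOOL_IMAGE lifetimes, and the automatic reclamation by free_pool() are
 * the interface this file implements.
 */
#if 0
static void
my_module_setup(j_compress_ptr cinfo)
{
  /* A small control structure that must survive across multiple images:
   * allocate it from the permanent pool, which is released only when the
   * whole JPEG object is destroyed (self_destruct).
   */
  void *ctrl = (*cinfo->mem->alloc_small)((j_common_ptr)cinfo,
                                          JPOOL_PERMANENT, (size_t)64);

  /* A strip buffer needed only while the current image is being processed:
   * allocate it from the image pool, so free_pool(JPOOL_IMAGE) reclaims it
   * at the end of the image without any explicit free call.
   */
  JSAMPARRAY strip = (*cinfo->mem->alloc_sarray)((j_common_ptr)cinfo,
                                                 JPOOL_IMAGE,
                                                 cinfo->image_width,
                                                 (JDIMENSION)8);
  (void)ctrl;
  (void)strip;
}
#endif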