| 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015 | /*** Two Level Segregated Fit memory allocator, version 3.1.** Written by Matthew Conte**	http://tlsf.baisoku.org**** Based on the original documentation by Miguel Masmano:**	http://www.gii.upv.es/tlsf/main/docs**** This implementation was written to the specification** of the document, therefore no GPL restrictions apply.**** Copyright (c) 2006-2016, Matthew Conte** All rights reserved.**** Redistribution and use in source and binary forms, with or without** modification, are permitted provided that the following conditions are met:**     * Redistributions of source code must retain the above copyright**       notice, this list of conditions and the following disclaimer.**     * Redistributions in binary form must reproduce the above copyright**       notice, this list of conditions and the following disclaimer in the**       documentation and/or other materials provided with the distribution.**     * Neither the name of the copyright holder nor the**       names of its contributors may be used to endorse or promote products**       derived from this software without specific prior written permission.**** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/#include "multi_heap_config.h"#include "multi_heap.h"#include "multi_heap_internal.h"#include "heap_tlsf_config.h"#include "heap_tlsf.h"#include "esp_log.h"/*** Architecture-specific bit manipulation routines.**** TLSF achieves O(1) cost for malloc and free operations by limiting** the search for a free block to a free list of guaranteed size** adequate to fulfill the request, combined with efficient free list** queries using bitmasks and architecture-specific bit-manipulation** routines.**** Most modern processors provide instructions to count leading zeroes** in a word, find the lowest and highest set bit, etc. These** specific implementations will be used when available, falling back** to a reasonably efficient generic implementation.**** NOTE: TLSF spec relies on ffs/fls returning value 0..31.** ffs/fls return 1-32 by default, returning 0 for error.*//* The TLSF control structure. */typedef struct control_t{	/* Empty lists point at this block to indicate they are free. */	block_header_t block_null;		/* Local parameter for the pool */	unsigned int fl_index_count;	unsigned int fl_index_shift;	unsigned int fl_index_max;		unsigned int sl_index_count;	unsigned int sl_index_count_log2;	unsigned int small_block_size;	size_t size;	/* Bitmaps for free lists. */	unsigned int fl_bitmap;	unsigned int *sl_bitmap;		/* Head of free lists. */	block_header_t** blocks;} control_t;static inline __attribute__((__always_inline__)) int tlsf_ffs(unsigned int word){	const unsigned int reverse = word & (~word + 1);	const int bit = 32 - __builtin_clz(reverse);	return bit - 1;}static inline __attribute__((__always_inline__)) int tlsf_fls(unsigned int word){	const int bit = word ? 32 - __builtin_clz(word) : 0;	return bit - 1;}/*** Set assert macro, if it has not been provided by the user.*/#if !defined (tlsf_assert)#define tlsf_assert assert#endif/*** Static assertion mechanism.*/#define _tlsf_glue2(x, y) x ## y#define _tlsf_glue(x, y) _tlsf_glue2(x, y)#define tlsf_static_assert(exp) \	typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]/* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);static inline __attribute__((__always_inline__)) size_t align_up(size_t x, size_t align){	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");	return (x + (align - 1)) & ~(align - 1);}static inline __attribute__((__always_inline__)) size_t align_down(size_t x, size_t align){	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");	return x - (x & (align - 1));}static inline __attribute__((__always_inline__)) void* align_ptr(const void* ptr, size_t align){	const tlsfptr_t aligned =		(tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");	return tlsf_cast(void*, aligned);}/*** Adjust an allocation size to be aligned to word size, and no smaller** than internal minimum.*/static inline __attribute__((__always_inline__)) size_t adjust_request_size(tlsf_t tlsf, size_t size, size_t align){	size_t adjust = 0;	if (size)	{		const size_t aligned = align_up(size, align);		/* aligned sized must not exceed block_size_max or we'll go out of bounds on sl_bitmap */		if (aligned < tlsf_block_size_max(tlsf))		{			adjust = tlsf_max(aligned, block_size_min);		}	}	return adjust;}/*** TLSF utility functions. In most cases, these are direct translations of** the documentation found in the white paper.*/static inline __attribute__((__always_inline__)) void mapping_insert(control_t *control, size_t size, int* fli, int* sli){	int fl, sl;	if (size < control->small_block_size)	{		/* Store small blocks in first list. */		fl = 0;		sl = tlsf_cast(int, size) >> 2;	}	else	{		fl = tlsf_fls(size);		sl = tlsf_cast(int, size >> (fl - control->sl_index_count_log2)) ^ (1 << control->sl_index_count_log2);		fl -= (control->fl_index_shift - 1);	}	*fli = fl;	*sli = sl;}/* This version rounds up to the next block size (for allocations) */static inline __attribute__((__always_inline__)) void mapping_search(control_t *control, size_t size, int* fli, int* sli){	if (size >= control->small_block_size)	{		const size_t round = (1 << (tlsf_fls(size) - control->sl_index_count_log2)) - 1;		size += round;	}	mapping_insert(control, size, fli, sli);}static inline __attribute__((__always_inline__)) block_header_t* search_suitable_block(control_t* control, int* fli, int* sli){	int fl = *fli;	int sl = *sli;	/*	** First, search for a block in the list associated with the given	** fl/sl index.	*/	unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);	if (!sl_map)	{		/* No block exists. Search in the next largest first-level list. */		const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));		if (!fl_map)		{			/* No free blocks available, memory has been exhausted. */			return 0;		}		fl = tlsf_ffs(fl_map);		*fli = fl;		sl_map = control->sl_bitmap[fl];	}	tlsf_assert(sl_map && "internal error - second level bitmap is null");	sl = tlsf_ffs(sl_map);	*sli = sl;	/* Return the first block in the free list. */	return control->blocks[fl*control->sl_index_count + sl];}/* Remove a free block from the free list.*/static inline __attribute__((__always_inline__)) void remove_free_block(control_t* control, block_header_t* block, int fl, int sl){	block_header_t* prev = block->prev_free;	block_header_t* next = block->next_free;	tlsf_assert(prev && "prev_free field can not be null");	tlsf_assert(next && "next_free field can not be null");	next->prev_free = prev;	prev->next_free = next;	/* If this block is the head of the free list, set new head. */	if (control->blocks[fl*control->sl_index_count + sl] == block)	{		control->blocks[fl*control->sl_index_count + sl] = next;		/* If the new head is null, clear the bitmap. */		if (next == &control->block_null)		{			control->sl_bitmap[fl] &= ~(1 << sl);			/* If the second bitmap is now empty, clear the fl bitmap. */			if (!control->sl_bitmap[fl])			{				control->fl_bitmap &= ~(1 << fl);			}		}	}}/* Insert a free block into the free block list. */static inline __attribute__((__always_inline__)) void insert_free_block(control_t* control, block_header_t* block, int fl, int sl){	block_header_t* current = control->blocks[fl*control->sl_index_count + sl];	tlsf_assert(current && "free list cannot have a null entry");	tlsf_assert(block && "cannot insert a null entry into the free list");	block->next_free = current;	block->prev_free = &control->block_null;	current->prev_free = block;	tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE)		&& "block not aligned properly");	/*	** Insert the new block at the head of the list, and mark the first-	** and second-level bitmaps appropriately.	*/	control->blocks[fl*control->sl_index_count + sl] = block;	control->fl_bitmap |= (1 << fl);	control->sl_bitmap[fl] |= (1 << sl);}/* Remove a given block from the free list. */static inline __attribute__((__always_inline__)) void block_remove(control_t* control, block_header_t* block){	int fl, sl;	mapping_insert(control, block_size(block), &fl, &sl);	remove_free_block(control, block, fl, sl);}/* Insert a given block into the free list. */static inline __attribute__((__always_inline__)) void block_insert(control_t* control, block_header_t* block){	int fl, sl;	mapping_insert(control, block_size(block), &fl, &sl);	insert_free_block(control, block, fl, sl);}static inline __attribute__((__always_inline__)) int block_can_split(block_header_t* block, size_t size){	return block_size(block) >= sizeof(block_header_t) + size;}/* Split a block into two, the second of which is free. */static inline __attribute__((__always_inline__)) block_header_t* block_split(block_header_t* block, size_t size){    /* Calculate the amount of space left in the remaining block.     * REMINDER: remaining pointer's first field is `prev_phys_block` but this field is part of the     * previous physical block. */	block_header_t* remaining =		offset_to_block(block_to_ptr(block), size - block_header_overhead);    /* `size` passed as an argument is the first block's new size, thus, the remaining block's size     * is `block_size(block) - size`. However, the block's data must be precedeed by the data size.     * This field is NOT part of the size, so it has to be substracted from the calculation. */	const size_t remain_size = block_size(block) - (size + block_header_overhead);	tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)		&& "remaining block not aligned properly");	tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);	block_set_size(remaining, remain_size);	tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");	block_set_size(block, size);	block_mark_as_free(remaining);    /**     * Here is the final outcome of this function:     *     * block             remaining (block_ptr + size - BHO)     * +                                +     * |                                |     * v                                v     * +----------------------------------------------------------------------+     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|     * +----------------------------------------------------------------------+     *      |    |                           |    |     *      +    +<------------------------->+    +<------------------------->     *       BHO    `size` (argument) bytes   BHO      `remain_size` bytes     *     * Where BHO = block_header_overhead,     * 0: part of the memory owned by a `block`'s previous neighbour,     * x: part of the memory owned by `block`.     * #: part of the memory owned by `remaining`.     */	return remaining;}/* Absorb a free block's storage into an adjacent previous free block. */static inline __attribute__((__always_inline__)) block_header_t* block_absorb(block_header_t* prev, block_header_t* block){	tlsf_assert(!block_is_last(prev) && "previous block can't be last");	/* Note: Leaves flags untouched. */	prev->size += block_size(block) + block_header_overhead;	block_link_next(prev);#ifdef MULTI_HEAP_POISONING_SLOW        /* next_block header needs to be replaced with a fill pattern */        multi_heap_internal_poison_fill_region(block, sizeof(block_header_t), true /* free */);#endif	return prev;}/* Merge a just-freed block with an adjacent previous free block. */static inline __attribute__((__always_inline__)) block_header_t* block_merge_prev(control_t* control, block_header_t* block){	if (block_is_prev_free(block))	{		block_header_t* prev = block_prev(block);		tlsf_assert(prev && "prev physical block can't be null");		tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");		block_remove(control, prev);		block = block_absorb(prev, block);	}	return block;}/* Merge a just-freed block with an adjacent free block. */static inline __attribute__((__always_inline__)) block_header_t* block_merge_next(control_t* control, block_header_t* block){	block_header_t* next = block_next(block);	tlsf_assert(next && "next physical block can't be null");	if (block_is_free(next))	{		tlsf_assert(!block_is_last(block) && "previous block can't be last");		block_remove(control, next);		block = block_absorb(block, next);	}	return block;}/* Trim any trailing block space off the end of a block, return to pool. */static inline __attribute__((__always_inline__)) void block_trim_free(control_t* control, block_header_t* block, size_t size){	tlsf_assert(block_is_free(block) && "block must be free");	if (block_can_split(block, size))	{		block_header_t* remaining_block = block_split(block, size);		block_link_next(block);		block_set_prev_free(remaining_block);		block_insert(control, remaining_block);	}}/* Trim any trailing block space off the end of a used block, return to pool. */static inline __attribute__((__always_inline__)) void block_trim_used(control_t* control, block_header_t* block, size_t size){	tlsf_assert(!block_is_free(block) && "block must be used");	if (block_can_split(block, size))	{		/* If the next block is free, we must coalesce. */		block_header_t* remaining_block = block_split(block, size);		block_set_prev_used(remaining_block);		remaining_block = block_merge_next(control, remaining_block);		block_insert(control, remaining_block);	}}static inline __attribute__((__always_inline__)) block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size){	block_header_t* remaining_block = block;	if (block_can_split(block, size))	{        /* We want to split `block` in two: the first block will be freed and the         * second block will be returned. */		remaining_block = block_split(block, size - block_header_overhead);        /* `remaining_block` is the second block, mark its predecessor (first         * block) as free. */		block_set_prev_free(remaining_block);		block_link_next(block);        /* Put back the first block into the free memory list. */		block_insert(control, block);	}	return remaining_block;}static inline  __attribute__((__always_inline__)) block_header_t* block_locate_free(control_t* control, size_t size){	int fl = 0, sl = 0;	block_header_t* block = 0;	if (size)	{		mapping_search(control, size, &fl, &sl);		/*		** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up		** with indices that are off the end of the block array.		** So, we protect against that here, since this is the only callsite of mapping_search.		** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range.		*/		if (fl < control->fl_index_count)		{			block = search_suitable_block(control, &fl, &sl);		}	}	if (block)	{		tlsf_assert(block_size(block) >= size);		remove_free_block(control, block, fl, sl);	}	return block;}static inline __attribute__((__always_inline__)) void* block_prepare_used(control_t* control, block_header_t* block, size_t size){	void* p = 0;	if (block)	{		tlsf_assert(size && "size must be non-zero");		block_trim_free(control, block, size);		block_mark_as_used(block);		p = block_to_ptr(block);	}	return p;}/* Clear structure and point all empty lists at the null block. */static void control_construct(control_t* control, size_t bytes){	int i, j;	control->block_null.next_free = &control->block_null;	control->block_null.prev_free = &control->block_null;	/* find the closest ^2 for first layer */	i = (bytes - 1) / (16 * 1024);	control->fl_index_max = FL_INDEX_MAX_MIN + sizeof(i) * 8 - __builtin_clz(i);	/* adapt second layer to the pool */	if (bytes <= 16 * 1024) control->sl_index_count_log2 = 3;	else if (bytes <= 256 * 1024) control->sl_index_count_log2 = 4;	else control->sl_index_count_log2 = 5;		control->fl_index_shift = (control->sl_index_count_log2 + ALIGN_SIZE_LOG2);	control->sl_index_count = 1 << control->sl_index_count_log2;	control->fl_index_count = control->fl_index_max - control->fl_index_shift + 1;	control->small_block_size = 1 << control->fl_index_shift;	control->fl_bitmap = 0;		control->sl_bitmap = align_ptr(control + 1, sizeof(*control->sl_bitmap));	control->blocks = align_ptr(control->sl_bitmap + control->fl_index_count, sizeof(*control->blocks));	control->size = (void*) (control->blocks + control->sl_index_count * control->fl_index_count) - (void*) control;		ESP_EARLY_LOGW( "REMOVE", "NEW POOL of %d bytes, ctrl_size: %d sli_c:%d fli_c:%d small_b %d max_b:%d", 					bytes, 					control->size, control->sl_index_count, control->fl_index_count,					control->small_block_size, 1 << control->fl_index_max );			/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */	tlsf_assert(sizeof(unsigned int) * CHAR_BIT >= control->sl_index_count && "CHAR_BIT less than sl_index_count");	/* Ensure we've properly tuned our sizes. */	tlsf_assert(ALIGN_SIZE == control->small_block_size / control->sl_index_count && "ALIGN_SIZE does not match");		for (i = 0; i < control->fl_index_count; ++i)	{		control->sl_bitmap[i] = 0;		for (j = 0; j < control->sl_index_count; ++j)		{			control->blocks[i*control->sl_index_count + j] = &control->block_null;		}	}}/*** Debugging utilities.*/typedef struct integrity_t{	int prev_status;	int status;} integrity_t;#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }static void integrity_walker(void* ptr, size_t size, int used, void* user){	block_header_t* block = block_from_ptr(ptr);	integrity_t* integ = tlsf_cast(integrity_t*, user);	const int this_prev_status = block_is_prev_free(block) ? 1 : 0;	const int this_status = block_is_free(block) ? 1 : 0;	const size_t this_block_size = block_size(block);	int status = 0;	(void)used;	tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");	tlsf_insist(size == this_block_size && "block size incorrect");	integ->prev_status = this_status;	integ->status += status;}int tlsf_check(tlsf_t tlsf){	int i, j;	control_t* control = tlsf_cast(control_t*, tlsf);	int status = 0;	/* Check that the free lists and bitmaps are accurate. */	for (i = 0; i < control->fl_index_count; ++i)	{		for (j = 0; j < control->sl_index_count; ++j)		{			const int fl_map = control->fl_bitmap & (1 << i);			const int sl_list = control->sl_bitmap[i];			const int sl_map = sl_list & (1 << j);			const block_header_t* block = control->blocks[i*control->sl_index_count + j];			/* Check that first- and second-level lists agree. */			if (!fl_map)			{				tlsf_insist(!sl_map && "second-level map must be null");			}			if (!sl_map)			{				tlsf_insist(block == &control->block_null && "block list must be null");				continue;			}			/* Check that there is at least one free block. */			tlsf_insist(sl_list && "no free blocks in second-level map");			tlsf_insist(block != &control->block_null && "block should not be null");			while (block != &control->block_null)			{				int fli, sli;				tlsf_insist(block_is_free(block) && "block should be free");				tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");				tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");				tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");				tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");				mapping_insert(control, block_size(block), &fli, &sli);				tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");				block = block->next_free;			}		}	}	return status;}#undef tlsf_insiststatic void default_walker(void* ptr, size_t size, int used, void* user){	(void)user;	printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));}void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user){	tlsf_walker pool_walker = walker ? walker : default_walker;	block_header_t* block =		offset_to_block(pool, -(int)block_header_overhead);	while (block && !block_is_last(block))	{		pool_walker(			block_to_ptr(block),			block_size(block),			!block_is_free(block),			user);		block = block_next(block);	}}size_t tlsf_block_size(void* ptr){	size_t size = 0;	if (ptr)	{		const block_header_t* block = block_from_ptr(ptr);		size = block_size(block);	}	return size;}int tlsf_check_pool(pool_t pool){	/* Check that the blocks are physically correct. */	integrity_t integ = { 0, 0 };	tlsf_walk_pool(pool, integrity_walker, &integ);	return integ.status;}size_t tlsf_fit_size(tlsf_t tlsf, size_t size){	/* because it's GoodFit, allocable size is one range lower */    if (size) 	{		control_t* control = tlsf_cast(control_t*, tlsf);        size_t sl_interval = (1 << ((sizeof(size_t) * 8 - 1) - __builtin_clz(size))) / control->sl_index_count;        return size & ~(sl_interval - 1);    }		return 0;}	/*** Size of the TLSF structures in a given memory block passed to** tlsf_create, equal to the size of a control_t*/size_t tlsf_size(tlsf_t tlsf){	if (tlsf) 	{		control_t* control = tlsf_cast(control_t*, tlsf);		return control->size;	}			/* no tlsf, we'll just return a min size */	return sizeof(control_t) + 	       sizeof(int) * SL_INDEX_COUNT_MIN + 	       sizeof(block_header_t*) * SL_INDEX_COUNT_MIN * FL_INDEX_COUNT_MIN;}size_t tlsf_align_size(void){	return ALIGN_SIZE;}size_t tlsf_block_size_min(void){	return block_size_min;}size_t tlsf_block_size_max(tlsf_t tlsf){	control_t* control = tlsf_cast(control_t*, tlsf);	return tlsf_cast(size_t, 1) << control->fl_index_max;}/*** Overhead of the TLSF structures in a given memory block passed to** tlsf_add_pool, equal to the overhead of a free block and the** sentinel block.*/size_t tlsf_pool_overhead(void){	return 2 * block_header_overhead;}size_t tlsf_alloc_overhead(void){	return block_header_overhead;}pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes){	block_header_t* block;	block_header_t* next;	const size_t pool_overhead = tlsf_pool_overhead();	const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);	if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)	{		printf("tlsf_add_pool: Memory must be aligned by %u bytes.\n",			(unsigned int)ALIGN_SIZE);		return 0;	}	if (pool_bytes < block_size_min || pool_bytes > tlsf_block_size_max(tlsf))	{#if defined (TLSF_64BIT)		printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",			(unsigned int)(pool_overhead + block_size_min),			(unsigned int)((pool_overhead + tlsf_block_size_max(tlsf)) / 256));#else		printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n",			(unsigned int)(pool_overhead + block_size_min),			(unsigned int)(pool_overhead + tlsf_block_size_max(tlsf)));#endif		return 0;	}	/*	** Create the main free block. Offset the start of the block slightly	** so that the prev_phys_block field falls outside of the pool -	** it will never be used.	*/	block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);	block_set_size(block, pool_bytes);	block_set_free(block);	block_set_prev_used(block);	block_insert(tlsf_cast(control_t*, tlsf), block);	/* Split the block to create a zero-size sentinel block. */	next = block_link_next(block);	block_set_size(next, 0);	block_set_used(next);	block_set_prev_free(next);	return mem;}void tlsf_remove_pool(tlsf_t tlsf, pool_t pool){	control_t* control = tlsf_cast(control_t*, tlsf);	block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);	int fl = 0, sl = 0;	tlsf_assert(block_is_free(block) && "block should be free");	tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");	tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");	mapping_insert(control, block_size(block), &fl, &sl);	remove_free_block(control, block, fl, sl);}/*** TLSF main interface.*/tlsf_t tlsf_create(void* mem, size_t max_bytes){#if _DEBUG	if (test_ffs_fls())	{		return 0;	}#endif	if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)	{		printf("tlsf_create: Memory must be aligned to %u bytes.\n",			(unsigned int)ALIGN_SIZE);		return 0;	}	control_construct(tlsf_cast(control_t*, mem), max_bytes);	return tlsf_cast(tlsf_t, mem);}pool_t tlsf_get_pool(tlsf_t tlsf){	return tlsf_cast(pool_t, (char*)tlsf + tlsf_size(tlsf));}tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes){	tlsf_t tlsf = tlsf_create(mem, max_bytes ? max_bytes : pool_bytes);	tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf));	return tlsf;}void* tlsf_malloc(tlsf_t tlsf, size_t size){	control_t* control = tlsf_cast(control_t*, tlsf);	size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);	block_header_t* block = block_locate_free(control, adjust);	return block_prepare_used(control, block, adjust);}/** * @brief Allocate memory of at least `size` bytes where byte at `data_offset` will be aligned to `alignment`. * * This function will allocate memory pointed by `ptr`. However, the byte at `data_offset` of * this piece of memory (i.e., byte at `ptr` + `data_offset`) will be aligned to `alignment`. * This function is useful for allocating memory that will internally have a header, and the * usable memory following the header (i.e. `ptr` + `data_offset`) must be aligned. * * For example, a call to `multi_heap_aligned_alloc_impl_offs(heap, 64, 256, 20)` will return a * pointer `ptr` to free memory of minimum 64 bytes, where `ptr + 20` is aligned on `256`. * So `(ptr + 20) % 256` equals 0. * * @param tlsf TLSF structure to allocate memory from. * @param align Alignment for the returned pointer's offset. * @param size Minimum size, in bytes, of the memory to allocate INCLUDING *             `data_offset` bytes. * @param data_offset Offset to be aligned on `alignment`. This can be 0, in *                    this case, the returned pointer will be aligned on *                    `alignment`. If it is not a multiple of CPU word size, *                    it will be aligned up to the closest multiple of it. * * @return pointer to free memory. */void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_offset){    control_t* control = tlsf_cast(control_t*, tlsf);    const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);    const size_t off_adjust = align_up(data_offset, ALIGN_SIZE);	/*	** We must allocate an additional minimum block size bytes so that if	** our free block will leave an alignment gap which is smaller, we can	** trim a leading free block and release it back to the pool. We must	** do this because the previous physical block is in use, therefore	** the prev_phys_block field is not valid, and we can't simply adjust	** the size of that block.	*/	const size_t gap_minimum = sizeof(block_header_t) + off_adjust;    /* The offset is included in both `adjust` and `gap_minimum`, so we    ** need to subtract it once.    */	const size_t size_with_gap = adjust_request_size(tlsf, adjust + align + gap_minimum - off_adjust, align);	/*	** If alignment is less than or equals base alignment, we're done.	** If we requested 0 bytes, return null, as tlsf_malloc(0) does.	*/	const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;	block_header_t* block = block_locate_free(control, aligned_size);	/* This can't be a static assert. */	tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);	if (block)	{		void* ptr = block_to_ptr(block);		void* aligned = align_ptr(ptr, align);		size_t gap = tlsf_cast(size_t,			tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));       /*        ** If gap size is too small or if there is not gap but we need one,        ** offset to next aligned boundary.        */		if ((gap && gap < gap_minimum) || (!gap && off_adjust))		{			const size_t gap_remain = gap_minimum - gap;			const size_t offset = tlsf_max(gap_remain, align);			const void* next_aligned = tlsf_cast(void*,				tlsf_cast(tlsfptr_t, aligned) + offset);			aligned = align_ptr(next_aligned, align);			gap = tlsf_cast(size_t,				tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));		}		if (gap)		{			tlsf_assert(gap >= gap_minimum && "gap size too small");			block = block_trim_free_leading(control, block, gap - off_adjust);		}	}    /* Preparing the block will also the trailing free memory. */	return block_prepare_used(control, block, adjust);}/** * @brief Same as `tlsf_memalign_offs` function but with a 0 offset. * The pointer returned is aligned on `align`. */void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size){    return tlsf_memalign_offs(tlsf, align, size, 0);}void tlsf_free(tlsf_t tlsf, void* ptr){	/* Don't attempt to free a NULL pointer. */	if (ptr)	{		control_t* control = tlsf_cast(control_t*, tlsf);		block_header_t* block = block_from_ptr(ptr);		tlsf_assert(!block_is_free(block) && "block already marked as free");		block_mark_as_free(block);		block = block_merge_prev(control, block);		block = block_merge_next(control, block);		block_insert(control, block);	}}/*** The TLSF block information provides us with enough information to** provide a reasonably intelligent implementation of realloc, growing or** shrinking the currently allocated block as required.**** This routine handles the somewhat esoteric edge cases of realloc:** - a non-zero size with a null pointer will behave like malloc** - a zero size with a non-null pointer will behave like free** - a request that cannot be satisfied will leave the original buffer**   untouched** - an extended buffer size will leave the newly-allocated area with**   contents undefined*/void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size){	control_t* control = tlsf_cast(control_t*, tlsf);	void* p = 0;	/* Zero-size requests are treated as free. */	if (ptr && size == 0)	{		tlsf_free(tlsf, ptr);	}	/* Requests with NULL pointers are treated as malloc. */	else if (!ptr)	{		p = tlsf_malloc(tlsf, size);	}	else	{		block_header_t* block = block_from_ptr(ptr);		block_header_t* next = block_next(block);		const size_t cursize = block_size(block);		const size_t combined = cursize + block_size(next) + block_header_overhead;		const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);		tlsf_assert(!block_is_free(block) && "block already marked as free");		/*		** If the next block is used, or when combined with the current		** block, does not offer enough space, we must reallocate and copy.		*/		if (adjust > cursize && (!block_is_free(next) || adjust > combined))		{			p = tlsf_malloc(tlsf, size);			if (p)			{				const size_t minsize = tlsf_min(cursize, size);				memcpy(p, ptr, minsize);				tlsf_free(tlsf, ptr);			}		}		else		{			/* Do we need to expand to the next block? */			if (adjust > cursize)			{				block_merge_next(control, block);				block_mark_as_used(block);			}			/* Trim the resulting block and return the original pointer. */			block_trim_used(control, block, adjust);			p = ptr;		}	}	return p;}
 |