heap_tlsf_block_functions.h

/*
** Two Level Segregated Fit memory allocator, version 3.1.
** Written by Matthew Conte
**   http://tlsf.baisoku.org
**
** Based on the original documentation by Miguel Masmano:
**   http://www.gii.upv.es/tlsf/main/docs
**
** This implementation was written to the specification
** of the document, therefore no GPL restrictions apply.
**
** Copyright (c) 2006-2016, Matthew Conte
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are met:
**     * Redistributions of source code must retain the above copyright
**       notice, this list of conditions and the following disclaimer.
**     * Redistributions in binary form must reproduce the above copyright
**       notice, this list of conditions and the following disclaimer in the
**       documentation and/or other materials provided with the distribution.
**     * Neither the name of the copyright holder nor the
**       names of its contributors may be used to endorse or promote products
**       derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#pragma once

/*
** Data structures and associated constants.
*/

/*
** Since block sizes are always at least a multiple of 4, the two least
** significant bits of the size field are used to store the block status:
** - bit 0: whether the block is busy or free
** - bit 1: whether the previous block is busy or free
*/
#define block_header_free_bit (1 << 0)
#define block_header_prev_free_bit (1 << 1)
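
/*
** Illustrative sketch (not part of the allocator): decoding a raw size word
** under the encoding above. The value 0x65 is an arbitrary example.
**
**   size_t raw = 0x65;                              // 0b0110'0101
**   size_t size = raw & ~(size_t)(block_header_free_bit |
**                                 block_header_prev_free_bit);
**                                                   // 0x64 (100 bytes)
**   int is_free   = (int)(raw & block_header_free_bit);       // nonzero: free
**   int prev_free = (int)(raw & block_header_prev_free_bit);  // zero: prev busy
*/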

/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
#define block_header_overhead (sizeof(size_t))

/* User data starts directly after the size field in a used block. */
#define block_start_offset (offsetof(block_header_t, size) + sizeof(size_t))
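
/*
** Layout sketch (assumes the usual TLSF block_header_t, whose first two
** fields are prev_phys_block and size; the free-list links that follow
** exist only while the block is free):
**
**   block start                       user pointer
**   |                                 |
**   v                                 v
**   +-----------------+--------+------------------------+
**   | prev_phys_block |  size  |  user data ...         |
**   +-----------------+--------+------------------------+
**
**   |<----- block_start_offset ----->|
*/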

/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
** The block_size_max macro returns the maximum block size for the minimum
** pool; use tlsf_block_size_max() for a value specific to the pool.
*/
#define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*))
#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX_MIN)
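
/*
** Worked example (hypothetical numbers): on a 32-bit target where
** block_header_t carries a prev_phys_block pointer, a size field, and two
** free-list pointers (16 bytes total), and FL_INDEX_MAX_MIN were 30:
**
**   block_size_min = 16 - 4 = 12 bytes
**   block_size_max = (size_t)1 << 30 = 1 GiB
*/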

/*
** block_header_t member functions.
*/

static inline __attribute__((__always_inline__)) size_t block_size(const block_header_t* block)
{
    return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
}

static inline __attribute__((__always_inline__)) void block_set_size(block_header_t* block, size_t size)
{
    const size_t oldsize = block->size;
    block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
}

static inline __attribute__((__always_inline__)) int block_is_last(const block_header_t* block)
{
    return block_size(block) == 0;
}

static inline __attribute__((__always_inline__)) int block_is_free(const block_header_t* block)
{
    return tlsf_cast(int, block->size & block_header_free_bit);
}

static inline __attribute__((__always_inline__)) void block_set_free(block_header_t* block)
{
    block->size |= block_header_free_bit;
}

static inline __attribute__((__always_inline__)) void block_set_used(block_header_t* block)
{
    block->size &= ~block_header_free_bit;
}

static inline __attribute__((__always_inline__)) int block_is_prev_free(const block_header_t* block)
{
    return tlsf_cast(int, block->size & block_header_prev_free_bit);
}

static inline __attribute__((__always_inline__)) void block_set_prev_free(block_header_t* block)
{
    block->size |= block_header_prev_free_bit;
}

static inline __attribute__((__always_inline__)) void block_set_prev_used(block_header_t* block)
{
    block->size &= ~block_header_prev_free_bit;
}

static inline __attribute__((__always_inline__)) block_header_t* block_from_ptr(const void* ptr)
{
    return tlsf_cast(block_header_t*,
                     tlsf_cast(unsigned char*, ptr) - block_start_offset);
}

static inline __attribute__((__always_inline__)) void* block_to_ptr(const block_header_t* block)
{
    return tlsf_cast(void*,
                     tlsf_cast(unsigned char*, block) + block_start_offset);
}
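
/*
** Usage sketch (hypothetical): the two conversions above are inverses, so a
** pointer handed out by the allocator can be mapped back to its header and
** forth again.
**
**   void* user = block_to_ptr(block);
**   block_header_t* same = block_from_ptr(user);   // same == block
*/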

/* Return location of next block after block of given size. */
static inline __attribute__((__always_inline__)) block_header_t* offset_to_block(const void* ptr, size_t size)
{
    return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
}

/* Return location of previous block. */
static inline __attribute__((__always_inline__)) block_header_t* block_prev(const block_header_t* block)
{
    return block->prev_phys_block;
}

/* Return location of next existing block. */
static inline __attribute__((__always_inline__)) block_header_t* block_next(const block_header_t* block)
{
    block_header_t* next = offset_to_block(block_to_ptr(block),
                                           block_size(block) - block_header_overhead);
    return next;
}
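
/*
** Note on the arithmetic above: the next physical header starts
** block_header_overhead bytes *before* the end of this block's payload,
** because the next block's prev_phys_block field is stored inside this
** block (see the comment at block_header_overhead). Hence the subtraction.
*/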

/* Link a new block with its physical neighbor, return the neighbor. */
static inline __attribute__((__always_inline__)) block_header_t* block_link_next(block_header_t* block)
{
    block_header_t* next = block_next(block);
    next->prev_phys_block = block;
    return next;
}

static inline __attribute__((__always_inline__)) void block_mark_as_free(block_header_t* block)
{
    /* Link the block to the next block, first. */
    block_header_t* next = block_link_next(block);
    block_set_prev_free(next);
    block_set_free(block);
}

static inline __attribute__((__always_inline__)) void block_mark_as_used(block_header_t* block)
{
    block_header_t* next = block_next(block);
    block_set_prev_used(next);
    block_set_used(block);
}
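
/*
** Usage sketch (hypothetical, assuming block is not the last block in its
** pool): freeing updates two headers at once, so the status bits stay
** consistent across physical neighbors.
**
**   block_mark_as_free(block);                       // sets free bit here...
**   assert(block_is_free(block));
**   assert(block_is_prev_free(block_next(block)));   // ...and prev-free bit there
*/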