heap_caps_init.c

// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "heap_private.h"
#include <assert.h>
#include <string.h>
#include <sys/lock.h>
#include "esp_log.h"
#include "multi_heap.h"
#include "multi_heap_platform.h"
#include "esp_heap_caps_init.h"
#include "soc/soc_memory_layout.h"

static const char *TAG = "heap_init";

/* Linked-list of registered heaps */
struct registered_heap_ll registered_heaps;

static void register_heap(heap_t *region)
{
    size_t heap_size = region->end - region->start;
    assert(heap_size <= HEAP_SIZE_MAX);
    region->heap = multi_heap_register((void *)region->start, heap_size);
    if (region->heap != NULL) {
        ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
    }
}

void heap_caps_enable_nonos_stack_heaps(void)
{
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        // Assume any not-yet-registered heap is
        // a nonos-stack heap
        if (heap->heap == NULL) {
            register_heap(heap);
            if (heap->heap != NULL) {
                multi_heap_set_lock(heap->heap, &heap->heap_mux);
            }
        }
    }
}

/* Initialize the heap allocator to use all of the memory not
   used by static data or reserved for other purposes
 */
void heap_caps_init(void)
{
    /* Get the array of regions that we can use for heaps
       (with reserved memory removed already.)
     */
    size_t num_regions = soc_get_available_memory_region_max_count();
    soc_memory_region_t regions[num_regions];
    num_regions = soc_get_available_memory_regions(regions);

    // The heap allocator will treat every region given to it as separate. In order to get
    // bigger ranges of contiguous memory, it's useful to coalesce adjacent regions that
    // have the same type.
    for (size_t i = 1; i < num_regions; i++) {
        soc_memory_region_t *a = &regions[i - 1];
        soc_memory_region_t *b = &regions[i];
        if (b->start == (intptr_t)(a->start + a->size) && b->type == a->type) {
            a->type = -1;
            b->start = a->start;
            b->size += a->size;
        }
    }

    /* Count the heaps left after merging */
    size_t num_heaps = 0;
    for (size_t i = 0; i < num_regions; i++) {
        if (regions[i].type != -1) {
            num_heaps++;
        }
    }

    /* Start by allocating the registered heap data on the stack.
       Once there is a usable heap, this data is copied into a heap-allocated buffer.
     */
    heap_t temp_heaps[num_heaps];
    size_t heap_idx = 0;

    ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
    for (size_t i = 0; i < num_regions; i++) {
        soc_memory_region_t *region = &regions[i];
        const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
        heap_t *heap = &temp_heaps[heap_idx];
        if (region->type == -1) {
            continue;
        }
        heap_idx++;
        assert(heap_idx <= num_heaps);

        memcpy(heap->caps, type->caps, sizeof(heap->caps));
        heap->start = region->start;
        heap->end = region->start + region->size;
        MULTI_HEAP_LOCK_INIT(&heap->heap_mux);
        if (type->startup_stack) {
            /* Will be registered when OS scheduler starts */
            heap->heap = NULL;
        } else {
            register_heap(heap);
        }
        SLIST_NEXT(heap, next) = NULL;

        ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
                       region->start, region->size, region->size / 1024, type->name);
    }

    assert(heap_idx == num_heaps);

    /* Allocate the permanent heap data that will be used as a linked list at runtime.
       Allocate this data contiguously, even though it's a linked list... */
    assert(SLIST_EMPTY(&registered_heaps));
    heap_t *heaps_array = NULL;
    for (size_t i = 0; i < num_heaps; i++) {
        if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL)) {
            /* use the first DRAM heap which can fit the data */
            heaps_array = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_heaps);
            if (heaps_array != NULL) {
                break;
            }
        }
    }
    assert(heaps_array != NULL); /* if NULL, there's not enough free startup heap space */

    memcpy(heaps_array, temp_heaps, sizeof(heap_t) * num_heaps);

    /* Iterate the heaps and set their locks, also add them to the linked list. */
    for (size_t i = 0; i < num_heaps; i++) {
        if (heaps_array[i].heap != NULL) {
            multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
        }
        if (i == 0) {
            SLIST_INSERT_HEAD(&registered_heaps, &heaps_array[0], next);
        } else {
            SLIST_INSERT_AFTER(&heaps_array[i - 1], &heaps_array[i], next);
        }
    }
}
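
/* Illustrative sketch (not part of the original file): the expected call order during boot,
   assuming the usual IDF startup flow. heap_caps_init() runs early, before the scheduler,
   and heap_caps_enable_nonos_stack_heaps() is called once the startup stacks are no longer
   in use, so the regions marked with 'startup_stack' above can also be handed to the
   allocator. The wrapper function name is hypothetical. */
static void example_startup_order(void)
{
    heap_caps_init();                      /* registers all non-startup-stack heaps */
    /* ... start the OS scheduler; startup stacks become unused ... */
    heap_caps_enable_nonos_stack_heaps();  /* registers the remaining heaps */
}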

esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
{
    if (start == 0) {
        return ESP_ERR_INVALID_ARG;
    }

    for (size_t i = 0; i < soc_memory_region_count; i++) {
        const soc_memory_region_t *region = &soc_memory_regions[i];
        // Only test the requested 'start', as 'end' may fall in a different region entry;
        // assume 'end' has the same caps.
        if (region->start <= start && (intptr_t)(region->start + region->size) > start) {
            const uint32_t *caps = soc_memory_types[region->type].caps;
            return heap_caps_add_region_with_caps(caps, start, end);
        }
    }

    return ESP_ERR_NOT_FOUND;
}
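
/* Illustrative sketch (not part of the original file): how later code might hand an unused
   RAM range to the allocator via heap_caps_add_region(). The addresses below are
   placeholders, not real chip addresses; a real caller must pass a range that lies inside
   one of the entries in soc_memory_regions[] and is not already managed by a heap. */
static esp_err_t example_add_spare_region(void)
{
    const intptr_t spare_start = 0x40000000; /* hypothetical start address */
    const intptr_t spare_end   = 0x40010000; /* hypothetical end address */
    esp_err_t err = heap_caps_add_region(spare_start, spare_end);
    if (err != ESP_OK) {
        ESP_EARLY_LOGW(TAG, "spare region not added (0x%x)", err);
    }
    return err;
}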

esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
{
    esp_err_t err = ESP_FAIL;
    if (caps == NULL || start == 0 || end == 0 || end <= start) {
        return ESP_ERR_INVALID_ARG;
    }

    // Check whether the new region overlaps the start and/or end of an existing region.
    // If it does, the new region is invalid (or is perhaps being added twice).
    /*
     *  Within a region, start must be strictly less than end (they can never be equal).
     *  The 4th scenario below is allowed on purpose: for example, memory can be allocated
     *  from an existing heap, its capabilities changed, and this function called to
     *  register it as a new region for a special application.
     *  The case where both start and end equal the existing region is covered by the 3rd
     *  scenario. It is rejected because the same region must not be added twice;
     *  registering, say, the .bss memory region a second time would cause an exception if
     *  this check were skipped.
     *
     *  the existing heap region                      s(tart)              e(nd)
     *                                                    |--------------------|
     *  1. add region [Correct]   (s1<s && e1<=s)   |-----|
     *  2. add region [Incorrect] (s2<=s && s<e2<=e)      |--------------|
     *  3. add region [Incorrect] (s3<=s && e<e3)         |---------------------------|
     *  4. add region [Correct]   (s<s4<e && s<e4<=e)         |-------|
     *  5. add region [Incorrect] (s<s5<e && e<e5)                |--------------------|
     *  6. add region [Correct]   (e<=s6 && e<e6)                               |----|
     */
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        if ((start <= heap->start && end > heap->start)
                || (start < heap->end && end > heap->end)) {
            return ESP_FAIL;
        }
    }

    heap_t *p_new = heap_caps_malloc(sizeof(heap_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (p_new == NULL) {
        err = ESP_ERR_NO_MEM;
        goto done;
    }
    memcpy(p_new->caps, caps, sizeof(p_new->caps));
    p_new->start = start;
    p_new->end = end;
    MULTI_HEAP_LOCK_INIT(&p_new->heap_mux);
    p_new->heap = multi_heap_register((void *)start, end - start);
    SLIST_NEXT(p_new, next) = NULL;
    if (p_new->heap == NULL) {
        err = ESP_ERR_INVALID_SIZE;
        goto done;
    }
    multi_heap_set_lock(p_new->heap, &p_new->heap_mux);

    /* (This insertion is atomic with respect to readers of registered_heaps, so
       readers need no locking; only concurrent writers have to be serialised.) */
    static multi_heap_lock_t registered_heaps_write_lock = MULTI_HEAP_LOCK_STATIC_INITIALIZER;
    MULTI_HEAP_LOCK(&registered_heaps_write_lock);
    SLIST_INSERT_HEAD(&registered_heaps, p_new, next);
    MULTI_HEAP_UNLOCK(&registered_heaps_write_lock);

    err = ESP_OK;

done:
    if (err != ESP_OK) {
        free(p_new);
    }
    return err;
}
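
/* Illustrative sketch (not part of the original file): registering a range with explicit
   capabilities via heap_caps_add_region_with_caps(). The caps array is assumed to have
   SOC_MEMORY_TYPE_NO_PRIOS entries (the length of heap_t::caps copied by the memcpy()
   above); the capability flags and the address range are placeholders chosen for
   illustration, not values taken from a real memory map. */
static esp_err_t example_add_region_with_caps(void)
{
    const uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS] = {
        MALLOC_CAP_8BIT | MALLOC_CAP_32BIT,  /* first-priority capabilities */
        0,
        0,
    };
    /* Hypothetical, already-reserved RAM range */
    return heap_caps_add_region_with_caps(caps, 0x3FFE0000, 0x3FFF0000);
}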