move to 4.3.5

- remove SPI workaround and heap optimization
- move a few items from DRAM to either EXTRAM or keep them in text
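
For background on the second bullet: in ESP-IDF, static data can be steered between internal DRAM and external PSRAM with the placement attributes from `esp_attr.h`. A minimal sketch of the idea, using the real `DRAM_ATTR`/`EXT_RAM_ATTR` macros but a hypothetical buffer name (the actual symbols moved by this commit differ):

    #include <stdint.h>
    #include "esp_attr.h"

    // Before: forced into internal DRAM, consuming scarce internal memory.
    // DRAM_ATTR static uint8_t s_stream_buf[32 * 1024];

    // After: placed in external PSRAM (.ext_ram.bss), freeing internal DRAM.
    // Requires CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY=y in sdkconfig.
    EXT_RAM_ATTR static uint8_t s_stream_buf[32 * 1024];

    // Read-only tables can instead drop DRAM_ATTR entirely: as const data they
    // stay in flash ("keep them in text") and are read through the cache.
    static const uint16_t s_gain_table[256] = { 0 };
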
philippe44 1 year ago
parent
commit
e531bea28a
46 changed files with 8 additions and 9575 deletions
  1. + 1 - 1      components/_override/CMakeLists.txt
  2. + 3 - 2      components/_override/esp32/i2s.c
  3. + 0 - 849    components/_override/esp32/spi_bus_lock.c
  4. + 0 - 1007   components/_override/esp32/spi_master.c
  5. + 0 - 1047   components/_override/esp32/spi_master.c.debug
  6. + 0 - 49     components/heap/CMakeLists.txt
  7. + 0 - 74     components/heap/Kconfig
  8. + 0 - 32     components/heap/component.mk
  9. + 0 - 609    components/heap/heap_caps.c
  10. + 0 - 241   components/heap/heap_caps_init.c
  11. + 0 - 77    components/heap/heap_private.h
  12. + 0 - 129   components/heap/heap_task_info.c
  13. + 0 - 1015  components/heap/heap_tlsf.c
  14. + 0 - 119   components/heap/heap_tlsf.h
  15. + 0 - 174   components/heap/heap_tlsf_block_functions.h
  16. + 0 - 66    components/heap/heap_tlsf_config.h
  17. + 0 - 255   components/heap/heap_trace_standalone.c
  18. + 0 - 402   components/heap/include/esp_heap_caps.h
  19. + 0 - 92    components/heap/include/esp_heap_caps_init.h
  20. + 0 - 98    components/heap/include/esp_heap_task_info.h
  21. + 0 - 154   components/heap/include/esp_heap_trace.h
  22. + 0 - 200   components/heap/include/heap_trace.inc
  23. + 0 - 190   components/heap/include/multi_heap.h
  24. + 0 - 7     components/heap/linker.lf
  25. + 0 - 376   components/heap/multi_heap.c
  26. + 0 - 31    components/heap/multi_heap_config.h
  27. + 0 - 76    components/heap/multi_heap_internal.h
  28. + 0 - 108   components/heap/multi_heap_platform.h
  29. + 0 - 426   components/heap/multi_heap_poisoning.c
  30. + 0 - 3     components/heap/test/CMakeLists.txt
  31. + 0 - 5     components/heap/test/component.mk
  32. + 0 - 147   components/heap/test/test_aligned_alloc_caps.c
  33. + 0 - 108   components/heap/test/test_allocator_timings.c
  34. + 0 - 74    components/heap/test/test_diram.c
  35. + 0 - 164   components/heap/test/test_heap_trace.c
  36. + 0 - 60    components/heap/test/test_leak.c
  37. + 0 - 134   components/heap/test/test_malloc.c
  38. + 0 - 247   components/heap/test/test_malloc_caps.c
  39. + 0 - 67    components/heap/test/test_realloc.c
  40. + 0 - 72    components/heap/test/test_runtime_heap_reg.c
  41. + 0 - 54    components/heap/test_multi_heap_host/Makefile
  42. + 0 - 2     components/heap/test_multi_heap_host/main.cpp
  43. + 0 - 20    components/heap/test_multi_heap_host/test_all_configs.sh
  44. + 0 - 508   components/heap/test_multi_heap_host/test_multi_heap.cpp
  45. + 3 - 3     components/raop/raop.c
  46. + 1 - 1     components/squeezelite/output_i2s.c

+ 1 - 1
components/_override/CMakeLists.txt

@@ -1,6 +1,6 @@
 if(IDF_TARGET STREQUAL esp32)
     set(lib_dir ${build_dir}/esp-idf)
-    set(driver esp32/i2s.c esp32/i2s_hal.c esp32/spi_bus_lock.c)
+    set(driver esp32/i2s.c esp32/i2s_hal.c)
     string(REPLACE ".c" ".c.obj" driver_obj "${driver}")
 
     idf_component_register( SRCS ${driver}

+ 3 - 2
components/_override/esp32/i2s.c

@@ -38,7 +38,8 @@
 #include "esp_attr.h"
 #include "esp_log.h"
 #include "esp_pm.h"
-#include "esp_efuse.h"
+#include "soc/chip_revision.h"
+#include "hal/efuse_hal.h"
 #include "esp_rom_gpio.h"
 
 #include "sdkconfig.h"
@@ -193,7 +194,7 @@ static float i2s_apll_get_fi2s(int bits_per_sample, int sdm0, int sdm1, int sdm2
 
 #if CONFIG_IDF_TARGET_ESP32
     /* ESP32 rev0 silicon issue for APLL range/accuracy, please see ESP32 ECO document for more information on this */
-    if (esp_efuse_get_chip_ver() == 0) {
+    if (!ESP_CHIP_REV_ABOVE(efuse_hal_chip_revision(), 100)) {
         sdm0 = 0;
         sdm1 = 0;
     }
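
For reference, the replacement API encodes the chip revision as major * 100 + minor (so v0.0 silicon reads as 0 and v1.0 as 100), and `ESP_CHIP_REV_ABOVE(rev, min_rev)` is a plain `rev >= min_rev` comparison. A small sketch of how the rev0 test above evaluates, assuming that numbering:

    #include <stdbool.h>
    #include <stdint.h>
    #include "soc/chip_revision.h"   // ESP_CHIP_REV_ABOVE(rev, min_rev)
    #include "hal/efuse_hal.h"       // efuse_hal_chip_revision()

    static bool is_rev0_silicon(void)
    {
        // efuse_hal_chip_revision() returns major * 100 + minor.
        uint32_t rev = efuse_hal_chip_revision();

        // ESP_CHIP_REV_ABOVE(rev, 100) is true for v1.0 silicon and later, so
        // its negation selects the rev0 parts that need sdm0/sdm1 forced to 0.
        return !ESP_CHIP_REV_ABOVE(rev, 100);
    }
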

+ 0 - 849
components/_override/esp32/spi_bus_lock.c

@@ -1,849 +0,0 @@
-// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "freertos/FreeRTOS.h"
-#include "freertos/semphr.h"
-#include <stdatomic.h>
-#include "sdkconfig.h"
-#include "spi_common_internal.h"
-#include "esp_intr_alloc.h"
-#include "soc/soc_caps.h"
-#include "stdatomic.h"
-#include "esp_log.h"
-#include <strings.h>
-#include "esp_heap_caps.h"
-
-/*
- * This lock is designed to solve the conflicts between SPI devices (used in tasks) and
- * the background operations (ISR or cache access).
- *
- * There are N (device/task) + 1 (BG) acquiring processor candidates that may touch the bus.
- *
- * The core of the lock is a `status` atomic variable, which is always available. No intermediate
- * status is allowed. The atomic operations (mainly `atomic_fetch_and`, `atomic_fetch_or`)
- * atomically read the status, and bitwisely write status value ORed / ANDed with given masks.
- *
- * Definitions of the status:
- * - [30]    WEAK_BG_FLAG, active when the BG is the cache
- * - [29:20] LOCK bits, active when corresponding device is asking for acquiring
- * - [19:10] PENDING bits, active when the BG acknowledges the REQ bits, but hasn't fully handled them.
- * - [ 9: 0] REQ bits, active when corresponding device is requesting for BG operations.
- *
- *   The REQ bits together with the PENDING bits are called the BG bits, which represent the actual
- *   BG request state of the devices. Either one of REQ or PENDING being active indicates the device
- *   has pending BG requests. The reason for having two bits instead of one is given in the appendix below.
- *
- * The acquiring processor is the current processor (task or ISR) allowed to touch the critical
- * resources, i.e. the SPI bus.
- *
- * States of the lock:
- * - STATE_IDLE: There's no acquiring processor. No device is acquiring the bus, and no BG
- *   operation is in progress.
- *
- * - STATE_ACQ: The acquiring processor is a device task. This means one of the devices is
- *   acquiring the bus.
- *
- * - STATE_BG: The acquiring processor is the ISR, and there is no acquiring device.
- *
- * - STATE_BG_ACQ: The acquiring processor is the ISR, and there is an acquiring device.
- *
- *
- * Whenever a bit is written to the status, it means that a device on a task is trying to acquire
- * the lock (either for the task or for the ISR). When there are no LOCK bits or BG bits active, the
- * caller immediately becomes the acquiring processor. Otherwise, the task has to block, and the ISR
- * will not be invoked until scheduled by the current acquiring processor.
- *
- * The acquiring processor is responsible for assigning the next acquiring processor by calling the
- * scheduler, usually after it finishes some requests and has cleared the corresponding status bits.
- * There is one exception: when the last bit is cleared from the status, so that no other LOCK bits
- * or BG bits are active, the acquiring processor loses its role immediately and doesn't need to
- * call the scheduler to assign the next acquiring processor.
- *
- * The acquiring processor may also choose to assign a new acquiring device when there is none, by
- * calling `spi_bus_lock_bg_rotate_acq_dev` in the ISR. But the acquiring processor, in this case,
- * is still the ISR, until it calls the scheduler.
- *
- *
- * Transition of the FSM:
- *
- * - STATE_IDLE: no acquiring device, nor acquiring processor, no LOCK or BG bits active
- *   -> STATE_BG: by `req_core`
- *   -> STATE_ACQ: by `acquire_core`
- *
- * - STATE_BG:
- *      * No acquiring device, the ISR is the acquiring processor, there is BG bits active, but no LOCK
- *        bits
- *      * The BG operation should be enabled while turning into this state.
- *
- *   -> STATE_IDLE: by `bg_exit_core` after `clear_pend_core` for all BG bits
- *   -> STATE_BG_ACQ: by `schedule_core`, when there is new LOCK bit set (by `acquire_core`)
- *
- * - STATE_BG_ACQ:
- *      * There is acquiring device, the ISR is the acquiring processor, there may be BG bits active for
- *        the acquiring device.
- *      * The BG operation should be enabled while turning into this state.
- *
- *   -> STATE_ACQ: by `bg_exit_core` after `clear_pend_core` for all BG bits for the acquiring
- *                 device.
- *
- *                 Should not go to the STATE_ACQ (unblock the acquiring task) until all requests of the
- *                 acquiring device are finished. This is to preserve the sequence of foreground (polling) and
- *                 background operations of the device. The background operations queued before the acquiring
- *                 should be completed first.
- *
- * - STATE_ACQ:
- *      * There is acquiring device, the task is the acquiring processor, there is no BG bits active for
- *        the acquiring device.
- *      * The acquiring task (if blocked at `spi_bus_lock_acquire_start` or `spi_bus_lock_wait_bg_done`)
- *        should be resumed while turning into this state.
- *
- *   -> STATE_BG_ACQ: by `req_core`
- *   -> STATE_BG_ACQ (other device): by `acquire_end_core`, when there is LOCK bit for another
- *                    device, and the new acquiring device has active BG bits.
- *   -> STATE_ACQ (other device): by `acquire_end_core`, when there is LOCK bit for another devices,
- *                    but the new acquiring device has no active BG bits.
- *   -> STATE_BG: by `acquire_end_core` when there is no LOCK bit active, but there are active BG
- *                bits.
- *   -> STATE_IDLE: by `acquire_end_core` when there is no LOCK bit, nor BG bit active.
- *
- * The `req_core` used in the task is a little special. It asks for the acquiring processor role
- * on behalf of the ISR. When it succeeds for the first time, it invokes the ISR (hence passing the
- * acquiring role to the BG). Otherwise it does not block; the ISR will automatically be invoked
- * by another acquiring processor. The caller of `req_core` never becomes the acquiring processor
- * through this function.
- *
- *
- * Appendix: The design of having both a request bit and a pending bit solves a concurrency
- * issue between the tasks and the BG: a task can queue several requests, but a single request
- * bit cannot represent the number of requests queued.
- *
- * Here's the workflow of task and ISR work concurrently:
- * - Task: (a) Write to Queue -> (b) Write request bit
- *   The task has to write the request bit (b) after the data is prepared in the queue (a),
- *   otherwise the BG may fail to read from the queue when it sees the request bit set.
- *
- * - BG: (c) Read queue -> (d) Clear request bit
- *   Since the BG cannot know the number of requests queued, it has to repeatedly check the
- *   queue (c) until it finds the queue empty, and then clear the request bit (d).
- *
- * The events may happen in the order (c) -> (a) -> (b) -> (d). This causes a false clear of the
- * request bit: data is prepared in the queue, but the request bit is inactive.
- *
- * A step (e), which moves the REQ bits to PEND bits before (c), is introduced to solve this
- * problem. In this case (d) is changed to clear the PEND bit. Even with the order (e) -> (c) ->
- * (a) -> (b) -> (d), only the PEND bit is cleared, while the REQ bit is still active.
- */
-
-struct spi_bus_lock_dev_t;
-typedef struct spi_bus_lock_dev_t spi_bus_lock_dev_t;
-
-typedef struct spi_bus_lock_t spi_bus_lock_t;
-
-
-#define MAX_DEV_NUM     10
-
-// Bit 29-20: lock bits, Bit 19-10: pending bits
-// Bit 9-0: request bits, Bit 30: weak BG flag
-#define LOCK_SHIFT      20
-#define PENDING_SHIFT   10
-#define REQ_SHIFT       0
-
-#define WEAK_BG_FLAG    BIT(30)    /**< The bus is permanently requested by background operations.
-                                     * This flag is weak: it will not prevent devices from acquiring the bus, but it helps the BG to be re-enabled after the bus is released.
-                                     */
-
-// get the bit mask where bits [high-1, low] are all 1'b1.
-#define BIT1_MASK(high, low)   ((UINT32_MAX << (high)) ^ (UINT32_MAX << (low)))
-
-#define LOCK_BIT(mask)      ((mask) << LOCK_SHIFT)
-#define REQUEST_BIT(mask)   ((mask) << REQ_SHIFT)
-#define PENDING_BIT(mask)   ((mask) << PENDING_SHIFT)
-#define DEV_MASK(id)        (LOCK_BIT(1<<id) | PENDING_BIT(1<<id) | REQUEST_BIT(1<<id))
-#define ID_DEV_MASK(mask)   (ffs(mask) - 1)
-
-#define REQ_MASK            BIT1_MASK(REQ_SHIFT+MAX_DEV_NUM, REQ_SHIFT)
-#define PEND_MASK           BIT1_MASK(PENDING_SHIFT+MAX_DEV_NUM, PENDING_SHIFT)
-#define BG_MASK             BIT1_MASK(REQ_SHIFT+MAX_DEV_NUM*2, REQ_SHIFT)
-#define LOCK_MASK           BIT1_MASK(LOCK_SHIFT+MAX_DEV_NUM, LOCK_SHIFT)
-
-#define DEV_REQ_MASK(dev)   ((dev)->mask & REQ_MASK)
-#define DEV_PEND_MASK(dev)  ((dev)->mask & PEND_MASK)
-#define DEV_BG_MASK(dev)    ((dev)->mask & BG_MASK)
-
-struct spi_bus_lock_t {
-    /**
-     * The core of the lock. These bits are status of the lock, which should be always available.
-     * No intermediate status is allowed. This is realized by atomic operations, mainly
-     * `atomic_fetch_and`, `atomic_fetch_or`, which atomically read the status, and bitwise write
-     * status value ORed / ANDed with given masks.
-     *
- *     The request bits together with the pending bits represent the actual BG request state of one device.
-     * Either one of them being active indicates the device has pending bg requests.
-     *
- *     * Whenever a bit is written to the status, it means that a device on a task is trying to
- *       acquire the lock. This will succeed only when no LOCK or BG bits are active.
-     *
- *     The acquiring processor is responsible for calling the scheduler to pass its role to other
- *     tasks or the BG, unless it clears the last bit in the status register.
-     */
-    //// Critical resources, they are only writable by acquiring processor, and stable only when read by the acquiring processor.
-    atomic_uint_fast32_t    status;
-    spi_bus_lock_dev_t* volatile acquiring_dev;   ///< The acquiring device
-    bool                volatile acq_dev_bg_active;    ///< BG is the acquiring processor serving the acquiring device, used for the wait_bg to skip waiting quickly.
-    bool                volatile in_isr;         ///< ISR is touching HW
-    //// End of critical resources
-
-    atomic_intptr_t     dev[DEV_NUM_MAX];     ///< Child locks.
-    bg_ctrl_func_t      bg_enable;      ///< Function to enable background operations.
-    bg_ctrl_func_t      bg_disable;     ///< Function to disable background operations
-    void*               bg_arg;            ///< Argument for `bg_enable` and `bg_disable` functions.
-
-    spi_bus_lock_dev_t* last_dev;       ///< Last used device, to decide whether to refresh all registers.
-    int                 periph_cs_num;  ///< Number of the CS pins the HW has.
-
-    //debug information
-    int                 host_id;        ///< Host ID, for debug information printing
-    uint32_t            new_req;        ///< Last int_req when `spi_bus_lock_bg_start` is called. Debug use.
-};
-
-struct spi_bus_lock_dev_t {
-    SemaphoreHandle_t   semphr;     ///< Binary semaphore to notify the device it claimed the bus
-    spi_bus_lock_t*     parent;     ///< Pointer to parent spi_bus_lock_t
-    uint32_t            mask;       ///< Bitwise OR-ed mask of the REQ, PEND, LOCK bits of this device
-};
-
-portMUX_TYPE s_spinlock = portMUX_INITIALIZER_UNLOCKED;
-
-DRAM_ATTR static const char TAG[] = "bus_lock";
-
-#define LOCK_CHECK(a, str, ret_val, ...) \
-    if (!(a)) { \
-        ESP_LOGE(TAG,"%s(%d): "str, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
-        return (ret_val); \
-    }
-
-static inline int mask_get_id(uint32_t mask);
-static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock);
-
-/*******************************************************************************
- * atomic operations to the status
- ******************************************************************************/
-SPI_MASTER_ISR_ATTR static inline uint32_t lock_status_fetch_set(spi_bus_lock_t *lock, uint32_t set)
-{
-    return atomic_fetch_or(&lock->status, set);
-}
-
-IRAM_ATTR static inline uint32_t lock_status_fetch_clear(spi_bus_lock_t *lock, uint32_t clear)
-{
-    return atomic_fetch_and(&lock->status, ~clear);
-}
-
-IRAM_ATTR static inline uint32_t lock_status_fetch(spi_bus_lock_t *lock)
-{
-    return atomic_load(&lock->status);
-}
-
-SPI_MASTER_ISR_ATTR static inline void lock_status_init(spi_bus_lock_t *lock)
-{
-    atomic_store(&lock->status, 0);
-}
-
-// return the remaining status bits
-IRAM_ATTR static inline uint32_t lock_status_clear(spi_bus_lock_t* lock, uint32_t clear)
-{
-    //the fetch and clear should be atomic, to avoid missing the all-'0' status when all bits are cleared.
-    uint32_t state = lock_status_fetch_clear(lock, clear);
-    return state & (~clear);
-}
-
-/*******************************************************************************
- * Schedule service
- *
- * A modification to the status bits may cause rotation of the acquiring processor. It also
- * affects the `acquired_dev` (the acquiring device), `in_isr` (HW used in BG), and
- * `acq_dev_bg_active` (wait_bg_end can be skipped) members of the lock structure.
- *
- * Most of them should be atomic, and special attention should be paid to the operation
- * sequence.
- ******************************************************************************/
-SPI_MASTER_ISR_ATTR static inline void resume_dev_in_isr(spi_bus_lock_dev_t *dev_lock, BaseType_t *do_yield)
-{
-    xSemaphoreGiveFromISR(dev_lock->semphr, do_yield);
-}
-
-IRAM_ATTR static inline void resume_dev(const spi_bus_lock_dev_t *dev_lock)
-{
-    xSemaphoreGive(dev_lock->semphr);
-}
-
-SPI_MASTER_ISR_ATTR static inline void bg_disable(spi_bus_lock_t *lock)
-{
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_disable);
-    lock->bg_disable(lock->bg_arg);
-}
-
-IRAM_ATTR static inline void bg_enable(spi_bus_lock_t* lock)
-{
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_enable);
-    lock->bg_enable(lock->bg_arg);
-}
-
-// Set the REQ bit. If we become the acquiring processor, invoke the ISR and pass the acquiring role to it.
-// The caller will never become the acquiring processor after this function returns.
-SPI_MASTER_ATTR static inline void req_core(spi_bus_lock_dev_t *dev_handle)
-{
-    spi_bus_lock_t *lock = dev_handle->parent;
-
-    // Though `acquired_dev` is a critical resource, `dev_handle == lock->acquired_dev`
-    // is a stable statement unless `acquire_start` or `acquire_end` is called by the current
-    // device.
-    if (dev_handle == lock->acquiring_dev){
-        // Set the REQ bit and check BG bits if we are the acquiring processor.
-        // If the BG bits were not active before, invoke the BG again.
-
-        // To avoid a race with `clear_pend_core`, `acq_dev_bg_active` should be set before
-        // setting the REQ bit.
-        lock->acq_dev_bg_active = true;
-        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
-        if ((status & DEV_BG_MASK(dev_handle)) == 0) {
-            bg_enable(lock); //acquiring processor passed to BG
-        }
-    } else {
-        uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
-        if (status == 0) {
-            bg_enable(lock); //acquiring processor passed to BG
-        }
-    }
-}
-
-//Set the LOCK bit. Handle related stuff and return true if we become the acquiring processor.
-SPI_MASTER_ISR_ATTR static inline bool acquire_core(spi_bus_lock_dev_t *dev_handle)
-{
-    spi_bus_lock_t* lock = dev_handle->parent;
-	portENTER_CRITICAL_SAFE(&s_spinlock);
-    uint32_t status = lock_status_fetch_set(lock, dev_handle->mask & LOCK_MASK);
-	portEXIT_CRITICAL_SAFE(&s_spinlock);
-
-    // Check all bits except WEAK_BG
-    if ((status & (BG_MASK | LOCK_MASK)) == 0) {
-        //succeed at once
-        lock->acquiring_dev = dev_handle;
-        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
-        if (status & WEAK_BG_FLAG) {
-            //Mainly to disable the cache (Weak_BG), that is not able to disable itself
-            bg_disable(lock);
-        }
-        return true;
-    }
-    return false;
-}
-
-/**
- * Find the next acquiring processor according to the status. Will directly change
- * the acquiring device if new one found.
- *
- * Cases:
- * - BG should still be the acquiring processor (Return false):
- *     1. Acquiring device has active BG bits: out_desired_dev = new acquiring device
- *     2. No acquiring device, but BG active: out_desired_dev = randomly pick one device with active BG bits
- * - BG should yield to the task (Return true):
- *     3. Acquiring device has no active BG bits: out_desired_dev = new acquiring device
- *     4. No acquiring device while no active BG bits: out_desired_dev=NULL
- *
- * Acquiring device task need to be resumed only when case 3.
- *
- * This scheduling can happen in either task or ISR, so `in_isr` or `bg_active` not touched.
- *
- * @param lock
- * @param status Current status
- * @param out_desired_dev Desired device to work next, see above.
- *
- * @return False if BG should still be the acquiring processor, otherwise True (yield to task).
- */
-IRAM_ATTR static inline bool
-schedule_core(spi_bus_lock_t *lock, uint32_t status, spi_bus_lock_dev_t **out_desired_dev)
-{
-    spi_bus_lock_dev_t* desired_dev = NULL;
-    uint32_t lock_bits = (status & LOCK_MASK) >> LOCK_SHIFT;
-    uint32_t bg_bits = status & BG_MASK;
-    bg_bits = ((bg_bits >> REQ_SHIFT) | (bg_bits >> PENDING_SHIFT)) & REQ_MASK;
-
-    bool bg_yield;
-    if (lock_bits) {
-        int dev_id = mask_get_id(lock_bits);
-        desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
-        BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);
-
-        lock->acquiring_dev = desired_dev;
-        bg_yield = ((bg_bits & desired_dev->mask) == 0);
-        lock->acq_dev_bg_active = !bg_yield;
-    } else {
-        lock->acq_dev_bg_active = false;
-        if (bg_bits) {
-            int dev_id = mask_get_id(bg_bits);
-            desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
-            BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);
-
-            lock->acquiring_dev = NULL;
-            bg_yield = false;
-        } else {
-            desired_dev = NULL;
-            lock->acquiring_dev = NULL;
-            bg_yield = true;
-        }
-    }
-    *out_desired_dev = desired_dev;
-    return bg_yield;
-}
-
-//Clear the LOCK bit and trigger a rescheduling.
-IRAM_ATTR static inline void acquire_end_core(spi_bus_lock_dev_t *dev_handle)
-{
-    spi_bus_lock_t* lock = dev_handle->parent;
-    //uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
-    spi_bus_lock_dev_t* desired_dev = NULL;
-	
-    portENTER_CRITICAL_SAFE(&s_spinlock);
-    uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
-    bool invoke_bg = !schedule_core(lock, status, &desired_dev);
-    portEXIT_CRITICAL_SAFE(&s_spinlock);
-
-    if (invoke_bg) {
-        bg_enable(lock);
-    } else if (desired_dev) {
-        resume_dev(desired_dev);
-    } else if (status & WEAK_BG_FLAG) {
-        bg_enable(lock);
-    }
-}
-
-// Move the REQ bits to the corresponding PEND bits. Must be called by the acquiring processor.
-// Has no side effects on the acquiring device/processor.
-SPI_MASTER_ISR_ATTR static inline void update_pend_core(spi_bus_lock_t *lock, uint32_t status)
-{
-    uint32_t active_req_bits = status & REQ_MASK;
-#if PENDING_SHIFT > REQ_SHIFT
-    uint32_t pending_mask = active_req_bits << (PENDING_SHIFT - REQ_SHIFT);
-#else
-    uint32_t pending_mask = active_req_bits >> (REQ_SHIFT - PENDING_SHIFT);
-#endif
-    // We have to set the PEND bits and then clear the REQ bits. Since the BG bits use bitwise OR
-    // logic, this will not influence the effective BG state of any device.
-    lock_status_fetch_set(lock, pending_mask);
-    lock_status_fetch_clear(lock, active_req_bits);
-}
-
-// Clear the PEND bit (not the REQ bit!) of a device, and return a suggestion on whether we can try to quit the ISR.
-// The acquiring processor role is lost immediately when the BG bits for the active device are inactive, as indicated by the return value.
-// Can be called only when the ISR is acting as the acquiring processor.
-SPI_MASTER_ISR_ATTR static inline bool clear_pend_core(spi_bus_lock_dev_t *dev_handle)
-{
-    bool finished;
-    spi_bus_lock_t *lock = dev_handle->parent;
-    uint32_t pend_mask = DEV_PEND_MASK(dev_handle);
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock_status_fetch(lock) & pend_mask);
-
-    uint32_t status = lock_status_clear(lock, pend_mask);
-
-    if (lock->acquiring_dev == dev_handle) {
-        finished = ((status & DEV_REQ_MASK(dev_handle)) == 0);
-        if (finished) {
-            lock->acq_dev_bg_active = false;
-        }
-    } else {
-        finished = (status == 0);
-    }
-    return finished;
-}
-
-// Return true if the ISR has already touched the HW, which means previous operations should
-// be terminated first, before we use the HW again. Otherwise return false.
-// In either case `in_isr` will be marked as true, until `bg_exit_core` is successfully called with `wip=false`.
-SPI_MASTER_ISR_ATTR static inline bool bg_entry_core(spi_bus_lock_t *lock)
-{
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
-    /*
-     * The interrupt is disabled at the entry of the ISR to avoid the race below:
-     *
-     * `esp_intr_enable` will be called (b) after a new BG request is queued (a) in the task,
-     * while `esp_intr_disable` should be called (c) if we check and find the sending queue empty (d).
-     * If things happen in the sequence (d) -> (a) -> (b) -> (c), the interrupt will be disabled
-     * while there is a pending BG request in the queue.
-     *
-     * To avoid this, the interrupt is disabled here, and re-enabled later if required: (c) -> (d) -> (a) -> (b) -> revert (c) if !d
-     */
-    bg_disable(lock);
-    if (lock->in_isr) {
-        return false;
-    } else {
-        lock->in_isr = true;
-        return true;
-    }
-}
-
-// Handle the conditions of status and interrupt, avoiding the ISR being disabled while there are newly arriving BG requests.
-// When called with `wip=true`, the ISR is performing some operations. Re-enables the interrupt and exits unconditionally.
-// When called with `wip=false`, returns `true` only when there is no incoming BG request. If the return value is `false`, the ISR should try again.
-// Will not change the acquiring device.
-SPI_MASTER_ISR_ATTR static inline bool bg_exit_core(spi_bus_lock_t *lock, bool wip, BaseType_t *do_yield)
-{
-    //See comments in `bg_entry_core`; re-enable the interrupt disabled at entry if we do need it
-    if (wip) {
-        bg_enable(lock);
-        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
-        return true;
-    }
-
-    bool ret;
-    uint32_t status = lock_status_fetch(lock);
-    if (lock->acquiring_dev) {
-        if (status & DEV_BG_MASK(lock->acquiring_dev)) {
-            BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acq_dev_bg_active);
-            ret = false;
-        } else {
-            // The request may happen any time, even after we fetched the status.
-            // The value of `acq_dev_bg_active` is random.
-            resume_dev_in_isr(lock->acquiring_dev, do_yield);
-            ret = true;
-        }
-    } else {
-        BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
-        ret = !(status & BG_MASK);
-    }
-    if (ret) {
-        //on successful exit with no transaction done, mark BG as inactive
-        lock->in_isr = false;
-    }
-    return ret;
-}
-
-IRAM_ATTR static inline void dev_wait_prepare(spi_bus_lock_dev_t *dev_handle)
-{
-    xSemaphoreTake(dev_handle->semphr, 0);
-}
-
-SPI_MASTER_ISR_ATTR static inline esp_err_t dev_wait(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
-{
-    BaseType_t ret = xSemaphoreTake(dev_handle->semphr, wait);
-
-    if (ret == pdFALSE) return ESP_ERR_TIMEOUT;
-    return ESP_OK;
-}
-
-/*******************************************************************************
- * Initialization & Deinitialization
- ******************************************************************************/
-esp_err_t spi_bus_init_lock(spi_bus_lock_handle_t *out_lock, const spi_bus_lock_config_t *config)
-{
-    spi_bus_lock_t* lock = (spi_bus_lock_t*)calloc(sizeof(spi_bus_lock_t), 1);
-    if (lock == NULL) {
-        return ESP_ERR_NO_MEM;
-    }
-
-    lock_status_init(lock);
-    lock->acquiring_dev = NULL;
-    lock->last_dev = NULL;
-    lock->periph_cs_num = config->cs_num;
-    lock->host_id = config->host_id;
-
-    *out_lock = lock;
-    return ESP_OK;
-}
-
-void spi_bus_deinit_lock(spi_bus_lock_handle_t lock)
-{
-    for (int i = 0; i < DEV_NUM_MAX; i++) {
-        assert(atomic_load(&lock->dev[i]) == (intptr_t)NULL);
-    }
-    free(lock);
-}
-
-static int try_acquire_free_dev(spi_bus_lock_t *lock, bool cs_required)
-{
-    if (cs_required) {
-        int i;
-        for (i = 0; i < lock->periph_cs_num; i++) {
-            intptr_t null = (intptr_t) NULL;
-            //use 1 to occupy the slot, actual setup comes later
-            if (atomic_compare_exchange_strong(&lock->dev[i], &null, (intptr_t) 1)) {
-                break;
-            }
-        }
-        return ((i == lock->periph_cs_num)? -1: i);
-    } else {
-        int i;
-        for (i = DEV_NUM_MAX - 1; i >= 0; i--) {
-            intptr_t null = (intptr_t) NULL;
-            //use 1 to occupy the slot, actual setup comes later
-            if (atomic_compare_exchange_strong(&lock->dev[i], &null, (intptr_t) 1)) {
-                break;
-            }
-        }
-        return i;
-    }
-}
-
-esp_err_t spi_bus_lock_register_dev(spi_bus_lock_handle_t lock, spi_bus_lock_dev_config_t *config,
-                                    spi_bus_lock_dev_handle_t *out_dev_handle)
-{
-    if (lock == NULL) return ESP_ERR_INVALID_ARG;
-    int id = try_acquire_free_dev(lock, config->flags & SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED);
-    if (id == -1) return ESP_ERR_NOT_SUPPORTED;
-
-    spi_bus_lock_dev_t* dev_lock = (spi_bus_lock_dev_t*)heap_caps_calloc(sizeof(spi_bus_lock_dev_t), 1, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
-    if (dev_lock == NULL) {
-        return ESP_ERR_NO_MEM;
-    }
-    dev_lock->semphr = xSemaphoreCreateBinary();
-    if (dev_lock->semphr == NULL) {
-        free(dev_lock);
-        atomic_store(&lock->dev[id], (intptr_t)NULL);
-        return ESP_ERR_NO_MEM;
-    }
-    dev_lock->parent = lock;
-    dev_lock->mask = DEV_MASK(id);
-
-    ESP_LOGV(TAG, "device registered on bus %d slot %d.", lock->host_id, id);
-    atomic_store(&lock->dev[id], (intptr_t)dev_lock);
-    *out_dev_handle = dev_lock;
-    return ESP_OK;
-}
-
-void spi_bus_lock_unregister_dev(spi_bus_lock_dev_handle_t dev_handle)
-{
-    int id = dev_lock_get_id(dev_handle);
-
-    spi_bus_lock_t* lock = dev_handle->parent;
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(atomic_load(&lock->dev[id]) == (intptr_t)dev_handle);
-
-    if (lock->last_dev == dev_handle) lock->last_dev = NULL;
-
-    atomic_store(&lock->dev[id], (intptr_t)NULL);
-    if (dev_handle->semphr) {
-        vSemaphoreDelete(dev_handle->semphr);
-    }
-
-    free(dev_handle);
-}
-
-IRAM_ATTR static inline int mask_get_id(uint32_t mask)
-{
-    return ID_DEV_MASK(mask);
-}
-
-IRAM_ATTR static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock)
-{
-    return mask_get_id(dev_lock->mask);
-}
-
-void spi_bus_lock_set_bg_control(spi_bus_lock_handle_t lock, bg_ctrl_func_t bg_enable, bg_ctrl_func_t bg_disable, void *arg)
-{
-    lock->bg_enable = bg_enable;
-    lock->bg_disable = bg_disable;
-    lock->bg_arg = arg;
-}
-
-IRAM_ATTR int spi_bus_lock_get_dev_id(spi_bus_lock_dev_handle_t dev_handle)
-{
-    return (dev_handle? dev_lock_get_id(dev_handle): -1);
-}
-
-//will be called when the cache is disabled
-IRAM_ATTR bool spi_bus_lock_touch(spi_bus_lock_dev_handle_t dev_handle)
-{
-    spi_bus_lock_dev_t* last_dev = dev_handle->parent->last_dev;
-    dev_handle->parent->last_dev = dev_handle;
-    if (last_dev != dev_handle) {
-        int last_dev_id = (last_dev? dev_lock_get_id(last_dev): -1);
-        ESP_DRAM_LOGV(TAG, "SPI dev changed from %d to %d",
-                    last_dev_id, dev_lock_get_id(dev_handle));
-    }
-    return (dev_handle != last_dev);
-}
-
-/*******************************************************************************
- * Acquiring service
- ******************************************************************************/
-IRAM_ATTR esp_err_t spi_bus_lock_acquire_start(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
-{
-    LOCK_CHECK(wait == portMAX_DELAY, "timeout other than portMAX_DELAY not supported", ESP_ERR_INVALID_ARG);
-
-    spi_bus_lock_t* lock = dev_handle->parent;
-
-    // Clear the semaphore before checking
-    dev_wait_prepare(dev_handle);
-    if (!acquire_core(dev_handle)) {
-        //block until becoming the acquiring processor (helped by the previous acquiring processor)
-        esp_err_t err = dev_wait(dev_handle, wait);
-        //TODO: add timeout handling here.
-        if (err != ESP_OK) return err;
-    }
-
-    ESP_DRAM_LOGV(TAG, "dev %d acquired.", dev_lock_get_id(dev_handle));
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acquiring_dev == dev_handle);
-
-    //When we arrive here, the requests of this device should already be handled
-    uint32_t status = lock_status_fetch(lock);
-    (void) status;
-    BUS_LOCK_DEBUG_EXECUTE_CHECK((status & DEV_BG_MASK(dev_handle)) == 0);
-
-    return ESP_OK;
-}
-
-IRAM_ATTR esp_err_t spi_bus_lock_acquire_end(spi_bus_lock_dev_t *dev_handle)
-{
-    //release the bus
-    spi_bus_lock_t* lock = dev_handle->parent;
-    LOCK_CHECK(lock->acquiring_dev == dev_handle, "Cannot release a lock that hasn't been acquired.", ESP_ERR_INVALID_STATE);
-
-    acquire_end_core(dev_handle);
-
-    ESP_LOGV(TAG, "dev %d released.", dev_lock_get_id(dev_handle));
-    return ESP_OK;
-}
-
-SPI_MASTER_ISR_ATTR spi_bus_lock_dev_handle_t spi_bus_lock_get_acquiring_dev(spi_bus_lock_t *lock)
-{
-    return lock->acquiring_dev;
-}
-
-/*******************************************************************************
- * BG (background operation) service
- ******************************************************************************/
-SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_entry(spi_bus_lock_t* lock)
-{
-    return bg_entry_core(lock);
-}
-
-SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_exit(spi_bus_lock_t* lock, bool wip, BaseType_t* do_yield)
-{
-    return bg_exit_core(lock, wip, do_yield);
-}
-
-SPI_MASTER_ATTR esp_err_t spi_bus_lock_bg_request(spi_bus_lock_dev_t *dev_handle)
-{
-    req_core(dev_handle);
-    return ESP_OK;
-}
-
-IRAM_ATTR esp_err_t spi_bus_lock_wait_bg_done(spi_bus_lock_dev_handle_t dev_handle, TickType_t wait)
-{
-    spi_bus_lock_t *lock = dev_handle->parent;
-    LOCK_CHECK(lock->acquiring_dev == dev_handle, "Cannot wait for a device that is not acquired", ESP_ERR_INVALID_STATE);
-    LOCK_CHECK(wait == portMAX_DELAY, "timeout other than portMAX_DELAY not supported", ESP_ERR_INVALID_ARG);
-
-    // If no BG bits are active, skip quickly. This is ensured because `spi_bus_lock_wait_bg_done`
-    // cannot be executed concurrently with `bg_request` on the same device.
-    if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
-        // Clear the semaphore before checking
-        dev_wait_prepare(dev_handle);
-        if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
-            //block until becoming the acquiring processor (helped by the previous acquiring processor)
-            esp_err_t err = dev_wait(dev_handle, wait);
-            //TODO: add timeout handling here.
-            if (err != ESP_OK) return err;
-        }
-    }
-
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
-    BUS_LOCK_DEBUG_EXECUTE_CHECK((lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) == 0);
-    return ESP_OK;
-}
-
-SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_clear_req(spi_bus_lock_dev_t *dev_handle)
-{
-    bool finished = clear_pend_core(dev_handle);
-    ESP_EARLY_LOGV(TAG, "dev %d served from bg.", dev_lock_get_id(dev_handle));
-    return finished;
-}
-
-SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_acq(spi_bus_lock_t *lock,
-                                                       spi_bus_lock_dev_handle_t *out_dev_lock)
-{
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev);
-    uint32_t status = lock_status_fetch(lock);
-    return schedule_core(lock, status, out_dev_lock);
-}
-
-SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_req(spi_bus_lock_dev_t *dev_lock)
-{
-    spi_bus_lock_t* lock = dev_lock->parent;
-    uint32_t status = lock_status_fetch(lock);
-    uint32_t dev_status = status & dev_lock->mask;
-
-    // Move the REQ bits of all devices to the corresponding PEND bits.
-    // To reduce execution time, this is only done when the REQ bit of the calling device is set.
-    if (dev_status & REQ_MASK) {
-        update_pend_core(lock, status);
-        return true;
-    } else {
-        return dev_status & PEND_MASK;
-    }
-}
-
-SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_req_exist(spi_bus_lock_t *lock)
-{
-    uint32_t status = lock_status_fetch(lock);
-    return status & BG_MASK;
-}
-
-/*******************************************************************************
- * Static variables of the locks of the main flash
- ******************************************************************************/
-#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
-static spi_bus_lock_dev_t lock_main_flash_dev;
-
-static spi_bus_lock_t main_spi_bus_lock = {
-    /*
-     * The main bus cache is permanently required; this flag is set here and never cleared, so that
-     * the cache will always be re-enabled when acquiring devices yield.
-     */
-    .status = ATOMIC_VAR_INIT(WEAK_BG_FLAG),
-    .acquiring_dev = NULL,
-    .dev = {ATOMIC_VAR_INIT((intptr_t)&lock_main_flash_dev)},
-    .new_req = 0,
-    .periph_cs_num = SOC_SPI_PERIPH_CS_NUM(0),
-};
-const spi_bus_lock_handle_t g_main_spi_bus_lock = &main_spi_bus_lock;
-
-esp_err_t spi_bus_lock_init_main_bus(void)
-{
-    spi_bus_main_set_lock(g_main_spi_bus_lock);
-    return ESP_OK;
-}
-
-static StaticSemaphore_t main_flash_semphr;
-
-static spi_bus_lock_dev_t lock_main_flash_dev = {
-    .semphr = NULL,
-    .parent = &main_spi_bus_lock,
-    .mask = DEV_MASK(0),
-};
-const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = &lock_main_flash_dev;
-
-esp_err_t spi_bus_lock_init_main_dev(void)
-{
-    g_spi_lock_main_flash_dev->semphr = xSemaphoreCreateBinaryStatic(&main_flash_semphr);
-    if (g_spi_lock_main_flash_dev->semphr == NULL) {
-        return ESP_ERR_NO_MEM;
-    }
-    return ESP_OK;
-}
-#else //CONFIG_SPI_FLASH_SHARE_SPI1_BUS
-
-//when the dev lock is not initialized, this points to NULL
-const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = NULL;
-
-#endif
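
Although the override is deleted, the design notes above describe a reusable idea: pack the per-device REQ/PEND/LOCK flags into a single atomic word so that the lock state is always consistent, with no intermediate states. A stripped-down illustration of that bit layout (hypothetical helper names `req_set`/`req_to_pend`, not the driver's API):

    #include <stdatomic.h>
    #include <stdint.h>

    #define MAX_DEV_NUM    10
    #define REQ_SHIFT      0
    #define PENDING_SHIFT  10

    static atomic_uint_fast32_t s_status;  // [29:20] LOCK, [19:10] PEND, [9:0] REQ

    // Set this device's REQ bit. The returned previous value tells the caller
    // whether it was the first requester (and so must kick the background ISR).
    static inline uint32_t req_set(int dev_id)
    {
        return atomic_fetch_or(&s_status, 1u << (REQ_SHIFT + dev_id));
    }

    // Migrate all REQ bits to PEND bits: set PEND first, then clear REQ, so a
    // request arriving in between is never lost (step (e) from the appendix).
    static inline void req_to_pend(void)
    {
        uint32_t req = atomic_load(&s_status) & ((1u << MAX_DEV_NUM) - 1);
        atomic_fetch_or(&s_status, req << PENDING_SHIFT);
        atomic_fetch_and(&s_status, ~req);
    }
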

+ 0 - 1007
components/_override/esp32/spi_master.c

@@ -1,1007 +0,0 @@
-// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Architecture:
-
-We can initialize a SPI driver, but we don't talk to the SPI driver itself; we address a device. A device essentially
-is a combination of SPI port and CS pin, plus some information about the specifics of communication to the device
-(timing, command/address length etc). Arbitration between tasks is also done at the device level.
-
-A device can work in interrupt mode or in polling mode, plus a third, more
-complicated mode which combines the two modes above:
-
-1. Work in the ISR with a set of queues; one per device.
-
-   The idea is that to send something to a SPI device, you allocate a
-   transaction descriptor. It contains some information about the transfer
-   like the length, address, command etc, plus pointers to transmit and
-   receive buffers. The address of this block gets pushed into the transmit
-   queue. The SPI driver does its magic, and sends and retrieves the data
-   eventually. The data gets written to the receive buffers, if needed the
-   transaction descriptor is modified to indicate returned parameters and
-   the entire thing goes into the return queue, where whatever software
-   initiated the transaction can retrieve it.
-
-   The entire thing is run from the SPI interrupt handler. If SPI is done
-   transmitting/receiving but nothing is in the queue, it will not clear the
-   SPI interrupt but just disable it by esp_intr_disable. This way, when a
-   new thing is sent, pushing the packet into the send queue and re-enabling
-   the interrupt (by esp_intr_enable) will trigger the interrupt again, which
-   can then take care of the sending.
-
-2. Work in the polling mode in the task.
-
-   In this mode we get rid of the ISR, the FreeRTOS queue and task switching;
-   the task is no longer blocked during a transaction. This increases the CPU
-   load, but decreases the interval between SPI transactions. At any time only
-   one device (in one task) can send polling transactions; transactions to
-   other devices are blocked until the polling transaction of the current
-   device is done.
-
-   In the polling mode, the queue is not used, all the operations are done
-   in the task. The task calls ``spi_device_polling_start`` to setup and start
-   a new transaction, then call ``spi_device_polling_end`` to handle the
-   return value of the transaction.
-
-   To handle the arbitration among devices, a device "temporarily" acquires
-   the bus through the ``device_acquire_bus_internal`` function, which writes
-   dev_request by a CAS operation. Other devices which want to send polling
-   transactions but don't own the bus will block and wait until given the
-   semaphore which indicates ownership of the bus.
-
-   In case the ISR is still sending transactions to other devices, the ISR
-   should maintain a ``random_idle`` flag indicating that it's not doing
-   transactions. When the bus is locked, the ISR can only send new
-   transactions to the acquiring device. The ISR will automatically disable
-   itself and send the semaphore to the device when the ISR is free. If the
-   device sees the random_idle flag, it can directly start its polling
-   transaction. Otherwise it should block and wait for the semaphore from the ISR.
-
-   After the polling transaction, the driver releases the bus. During the
-   release of the bus, the driver searches all other devices to see whether
-   any device is waiting to acquire the bus; if so, it acquires the bus for it
-   and sends it a semaphore if the device queue is empty, or invokes the ISR
-   for it. If no other device needs to acquire the bus, but there are
-   still transactions in the queues, the ISR will also be invoked.
-
-   To get better polling efficiency, the user can call the ``spi_device_acquire_bus``
-   function, which also calls the ``spi_bus_lock_acquire_core`` function,
-   before a series of polling transactions to a device. The bus acquisition and
-   task switching before and after each polling transaction are then skipped.
-
-3. Mixed mode
-
-   The driver is written under the assumption that polling and interrupt
-   transactions are not happening simultaneously. When sending polling
-   transactions, it will check whether the ISR is active, which includes the
-   case where the ISR is sending the interrupt transactions of the acquiring
-   device. If the ISR is still working, the routine sending a polling
-   transaction will block and wait for the semaphore from the ISR, which
-   indicates that the ISR is now free.
-
-   A fatal case is when a polling transaction is in flight but the ISR
-   receives an interrupt transaction. The behavior of the driver is then
-   unpredictable; this must be strictly forbidden.
-
-We have two bits to control the interrupt:
-
-1. The slave->trans_done bit, which is automatically asserted when a transaction is done.
-
-   This bit is cleared during an interrupt transaction, so that the interrupt
-   will be triggered when the transaction is done, or the SW can check the
-   bit to see if the transaction is done for polling transactions.
-
-   When no transaction is in-flight, the bit is kept active, so that the SW
-   can easily invoke the ISR by enabling the interrupt.
-
-2. The system interrupt enable/disable, controlled by esp_intr_enable and esp_intr_disable.
-
-   The interrupt is disabled (by the ISR itself) when no interrupt transaction
-   is queued. When the bus is not occupied, any task which queues a
-   transaction will enable the interrupt to invoke the ISR.
-   When the bus is occupied by a device, other devices will defer invoking
-   the ISR until the bus is released. The device acquiring the bus can still
-   send interrupt transactions by enabling the interrupt.
-
-*/
-
-#include <string.h>
-#include "driver/spi_common_internal.h"
-#include "driver/spi_master.h"
-
-#include "esp_log.h"
-#include "freertos/task.h"
-#include "freertos/queue.h"
-#include "freertos/semphr.h"
-#include "soc/soc_memory_layout.h"
-#include "driver/gpio.h"
-#include "hal/spi_hal.h"
-#include "esp_heap_caps.h"
-
-
-typedef struct spi_device_t spi_device_t;
-
-/// struct to hold private transaction data (like tx and rx buffer for DMA).
-typedef struct {
-    spi_transaction_t   *trans;
-    const uint32_t *buffer_to_send;   //equals tx_data, if SPI_TRANS_USE_TXDATA is applied; otherwise if the original buffer wasn't in DMA-capable memory, this gets the address of a temporary buffer that is;
-                                //otherwise set to the original buffer, or NULL if no buffer is assigned.
-    uint32_t *buffer_to_rcv;    // similar to buffer_to_send
-} spi_trans_priv_t;
-
-typedef struct {
-    int id;
-    spi_device_t* device[DEV_NUM_MAX];
-    intr_handle_t intr;
-    spi_hal_context_t hal;
-    spi_trans_priv_t cur_trans_buf;
-    int cur_cs;     //current device doing transaction
-    const spi_bus_attr_t* bus_attr;
-
-    /**
-     * The bus is permanently controlled by a device until ``spi_bus_release_bus`` is called. Otherwise
-     * the acquisition of the SPI bus is released when `spi_device_polling_end` is called.
-     */
-    spi_device_t* device_acquiring_lock;
-
-//debug information
-    bool polling;   //a polling transaction is in progress; avoid queueing new transactions into the ISR
-	
-//	PATCH
-	SemaphoreHandle_t mutex;
-	int count;	
-} spi_host_t;
-
-struct spi_device_t {
-    int id;
-    QueueHandle_t trans_queue;
-    QueueHandle_t ret_queue;
-    spi_device_interface_config_t cfg;
-    spi_hal_dev_config_t hal_dev;
-    spi_host_t *host;
-    spi_bus_lock_dev_handle_t dev_lock;
-};
-
-static spi_host_t* bus_driver_ctx[SOC_SPI_PERIPH_NUM] = {};
-
-static const char *SPI_TAG = "spi_master";
-#define SPI_CHECK(a, str, ret_val, ...) \
-    if (unlikely(!(a))) { \
-        ESP_LOGE(SPI_TAG,"%s(%d): "str, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
-        return (ret_val); \
-    }
-
-
-static void spi_intr(void *arg);
-static void spi_bus_intr_enable(void *host);
-static void spi_bus_intr_disable(void *host);
-
-static esp_err_t spi_master_deinit_driver(void* arg);
-
-static inline bool is_valid_host(spi_host_device_t host)
-{
-//SPI1 can be used as GPSPI only on ESP32
-#if CONFIG_IDF_TARGET_ESP32
-    return host >= SPI1_HOST && host <= SPI3_HOST;
-#elif (SOC_SPI_PERIPH_NUM == 2)
-    return host == SPI2_HOST;
-#elif (SOC_SPI_PERIPH_NUM == 3)
-    return host >= SPI2_HOST && host <= SPI3_HOST;
-#endif
-}
-
-// Should be called before any devices are actually registered or used.
-// Currently automatically called after `spi_bus_initialize()` and when the first device is registered.
-static esp_err_t spi_master_init_driver(spi_host_device_t host_id)
-{
-    esp_err_t err = ESP_OK;
-
-    const spi_bus_attr_t* bus_attr = spi_bus_get_attr(host_id);
-    SPI_CHECK(bus_attr != NULL, "host_id not initialized", ESP_ERR_INVALID_STATE);
-    SPI_CHECK(bus_attr->lock != NULL, "SPI Master cannot attach to bus. (Check CONFIG_SPI_FLASH_SHARE_SPI1_BUS)", ESP_ERR_INVALID_ARG);
-    // spihost contains atomic variables, which should not be put in PSRAM
-    spi_host_t* host = heap_caps_malloc(sizeof(spi_host_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
-    if (host == NULL) {
-        err = ESP_ERR_NO_MEM;
-        goto cleanup;
-    }
-
-    *host = (spi_host_t) {
-        .id = host_id,
-        .cur_cs = DEV_NUM_MAX,
-        .polling = false,
-        .device_acquiring_lock = NULL,
-        .bus_attr = bus_attr,
-    };
-
-    if (host_id != SPI1_HOST) {
-        // interrupts are not allowed on SPI1 bus
-        err = esp_intr_alloc(spicommon_irqsource_for_host(host_id),
-                            bus_attr->bus_cfg.intr_flags | ESP_INTR_FLAG_INTRDISABLED,
-                            spi_intr, host, &host->intr);
-        if (err != ESP_OK) {
-            goto cleanup;
-        }
-    }
-
-    //assign the SPI, RX DMA and TX DMA peripheral registers beginning address
-    spi_hal_config_t hal_config = {
-        //On ESP32-S2 and earlier chips, DMA registers are part of SPI registers. Pass the registers of SPI peripheral to control it.
-        .dma_in = SPI_LL_GET_HW(host_id),
-        .dma_out = SPI_LL_GET_HW(host_id),
-        .dma_enabled = bus_attr->dma_enabled,
-        .dmadesc_tx = bus_attr->dmadesc_tx,
-        .dmadesc_rx = bus_attr->dmadesc_rx,
-        .tx_dma_chan = bus_attr->tx_dma_chan,
-        .rx_dma_chan = bus_attr->rx_dma_chan,
-        .dmadesc_n = bus_attr->dma_desc_num,
-    };
-    spi_hal_init(&host->hal, host_id, &hal_config);
-
-    if (host_id != SPI1_HOST) {
-        //SPI1 attributes are already initialized at start up.
-        spi_bus_lock_handle_t lock = spi_bus_lock_get_by_id(host_id);
-        spi_bus_lock_set_bg_control(lock, spi_bus_intr_enable, spi_bus_intr_disable, host);
-        spi_bus_register_destroy_func(host_id, spi_master_deinit_driver, host);
-    }
-
-    bus_driver_ctx[host_id] = host;
-    return ESP_OK;
-
-cleanup:
-    if (host) {
-        spi_hal_deinit(&host->hal);
-        if (host->intr) {
-            esp_intr_free(host->intr);
-        }
-    }
-    free(host);
-    return err;
-}
-
-static esp_err_t spi_master_deinit_driver(void* arg)
-{
-    spi_host_t *host = (spi_host_t*)arg;
-    SPI_CHECK(host != NULL, "host_id not in use", ESP_ERR_INVALID_STATE);
-
-    int host_id = host->id;
-    SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG);
-
-    int x;
-    for (x=0; x<DEV_NUM_MAX; x++) {
-        SPI_CHECK(host->device[x] == NULL, "not all CSses freed", ESP_ERR_INVALID_STATE);
-    }
-
-    spi_hal_deinit(&host->hal);
-
-    if (host->intr) {
-        esp_intr_free(host->intr);
-    }
-    free(host);
-    bus_driver_ctx[host_id] = NULL;
-    return ESP_OK;
-}
-
-void spi_get_timing(bool gpio_is_used, int input_delay_ns, int eff_clk, int* dummy_o, int* cycles_remain_o)
-{
-    int timing_dummy;
-    int timing_miso_delay;
-
-    spi_hal_cal_timing(eff_clk, gpio_is_used, input_delay_ns, &timing_dummy, &timing_miso_delay);
-    if (dummy_o) *dummy_o = timing_dummy;
-    if (cycles_remain_o) *cycles_remain_o = timing_miso_delay;
-}
-
-int spi_get_freq_limit(bool gpio_is_used, int input_delay_ns)
-{
-    return spi_hal_get_freq_limit(gpio_is_used, input_delay_ns);
-}
-
-/*
- Add a device. This allocates a CS line for the device, allocates memory for the device structure and hooks
- up the CS pin to whatever is specified.
-*/
-esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interface_config_t *dev_config, spi_device_handle_t *handle)
-{
-    spi_device_t *dev = NULL;
-    esp_err_t err = ESP_OK;
-
-    SPI_CHECK(is_valid_host(host_id), "invalid host", ESP_ERR_INVALID_ARG);
-    if (bus_driver_ctx[host_id] == NULL) {
-        //lazily initialize the driver; it gets deinitialized when the bus is freed
-        err = spi_master_init_driver(host_id);
-        if (err != ESP_OK) {
-            return err;
-        }
-    }
-
-    spi_host_t *host = bus_driver_ctx[host_id];
-    const spi_bus_attr_t* bus_attr = host->bus_attr;
-    SPI_CHECK(dev_config->spics_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(dev_config->spics_io_num), "spics pin invalid", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(dev_config->clock_speed_hz > 0, "invalid sclk speed", ESP_ERR_INVALID_ARG);
-#ifdef CONFIG_IDF_TARGET_ESP32
-    //The hardware looks like it would support this, but actually setting cs_ena_pretrans when transferring in full
-    //duplex mode does absolutely nothing on the ESP32.
-    SPI_CHECK(dev_config->cs_ena_pretrans <= 1 || (dev_config->address_bits == 0 && dev_config->command_bits == 0) ||
-        (dev_config->flags & SPI_DEVICE_HALFDUPLEX), "In full-duplex mode, only support cs pretrans delay = 1 and without address_bits and command_bits", ESP_ERR_INVALID_ARG);
-#endif
-    uint32_t lock_flag = ((dev_config->spics_io_num != -1)? SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED: 0);
-
-    spi_bus_lock_dev_config_t lock_config = {
-        .flags = lock_flag,
-    };
-    spi_bus_lock_dev_handle_t dev_handle;
-    err = spi_bus_lock_register_dev(bus_attr->lock, &lock_config, &dev_handle);
-    if (err != ESP_OK) {
-        goto nomem;
-    }
-
-    int freecs = spi_bus_lock_get_dev_id(dev_handle);
-    SPI_CHECK(freecs != -1, "no free cs pins for the host", ESP_ERR_NOT_FOUND);
-
-    //input parameters to calculate timing configuration
-    int half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
-    int no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
-    int duty_cycle = (dev_config->duty_cycle_pos==0) ? 128 : dev_config->duty_cycle_pos;
-    int use_gpio = !(bus_attr->flags & SPICOMMON_BUSFLAG_IOMUX_PINS);
-    spi_hal_timing_param_t timing_param = {
-        .half_duplex = half_duplex,
-        .no_compensate = no_compensate,
-        .clock_speed_hz = dev_config->clock_speed_hz,
-        .duty_cycle = duty_cycle,
-        .input_delay_ns = dev_config->input_delay_ns,
-        .use_gpio = use_gpio
-    };
-
-    //output values of timing configuration
-    spi_hal_timing_conf_t temp_timing_conf;
-    int freq;
-    esp_err_t ret = spi_hal_cal_clock_conf(&timing_param, &freq, &temp_timing_conf);
-    SPI_CHECK(ret==ESP_OK, "assigned clock speed not supported", ret);
-
-    //Allocate memory for device
-    dev = malloc(sizeof(spi_device_t));
-    if (dev == NULL) goto nomem;
-    memset(dev, 0, sizeof(spi_device_t));
-
-    dev->id = freecs;
-    dev->dev_lock = dev_handle;
-
-    //Allocate queues, set defaults
-    dev->trans_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
-    dev->ret_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
-    if (!dev->trans_queue || !dev->ret_queue) {
-        goto nomem;
-    }
-
-    //We want to save a copy of the dev config in the dev struct.
-    memcpy(&dev->cfg, dev_config, sizeof(spi_device_interface_config_t));
-    dev->cfg.duty_cycle_pos = duty_cycle;
-    // TODO: if we have to change the apb clock among transactions, re-calculate this each time the apb clock lock is locked.
-
-    //Set CS pin, CS options
-    if (dev_config->spics_io_num >= 0) {
-        spicommon_cs_initialize(host_id, dev_config->spics_io_num, freecs, use_gpio);
-    }
-	
-	// create a mutex if we have more than one client
-	if (host->count++) {
-		ESP_LOGI(SPI_TAG, "More than one device on SPI %d => creating mutex", host_id);
-		host->mutex = xSemaphoreCreateMutex();
-	}
-
-    //save a pointer to device in spi_host_t
-    host->device[freecs] = dev;
-    //save a pointer to host in spi_device_t
-    dev->host= host;
-
-    //initialise the device specific configuration
-    spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
-    hal_dev->mode = dev_config->mode;
-    hal_dev->cs_setup = dev_config->cs_ena_pretrans;
-    hal_dev->cs_hold = dev_config->cs_ena_posttrans;
-    //setting hold_time to 0 will not actually append delay to CS
-    //set it to 1 since we do need at least one clock of hold time in most cases
-    if (hal_dev->cs_hold == 0) {
-        hal_dev->cs_hold = 1;
-    }
-    hal_dev->cs_pin_id = dev->id;
-    hal_dev->timing_conf = temp_timing_conf;
-    hal_dev->sio = (dev_config->flags) & SPI_DEVICE_3WIRE ? 1 : 0;
-    hal_dev->half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
-    hal_dev->tx_lsbfirst = dev_config->flags & SPI_DEVICE_TXBIT_LSBFIRST ? 1 : 0;
-    hal_dev->rx_lsbfirst = dev_config->flags & SPI_DEVICE_RXBIT_LSBFIRST ? 1 : 0;
-    hal_dev->no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
-#if SOC_SPI_SUPPORT_AS_CS
-    hal_dev->as_cs = dev_config->flags& SPI_DEVICE_CLK_AS_CS ? 1 : 0;
-#endif
-    hal_dev->positive_cs = dev_config->flags & SPI_DEVICE_POSITIVE_CS ? 1 : 0;
-
-    *handle = dev;
-    ESP_LOGD(SPI_TAG, "SPI%d: New device added to CS%d, effective clock: %dkHz", host_id+1, freecs, freq/1000);
-
-    return ESP_OK;
-
-nomem:
-    if (dev) {
-        if (dev->trans_queue) vQueueDelete(dev->trans_queue);
-        if (dev->ret_queue) vQueueDelete(dev->ret_queue);
-        spi_bus_lock_unregister_dev(dev->dev_lock);
-    }
-    free(dev);
-    return ESP_ERR_NO_MEM;
-}
-
-esp_err_t spi_bus_remove_device(spi_device_handle_t handle)
-{
-    SPI_CHECK(handle!=NULL, "invalid handle", ESP_ERR_INVALID_ARG);
-    //These checks aren't exhaustive; another thread could sneak in a transaction in between. These are only here to
-    //catch design errors and aren't meant to be triggered during normal operation.
-    SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
-    SPI_CHECK(handle->host->cur_cs == DEV_NUM_MAX || handle->host->device[handle->host->cur_cs] != handle, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
-    SPI_CHECK(uxQueueMessagesWaiting(handle->ret_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
-
-    //return
-    int spics_io_num = handle->cfg.spics_io_num;
-    if (spics_io_num >= 0) spicommon_cs_free_io(spics_io_num);
-
-    //Kill queues
-    vQueueDelete(handle->trans_queue);
-    vQueueDelete(handle->ret_queue);
-    spi_bus_lock_unregister_dev(handle->dev_lock);
-
-    assert(handle->host->device[handle->id] == handle);
-    handle->host->device[handle->id] = NULL;
-    free(handle);
-    return ESP_OK;
-}
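
For reference, a minimal sketch of the add/remove lifecycle implemented by the two functions above, assuming the bus was already initialized with spi_bus_initialize(); the pin number, clock and queue depth are hypothetical:

#include "driver/spi_master.h"
#include "esp_err.h"

// Hypothetical device: mode 0, 10 MHz, CS on GPIO 5, up to 4 queued transactions.
static void example_add_remove(void)
{
    spi_device_interface_config_t devcfg = {
        .clock_speed_hz = 10 * 1000 * 1000,
        .mode = 0,
        .spics_io_num = 5,
        .queue_size = 4,
    };
    spi_device_handle_t dev;
    ESP_ERROR_CHECK(spi_bus_add_device(SPI3_HOST, &devcfg, &dev));
    // ... transactions ...
    // all queued transactions must be finished before removal
    ESP_ERROR_CHECK(spi_bus_remove_device(dev));
}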
-
-int spi_cal_clock(int fapb, int hz, int duty_cycle, uint32_t *reg_o)
-{
-    return spi_ll_master_cal_clock(fapb, hz, duty_cycle, reg_o);
-}
-
-int spi_get_actual_clock(int fapb, int hz, int duty_cycle)
-{
-    return spi_hal_master_cal_clock(fapb, hz, duty_cycle);
-}
-
-// Setup the device-specified configuration registers. Called every time a new
-// transaction is to be sent, but only apply new configurations when the device
-// changes.
-static SPI_MASTER_ISR_ATTR void spi_setup_device(spi_device_t *dev)
-{
-    spi_bus_lock_dev_handle_t dev_lock = dev->dev_lock;
-
-    if (!spi_bus_lock_touch(dev_lock)) {
-        //if the configuration is already applied, skip the following.
-        return;
-    }
-    spi_hal_context_t *hal = &dev->host->hal;
-    spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
-    spi_hal_setup_device(hal, hal_dev);
-}
-
-static SPI_MASTER_ISR_ATTR spi_device_t *get_acquiring_dev(spi_host_t *host)
-{
-    spi_bus_lock_dev_handle_t dev_lock = spi_bus_lock_get_acquiring_dev(host->bus_attr->lock);
-    if (!dev_lock) return NULL;
-
-    return host->device[spi_bus_lock_get_dev_id(dev_lock)];
-}
-
-// Debug only
-// NOTE if the acquiring is not fully completed, `spi_bus_lock_get_acquiring_dev`
-// may return a spurious `NULL`, causing this function to return a spurious `false`.
-static inline SPI_MASTER_ISR_ATTR bool spi_bus_device_is_polling(spi_device_t *dev)
-{
-    return get_acquiring_dev(dev->host) == dev && dev->host->polling;
-}
-
-/*-----------------------------------------------------------------------------
-    Working Functions
------------------------------------------------------------------------------*/
-
-// The interrupt may get invoked by the bus lock.
-static void SPI_MASTER_ISR_ATTR spi_bus_intr_enable(void *host)
-{
-    esp_intr_enable(((spi_host_t*)host)->intr);
-}
-
-// The interrupt is always disabled by the ISR itself, not exposed
-static void SPI_MASTER_ISR_ATTR spi_bus_intr_disable(void *host)
-{
-    esp_intr_disable(((spi_host_t*)host)->intr);
-}
-
-// The function is called to send a new transaction, in ISR or in the task.
-// Setup the transaction-specified registers and linked-list used by the DMA (or FIFO if DMA is not used)
-static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_t *trans_buf)
-{
-    spi_transaction_t *trans = NULL;
-    spi_host_t *host = dev->host;
-    spi_hal_context_t *hal = &(host->hal);
-    spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
-
-    trans = trans_buf->trans;
-    host->cur_cs = dev->id;
-
-    //Reconfigure according to device settings, the function only has effect when the dev_id is changed.
-    spi_setup_device(dev);
-
-    //set the transaction specific configuration each time before a transaction setup
-    spi_hal_trans_config_t hal_trans = {};
-    hal_trans.tx_bitlen = trans->length;
-    hal_trans.rx_bitlen = trans->rxlength;
-    hal_trans.rcv_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_rcv;
-    hal_trans.send_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_send;
-    hal_trans.cmd = trans->cmd;
-    hal_trans.addr = trans->addr;
-    //Set up QIO/DIO if needed
-    hal_trans.io_mode = (trans->flags & SPI_TRANS_MODE_DIO ?
-                        (trans->flags & SPI_TRANS_MODE_DIOQIO_ADDR ? SPI_LL_IO_MODE_DIO : SPI_LL_IO_MODE_DUAL) :
-                    (trans->flags & SPI_TRANS_MODE_QIO ?
-                        (trans->flags & SPI_TRANS_MODE_DIOQIO_ADDR ? SPI_LL_IO_MODE_QIO : SPI_LL_IO_MODE_QUAD) :
-                    SPI_LL_IO_MODE_NORMAL
-                    ));
-
-    if (trans->flags & SPI_TRANS_VARIABLE_CMD) {
-        hal_trans.cmd_bits = ((spi_transaction_ext_t *)trans)->command_bits;
-    } else {
-        hal_trans.cmd_bits = dev->cfg.command_bits;
-    }
-    if (trans->flags & SPI_TRANS_VARIABLE_ADDR) {
-        hal_trans.addr_bits = ((spi_transaction_ext_t *)trans)->address_bits;
-    } else {
-        hal_trans.addr_bits = dev->cfg.address_bits;
-    }
-    if (trans->flags & SPI_TRANS_VARIABLE_DUMMY) {
-        hal_trans.dummy_bits = ((spi_transaction_ext_t *)trans)->dummy_bits;
-    } else {
-        hal_trans.dummy_bits = dev->cfg.dummy_bits;
-    }
-
-    spi_hal_setup_trans(hal, hal_dev, &hal_trans);
-    spi_hal_prepare_data(hal, hal_dev, &hal_trans);
-
-    //Call pre-transmission callback, if any
-    if (dev->cfg.pre_cb) dev->cfg.pre_cb(trans);
-    //Kick off transfer
-    spi_hal_user_start(hal);
-}
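
Since spi_new_trans() fires the optional pre_cb just before the transfer is kicked off, a common use is toggling a display's data/command line. A sketch, where PIN_DC is a hypothetical GPIO and t->user is assumed to carry the desired level; the callback may run in ISR context, hence IRAM_ATTR:

#include "driver/gpio.h"
#include "driver/spi_master.h"
#include "esp_attr.h"

#define PIN_DC 21  // hypothetical data/command GPIO

// Set in spi_device_interface_config_t.pre_cb; runs right before the transfer starts.
static void IRAM_ATTR pre_transfer_cb(spi_transaction_t *t)
{
    gpio_set_level(PIN_DC, (uint32_t)t->user);
}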
-
-// The function is called when a transaction is done, in ISR or in the task.
-// Fetch the data from FIFO and call the ``post_cb``.
-static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host)
-{
-    spi_transaction_t *cur_trans = host->cur_trans_buf.trans;
-
-    spi_hal_fetch_result(&host->hal);
-    //Call post-transaction callback, if any
-    spi_device_t* dev = host->device[host->cur_cs];
-    if (dev->cfg.post_cb) dev->cfg.post_cb(cur_trans);
-
-    host->cur_cs = DEV_NUM_MAX;
-}
-
-// This is run in interrupt context.
-static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
-{
-    BaseType_t do_yield = pdFALSE;
-    spi_host_t *host = (spi_host_t *)arg;
-    const spi_bus_attr_t* bus_attr = host->bus_attr;
-
-    assert(spi_hal_usr_is_done(&host->hal));
-
-    /*
-     * Help to skip the handling of in-flight transaction, and disable of the interrupt.
-     * The esp_intr_enable will be called (b) after new BG request is queued (a) in the task;
-     * while esp_intr_disable should be called (c) if we check and found the sending queue is empty (d).
-     * If (c) is called after (d), then there is a risk that things happens in this sequence:
-     * (d) -> (a) -> (b) -> (c), and in this case the interrupt is disabled while there's pending BG request in the queue.
-     * To avoid this, interrupt is disabled here, and re-enabled later if required.
-     */
-    if (!spi_bus_lock_bg_entry(bus_attr->lock)) {
-        /*------------ deal with the in-flight transaction -----------------*/
-        assert(host->cur_cs != DEV_NUM_MAX);
-        //Okay, transaction is done.
-        const int cs = host->cur_cs;
-        //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
-        if (bus_attr->dma_enabled) {
-            //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
-            spicommon_dmaworkaround_idle(bus_attr->tx_dma_chan);
-        }
-
-        //cur_cs is changed to DEV_NUM_MAX here
-        spi_post_trans(host);
-        // spi_bus_lock_bg_pause(bus_attr->lock);
-        //Return transaction descriptor.
-        xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_trans_buf, &do_yield);
-#ifdef CONFIG_PM_ENABLE
-        //Release APB frequency lock
-        esp_pm_lock_release(bus_attr->pm_lock);
-#endif
-    }
-
-    /*------------ new transaction starts here ------------------*/
-    assert(host->cur_cs == DEV_NUM_MAX);
-
-    spi_bus_lock_handle_t lock = host->bus_attr->lock;
-    BaseType_t trans_found = pdFALSE;
-
-
-    // There should be remaining requests
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(spi_bus_lock_bg_req_exist(lock));
-
-    do {
-        spi_bus_lock_dev_handle_t acq_dev_lock = spi_bus_lock_get_acquiring_dev(lock);
-        spi_bus_lock_dev_handle_t desired_dev = acq_dev_lock;
-        bool resume_task = false;
-        spi_device_t* device_to_send = NULL;
-
-        if (!acq_dev_lock) {
-            // This function may assign a new acquiring device, otherwise it will suggest a desired device with BG active
-            // We use either of them without further searching in the devices.
-            // If the return value is true, it means either there's no acquiring device, or the acquiring device's BG is active,
-            // We stay in the ISR to deal with those transactions of desired device, otherwise nothing will be done, check whether we need to resume some other tasks, or just quit the ISR
-            resume_task = spi_bus_lock_bg_check_dev_acq(lock, &desired_dev);
-        }
-
-        if (!resume_task) {
-            bool dev_has_req = spi_bus_lock_bg_check_dev_req(desired_dev);
-            if (dev_has_req) {
-                device_to_send = host->device[spi_bus_lock_get_dev_id(desired_dev)];
-                trans_found = xQueueReceiveFromISR(device_to_send->trans_queue, &host->cur_trans_buf, &do_yield);
-                if (!trans_found) {
-                    spi_bus_lock_bg_clear_req(desired_dev);
-                }
-            }
-        }
-
-        if (trans_found) {
-            spi_trans_priv_t *const cur_trans_buf = &host->cur_trans_buf;
-            if (bus_attr->dma_enabled && (cur_trans_buf->buffer_to_rcv || cur_trans_buf->buffer_to_send)) {
-                //mark channel as active, so that the DMA will not be reset by the slave
-                //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
-                spicommon_dmaworkaround_transfer_active(bus_attr->tx_dma_chan);
-            }
-            spi_new_trans(device_to_send, cur_trans_buf);
-        }
-        // Exit of the ISR, handle interrupt re-enable (if sending transaction), retry (if there's coming BG),
-        // or resume acquiring device task (if quit due to bus acquiring).
-    } while (!spi_bus_lock_bg_exit(lock, trans_found, &do_yield));
-
-    if (do_yield) portYIELD_FROM_ISR();
-}
-
-static SPI_MASTER_ISR_ATTR esp_err_t check_trans_valid(spi_device_handle_t handle, spi_transaction_t *trans_desc)
-{
-    SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
-    spi_host_t *host = handle->host;
-    const spi_bus_attr_t* bus_attr = host->bus_attr;
-    bool tx_enabled = (trans_desc->flags & SPI_TRANS_USE_TXDATA) || (trans_desc->tx_buffer);
-    bool rx_enabled = (trans_desc->flags & SPI_TRANS_USE_RXDATA) || (trans_desc->rx_buffer);
-    spi_transaction_ext_t *t_ext = (spi_transaction_ext_t *)trans_desc;
-    bool dummy_enabled = (((trans_desc->flags & SPI_TRANS_VARIABLE_DUMMY)? t_ext->dummy_bits: handle->cfg.dummy_bits) != 0);
-    bool extra_dummy_enabled = handle->hal_dev.timing_conf.timing_dummy;
-    bool is_half_duplex = ((handle->cfg.flags & SPI_DEVICE_HALFDUPLEX) != 0);
-
-    //check transmission length
-    SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_RXDATA)==0 || trans_desc->rxlength <= 32, "SPI_TRANS_USE_RXDATA only available for rxdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
-    SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_TXDATA)==0 || trans_desc->length <= 32, "SPI_TRANS_USE_TXDATA only available for txdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(trans_desc->length <= bus_attr->max_transfer_sz*8, "txdata transfer > host maximum", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(trans_desc->rxlength <= bus_attr->max_transfer_sz*8, "rxdata transfer > host maximum", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(is_half_duplex || trans_desc->rxlength <= trans_desc->length, "rx length > tx length in full duplex mode", ESP_ERR_INVALID_ARG);
-    //check working mode
-    SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "incompatible iface params", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && !is_half_duplex), "incompatible iface params", ESP_ERR_INVALID_ARG);
-#ifdef CONFIG_IDF_TARGET_ESP32
-    SPI_CHECK(!is_half_duplex || !bus_attr->dma_enabled || !rx_enabled || !tx_enabled, "SPI half duplex mode does not support using DMA with both MOSI and MISO phases.", ESP_ERR_INVALID_ARG );
-#elif CONFIG_IDF_TARGET_ESP32S3
-    SPI_CHECK(!is_half_duplex || !tx_enabled || !rx_enabled, "SPI half duplex mode is not supported when both MOSI and MISO phases are enabled.", ESP_ERR_INVALID_ARG);
-#endif
-    //MOSI phase is skipped only when both tx_buffer and SPI_TRANS_USE_TXDATA are not set.
-    SPI_CHECK(trans_desc->length != 0 || !tx_enabled, "trans tx_buffer should be NULL and SPI_TRANS_USE_TXDATA should be cleared to skip MOSI phase.", ESP_ERR_INVALID_ARG);
-    //MISO phase is skipped only when both rx_buffer and SPI_TRANS_USE_RXDATA are not set.
-    //If rxlength is set to 0 in full-duplex mode, it will automatically be set to length
-    SPI_CHECK(!is_half_duplex || trans_desc->rxlength != 0 || !rx_enabled, "trans rx_buffer should be NULL and SPI_TRANS_USE_RXDATA should be cleared to skip MISO phase.", ESP_ERR_INVALID_ARG);
-    //In full-duplex mode, default rxlength to the same value as length, if not filled in.
-    // setting rxlength to length is OK, even when rx_buffer is NULL
-    if (trans_desc->rxlength==0 && !is_half_duplex) {
-        trans_desc->rxlength=trans_desc->length;
-    }
-    //Dummy phase is not available when both data out and in are enabled, regardless of FD or HD mode.
-    SPI_CHECK(!tx_enabled || !rx_enabled || !dummy_enabled || !extra_dummy_enabled, "Dummy phase is not available when both data out and in are enabled", ESP_ERR_INVALID_ARG);
-
-    return ESP_OK;
-}
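
To illustrate what check_trans_valid() accepts, a sketch of a small full-duplex transfer that stays within the 32-bit limit of SPI_TRANS_USE_TXDATA/SPI_TRANS_USE_RXDATA (the payload is arbitrary); rxlength is left at 0 so it defaults to length:

#include <string.h>
#include "driver/spi_master.h"

// Send 3 bytes and receive 3 bytes using the in-descriptor tx_data/rx_data arrays.
static esp_err_t example_small_trans(spi_device_handle_t dev)
{
    spi_transaction_t t;
    memset(&t, 0, sizeof(t));
    t.flags = SPI_TRANS_USE_TXDATA | SPI_TRANS_USE_RXDATA;
    t.length = 3 * 8;        // in bits; must be <= 32 with SPI_TRANS_USE_TXDATA
    t.tx_data[0] = 0x9F;     // arbitrary payload
    return spi_device_transmit(dev, &t);
}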
-
-static SPI_MASTER_ISR_ATTR void uninstall_priv_desc(spi_trans_priv_t* trans_buf)
-{
-    spi_transaction_t *trans_desc = trans_buf->trans;
-    if ((void *)trans_buf->buffer_to_send != &trans_desc->tx_data[0] &&
-        trans_buf->buffer_to_send != trans_desc->tx_buffer) {
-        free((void *)trans_buf->buffer_to_send); //force free, ignore const
-    }
-    // copy data from the temporary DMA-capable buffer back to the original buffer and free the temporary one.
-    if ((void *)trans_buf->buffer_to_rcv != &trans_desc->rx_data[0] &&
-        trans_buf->buffer_to_rcv != trans_desc->rx_buffer) { // NOLINT(clang-analyzer-unix.Malloc)
-        if (trans_desc->flags & SPI_TRANS_USE_RXDATA) {
-            memcpy((uint8_t *) & trans_desc->rx_data[0], trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
-        } else {
-            memcpy(trans_desc->rx_buffer, trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
-        }
-        free(trans_buf->buffer_to_rcv);
-    }
-}
-
-static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_transaction_t *trans_desc, spi_trans_priv_t* new_desc, bool isdma)
-{
-    *new_desc = (spi_trans_priv_t) { .trans = trans_desc, };
-
-    // rx memory assign
-    uint32_t* rcv_ptr;
-    if ( trans_desc->flags & SPI_TRANS_USE_RXDATA ) {
-        rcv_ptr = (uint32_t *)&trans_desc->rx_data[0];
-    } else {
-        //if neither RXDATA nor rx_buffer is used, buffer_to_rcv is assigned NULL
-        rcv_ptr = trans_desc->rx_buffer;
-    }
-    if (rcv_ptr && isdma && (!esp_ptr_dma_capable(rcv_ptr) || ((int)rcv_ptr % 4 != 0))) {
-        //if the rx buffer in the desc is not DMA-capable, malloc a new one. Its length needs to be a multiple of 32 bits to avoid heap corruption.
-        ESP_LOGD(SPI_TAG, "Allocate RX buffer for DMA" );
-        rcv_ptr = heap_caps_malloc((trans_desc->rxlength + 31) / 8, MALLOC_CAP_DMA);
-        if (rcv_ptr == NULL) goto clean_up;
-    }
-    new_desc->buffer_to_rcv = rcv_ptr;
-
-    // tx memory assign
-    const uint32_t *send_ptr;
-    if ( trans_desc->flags & SPI_TRANS_USE_TXDATA ) {
-        send_ptr = (uint32_t *)&trans_desc->tx_data[0];
-    } else {
-        //if neither TXDATA nor tx_buffer is used, the tx data is assigned NULL
-        send_ptr = trans_desc->tx_buffer ;
-    }
-    if (send_ptr && isdma && !esp_ptr_dma_capable( send_ptr )) {
-        //if the tx buffer in the desc is not DMA-capable, malloc a new one
-        ESP_LOGD(SPI_TAG, "Allocate TX buffer for DMA" );
-        uint32_t *temp = heap_caps_malloc((trans_desc->length + 7) / 8, MALLOC_CAP_DMA);
-        if (temp == NULL) goto clean_up;
-
-        memcpy( temp, send_ptr, (trans_desc->length + 7) / 8 );
-        send_ptr = temp;
-    }
-    new_desc->buffer_to_send = send_ptr;
-
-    return ESP_OK;
-
-clean_up:
-    uninstall_priv_desc(new_desc);
-    return ESP_ERR_NO_MEM;
-}
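
As setup_priv_desc() shows, a buffer that is not DMA-capable (or not word-aligned, for rx) costs an extra allocation and memcpy per transaction; allocating with MALLOC_CAP_DMA up front avoids that path. A sketch with an arbitrary size:

#include "esp_heap_caps.h"

// 64-byte buffer in internal, DMA-capable RAM; the driver can then use it directly.
static uint8_t *alloc_dma_buf(void)
{
    return heap_caps_malloc(64, MALLOC_CAP_DMA);
}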
-
-esp_err_t SPI_MASTER_ATTR spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
-{
-    esp_err_t ret = check_trans_valid(handle, trans_desc);
-    if (ret != ESP_OK) return ret;
-
-    spi_host_t *host = handle->host;
-
-    SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );
-
-    spi_trans_priv_t trans_buf;
-    ret = setup_priv_desc(trans_desc, &trans_buf, (host->bus_attr->dma_enabled));
-    if (ret != ESP_OK) return ret;
-
-#ifdef CONFIG_PM_ENABLE
-    esp_pm_lock_acquire(host->bus_attr->pm_lock);
-#endif
-    //Send to queue and invoke the ISR.
-
-    BaseType_t r = xQueueSend(handle->trans_queue, (void *)&trans_buf, ticks_to_wait);
-    if (!r) {
-        ret = ESP_ERR_TIMEOUT;
-#ifdef CONFIG_PM_ENABLE
-        //Release APB frequency lock
-        esp_pm_lock_release(host->bus_attr->pm_lock);
-#endif
-        goto clean_up;
-    }
-
-    // The ISR will be invoked at correct time by the lock with `spi_bus_intr_enable`.
-    ret = spi_bus_lock_bg_request(handle->dev_lock);
-    if (ret != ESP_OK) {
-        goto clean_up;
-    }
-    return ESP_OK;
-
-clean_up:
-    uninstall_priv_desc(&trans_buf);
-    return ret;
-}
-
-esp_err_t SPI_MASTER_ATTR spi_device_get_trans_result(spi_device_handle_t handle, spi_transaction_t **trans_desc, TickType_t ticks_to_wait)
-{
-    BaseType_t r;
-    spi_trans_priv_t trans_buf;
-    SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
-
-    //use the interrupt, block until return
-    r=xQueueReceive(handle->ret_queue, (void*)&trans_buf, ticks_to_wait);
-    if (!r) {
-        // The memory occupied by the rx and tx DMA buffers is freed only when the descriptor is received from the queue (transaction finished).
-        // On timeout, wait and retry.
-        // Every in-flight transaction request occupies internal memory as a DMA buffer if needed.
-        return ESP_ERR_TIMEOUT;
-    }
-    //release temporary buffers
-    uninstall_priv_desc(&trans_buf);
-    (*trans_desc) = trans_buf.trans;
-
-    return ESP_OK;
-}
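
The two calls above are meant to be used as a pair; queueing several descriptors before collecting results keeps the bus busy between transactions. A sketch, assuming the caller prepared the descriptors and that n does not exceed the device's queue_size:

#include "freertos/FreeRTOS.h"
#include "driver/spi_master.h"
#include "esp_err.h"

static void example_pipelined(spi_device_handle_t dev, spi_transaction_t *trans, int n)
{
    for (int i = 0; i < n; i++) {
        ESP_ERROR_CHECK(spi_device_queue_trans(dev, &trans[i], portMAX_DELAY));
    }
    for (int i = 0; i < n; i++) {
        spi_transaction_t *done;
        ESP_ERROR_CHECK(spi_device_get_trans_result(dev, &done, portMAX_DELAY));
    }
}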
-
-//Porcelain to do one blocking transmission.
-esp_err_t SPI_MASTER_ATTR spi_device_transmit(spi_device_handle_t handle, spi_transaction_t *trans_desc)
-{
-    esp_err_t ret;
-    spi_transaction_t *ret_trans;
-    //ToDo: check if any spi transfers in flight
-    ret = spi_device_queue_trans(handle, trans_desc, portMAX_DELAY);
-    if (ret != ESP_OK) return ret;
-
-    ret = spi_device_get_trans_result(handle, &ret_trans, portMAX_DELAY);
-    if (ret != ESP_OK) return ret;
-
-    assert(ret_trans == trans_desc);
-    return ESP_OK;
-}
-
-esp_err_t SPI_MASTER_ISR_ATTR spi_device_acquire_bus(spi_device_t *device, TickType_t wait)
-{
-    spi_host_t *const host = device->host;
-    SPI_CHECK(wait==portMAX_DELAY, "acquire finite time not supported now.", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(!spi_bus_device_is_polling(device), "Cannot acquire bus when a polling transaction is in progress.", ESP_ERR_INVALID_STATE );
-
-    esp_err_t ret = spi_bus_lock_acquire_start(device->dev_lock, wait);
-    if (ret != ESP_OK) {
-        return ret;
-    }
-    host->device_acquiring_lock = device;
-
-    ESP_LOGD(SPI_TAG, "device%d locked the bus", device->id);
-
-#ifdef CONFIG_PM_ENABLE
-    // though we don't suggest blocking the task before ``release_bus``, it is still allowed.
-    // this keeps the spi clock at 80MHz even if all tasks are blocked
-    esp_pm_lock_acquire(host->bus_attr->pm_lock);
-#endif
-    //configure the device ahead so that we don't need to do it again in the following transactions
-    spi_setup_device(host->device[device->id]);
-    //the DMA is also occupied by the device; all the slave devices that use DMA should wait until the bus is released.
-    if (host->bus_attr->dma_enabled) {
-        //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
-        spicommon_dmaworkaround_transfer_active(host->bus_attr->tx_dma_chan);
-    }
-    return ESP_OK;
-}
-
-// This function restore configurations required in the non-polling mode
-void SPI_MASTER_ISR_ATTR spi_device_release_bus(spi_device_t *dev)
-{
-    spi_host_t *host = dev->host;
-
-    if (spi_bus_device_is_polling(dev)){
-        ESP_EARLY_LOGE(SPI_TAG, "Cannot release bus when a polling transaction is in progress.");
-        assert(0);
-    }
-
-    if (host->bus_attr->dma_enabled) {
-        //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
-        spicommon_dmaworkaround_idle(host->bus_attr->tx_dma_chan);
-    }
-    //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
-
-    //allow clock to be lower than 80MHz when all tasks blocked
-#ifdef CONFIG_PM_ENABLE
-    //Release APB frequency lock
-    esp_pm_lock_release(host->bus_attr->pm_lock);
-#endif
-    ESP_LOGD(SPI_TAG, "device%d release bus", dev->id);
-
-    host->device_acquiring_lock = NULL;
-    esp_err_t ret = spi_bus_lock_acquire_end(dev->dev_lock);
-    assert(ret == ESP_OK);
-}
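
Typical pairing of the acquire/release calls above around a burst of polling transfers, so the device is configured once and no other device can interleave; a sketch, with the transaction descriptors assumed to be prepared by the caller:

#include "freertos/FreeRTOS.h"
#include "driver/spi_master.h"
#include "esp_err.h"

static void example_burst(spi_device_handle_t dev, spi_transaction_t *t, int n)
{
    ESP_ERROR_CHECK(spi_device_acquire_bus(dev, portMAX_DELAY));
    for (int i = 0; i < n; i++) {
        ESP_ERROR_CHECK(spi_device_polling_transmit(dev, &t[i]));
    }
    spi_device_release_bus(dev);
}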
-
-esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_start(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
-{
-    esp_err_t ret;
-    SPI_CHECK(ticks_to_wait == portMAX_DELAY, "currently timeout is not available for polling transactions", ESP_ERR_INVALID_ARG);
-    ret = check_trans_valid(handle, trans_desc);
-    if (ret!=ESP_OK) return ret;
-    SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot send polling transaction while the previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );
-ESP_LOGI("gragra", "LOCAL SPI_MASTER");
-    /* If device_acquiring_lock is set to handle, it means that the user has already
-     * acquired the bus thanks to the function `spi_device_acquire_bus()`.
-     * In that case, we don't need to take the lock again. */
-    spi_host_t *host = handle->host;
-    if (host->device_acquiring_lock != handle) {
-        ret = spi_bus_lock_acquire_start(handle->dev_lock, ticks_to_wait);
-    } else {
-        ret = spi_bus_lock_wait_bg_done(handle->dev_lock, ticks_to_wait);
-    }
-    if (ret != ESP_OK) return ret;
-
-    ret = setup_priv_desc(trans_desc, &host->cur_trans_buf, (host->bus_attr->dma_enabled));
-    if (ret!=ESP_OK) return ret;
-
-    //Polling, no interrupt is used.
-    host->polling = true;
-
-    ESP_LOGV(SPI_TAG, "polling trans");
-    spi_new_trans(handle, &host->cur_trans_buf);
-
-    return ESP_OK;
-}
-
-esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_end(spi_device_handle_t handle, TickType_t ticks_to_wait)
-{
-    SPI_CHECK(handle != NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
-    spi_host_t *host = handle->host;
-
-    assert(host->cur_cs == handle->id);
-    assert(handle == get_acquiring_dev(host));
-
-    TickType_t start = xTaskGetTickCount();
-    while (!spi_hal_usr_is_done(&host->hal)) {
-        TickType_t end = xTaskGetTickCount();
-        if (end - start > ticks_to_wait) {
-            return ESP_ERR_TIMEOUT;
-        }
-    }
-
-    ESP_LOGV(SPI_TAG, "polling trans done");
-    //deal with the in-flight transaction
-    spi_post_trans(host);
-    //release temporary buffers
-    uninstall_priv_desc(&host->cur_trans_buf);
-
-    host->polling = false;
-    if (host->device_acquiring_lock != handle) {
-        assert(host->device_acquiring_lock == NULL);
-        spi_bus_lock_acquire_end(handle->dev_lock);
-    }
-
-    return ESP_OK;
-}
-
-esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_transmit(spi_device_handle_t handle, spi_transaction_t* trans_desc)
-{
-    esp_err_t ret;
-    if (handle->host->mutex) xSemaphoreTake(handle->host->mutex, portMAX_DELAY);
-
-    ret = spi_device_polling_start(handle, trans_desc, portMAX_DELAY);
-    if (ret != ESP_OK) {
-        if (handle->host->mutex) xSemaphoreGive(handle->host->mutex);
-        return ret;
-    }
-
-    ret = spi_device_polling_end(handle, portMAX_DELAY);
-    if (handle->host->mutex) xSemaphoreGive(handle->host->mutex);
-    return ret;
-}

+ 0 - 1047
components/_override/esp32/spi_master.c.debug

@@ -1,1047 +0,0 @@
-// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Architecture:
-
-We can initialize a SPI driver, but we don't talk to the SPI driver itself, we address a device. A device essentially
-is a combination of SPI port and CS pin, plus some information about the specifics of communication to the device
-(timing, command/address length etc). The arbitration between tasks is also in conception of devices.
-
-A device can work in interrupt mode, in polling mode, or in a third, more
-complicated mode which combines the two modes above:
-
-1. Work in the ISR with a set of queues; one per device.
-
-   The idea is that to send something to a SPI device, you allocate a
-   transaction descriptor. It contains some information about the transfer
-   like the length, address, command etc, plus pointers to transmit and
-   receive buffer. The address of this block gets pushed into the transmit
-   queue. The SPI driver does its magic, and sends and retrieves the data
-   eventually. The data gets written to the receive buffers, if needed the
-   transaction descriptor is modified to indicate returned parameters and
-   the entire thing goes into the return queue, where whatever software
-   initiated the transaction can retrieve it.
-
-   The entire thing is run from the SPI interrupt handler. If SPI is done
-   transmitting/receiving but nothing is in the queue, it will not clear the
-   SPI interrupt but just disable it by esp_intr_disable. This way, when a
-   new thing is sent, pushing the packet into the send queue and re-enabling
-   the interrupt (by esp_intr_enable) will trigger the interrupt again, which
-   can then take care of the sending.
-
-2. Work in the polling mode in the task.
-
-   In this mode we get rid of the ISR, FreeRTOS queue and task switching, the
-   task is no longer blocked during a transaction. This increases the CPU
-   load, but decreases the interval between SPI transactions. Each time only one
-   device (in one task) can send polling transactions, transactions to
-   other devices are blocked until the polling transaction of current device
-   is done.
-
-   In the polling mode, the queue is not used, all the operations are done
-   in the task. The task calls ``spi_device_polling_start`` to setup and start
-   a new transaction, then call ``spi_device_polling_end`` to handle the
-   return value of the transaction.
-
-   To handle the arbitration among devices, a device "temporarily" acquires
-   the bus via the ``device_acquire_bus_internal`` function, which writes
-   dev_request by a CAS operation. Other devices which want to send polling
-   transactions but don't own the bus will block and wait until given the
-   semaphore which indicates the ownership of bus.
-
-   In case of the ISR is still sending transactions to other devices, the ISR
-   should maintain a ``random_idle`` flag indicating that it's not doing
-   transactions. When the bus is locked, the ISR can only send new
-   transactions to the acquiring device. The ISR will automatically disable
-   itself and send a semaphore to the device if the ISR is free. If the device
-   sees the random_idle flag, it can directly start its polling transaction.
-   Otherwise it should block and wait for the semaphore from the ISR.
-
-   After the polling transaction, the driver will release the bus. During the
-   release of the bus, the driver searches all other devices to see whether
-   any device is waiting to acquire the bus; if so, it acquires the bus for
-   that device and sends it a semaphore if the device queue is empty, or
-   invokes the ISR for it. If no other device needs to acquire the bus, but
-   there are still transactions in the queues, the ISR will also be invoked.
-
-   To get better polling efficiency, user can call ``spi_device_acquire_bus``
-   function, which also calls the ``spi_bus_lock_acquire_core`` function,
-   before a series of polling transactions to a device. The bus acquiring and
-   task switching before and after each polling transaction will be skipped.
-
-3. Mixed mode
-
-   The driver is written under the assumption that polling and interrupt
-   transactions are not happening simultaneously. When sending polling
-   transactions, it will check whether the ISR is active, which includes the
-   case the ISR is sending the interrupt transactions of the acquiring
-   device. If the ISR is still working, the routine sending a polling
-   transaction will get blocked and wait for the semaphore from the ISR,
-   which indicates that the ISR is now free.
-
-   A fatal case is when a polling transaction is in flight and the ISR
-   receives an interrupt transaction. The behavior of the driver is then
-   unpredictable, so this must be strictly forbidden.
-
-We have two bits to control the interrupt:
-
-1. The slave->trans_done bit, which is automatically asserted when a transaction is done.
-
-   This bit is cleared during an interrupt transaction, so that the interrupt
-   will be triggered when the transaction is done, or the SW can check the
-   bit to see if the transaction is done for polling transactions.
-
-   When no transaction is in-flight, the bit is kept active, so that the SW
-   can easily invoke the ISR by enabling the interrupt.
-
-2. The system interrupt enable/disable, controlled by esp_intr_enable and esp_intr_disable.
-
-   The interrupt is disabled (by the ISR itself) when no interrupt transaction
-   is queued. When the bus is not occupied, any task that queues a
-   transaction will enable the interrupt to invoke the ISR.
-   When the bus is occupied by a device, other devices will put off
-   invoking the ISR until the moment the bus is released. The device
-   acquiring the bus can still send interrupt transactions by enabling the
-   interrupt.
-
-*/
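
As a concrete illustration of the first two modes described above, a sketch sending the same one-byte transfer through the interrupt path and then the polling path (device handle and payload are placeholders):

#include <string.h>
#include "driver/spi_master.h"
#include "esp_err.h"

static void example_modes(spi_device_handle_t dev)
{
    spi_transaction_t t;
    memset(&t, 0, sizeof(t));
    t.flags = SPI_TRANS_USE_TXDATA;
    t.length = 8;                  // one byte, in bits
    t.tx_data[0] = 0xA5;           // arbitrary payload

    ESP_ERROR_CHECK(spi_device_transmit(dev, &t));          // interrupt mode: queued, task blocks on a queue
    ESP_ERROR_CHECK(spi_device_polling_transmit(dev, &t));  // polling mode: busy-waits, lower per-transaction overhead
}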
-
-#include <string.h>
-#include "driver/spi_common_internal.h"
-#include "driver/spi_master.h"
-
-#include "esp_log.h"
-#include "freertos/task.h"
-#include "freertos/queue.h"
-#include "freertos/semphr.h"
-#include "soc/soc_memory_layout.h"
-#include "driver/gpio.h"
-#include "hal/spi_hal.h"
-#include "esp_heap_caps.h"
-
-
-typedef struct spi_device_t spi_device_t;
-
-/// struct to hold private transaction data (like tx and rx buffer for DMA).
-typedef struct {
-    spi_transaction_t   *trans;
-    const uint32_t *buffer_to_send;   //equals tx_data, if SPI_TRANS_USE_TXDATA is applied; otherwise if the original buffer wasn't in DMA-capable memory, this gets the address of a temporary buffer that is;
-                                //otherwise set to the original buffer, or NULL if no buffer is assigned.
-    uint32_t *buffer_to_rcv;    // similar to buffer_to_send
-} spi_trans_priv_t;
-
-typedef struct {
-    int id;
-    spi_device_t* device[DEV_NUM_MAX];
-    intr_handle_t intr;
-    spi_hal_context_t hal;
-    spi_trans_priv_t cur_trans_buf;
-    int cur_cs;     //current device doing transaction
-    const spi_bus_attr_t* bus_attr;
-
-    /**
-     * the bus is permanently controlled by a device until ``spi_bus_release_bus`` is called. Otherwise
-     * the acquired SPI bus will be released when ``spi_device_polling_end`` is called.
-     */
-    spi_device_t* device_acquiring_lock;
-
-//debug information
-    bool polling;   //in the process of a polling transaction; avoid queueing new transactions into the ISR
-
-    // PATCH
-    SemaphoreHandle_t mutex;
-    int count;
-} spi_host_t;
-
-struct spi_device_t {
-    int id;
-    QueueHandle_t trans_queue;
-    QueueHandle_t ret_queue;
-    spi_device_interface_config_t cfg;
-    spi_hal_dev_config_t hal_dev;
-    spi_host_t *host;
-    spi_bus_lock_dev_handle_t dev_lock;
-};
-
-static spi_host_t* bus_driver_ctx[SOC_SPI_PERIPH_NUM] = {};
-
-static const char *SPI_TAG = "spi_master";
-#define SPI_CHECK(a, str, ret_val, ...) \
-    if (unlikely(!(a))) { \
-        ESP_LOGE(SPI_TAG,"%s(%d): "str, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
-        return (ret_val); \
-    }
-
-
-static void spi_intr(void *arg);
-static void spi_bus_intr_enable(void *host);
-static void spi_bus_intr_disable(void *host);
-
-static esp_err_t spi_master_deinit_driver(void* arg);
-
-static inline bool is_valid_host(spi_host_device_t host)
-{
-//SPI1 can be used as GPSPI only on ESP32
-#if CONFIG_IDF_TARGET_ESP32
-    return host >= SPI1_HOST && host <= SPI3_HOST;
-#elif (SOC_SPI_PERIPH_NUM == 2)
-    return host == SPI2_HOST;
-#elif (SOC_SPI_PERIPH_NUM == 3)
-    return host >= SPI2_HOST && host <= SPI3_HOST;
-#endif
-}
-
-// Should be called before any devices are actually registered or used.
-// Currently automatically called after `spi_bus_initialize()` and when the first device is registered.
-static esp_err_t spi_master_init_driver(spi_host_device_t host_id)
-{
-    esp_err_t err = ESP_OK;
-
-    const spi_bus_attr_t* bus_attr = spi_bus_get_attr(host_id);
-    SPI_CHECK(bus_attr != NULL, "host_id not initialized", ESP_ERR_INVALID_STATE);
-    SPI_CHECK(bus_attr->lock != NULL, "SPI Master cannot attach to bus. (Check CONFIG_SPI_FLASH_SHARE_SPI1_BUS)", ESP_ERR_INVALID_ARG);
-    // spihost contains atomic variables, which should not be put in PSRAM
-    spi_host_t* host = heap_caps_malloc(sizeof(spi_host_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
-    if (host == NULL) {
-        err = ESP_ERR_NO_MEM;
-        goto cleanup;
-    }
-
-    *host = (spi_host_t) {
-        .id = host_id,
-        .cur_cs = DEV_NUM_MAX,
-        .polling = false,
-        .device_acquiring_lock = NULL,
-        .bus_attr = bus_attr,
-    };
-
-    if (host_id != SPI1_HOST) {
-        // interrupts are not allowed on SPI1 bus
-        err = esp_intr_alloc(spicommon_irqsource_for_host(host_id),
-                            bus_attr->bus_cfg.intr_flags | ESP_INTR_FLAG_INTRDISABLED,
-                            spi_intr, host, &host->intr);
-        if (err != ESP_OK) {
-            goto cleanup;
-        }
-    }
-
-    //assign the SPI, RX DMA and TX DMA peripheral registers beginning address
-    spi_hal_config_t hal_config = {
-        //On ESP32-S2 and earlier chips, DMA registers are part of SPI registers. Pass the registers of SPI peripheral to control it.
-        .dma_in = SPI_LL_GET_HW(host_id),
-        .dma_out = SPI_LL_GET_HW(host_id),
-        .dma_enabled = bus_attr->dma_enabled,
-        .dmadesc_tx = bus_attr->dmadesc_tx,
-        .dmadesc_rx = bus_attr->dmadesc_rx,
-        .tx_dma_chan = bus_attr->tx_dma_chan,
-        .rx_dma_chan = bus_attr->rx_dma_chan,
-        .dmadesc_n = bus_attr->dma_desc_num,
-    };
-    spi_hal_init(&host->hal, host_id, &hal_config);
-
-    if (host_id != SPI1_HOST) {
-        //SPI1 attributes are already initialized at start up.
-        spi_bus_lock_handle_t lock = spi_bus_lock_get_by_id(host_id);
-        spi_bus_lock_set_bg_control(lock, spi_bus_intr_enable, spi_bus_intr_disable, host);
-        spi_bus_register_destroy_func(host_id, spi_master_deinit_driver, host);
-    }
-
-    bus_driver_ctx[host_id] = host;
-    return ESP_OK;
-
-cleanup:
-    if (host) {
-        spi_hal_deinit(&host->hal);
-        if (host->intr) {
-            esp_intr_free(host->intr);
-        }
-    }
-    free(host);
-    return err;
-}
-
-static esp_err_t spi_master_deinit_driver(void* arg)
-{
-    spi_host_t *host = (spi_host_t*)arg;
-    SPI_CHECK(host != NULL, "host_id not in use", ESP_ERR_INVALID_STATE);
-
-    int host_id = host->id;
-    SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG);
-
-    int x;
-    for (x=0; x<DEV_NUM_MAX; x++) {
-        SPI_CHECK(host->device[x] == NULL, "not all CSes freed", ESP_ERR_INVALID_STATE);
-    }
-
-    spi_hal_deinit(&host->hal);
-
-    if (host->intr) {
-        esp_intr_free(host->intr);
-    }
-    free(host);
-    bus_driver_ctx[host_id] = NULL;
-    return ESP_OK;
-}
-
-void spi_get_timing(bool gpio_is_used, int input_delay_ns, int eff_clk, int* dummy_o, int* cycles_remain_o)
-{
-    int timing_dummy;
-    int timing_miso_delay;
-
-    spi_hal_cal_timing(eff_clk, gpio_is_used, input_delay_ns, &timing_dummy, &timing_miso_delay);
-    if (dummy_o) *dummy_o = timing_dummy;
-    if (cycles_remain_o) *cycles_remain_o = timing_miso_delay;
-}
-
-int spi_get_freq_limit(bool gpio_is_used, int input_delay_ns)
-{
-    return spi_hal_get_freq_limit(gpio_is_used, input_delay_ns);
-}
-
-/*
- Add a device. This allocates a CS line for the device, allocates memory for the device structure and hooks
- up the CS pin to whatever is specified.
-*/
-esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interface_config_t *dev_config, spi_device_handle_t *handle)
-{
-    spi_device_t *dev = NULL;
-    esp_err_t err = ESP_OK;
-
-    SPI_CHECK(is_valid_host(host_id), "invalid host", ESP_ERR_INVALID_ARG);
-    if (bus_driver_ctx[host_id] == NULL) {
-        //lazily initialize the driver; it gets deinitialized when the bus is freed
-        err = spi_master_init_driver(host_id);
-        if (err != ESP_OK) {
-            return err;
-        }
-    }
-
-    spi_host_t *host = bus_driver_ctx[host_id];
-    const spi_bus_attr_t* bus_attr = host->bus_attr;
-    SPI_CHECK(dev_config->spics_io_num < 0 || GPIO_IS_VALID_OUTPUT_GPIO(dev_config->spics_io_num), "spics pin invalid", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(dev_config->clock_speed_hz > 0, "invalid sclk speed", ESP_ERR_INVALID_ARG);
-#ifdef CONFIG_IDF_TARGET_ESP32
-    //The hardware looks like it would support this, but actually setting cs_ena_pretrans when transferring in full
-    //duplex mode does absolutely nothing on the ESP32.
-    SPI_CHECK(dev_config->cs_ena_pretrans <= 1 || (dev_config->address_bits == 0 && dev_config->command_bits == 0) ||
-        (dev_config->flags & SPI_DEVICE_HALFDUPLEX), "In full-duplex mode, cs pretrans delay > 1 is not supported together with address_bits or command_bits", ESP_ERR_INVALID_ARG);
-#endif
-    uint32_t lock_flag = ((dev_config->spics_io_num != -1)? SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED: 0);
-
-    spi_bus_lock_dev_config_t lock_config = {
-        .flags = lock_flag,
-    };
-    spi_bus_lock_dev_handle_t dev_handle;
-    err = spi_bus_lock_register_dev(bus_attr->lock, &lock_config, &dev_handle);
-    if (err != ESP_OK) {
-        goto nomem;
-    }
-
-    int freecs = spi_bus_lock_get_dev_id(dev_handle);
-    SPI_CHECK(freecs != -1, "no free cs pins for the host", ESP_ERR_NOT_FOUND);
-
-    //input parameters to calculate timing configuration
-    int half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
-    int no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
-    int duty_cycle = (dev_config->duty_cycle_pos==0) ? 128 : dev_config->duty_cycle_pos;
-    int use_gpio = !(bus_attr->flags & SPICOMMON_BUSFLAG_IOMUX_PINS);
-    spi_hal_timing_param_t timing_param = {
-        .half_duplex = half_duplex,
-        .no_compensate = no_compensate,
-        .clock_speed_hz = dev_config->clock_speed_hz,
-        .duty_cycle = duty_cycle,
-        .input_delay_ns = dev_config->input_delay_ns,
-        .use_gpio = use_gpio
-    };
-
-    //output values of timing configuration
-    spi_hal_timing_conf_t temp_timing_conf;
-    int freq;
-    esp_err_t ret = spi_hal_cal_clock_conf(&timing_param, &freq, &temp_timing_conf);
-    SPI_CHECK(ret==ESP_OK, "assigned clock speed not supported", ret);
-
-    //Allocate memory for device
-    dev = malloc(sizeof(spi_device_t));
-    if (dev == NULL) goto nomem;
-    memset(dev, 0, sizeof(spi_device_t));
-
-    dev->id = freecs;
-    dev->dev_lock = dev_handle;
-
-    //Allocate queues, set defaults
-    dev->trans_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
-    dev->ret_queue = xQueueCreate(dev_config->queue_size, sizeof(spi_trans_priv_t));
-    if (!dev->trans_queue || !dev->ret_queue) {
-        goto nomem;
-    }
-
-    //We want to save a copy of the dev config in the dev struct.
-    memcpy(&dev->cfg, dev_config, sizeof(spi_device_interface_config_t));
-    dev->cfg.duty_cycle_pos = duty_cycle;
-    // TODO: if we have to change the apb clock among transactions, re-calculate this each time the apb clock lock is locked.
-
-    //Set CS pin, CS options
-    if (dev_config->spics_io_num >= 0) {
-        spicommon_cs_initialize(host_id, dev_config->spics_io_num, freecs, use_gpio);
-    }
-
-    // create a mutex if we have more than one client (create it only once)
-    if (host->count++ && !host->mutex) {
-        ESP_LOGI(SPI_TAG, "More than one device on SPI %d => creating mutex", host_id);
-        //host->mutex = xSemaphoreCreateMutex();
-    }
-
-    //save a pointer to device in spi_host_t
-    host->device[freecs] = dev;
-    //save a pointer to host in spi_device_t
-    dev->host= host;
-
-    //initialise the device specific configuration
-    spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
-    hal_dev->mode = dev_config->mode;
-    hal_dev->cs_setup = dev_config->cs_ena_pretrans;
-    hal_dev->cs_hold = dev_config->cs_ena_posttrans;
-    //set hold_time to 0 will not actually append delay to CS
-    //set it to 1 since we do need at least one clock of hold time in most cases
-    if (hal_dev->cs_hold == 0) {
-        hal_dev->cs_hold = 1;
-    }
-    hal_dev->cs_pin_id = dev->id;
-    hal_dev->timing_conf = temp_timing_conf;
-    hal_dev->sio = (dev_config->flags) & SPI_DEVICE_3WIRE ? 1 : 0;
-    hal_dev->half_duplex = dev_config->flags & SPI_DEVICE_HALFDUPLEX ? 1 : 0;
-    hal_dev->tx_lsbfirst = dev_config->flags & SPI_DEVICE_TXBIT_LSBFIRST ? 1 : 0;
-    hal_dev->rx_lsbfirst = dev_config->flags & SPI_DEVICE_RXBIT_LSBFIRST ? 1 : 0;
-    hal_dev->no_compensate = dev_config->flags & SPI_DEVICE_NO_DUMMY ? 1 : 0;
-#if SOC_SPI_SUPPORT_AS_CS
-    hal_dev->as_cs = dev_config->flags& SPI_DEVICE_CLK_AS_CS ? 1 : 0;
-#endif
-    hal_dev->positive_cs = dev_config->flags & SPI_DEVICE_POSITIVE_CS ? 1 : 0;
-
-    *handle = dev;
-    ESP_LOGD(SPI_TAG, "SPI%d: New device added to CS%d, effective clock: %dkHz", host_id+1, freecs, freq/1000);
-
-    return ESP_OK;
-
-nomem:
-    if (dev) {
-        if (dev->trans_queue) vQueueDelete(dev->trans_queue);
-        if (dev->ret_queue) vQueueDelete(dev->ret_queue);
-        spi_bus_lock_unregister_dev(dev->dev_lock);
-    }
-    free(dev);
-    return ESP_ERR_NO_MEM;
-}
-
-esp_err_t spi_bus_remove_device(spi_device_handle_t handle)
-{
-    SPI_CHECK(handle!=NULL, "invalid handle", ESP_ERR_INVALID_ARG);
-    //These checks aren't exhaustive; another thread could sneak in a transaction in between. These are only here to
-    //catch design errors and aren't meant to be triggered during normal operation.
-    SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
-    SPI_CHECK(handle->host->cur_cs == DEV_NUM_MAX || handle->host->device[handle->host->cur_cs] != handle, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
-    SPI_CHECK(uxQueueMessagesWaiting(handle->ret_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE);
-
-    //return
-    int spics_io_num = handle->cfg.spics_io_num;
-    if (spics_io_num >= 0) spicommon_cs_free_io(spics_io_num);
-
-    //Kill queues
-    vQueueDelete(handle->trans_queue);
-    vQueueDelete(handle->ret_queue);
-    spi_bus_lock_unregister_dev(handle->dev_lock);
-
-    assert(handle->host->device[handle->id] == handle);
-    handle->host->device[handle->id] = NULL;
-    free(handle);
-    return ESP_OK;
-}
-
-int spi_cal_clock(int fapb, int hz, int duty_cycle, uint32_t *reg_o)
-{
-    return spi_ll_master_cal_clock(fapb, hz, duty_cycle, reg_o);
-}
-
-int spi_get_actual_clock(int fapb, int hz, int duty_cycle)
-{
-    return spi_hal_master_cal_clock(fapb, hz, duty_cycle);
-}
-
-// Setup the device-specified configuration registers. Called every time a new
-// transaction is to be sent, but only apply new configurations when the device
-// changes.
-static SPI_MASTER_ISR_ATTR void spi_setup_device(spi_device_t *dev)
-{
-    spi_bus_lock_dev_handle_t dev_lock = dev->dev_lock;
-
-    if (!spi_bus_lock_touch(dev_lock)) {
-        //if the configuration is already applied, skip the following.
-        return;
-    }
-    spi_hal_context_t *hal = &dev->host->hal;
-    spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
-    spi_hal_setup_device(hal, hal_dev);
-}
-
-static SPI_MASTER_ISR_ATTR spi_device_t *get_acquiring_dev(spi_host_t *host)
-{
-    spi_bus_lock_dev_handle_t dev_lock = spi_bus_lock_get_acquiring_dev(host->bus_attr->lock);
-    if (!dev_lock) {
-        ESP_LOGW(SPI_TAG, "NOBODY HERE");
-        return NULL;
-    }
-
-    return host->device[spi_bus_lock_get_dev_id(dev_lock)];
-}
-
-// Debug only
-// NOTE if the acquiring is not fully completed, `spi_bus_lock_get_acquiring_dev`
-// may return a spurious `NULL`, causing this function to return a spurious `false`.
-static inline SPI_MASTER_ISR_ATTR bool spi_bus_device_is_polling(spi_device_t *dev)
-{
-    spi_device_t *toto = NULL;
-    spi_bus_lock_dev_handle_t dev_lock = spi_bus_lock_get_acquiring_dev(dev->host->bus_attr->lock);
-    if (dev_lock) toto = dev->host->device[spi_bus_lock_get_dev_id(dev_lock)];
-    return toto == dev && dev->host->polling;
-
-    //return get_acquiring_dev(dev->host) == dev && dev->host->polling;
-}
-
-/*-----------------------------------------------------------------------------
-    Working Functions
------------------------------------------------------------------------------*/
-
-// The interrupt may get invoked by the bus lock.
-static void SPI_MASTER_ISR_ATTR spi_bus_intr_enable(void *host)
-{
-    esp_intr_enable(((spi_host_t*)host)->intr);
-}
-
-// The interrupt is always disabled by the ISR itself, not exposed
-static void SPI_MASTER_ISR_ATTR spi_bus_intr_disable(void *host)
-{
-    esp_intr_disable(((spi_host_t*)host)->intr);
-}
-
-// The function is called to send a new transaction, in ISR or in the task.
-// Setup the transaction-specified registers and linked-list used by the DMA (or FIFO if DMA is not used)
-static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_t *trans_buf)
-{
-    spi_transaction_t *trans = NULL;
-    spi_host_t *host = dev->host;
-    spi_hal_context_t *hal = &(host->hal);
-    spi_hal_dev_config_t *hal_dev = &(dev->hal_dev);
-
-    trans = trans_buf->trans;
-    host->cur_cs = dev->id;
-
-    //Reconfigure according to device settings, the function only has effect when the dev_id is changed.
-    spi_setup_device(dev);
-
-    //set the transaction specific configuration each time before a transaction setup
-    spi_hal_trans_config_t hal_trans = {};
-    hal_trans.tx_bitlen = trans->length;
-    hal_trans.rx_bitlen = trans->rxlength;
-    hal_trans.rcv_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_rcv;
-    hal_trans.send_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_send;
-    hal_trans.cmd = trans->cmd;
-    hal_trans.addr = trans->addr;
-    //Set up QIO/DIO if needed
-    hal_trans.io_mode = (trans->flags & SPI_TRANS_MODE_DIO ?
-                        (trans->flags & SPI_TRANS_MODE_DIOQIO_ADDR ? SPI_LL_IO_MODE_DIO : SPI_LL_IO_MODE_DUAL) :
-                    (trans->flags & SPI_TRANS_MODE_QIO ?
-                        (trans->flags & SPI_TRANS_MODE_DIOQIO_ADDR ? SPI_LL_IO_MODE_QIO : SPI_LL_IO_MODE_QUAD) :
-                    SPI_LL_IO_MODE_NORMAL
-                    ));
-
-    if (trans->flags & SPI_TRANS_VARIABLE_CMD) {
-        hal_trans.cmd_bits = ((spi_transaction_ext_t *)trans)->command_bits;
-    } else {
-        hal_trans.cmd_bits = dev->cfg.command_bits;
-    }
-    if (trans->flags & SPI_TRANS_VARIABLE_ADDR) {
-        hal_trans.addr_bits = ((spi_transaction_ext_t *)trans)->address_bits;
-    } else {
-        hal_trans.addr_bits = dev->cfg.address_bits;
-    }
-    if (trans->flags & SPI_TRANS_VARIABLE_DUMMY) {
-        hal_trans.dummy_bits = ((spi_transaction_ext_t *)trans)->dummy_bits;
-    } else {
-        hal_trans.dummy_bits = dev->cfg.dummy_bits;
-    }
-
-    spi_hal_setup_trans(hal, hal_dev, &hal_trans);
-    spi_hal_prepare_data(hal, hal_dev, &hal_trans);
-
-    //Call pre-transmission callback, if any
-    if (dev->cfg.pre_cb) dev->cfg.pre_cb(trans);
-    //Kick off transfer
-    spi_hal_user_start(hal);
-}
-
-// The function is called when a transaction is done, in ISR or in the task.
-// Fetch the data from FIFO and call the ``post_cb``.
-static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host)
-{
-    spi_transaction_t *cur_trans = host->cur_trans_buf.trans;
-
-    spi_hal_fetch_result(&host->hal);
-    //Call post-transaction callback, if any
-    spi_device_t* dev = host->device[host->cur_cs];
-    if (dev->cfg.post_cb) dev->cfg.post_cb(cur_trans);
-
-    host->cur_cs = DEV_NUM_MAX;
-}
-
-// This is run in interrupt context.
-static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
-{
-    BaseType_t do_yield = pdFALSE;
-    spi_host_t *host = (spi_host_t *)arg;
-    const spi_bus_attr_t* bus_attr = host->bus_attr;
-
-    assert(spi_hal_usr_is_done(&host->hal));
-
-    /*
-     * Help to skip the handling of in-flight transaction, and disable of the interrupt.
-     * The esp_intr_enable will be called (b) after new BG request is queued (a) in the task;
-     * while esp_intr_disable should be called (c) if we check and found the sending queue is empty (d).
-     * If (c) is called after (d), then there is a risk that things happens in this sequence:
-     * (d) -> (a) -> (b) -> (c), and in this case the interrupt is disabled while there's pending BG request in the queue.
-     * To avoid this, interrupt is disabled here, and re-enabled later if required.
-     */
-    if (!spi_bus_lock_bg_entry(bus_attr->lock)) {
-        /*------------ deal with the in-flight transaction -----------------*/
-        assert(host->cur_cs != DEV_NUM_MAX);
-        //Okay, transaction is done.
-        const int cs = host->cur_cs;
-        //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
-        if (bus_attr->dma_enabled) {
-            //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
-            spicommon_dmaworkaround_idle(bus_attr->tx_dma_chan);
-        }
-
-        //cur_cs is changed to DEV_NUM_MAX here
-        spi_post_trans(host);
-        // spi_bus_lock_bg_pause(bus_attr->lock);
-        //Return transaction descriptor.
-        xQueueSendFromISR(host->device[cs]->ret_queue, &host->cur_trans_buf, &do_yield);
-#ifdef CONFIG_PM_ENABLE
-        //Release APB frequency lock
-        esp_pm_lock_release(bus_attr->pm_lock);
-#endif
-    }
-
-    /*------------ new transaction starts here ------------------*/
-    assert(host->cur_cs == DEV_NUM_MAX);
-
-    spi_bus_lock_handle_t lock = host->bus_attr->lock;
-    BaseType_t trans_found = pdFALSE;
-
-
-    // There should be remaining requests
-    BUS_LOCK_DEBUG_EXECUTE_CHECK(spi_bus_lock_bg_req_exist(lock));
-
-    do {
-        spi_bus_lock_dev_handle_t acq_dev_lock = spi_bus_lock_get_acquiring_dev(lock);
-        spi_bus_lock_dev_handle_t desired_dev = acq_dev_lock;
-        bool resume_task = false;
-        spi_device_t* device_to_send = NULL;
-
-        if (!acq_dev_lock) {
-            // This function may assign a new acquiring device, otherwise it will suggest a desired device with BG active
-            // We use either of them without further searching in the devices.
-            // If the return value is true, it means either there's no acquiring device, or the acquiring device's BG is active,
-            // We stay in the ISR to deal with those transactions of desired device, otherwise nothing will be done, check whether we need to resume some other tasks, or just quit the ISR
-            resume_task = spi_bus_lock_bg_check_dev_acq(lock, &desired_dev);
-        }
-
-        if (!resume_task) {
-            bool dev_has_req = spi_bus_lock_bg_check_dev_req(desired_dev);
-            if (dev_has_req) {
-                device_to_send = host->device[spi_bus_lock_get_dev_id(desired_dev)];
-                trans_found = xQueueReceiveFromISR(device_to_send->trans_queue, &host->cur_trans_buf, &do_yield);
-                if (!trans_found) {
-                    spi_bus_lock_bg_clear_req(desired_dev);
-                }
-            }
-        }
-
-        if (trans_found) {
-            spi_trans_priv_t *const cur_trans_buf = &host->cur_trans_buf;
-            if (bus_attr->dma_enabled && (cur_trans_buf->buffer_to_rcv || cur_trans_buf->buffer_to_send)) {
-                //mark channel as active, so that the DMA will not be reset by the slave
-                //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
-                spicommon_dmaworkaround_transfer_active(bus_attr->tx_dma_chan);
-            }
-            spi_new_trans(device_to_send, cur_trans_buf);
-        }
-        // Exit of the ISR, handle interrupt re-enable (if sending transaction), retry (if there's coming BG),
-        // or resume acquiring device task (if quit due to bus acquiring).
-    } while (!spi_bus_lock_bg_exit(lock, trans_found, &do_yield));
-
-    if (do_yield) portYIELD_FROM_ISR();
-}
-
-static SPI_MASTER_ISR_ATTR esp_err_t check_trans_valid(spi_device_handle_t handle, spi_transaction_t *trans_desc)
-{
-    SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
-    spi_host_t *host = handle->host;
-    const spi_bus_attr_t* bus_attr = host->bus_attr;
-    bool tx_enabled = (trans_desc->flags & SPI_TRANS_USE_TXDATA) || (trans_desc->tx_buffer);
-    bool rx_enabled = (trans_desc->flags & SPI_TRANS_USE_RXDATA) || (trans_desc->rx_buffer);
-    spi_transaction_ext_t *t_ext = (spi_transaction_ext_t *)trans_desc;
-    bool dummy_enabled = (((trans_desc->flags & SPI_TRANS_VARIABLE_DUMMY)? t_ext->dummy_bits: handle->cfg.dummy_bits) != 0);
-    bool extra_dummy_enabled = handle->hal_dev.timing_conf.timing_dummy;
-    bool is_half_duplex = ((handle->cfg.flags & SPI_DEVICE_HALFDUPLEX) != 0);
-
-    //check transmission length
-    SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_RXDATA)==0 || trans_desc->rxlength <= 32, "SPI_TRANS_USE_RXDATA only available for rxdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
-    SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_TXDATA)==0 || trans_desc->length <= 32, "SPI_TRANS_USE_TXDATA only available for txdata transfer <= 32 bits", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(trans_desc->length <= bus_attr->max_transfer_sz*8, "txdata transfer > host maximum", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(trans_desc->rxlength <= bus_attr->max_transfer_sz*8, "rxdata transfer > host maximum", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(is_half_duplex || trans_desc->rxlength <= trans_desc->length, "rx length > tx length in full duplex mode", ESP_ERR_INVALID_ARG);
-    //check working mode
-    SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "incompatible iface params", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && !is_half_duplex), "incompatible iface params", ESP_ERR_INVALID_ARG);
-#ifdef CONFIG_IDF_TARGET_ESP32
-    SPI_CHECK(!is_half_duplex || !bus_attr->dma_enabled || !rx_enabled || !tx_enabled, "SPI half duplex mode does not support using DMA with both MOSI and MISO phases.", ESP_ERR_INVALID_ARG );
-#elif CONFIG_IDF_TARGET_ESP32S3
-    SPI_CHECK(!is_half_duplex || !tx_enabled || !rx_enabled, "SPI half duplex mode is not supported when both MOSI and MISO phases are enabled.", ESP_ERR_INVALID_ARG);
-#endif
-    //MOSI phase is skipped only when both tx_buffer and SPI_TRANS_USE_TXDATA are not set.
-    SPI_CHECK(trans_desc->length != 0 || !tx_enabled, "trans tx_buffer should be NULL and SPI_TRANS_USE_TXDATA should be cleared to skip MOSI phase.", ESP_ERR_INVALID_ARG);
-    //MISO phase is skipped only when both rx_buffer and SPI_TRANS_USE_RXDATA are not set.
-    //If rxlength is set to 0 in full-duplex mode, it is automatically set to length
-    SPI_CHECK(!is_half_duplex || trans_desc->rxlength != 0 || !rx_enabled, "trans rx_buffer should be NULL and SPI_TRANS_USE_RXDATA should be cleared to skip MISO phase.", ESP_ERR_INVALID_ARG);
-    //In full-duplex mode, rxlength defaults to the same value as length, if not filled in.
-    // Setting rxlength to length is OK, even when rx_buffer is NULL
-    if (trans_desc->rxlength==0 && !is_half_duplex) {
-        trans_desc->rxlength=trans_desc->length;
-    }
-    //Dummy phase is not available when both data out and in are enabled, regardless of FD or HD mode.
-    SPI_CHECK(!tx_enabled || !rx_enabled || !dummy_enabled || !extra_dummy_enabled, "Dummy phase is not available when both data out and in are enabled", ESP_ERR_INVALID_ARG);
-
-    return ESP_OK;
-}
-
-static SPI_MASTER_ISR_ATTR void uninstall_priv_desc(spi_trans_priv_t* trans_buf)
-{
-    spi_transaction_t *trans_desc = trans_buf->trans;
-    if ((void *)trans_buf->buffer_to_send != &trans_desc->tx_data[0] &&
-        trans_buf->buffer_to_send != trans_desc->tx_buffer) {
-        free((void *)trans_buf->buffer_to_send); //force free, ignore const
-    }
-    // copy data from the temporary DMA-capable buffer back to the original buffer and free the temporary one.
-    if ((void *)trans_buf->buffer_to_rcv != &trans_desc->rx_data[0] &&
-        trans_buf->buffer_to_rcv != trans_desc->rx_buffer) { // NOLINT(clang-analyzer-unix.Malloc)
-        if (trans_desc->flags & SPI_TRANS_USE_RXDATA) {
-            memcpy((uint8_t *) & trans_desc->rx_data[0], trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
-        } else {
-            memcpy(trans_desc->rx_buffer, trans_buf->buffer_to_rcv, (trans_desc->rxlength + 7) / 8);
-        }
-        free(trans_buf->buffer_to_rcv);
-    }
-}
-
-static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_transaction_t *trans_desc, spi_trans_priv_t* new_desc, bool isdma)
-{
-    *new_desc = (spi_trans_priv_t) { .trans = trans_desc, };
-
-    // rx memory assign
-    uint32_t* rcv_ptr;
-    if ( trans_desc->flags & SPI_TRANS_USE_RXDATA ) {
-        rcv_ptr = (uint32_t *)&trans_desc->rx_data[0];
-    } else {
-        //if neither RXDATA nor rx_buffer is used, buffer_to_rcv is assigned NULL
-        rcv_ptr = trans_desc->rx_buffer;
-    }
-    if (rcv_ptr && isdma && (!esp_ptr_dma_capable(rcv_ptr) || ((int)rcv_ptr % 4 != 0))) {
-        //if the rxbuf in the desc is not DMA-capable, malloc a new one. The rx buffer length needs to be a multiple of 32 bits to avoid heap corruption.
-        ESP_LOGD(SPI_TAG, "Allocate RX buffer for DMA" );
-        rcv_ptr = heap_caps_malloc((trans_desc->rxlength + 31) / 8, MALLOC_CAP_DMA);
-        if (rcv_ptr == NULL) goto clean_up;
-    }
-    new_desc->buffer_to_rcv = rcv_ptr;
-
-    // tx memory assign
-    const uint32_t *send_ptr;
-    if ( trans_desc->flags & SPI_TRANS_USE_TXDATA ) {
-        send_ptr = (uint32_t *)&trans_desc->tx_data[0];
-    } else {
-        //if neither TXDATA nor tx_buffer is used, tx data is assigned NULL
-        send_ptr = trans_desc->tx_buffer ;
-    }
-    if (send_ptr && isdma && !esp_ptr_dma_capable( send_ptr )) {
-        //if the txbuf in the desc is not DMA-capable, malloc a new one
-        ESP_LOGD(SPI_TAG, "Allocate TX buffer for DMA" );
-        uint32_t *temp = heap_caps_malloc((trans_desc->length + 7) / 8, MALLOC_CAP_DMA);
-        if (temp == NULL) goto clean_up;
-
-        memcpy( temp, send_ptr, (trans_desc->length + 7) / 8 );
-        send_ptr = temp;
-    }
-    new_desc->buffer_to_send = send_ptr;
-
-    return ESP_OK;
-
-clean_up:
-    uninstall_priv_desc(new_desc);
-    return ESP_ERR_NO_MEM;
-}
-
-esp_err_t SPI_MASTER_ATTR spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
-{
-    esp_err_t ret = check_trans_valid(handle, trans_desc);
-    if (ret != ESP_OK) return ret;
-
-    spi_host_t *host = handle->host;
-
-    SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );
-
-    spi_trans_priv_t trans_buf;
-    ret = setup_priv_desc(trans_desc, &trans_buf, (host->bus_attr->dma_enabled));
-    if (ret != ESP_OK) return ret;
-
-#ifdef CONFIG_PM_ENABLE
-    esp_pm_lock_acquire(host->bus_attr->pm_lock);
-#endif
-    //Send to queue and invoke the ISR.
-
-    BaseType_t r = xQueueSend(handle->trans_queue, (void *)&trans_buf, ticks_to_wait);
-    if (!r) {
-        ret = ESP_ERR_TIMEOUT;
-#ifdef CONFIG_PM_ENABLE
-        //Release APB frequency lock
-        esp_pm_lock_release(host->bus_attr->pm_lock);
-#endif
-        goto clean_up;
-    }
-
-    // The ISR will be invoked at the correct time by the lock with `spi_bus_intr_enable`.
-    ret = spi_bus_lock_bg_request(handle->dev_lock);
-    if (ret != ESP_OK) {
-        goto clean_up;
-    }
-    return ESP_OK;
-
-clean_up:
-    uninstall_priv_desc(&trans_buf);
-    return ret;
-}
-
-esp_err_t SPI_MASTER_ATTR spi_device_get_trans_result(spi_device_handle_t handle, spi_transaction_t **trans_desc, TickType_t ticks_to_wait)
-{
-    BaseType_t r;
-    spi_trans_priv_t trans_buf;
-    SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
-
-    //use the interrupt, block until return
-    r=xQueueReceive(handle->ret_queue, (void*)&trans_buf, ticks_to_wait);
-    if (!r) {
-        // The memory occupied by the rx and tx DMA buffers is freed only when the transaction is received from the queue (i.e. has finished).
-        // On timeout, wait and retry.
-        // Every in-flight transaction request occupies internal memory as a DMA buffer, if one was needed.
-        return ESP_ERR_TIMEOUT;
-    }
-    //release temporary buffers
-    uninstall_priv_desc(&trans_buf);
-    (*trans_desc) = trans_buf.trans;
-
-    return ESP_OK;
-}
-
-//Porcelain to do one blocking transmission.
-esp_err_t SPI_MASTER_ATTR spi_device_transmit(spi_device_handle_t handle, spi_transaction_t *trans_desc)
-{
-    esp_err_t ret;
-    spi_transaction_t *ret_trans;
-    //ToDo: check if any spi transfers in flight
-    ret = spi_device_queue_trans(handle, trans_desc, portMAX_DELAY);
-    if (ret != ESP_OK) return ret;
-
-    ret = spi_device_get_trans_result(handle, &ret_trans, portMAX_DELAY);
-    if (ret != ESP_OK) return ret;
-
-    assert(ret_trans == trans_desc);
-    return ESP_OK;
-}
-
-esp_err_t SPI_MASTER_ISR_ATTR spi_device_acquire_bus(spi_device_t *device, TickType_t wait)
-{
-    spi_host_t *const host = device->host;
-    SPI_CHECK(wait==portMAX_DELAY, "acquiring the bus for a finite time is not supported yet.", ESP_ERR_INVALID_ARG);
-    SPI_CHECK(!spi_bus_device_is_polling(device), "Cannot acquire bus when a polling transaction is in progress.", ESP_ERR_INVALID_STATE );
-
-    esp_err_t ret = spi_bus_lock_acquire_start(device->dev_lock, wait);
-    if (ret != ESP_OK) {
-        return ret;
-    }
-    host->device_acquiring_lock = device;
-
-    ESP_LOGD(SPI_TAG, "device%d locked the bus", device->id);
-
-#ifdef CONFIG_PM_ENABLE
-    // though we don't suggest blocking the task before ``release_bus``, it is still allowed.
-    // this keeps the spi clock at 80MHz even if all tasks are blocked
-    esp_pm_lock_acquire(host->bus_attr->pm_lock);
-#endif
-    //configure the device ahead so that we don't need to do it again in the following transactions
-    spi_setup_device(host->device[device->id]);
-    //the DMA is also occupied by the device; all slave devices using DMA should wait until the bus is released.
-    if (host->bus_attr->dma_enabled) {
-        //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always the same
-        spicommon_dmaworkaround_transfer_active(host->bus_attr->tx_dma_chan);
-    }
-    return ESP_OK;
-}
-
-// This function restores the configurations required in the non-polling mode
-void SPI_MASTER_ISR_ATTR spi_device_release_bus(spi_device_t *dev)
-{
-    spi_host_t *host = dev->host;
-
-    if (spi_bus_device_is_polling(dev)){
-        ESP_EARLY_LOGE(SPI_TAG, "Cannot release bus when a polling transaction is in progress.");
-        assert(0);
-    }
-
-    if (host->bus_attr->dma_enabled) {
-        //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always the same
-        spicommon_dmaworkaround_idle(host->bus_attr->tx_dma_chan);
-    }
-    //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
-
-    //allow clock to be lower than 80MHz when all tasks blocked
-#ifdef CONFIG_PM_ENABLE
-    //Release APB frequency lock
-    esp_pm_lock_release(host->bus_attr->pm_lock);
-#endif
-    ESP_LOGD(SPI_TAG, "device%d release bus", dev->id);
-
-    host->device_acquiring_lock = NULL;
-    esp_err_t ret = spi_bus_lock_acquire_end(dev->dev_lock);
-    assert(ret == ESP_OK);
-}
-
-esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_start(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
-{
-    esp_err_t ret;
-    SPI_CHECK(ticks_to_wait == portMAX_DELAY, "currently timeout is not available for polling transactions", ESP_ERR_INVALID_ARG);
-    ret = check_trans_valid(handle, trans_desc);
-    if (ret!=ESP_OK) return ret;
-    SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot send polling transaction while the previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE );
-
-    /* If device_acquiring_lock is set to handle, it means that the user has already
-     * acquired the bus thanks to the function `spi_device_acquire_bus()`.
-     * In that case, we don't need to take the lock again. */
-    spi_host_t *host = handle->host;
-try_again:
-    if (host->device_acquiring_lock != handle) {
-        ret = spi_bus_lock_acquire_start(handle->dev_lock, ticks_to_wait);
-    } else {
-        ESP_LOGW(SPI_TAG, "ALREADY ACQUIRED %d", handle->id);
-        ret = spi_bus_lock_wait_bg_done(handle->dev_lock, ticks_to_wait);
-    }
-    if (ret != ESP_OK) return ret;
-
-    // retry if we did not actually end up as the acquiring device
-    if (handle != get_acquiring_dev(host)) {
-        ESP_LOGW(SPI_TAG, "WE THOUGHT WE ACQUIRED THE BUS %d %p", handle->id, host);
-        goto try_again;
-    }
-
-    ret = setup_priv_desc(trans_desc, &host->cur_trans_buf, (host->bus_attr->dma_enabled));
-    if (ret!=ESP_OK) return ret;
-
-    //Polling, no interrupt is used.
-    host->polling = true;
-
-    ESP_LOGV(SPI_TAG, "polling trans");
-    spi_new_trans(handle, &host->cur_trans_buf);
-
-    return ESP_OK;
-}
-
-esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_end(spi_device_handle_t handle, TickType_t ticks_to_wait)
-{
-    SPI_CHECK(handle != NULL, "invalid dev handle", ESP_ERR_INVALID_ARG);
-    spi_host_t *host = handle->host;
-
-    assert(host->cur_cs == handle->id);
-    if (handle != get_acquiring_dev(host)) {
-        spi_device_handle_t toto = get_acquiring_dev(host);
-        ESP_LOGW(SPI_TAG, "SPIDEVICEHANDLE id %d (%d), host %p (%p)", handle->id, toto ? toto->id : -1, handle->host, toto ? toto->host : NULL);
-    }
-    assert(handle == get_acquiring_dev(host));
-
-    TickType_t start = xTaskGetTickCount();
-    while (!spi_hal_usr_is_done(&host->hal)) {
-        TickType_t end = xTaskGetTickCount();
-        if (end - start > ticks_to_wait) {
-            return ESP_ERR_TIMEOUT;
-        }
-    }
-
-    ESP_LOGV(SPI_TAG, "polling trans done");
-    //deal with the in-flight transaction
-    spi_post_trans(host);
-    //release temporary buffers
-    uninstall_priv_desc(&host->cur_trans_buf);
-
-    host->polling = false;
-    if (host->device_acquiring_lock != handle) {
-        assert(host->device_acquiring_lock == NULL);
-        spi_bus_lock_acquire_end(handle->dev_lock);
-    }
-
-    return ESP_OK;
-}
-
-esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_transmit(spi_device_handle_t handle, spi_transaction_t* trans_desc)
-{
-    esp_err_t ret;
-    // debug instrumentation: count concurrent polling transactions per device id
-    static struct {
-        int count;
-        SemaphoreHandle_t mutex;
-    } ctx[4];
-    int id = handle->id;
-    ctx[id].count++;
-    //if (!ctx[id].mutex) ctx[id].mutex = xSemaphoreCreateMutex();
-    //xSemaphoreTake(ctx[id].mutex, portMAX_DELAY);
-    if (handle->host->mutex) xSemaphoreTake(handle->host->mutex, portMAX_DELAY);
-    if (ctx[id].count > 1) {
-        ESP_LOGW(SPI_TAG, "COUNTER of %d is %d %p", id, ctx[id].count, handle->host);
-    }
-
-    ret = spi_device_polling_start(handle, trans_desc, portMAX_DELAY);
-    if (ctx[id].count > 1) {
-        ESP_LOGW(SPI_TAG, "COUNTER of %d REALLY %d %p", id, ctx[id].count, handle->host);
-    }
-    //if (ret != ESP_OK) return ret;
-    if (ret != ESP_OK) {
-        ctx[id].count--;
-        ESP_LOGE(SPI_TAG, "CAN'T START SPI POLLING %d", id);
-        //xSemaphoreGive(ctx[id].mutex);
-        if (handle->host->mutex) xSemaphoreGive(handle->host->mutex);
-        return ret;
-    }
-
-    ret = spi_device_polling_end(handle, portMAX_DELAY);
-    ctx[id].count--;
-    //xSemaphoreGive(ctx[id].mutex);
-    if (handle->host->mutex) xSemaphoreGive(handle->host->mutex);
-    return ret;
-    //return spi_device_polling_end(handle, portMAX_DELAY);
-}
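
For reference, a polling transaction through the driver removed above is started by spi_device_polling_start(), busy-waited in spi_device_polling_end(), and normally driven through spi_device_polling_transmit(). A minimal caller sketch, assuming a device handle from spi_bus_add_device() (send_command and its payload are illustrative, not part of the driver):

    #include "driver/spi_master.h"

    // 'spi' comes from spi_bus_add_device(); 'cmd' is one byte to clock out.
    static esp_err_t send_command(spi_device_handle_t spi, uint8_t cmd)
    {
        spi_transaction_t t = {
            .flags = SPI_TRANS_USE_TXDATA,  // payload kept inline in tx_data[]
            .length = 8,                    // transaction length is in bits
            .tx_data = { cmd },
        };
        // Blocks (polling, no interrupt) until the transaction completes;
        // internally pairs spi_device_polling_start()/spi_device_polling_end().
        return spi_device_polling_transmit(spi, &t);
    }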

+ 0 - 49
components/heap/CMakeLists.txt

@@ -1,49 +0,0 @@
-set(srcs
-    "heap_caps.c"
-    "heap_caps_init.c"
-    "multi_heap.c"
-    "heap_tlsf.c")
-
-if(NOT CONFIG_HEAP_POISONING_DISABLED)
-    list(APPEND srcs "multi_heap_poisoning.c")
-endif()
-
-if(CONFIG_HEAP_TASK_TRACKING)
-    list(APPEND srcs "heap_task_info.c")
-endif()
-
-if(CONFIG_HEAP_TRACING_STANDALONE)
-    list(APPEND srcs "heap_trace_standalone.c")
-    set_source_files_properties(heap_trace_standalone.c
-        PROPERTIES COMPILE_FLAGS
-        -Wno-frame-address)
-endif()
-
-idf_component_register(SRCS "${srcs}"
-                    INCLUDE_DIRS include
-                    LDFRAGMENTS linker.lf
-                    PRIV_REQUIRES soc)
-
-if(CONFIG_HEAP_TRACING)
-    set(WRAP_FUNCTIONS
-        calloc
-        malloc
-        free
-        realloc
-        heap_caps_malloc
-        heap_caps_free
-        heap_caps_realloc
-        heap_caps_malloc_default
-        heap_caps_realloc_default)
-
-    foreach(wrap ${WRAP_FUNCTIONS})
-        target_link_libraries(${COMPONENT_LIB} INTERFACE "-Wl,--wrap=${wrap}")
-    endforeach()
-endif()
-
-if(NOT CMAKE_BUILD_EARLY_EXPANSION)
-    idf_build_get_property(build_components BUILD_COMPONENTS)
-    if(freertos IN_LIST build_components)
-        target_compile_options(${COMPONENT_TARGET} PRIVATE "-DMULTI_HEAP_FREERTOS")
-    endif()
-endif()
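
The WRAP_FUNCTIONS list relies on the GNU linker's --wrap feature: with -Wl,--wrap=malloc, calls to malloc() are redirected to __wrap_malloc(), while the original allocator stays reachable as __real_malloc(). A minimal wrapper sketch (the recording step is a placeholder; a real hook must not allocate, or it would recurse into the wrapper):

    #include <stddef.h>

    void *__real_malloc(size_t size);   // bound by the linker to the unwrapped malloc

    void *__wrap_malloc(size_t size)
    {
        void *p = __real_malloc(size);  // perform the actual allocation
        // record (size, p) here; the heap tracing code stores a trace record
        return p;
    }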

+ 0 - 74
components/heap/Kconfig

@@ -1,74 +0,0 @@
-menu "Heap memory debugging"
-
-    choice HEAP_CORRUPTION_DETECTION
-        prompt "Heap corruption detection"
-        default HEAP_POISONING_DISABLED
-        help
-            Enable heap poisoning features to detect heap corruption caused by out-of-bounds access to heap memory.
-
-            See the "Heap Memory Debugging" page of the IDF documentation
-            for a description of each level of heap corruption detection.
-
-        config HEAP_POISONING_DISABLED
-            bool "Basic (no poisoning)"
-        config HEAP_POISONING_LIGHT
-            bool "Light impact"
-        config HEAP_POISONING_COMPREHENSIVE
-            bool "Comprehensive"
-    endchoice
-
-    choice HEAP_TRACING_DEST
-        bool "Heap tracing"
-        default HEAP_TRACING_OFF
-        help
-            Enables the heap tracing API defined in esp_heap_trace.h.
-
-            This function causes a moderate increase in IRAM code size and a minor increase in heap function
-            (malloc/free/realloc) CPU overhead, even when the tracing feature is not used.
-            So it's best to keep it disabled unless tracing is being used.
-
-        config HEAP_TRACING_OFF
-            bool "Disabled"
-        config HEAP_TRACING_STANDALONE
-            bool "Standalone"
-            select HEAP_TRACING
-        config HEAP_TRACING_TOHOST
-            bool "Host-based"
-            select HEAP_TRACING
-    endchoice
-
-    config HEAP_TRACING
-        bool
-        default n
-        help
-            Enables/disables heap tracing API.
-
-    config HEAP_TRACING_STACK_DEPTH
-        int "Heap tracing stack depth"
-        range 0 0 if IDF_TARGET_ARCH_RISCV # Disabled for RISC-V due to `__builtin_return_address` limitation
-        default 0 if IDF_TARGET_ARCH_RISCV
-        range 0 10
-        default 2
-        depends on HEAP_TRACING
-        help
-            Number of stack frames to save when tracing heap operation callers.
-
-            More stack frames uses more memory in the heap trace buffer (and slows down allocation), but
-            can provide useful information.
-
-    config HEAP_TASK_TRACKING
-        bool "Enable heap task tracking"
-        depends on !HEAP_POISONING_DISABLED
-        help
-            Enables tracking the task responsible for each heap allocation.
-
-            This function depends on heap poisoning being enabled and adds four more bytes of overhead for each block
-            allocated.
-
-    config HEAP_ABORT_WHEN_ALLOCATION_FAILS
-        bool "Abort if memory allocation fails"
-        default n
-        help
-            When enabled, if a memory allocation operation fails it will cause a system abort.
-
-endmenu
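
With HEAP_TRACING_STANDALONE selected, the API declared in esp_heap_trace.h (also deleted in this commit) is driven roughly as below; NUM_RECORDS and run_suspect_code() are placeholders chosen by the application:

    #include "esp_heap_trace.h"

    #define NUM_RECORDS 100
    // The record buffer must stay allocated for the whole tracing session.
    static heap_trace_record_t trace_records[NUM_RECORDS];

    void trace_leaks(void)
    {
        ESP_ERROR_CHECK(heap_trace_init_standalone(trace_records, NUM_RECORDS));
        ESP_ERROR_CHECK(heap_trace_start(HEAP_TRACE_LEAKS)); // record un-freed allocations

        run_suspect_code(); // placeholder for the code under investigation

        ESP_ERROR_CHECK(heap_trace_stop());
        heap_trace_dump(); // print any allocations that were never freed
    }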

+ 0 - 32
components/heap/component.mk

@@ -1,32 +0,0 @@
-#
-# Component Makefile
-#
-
-COMPONENT_OBJS := heap_caps_init.o heap_caps.o multi_heap.o heap_tlsf.o
-
-ifndef CONFIG_HEAP_POISONING_DISABLED
-COMPONENT_OBJS += multi_heap_poisoning.o
-
-ifdef CONFIG_HEAP_TASK_TRACKING
-COMPONENT_OBJS += heap_task_info.o
-endif
-endif
-
-ifdef CONFIG_HEAP_TRACING_STANDALONE
-
-COMPONENT_OBJS += heap_trace_standalone.o
-
-endif
-
-ifdef CONFIG_HEAP_TRACING
-
-WRAP_FUNCTIONS = calloc malloc free realloc heap_caps_malloc heap_caps_free heap_caps_realloc heap_caps_malloc_default heap_caps_realloc_default
-WRAP_ARGUMENT := -Wl,--wrap=
-
-COMPONENT_ADD_LDFLAGS = -l$(COMPONENT_NAME) $(addprefix $(WRAP_ARGUMENT),$(WRAP_FUNCTIONS))
-
-endif
-
-COMPONENT_ADD_LDFRAGMENTS += linker.lf
-
-CFLAGS += -DMULTI_HEAP_FREERTOS

+ 0 - 609
components/heap/heap_caps.c

@@ -1,609 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include <stdbool.h>
-#include <string.h>
-#include <assert.h>
-#include <stdio.h>
-#include <sys/param.h>
-#include "esp_attr.h"
-#include "esp_heap_caps.h"
-#include "multi_heap.h"
-#include "esp_log.h"
-#include "heap_private.h"
-#include "esp_system.h"
-
-/*
-This file, combined with a region allocator that supports multiple heaps, solves the problem that the ESP32 has RAM
-that's slightly heterogeneous. Some RAM can be byte-accessed, some allows only 32-bit accesses, some can hold executable code,
-some can be remapped by the MMU to be accessible only by a certain PID, etc. In order to allow the most flexible memory
-allocation possible, this code makes it possible to request memory that has certain capabilities. The code will then use
-its knowledge of how the memory is configured along with a priority scheme to allocate that memory in the most sane way
-possible. This should optimize the amount of RAM accessible to the code without hardwiring addresses.
-*/
-
-static esp_alloc_failed_hook_t alloc_failed_callback;
-
-/*
-  This takes a memory chunk in a region that can be addressed as both DRAM as well as IRAM. It will convert it to
-  IRAM in such a way that it can be later freed. It assumes both the address as well as the length to be word-aligned.
-  It returns a region that's 1 word smaller than the region given because it stores the original Dram address there.
-*/
-IRAM_ATTR static void *dram_alloc_to_iram_addr(void *addr, size_t len)
-{
-    uintptr_t dstart = (uintptr_t)addr; //First word
-    uintptr_t dend = dstart + len - 4; //Last word
-    assert(esp_ptr_in_diram_dram((void *)dstart));
-    assert(esp_ptr_in_diram_dram((void *)dend));
-    assert((dstart & 3) == 0);
-    assert((dend & 3) == 0);
-#if SOC_DIRAM_INVERTED // We want the word before the result to hold the DRAM address
-    uint32_t *iptr = esp_ptr_diram_dram_to_iram((void *)dend);
-#else
-    uint32_t *iptr = esp_ptr_diram_dram_to_iram((void *)dstart);
-#endif
-    *iptr = dstart;
-    return iptr + 1;
-}
-
-
-static void heap_caps_alloc_failed(size_t requested_size, uint32_t caps, const char *function_name)
-{
-    if (alloc_failed_callback) {
-        alloc_failed_callback(requested_size, caps, function_name);
-    }
-
-    #ifdef CONFIG_HEAP_ABORT_WHEN_ALLOCATION_FAILS
-    esp_system_abort("Memory allocation failed");
-    #endif
-}
-
-esp_err_t heap_caps_register_failed_alloc_callback(esp_alloc_failed_hook_t callback)
-{
-    if (callback == NULL) {
-        return ESP_ERR_INVALID_ARG;
-    }
-
-    alloc_failed_callback = callback;
-
-    return ESP_OK;
-}
-
-bool heap_caps_match(const heap_t *heap, uint32_t caps)
-{
-    return heap->heap != NULL && ((get_all_caps(heap) & caps) == caps);
-}
-
-/*
-Routine to allocate a bit of memory with certain capabilities. caps is a bitfield of MALLOC_CAP_* bits.
-*/
-IRAM_ATTR void *heap_caps_malloc( size_t size, uint32_t caps )
-{
-    void *ret = NULL;
-
-    if (size > HEAP_SIZE_MAX) {
-        // Avoids int overflow when adding small numbers to size, or
-        // calculating 'end' from start+size, by limiting 'size' to the possible range
-        heap_caps_alloc_failed(size, caps, __func__);
-
-        return NULL;
-    }
-
-    if (caps & MALLOC_CAP_EXEC) {
-        //MALLOC_CAP_EXEC forces an alloc from IRAM. There is a region which has both this and the following
-        //caps, but the following caps are not possible for IRAM. Thus, the combination is impossible and we return
-        //NULL directly, even though our heap capabilities (based on soc_memory_tags & soc_memory_regions) would
-        //indicate there is a tag for this.
-        if ((caps & MALLOC_CAP_8BIT) || (caps & MALLOC_CAP_DMA)) {
-            heap_caps_alloc_failed(size, caps, __func__);
-
-            return NULL;
-        }
-        caps |= MALLOC_CAP_32BIT; // IRAM is 32-bit accessible RAM
-    }
-
-    if (caps & MALLOC_CAP_32BIT) {
-        /* 32-bit accessible RAM should be allocated in 4-byte-aligned sizes
-         * (Future versions of ESP-IDF should possibly fail if an invalid size is requested)
-         */
-        size = (size + 3) & (~3); // int overflow checked above
-    }
-
-    for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
-        //Iterate over heaps and check capabilities at this priority
-        heap_t *heap;
-        SLIST_FOREACH(heap, &registered_heaps, next) {
-            if (heap->heap == NULL) {
-                continue;
-            }
-            if ((heap->caps[prio] & caps) != 0) {
-                //Heap has at least one of the caps requested. If caps has other bits set that this prio
-                //doesn't cover, see if they're available in other prios.
-                if ((get_all_caps(heap) & caps) == caps) {
-                    //This heap can satisfy all the requested capabilities. See if we can grab some memory using it.
-                    if ((caps & MALLOC_CAP_EXEC) && esp_ptr_in_diram_dram((void *)heap->start)) {
-                        //This is special, insofar that what we're going to get back is a DRAM address. If so,
-                        //we need to 'invert' it (lowest address in DRAM == highest address in IRAM and vice-versa) and
-                        //add a pointer to the DRAM equivalent before the address we're going to return.
-                        ret = multi_heap_malloc(heap->heap, size + 4);  // int overflow checked above
-
-                        if (ret != NULL) {
-                            return dram_alloc_to_iram_addr(ret, size + 4);  // int overflow checked above
-                        }
-                    } else {
-                        //Just try to alloc, nothing special.
-                        ret = multi_heap_malloc(heap->heap, size);
-                        if (ret != NULL) {
-                            return ret;
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    heap_caps_alloc_failed(size, caps, __func__);
-
-    //Nothing usable found.
-    return NULL;
-}
-
-
-#define MALLOC_DISABLE_EXTERNAL_ALLOCS -1
-//Dual-use: -1 (=MALLOC_DISABLE_EXTERNAL_ALLOCS) disables allocations in external memory, >=0 sets the limit for allocations preferring internal memory.
-static int malloc_alwaysinternal_limit=MALLOC_DISABLE_EXTERNAL_ALLOCS;
-
-void heap_caps_malloc_extmem_enable(size_t limit)
-{
-    malloc_alwaysinternal_limit=limit;
-}
-
-/*
- Default memory allocation implementation. Should return standard 8-bit memory. malloc() essentially resolves to this function.
-*/
-IRAM_ATTR void *heap_caps_malloc_default( size_t size )
-{
-    if (malloc_alwaysinternal_limit==MALLOC_DISABLE_EXTERNAL_ALLOCS) {
-        return heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL);
-    } else {
-        void *r;
-        if (size <= (size_t)malloc_alwaysinternal_limit) {
-            r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
-        } else {
-            r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM );
-        }
-        if (r==NULL) {
-            //try again while being less picky
-            r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT );
-        }
-        return r;
-    }
-}
-
-/*
- Same for realloc()
- Note: keep the logic in here the same as in heap_caps_malloc_default (or merge the two as soon as this gets more complex...)
- */
-IRAM_ATTR void *heap_caps_realloc_default( void *ptr, size_t size )
-{
-    if (malloc_alwaysinternal_limit==MALLOC_DISABLE_EXTERNAL_ALLOCS) {
-        return heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
-    } else {
-        void *r;
-        if (size <= (size_t)malloc_alwaysinternal_limit) {
-            r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
-        } else {
-            r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM );
-        }
-        if (r==NULL && size>0) {
-            //We needed to allocate memory, but we didn't. Try again while being less picky.
-            r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT );
-        }
-        return r;
-    }
-}
-
-/*
- Memory allocation, trying the given capability sets in decreasing order of preference.
- */
-IRAM_ATTR void *heap_caps_malloc_prefer( size_t size, size_t num, ... )
-{
-    va_list argp;
-    va_start( argp, num );
-    void *r = NULL;
-    while (num--) {
-        uint32_t caps = va_arg( argp, uint32_t );
-        r = heap_caps_malloc( size, caps );
-        if (r != NULL) {
-            break;
-        }
-    }
-    va_end( argp );
-    return r;
-}
-
-/*
- Memory reallocation, trying the given capability sets in decreasing order of preference.
- */
-IRAM_ATTR void *heap_caps_realloc_prefer( void *ptr, size_t size, size_t num, ... )
-{
-    va_list argp;
-    va_start( argp, num );
-    void *r = NULL;
-    while (num--) {
-        uint32_t caps = va_arg( argp, uint32_t );
-        r = heap_caps_realloc( ptr, size, caps );
-        if (r != NULL || size == 0) {
-            break;
-        }
-    }
-    va_end( argp );
-    return r;
-}
-
-/*
- Zero-initialised memory allocation (calloc), trying the given capability sets in decreasing order of preference.
- */
-IRAM_ATTR void *heap_caps_calloc_prefer( size_t n, size_t size, size_t num, ... )
-{
-    va_list argp;
-    va_start( argp, num );
-    void *r = NULL;
-    while (num--) {
-        uint32_t caps = va_arg( argp, uint32_t );
-        r = heap_caps_calloc( n, size, caps );
-        if (r != NULL) break;
-    }
-    va_end( argp );
-    return r;
-}
-
-/* Find the heap which belongs to ptr, or return NULL if it's
-   not in any heap.
-
-   (This confirms that ptr is inside the heap's region; it doesn't confirm whether 'ptr'
-   is an allocated block or some other random address inside the heap.)
-*/
-IRAM_ATTR static heap_t *find_containing_heap(void *ptr )
-{
-    intptr_t p = (intptr_t)ptr;
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        if (heap->heap != NULL && p >= heap->start && p < heap->end) {
-            return heap;
-        }
-    }
-    return NULL;
-}
-
-IRAM_ATTR void heap_caps_free( void *ptr)
-{
-    if (ptr == NULL) {
-        return;
-    }
-
-    if (esp_ptr_in_diram_iram(ptr)) {
-        //Memory allocated here is actually allocated in the DRAM alias region and
-        //cannot be de-allocated as usual. dram_alloc_to_iram_addr stores a pointer to
-        //the equivalent DRAM address, though; free that.
-        uint32_t *dramAddrPtr = (uint32_t *)ptr;
-        ptr = (void *)dramAddrPtr[-1];
-    }
-
-    heap_t *heap = find_containing_heap(ptr);
-    assert(heap != NULL && "free() target pointer is outside heap areas");
-    multi_heap_free(heap->heap, ptr);
-}
-
-IRAM_ATTR void *heap_caps_realloc( void *ptr, size_t size, uint32_t caps)
-{
-    bool ptr_in_diram_case = false;
-    heap_t *heap = NULL;
-    void *dram_ptr = NULL;
-
-    if (ptr == NULL) {
-        return heap_caps_malloc(size, caps);
-    }
-
-    if (size == 0) {
-        heap_caps_free(ptr);
-        return NULL;
-    }
-
-    if (size > HEAP_SIZE_MAX) {
-        heap_caps_alloc_failed(size, caps, __func__);
-
-        return NULL;
-    }
-
-    //The pointer to memory may be aliased; we need to
-    //recover the corresponding address before managing a new allocation:
-    if(esp_ptr_in_diram_iram((void *)ptr)) {
-        uint32_t *dram_addr = (uint32_t *)ptr;
-        dram_ptr  = (void *)dram_addr[-1];
-
-        heap = find_containing_heap(dram_ptr);
-        assert(heap != NULL && "realloc() pointer is outside heap areas");
-
-        //with pointers that reside on diram space, we avoid using
-        //the realloc implementation due to address translation issues,
-        //instead force a malloc/copy/free
-        ptr_in_diram_case = true;
-
-    } else {
-        heap = find_containing_heap(ptr);
-        assert(heap != NULL && "realloc() pointer is outside heap areas");
-    }
-
-    // are the existing heap's capabilities compatible with the
-    // requested ones?
-    bool compatible_caps = (caps & get_all_caps(heap)) == caps;
-
-    if (compatible_caps && !ptr_in_diram_case) {
-        // try to reallocate this memory within the same heap
-        // (which will resize the block if it can)
-        void *r = multi_heap_realloc(heap->heap, ptr, size);
-        if (r != NULL) {
-            return r;
-        }
-    }
-
-    // if we couldn't do that, try to see if we can reallocate
-    // in a different heap with requested capabilities.
-    void *new_p = heap_caps_malloc(size, caps);
-    if (new_p != NULL) {
-        size_t old_size = 0;
-
-        //If we're dealing with an aliased ptr, information regarding its containing
-        //heap can only be obtained with the translated address.
-        if(ptr_in_diram_case) {
-            old_size = multi_heap_get_allocated_size(heap->heap, dram_ptr);
-        } else {
-            old_size = multi_heap_get_allocated_size(heap->heap, ptr);
-        }
-
-        assert(old_size > 0);
-        memcpy(new_p, ptr, MIN(size, old_size));
-        heap_caps_free(ptr);
-        return new_p;
-    }
-
-    heap_caps_alloc_failed(size, caps, __func__);
-
-    return NULL;
-}
-
-IRAM_ATTR void *heap_caps_calloc( size_t n, size_t size, uint32_t caps)
-{
-    void *result;
-    size_t size_bytes;
-
-    if (__builtin_mul_overflow(n, size, &size_bytes)) {
-        return NULL;
-    }
-
-    result = heap_caps_malloc(size_bytes, caps);
-    if (result != NULL) {
-        bzero(result, size_bytes);
-    }
-    return result;
-}
-
-size_t heap_caps_get_total_size(uint32_t caps)
-{
-    size_t total_size = 0;
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        if (heap_caps_match(heap, caps)) {
-            total_size += (heap->end - heap->start);
-        }
-    }
-    return total_size;
-}
-
-size_t heap_caps_get_free_size( uint32_t caps )
-{
-    size_t ret = 0;
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        if (heap_caps_match(heap, caps)) {
-            ret += multi_heap_free_size(heap->heap);
-        }
-    }
-    return ret;
-}
-
-size_t heap_caps_get_minimum_free_size( uint32_t caps )
-{
-    size_t ret = 0;
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        if (heap_caps_match(heap, caps)) {
-            ret += multi_heap_minimum_free_size(heap->heap);
-        }
-    }
-    return ret;
-}
-
-size_t heap_caps_get_largest_free_block( uint32_t caps )
-{
-    multi_heap_info_t info;
-    heap_caps_get_info(&info, caps);
-    return info.largest_free_block;
-}
-
-void heap_caps_get_info( multi_heap_info_t *info, uint32_t caps )
-{
-    bzero(info, sizeof(multi_heap_info_t));
-
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        if (heap_caps_match(heap, caps)) {
-            multi_heap_info_t hinfo;
-            multi_heap_get_info(heap->heap, &hinfo);
-
-            info->total_free_bytes += hinfo.total_free_bytes;
-            info->total_allocated_bytes += hinfo.total_allocated_bytes;
-            info->largest_free_block = MAX(info->largest_free_block,
-                                           hinfo.largest_free_block);
-            info->minimum_free_bytes += hinfo.minimum_free_bytes;
-            info->allocated_blocks += hinfo.allocated_blocks;
-            info->free_blocks += hinfo.free_blocks;
-            info->total_blocks += hinfo.total_blocks;
-        }
-    }
-}
-
-void heap_caps_print_heap_info( uint32_t caps )
-{
-    multi_heap_info_t info;
-    printf("Heap summary for capabilities 0x%08X:\n", caps);
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        if (heap_caps_match(heap, caps)) {
-            multi_heap_get_info(heap->heap, &info);
-
-            printf("  At 0x%08x len %d free %d allocated %d min_free %d\n",
-                   heap->start, heap->end - heap->start, info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes);
-            printf("    largest_free_block %d alloc_blocks %d free_blocks %d total_blocks %d\n",
-                   info.largest_free_block, info.allocated_blocks,
-                   info.free_blocks, info.total_blocks);
-        }
-    }
-    printf("  Totals:\n");
-    heap_caps_get_info(&info, caps);
-
-    printf("    free %d allocated %d min_free %d largest_free_block %d\n", info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes, info.largest_free_block);
-}
-
-bool heap_caps_check_integrity(uint32_t caps, bool print_errors)
-{
-    bool all_heaps = caps & MALLOC_CAP_INVALID;
-    bool valid = true;
-
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        if (heap->heap != NULL
-            && (all_heaps || (get_all_caps(heap) & caps) == caps)) {
-            valid = multi_heap_check(heap->heap, print_errors) && valid;
-        }
-    }
-
-    return valid;
-}
-
-bool heap_caps_check_integrity_all(bool print_errors)
-{
-    return heap_caps_check_integrity(MALLOC_CAP_INVALID, print_errors);
-}
-
-bool heap_caps_check_integrity_addr(intptr_t addr, bool print_errors)
-{
-    heap_t *heap = find_containing_heap((void *)addr);
-    if (heap == NULL) {
-        return false;
-    }
-    return multi_heap_check(heap->heap, print_errors);
-}
-
-void heap_caps_dump(uint32_t caps)
-{
-    bool all_heaps = caps & MALLOC_CAP_INVALID;
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        if (heap->heap != NULL
-            && (all_heaps || (get_all_caps(heap) & caps) == caps)) {
-            multi_heap_dump(heap->heap);
-        }
-    }
-}
-
-void heap_caps_dump_all(void)
-{
-    heap_caps_dump(MALLOC_CAP_INVALID);
-}
-
-size_t heap_caps_get_allocated_size( void *ptr )
-{
-    heap_t *heap = find_containing_heap(ptr);
-    size_t size = multi_heap_get_allocated_size(heap->heap, ptr);
-    return size;
-}
-
-IRAM_ATTR void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps)
-{
-    void *ret = NULL;
-
-    if(!alignment) {
-        return NULL;
-    }
-
-    //Alignment must be a power of two:
-    if((alignment & (alignment - 1)) != 0) {
-        return NULL;
-    }
-
-    if (size > HEAP_SIZE_MAX) {
-        // Avoids int overflow when adding small numbers to size, or
-        // calculating 'end' from start+size, by limiting 'size' to the possible range
-        heap_caps_alloc_failed(size, caps, __func__);
-
-        return NULL;
-    }
-
-    for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
-        //Iterate over heaps and check capabilities at this priority
-        heap_t *heap;
-        SLIST_FOREACH(heap, &registered_heaps, next) {
-            if (heap->heap == NULL) {
-                continue;
-            }
-            if ((heap->caps[prio] & caps) != 0) {
-                //Heap has at least one of the caps requested. If caps has other bits set that this prio
-                //doesn't cover, see if they're available in other prios.
-                if ((get_all_caps(heap) & caps) == caps) {
-                    //Just try to alloc, nothing special.
-                    ret = multi_heap_aligned_alloc(heap->heap, size, alignment);
-                    if (ret != NULL) {
-                        return ret;
-                    }
-                }
-            }
-        }
-    }
-
-    heap_caps_alloc_failed(size, caps, __func__);
-
-    //Nothing usable found.
-    return NULL;
-}
-
-IRAM_ATTR void heap_caps_aligned_free(void *ptr)
-{
-    heap_caps_free(ptr);
-}
-
-void *heap_caps_aligned_calloc(size_t alignment, size_t n, size_t size, uint32_t caps)
-{
-    size_t size_bytes;
-    if (__builtin_mul_overflow(n, size, &size_bytes)) {
-        return NULL;
-    }
-
-    void *ptr = heap_caps_aligned_alloc(alignment,size_bytes, caps);
-    if(ptr != NULL) {
-        memset(ptr, 0, size_bytes);
-    }
-
-    return ptr;
-}
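
The capability flags handled above are what callers pass to steer an allocation into a suitable region; a short sketch (buffer sizes are arbitrary, for illustration only):

    #include <stdint.h>
    #include "esp_heap_caps.h"

    void alloc_examples(void)
    {
        // DMA-capable internal RAM, e.g. for peripheral buffers.
        uint8_t *dma_buf = heap_caps_malloc(512, MALLOC_CAP_DMA);

        // Large byte-accessible buffer that may land in external SPIRAM.
        uint8_t *big_buf = heap_caps_malloc(64 * 1024,
                                            MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);

        heap_caps_free(dma_buf);   // heap_caps_free(NULL) is a no-op, like free()
        heap_caps_free(big_buf);
    }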

+ 0 - 241
components/heap/heap_caps_init.c

@@ -1,241 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "heap_private.h"
-#include <assert.h>
-#include <string.h>
-#include <sys/lock.h>
-
-#include "esp_log.h"
-#include "multi_heap.h"
-#include "multi_heap_platform.h"
-#include "esp_heap_caps_init.h"
-#include "soc/soc_memory_layout.h"
-
-static const char *TAG = "heap_init";
-
-/* Linked-list of registered heaps */
-struct registered_heap_ll registered_heaps;
-
-static void register_heap(heap_t *region)
-{
-    size_t heap_size = region->end - region->start;
-    assert(heap_size <= HEAP_SIZE_MAX);
-    region->heap = multi_heap_register((void *)region->start, heap_size);
-    if (region->heap != NULL) {
-        ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
-    }
-}
-
-void heap_caps_enable_nonos_stack_heaps(void)
-{
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        // Assume any not-yet-registered heap is
-        // a nonos-stack heap
-        if (heap->heap == NULL) {
-            register_heap(heap);
-            if (heap->heap != NULL) {
-                multi_heap_set_lock(heap->heap, &heap->heap_mux);
-            }
-        }
-    }
-}
-
-/* Initialize the heap allocator to use all of the memory not
-   used by static data or reserved for other purposes
- */
-void heap_caps_init(void)
-{
-    /* Get the array of regions that we can use for heaps
-       (with reserved memory removed already.)
-     */
-    size_t num_regions = soc_get_available_memory_region_max_count();
-    soc_memory_region_t regions[num_regions];
-    num_regions = soc_get_available_memory_regions(regions);
-
-    //The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory,
-    //it's useful to coalesce adjacent regions that have the same type.
-    for (size_t i = 1; i < num_regions; i++) {
-        soc_memory_region_t *a = &regions[i - 1];
-        soc_memory_region_t *b = &regions[i];
-        if (b->start == (intptr_t)(a->start + a->size) && b->type == a->type ) {
-            a->type = -1;
-            b->start = a->start;
-            b->size += a->size;
-        }
-    }
-
-    /* Count the heaps left after merging */
-    size_t num_heaps = 0;
-    for (size_t i = 0; i < num_regions; i++) {
-        if (regions[i].type != -1) {
-            num_heaps++;
-        }
-    }
-
-    /* Start by allocating the registered heap data on the stack.
-
-       Once we have a heap to copy it to, we will copy it to a heap buffer.
-    */
-    heap_t temp_heaps[num_heaps];
-    size_t heap_idx = 0;
-
-    ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
-    for (size_t i = 0; i < num_regions; i++) {
-        soc_memory_region_t *region = &regions[i];
-        const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
-        heap_t *heap = &temp_heaps[heap_idx];
-        if (region->type == -1) {
-            continue;
-        }
-        heap_idx++;
-        assert(heap_idx <= num_heaps);
-
-        memcpy(heap->caps, type->caps, sizeof(heap->caps));
-        heap->start = region->start;
-        heap->end = region->start + region->size;
-        MULTI_HEAP_LOCK_INIT(&heap->heap_mux);
-        if (type->startup_stack) {
-            /* Will be registered when OS scheduler starts */
-            heap->heap = NULL;
-        } else {
-            register_heap(heap);
-        }
-        SLIST_NEXT(heap, next) = NULL;
-
-        ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
-                       region->start, region->size, region->size / 1024, type->name);
-    }
-
-    assert(heap_idx == num_heaps);
-
-    /* Allocate the permanent heap data that we'll use as a linked list at runtime.
-
-       Allocate this part of data contiguously, even though it's a linked list... */
-    assert(SLIST_EMPTY(&registered_heaps));
-
-    heap_t *heaps_array = NULL;
-    for (size_t i = 0; i < num_heaps; i++) {
-        if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
-            /* use the first DRAM heap which can fit the data */
-            heaps_array = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_heaps);
-            if (heaps_array != NULL) {
-                break;
-            }
-        }
-    }
-    assert(heaps_array != NULL); /* if NULL, there's not enough free startup heap space */
-
-    memcpy(heaps_array, temp_heaps, sizeof(heap_t)*num_heaps);
-
-    /* Iterate the heaps and set their locks, also add them to the linked list. */
-    for (size_t i = 0; i < num_heaps; i++) {
-        if (heaps_array[i].heap != NULL) {
-            multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
-        }
-        if (i == 0) {
-            SLIST_INSERT_HEAD(&registered_heaps, &heaps_array[0], next);
-        } else {
-            SLIST_INSERT_AFTER(&heaps_array[i-1], &heaps_array[i], next);
-        }
-    }
-}
-
-esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
-{
-    if (start == 0) {
-        return ESP_ERR_INVALID_ARG;
-    }
-
-    for (size_t i = 0; i < soc_memory_region_count; i++) {
-        const soc_memory_region_t *region = &soc_memory_regions[i];
-        // Test requested 'start' only, as 'end' may be in a different region entry; assume 'end' has the same caps
-        if (region->start <= start && (intptr_t)(region->start + region->size) > start) {
-            const uint32_t *caps = soc_memory_types[region->type].caps;
-            return heap_caps_add_region_with_caps(caps, start, end);
-        }
-    }
-
-    return ESP_ERR_NOT_FOUND;
-}
-
-esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
-{
-    esp_err_t err = ESP_FAIL;
-    if (caps == NULL || start == 0 || end == 0 || end <= start) {
-        return ESP_ERR_INVALID_ARG;
-    }
-
-    //Check if region overlaps the start and/or end of an existing region. If so, the
-    //region is invalid (or maybe added twice)
-    /*
-     *  assume that within one region, start must be less than end (they cannot be equal)!
-     *  Notably, the 4th scenario is allowed: for example, memory is allocated from the heap,
-     *  its capability is then changed, and this function is called to create a new region for a
-     *  special application.
-     *  In the following chart, 'start = start' and 'end = end' is contained in the 3rd scenario.
-     *  That all-equal scenario is incorrect because the same region cannot be added twice. For example,
-     *  adding the .bss memory as a region twice would cause an exception if this check were not done.
-     *
-     *  the existing heap region                                  s(tart)                e(nd)
-     *                                                            |----------------------|
-     *  1.add region  [Correct]   (s1<s && e1<=s)           |-----|
-     *  2.add region  [Incorrect] (s2<=s && s<e2<=e)        |---------------|
-     *  3.add region  [Incorrect] (s3<=s && e<e3)           |-------------------------------------|
-     *  4 add region  [Correct]   (s<s4<e && s<e4<=e)                  |-------|
-     *  5.add region  [Incorrect] (s<s5<e && e<e5)                     |----------------------------|
-     *  6.add region  [Correct]   (e<=s6 && e<e6)                                        |----|
-     */
-
-    heap_t *heap;
-    SLIST_FOREACH(heap, &registered_heaps, next) {
-        if ((start <= heap->start && end > heap->start)
-                || (start < heap->end && end > heap->end)) {
-            return ESP_FAIL;
-        }
-    }
-
-    heap_t *p_new = heap_caps_malloc(sizeof(heap_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
-    if (p_new == NULL) {
-        err = ESP_ERR_NO_MEM;
-        goto done;
-    }
-    memcpy(p_new->caps, caps, sizeof(p_new->caps));
-    p_new->start = start;
-    p_new->end = end;
-    MULTI_HEAP_LOCK_INIT(&p_new->heap_mux);
-    p_new->heap = multi_heap_register((void *)start, end - start);
-    SLIST_NEXT(p_new, next) = NULL;
-    if (p_new->heap == NULL) {
-        err = ESP_ERR_INVALID_SIZE;
-        goto done;
-    }
-    multi_heap_set_lock(p_new->heap, &p_new->heap_mux);
-
-    /* (This insertion is atomic with respect to registered_heaps, so
-       we don't need to worry about thread safety for readers,
-       only for writers.) */
-    static multi_heap_lock_t registered_heaps_write_lock = MULTI_HEAP_LOCK_STATIC_INITIALIZER;
-    MULTI_HEAP_LOCK(&registered_heaps_write_lock);
-    SLIST_INSERT_HEAD(&registered_heaps, p_new, next);
-    MULTI_HEAP_UNLOCK(&registered_heaps_write_lock);
-
-    err = ESP_OK;
-
- done:
-    if (err != ESP_OK) {
-        free(p_new);
-    }
-    return err;
-}
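
heap_caps_add_region() above is the runtime entry point for handing extra RAM to the allocator; the address range in this sketch is a placeholder, not a real mapping:

    #include <stdint.h>
    #include "esp_heap_caps_init.h"

    void add_spare_ram(void)
    {
        // Placeholder range; 'start' must fall inside a known soc memory region,
        // otherwise ESP_ERR_NOT_FOUND is returned.
        esp_err_t err = heap_caps_add_region((intptr_t)0x3F800000, (intptr_t)0x3F900000);
        if (err == ESP_FAIL) {
            // the range overlaps a heap that is already registered
        }
    }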

+ 0 - 77
components/heap/heap_private.h

@@ -1,77 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-#include <stdlib.h>
-#include <stdint.h>
-#include <soc/soc_memory_layout.h>
-#include "multi_heap.h"
-#include "multi_heap_platform.h"
-#include "sys/queue.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Some common heap registration data structures used
-   for heap_caps_init.c to share heap information with heap_caps.c
-*/
-
-#define HEAP_SIZE_MAX (SOC_MAX_CONTIGUOUS_RAM_SIZE)
-
-/* Type for describing each registered heap */
-typedef struct heap_t_ {
-    uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS]; ///< Capabilities for the type of memory in this heap (as a prioritised set). Copied from soc_memory_types so it's in RAM not flash.
-    intptr_t start;
-    intptr_t end;
-    multi_heap_lock_t heap_mux;
-    multi_heap_handle_t heap;
-    SLIST_ENTRY(heap_t_) next;
-} heap_t;
-
-/* All registered heaps.
-
-   Forms a single linked list, even though most entries are contiguous.
-   This means at the expense of 4 bytes per heap, new heaps can be
-   added at runtime in a fast & thread-safe way.
-*/
-extern SLIST_HEAD(registered_heap_ll, heap_t_) registered_heaps;
-
-bool heap_caps_match(const heap_t *heap, uint32_t caps);
-
-/* return all possible capabilities (across all priorities) for a given heap */
-inline static IRAM_ATTR uint32_t get_all_caps(const heap_t *heap)
-{
-    if (heap->heap == NULL) {
-        return 0;
-    }
-    uint32_t all_caps = 0;
-    for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
-        all_caps |= heap->caps[prio];
-    }
-    return all_caps;
-}
-
-/*
- Because we don't want to add _another_ known allocation method to the stack of functions traced for memory tracing,
- these are declared private. The newlib malloc()/realloc() implementation also calls these, so they are declared
- separately in newlib/syscalls.c.
-*/
-void *heap_caps_realloc_default(void *p, size_t size);
-void *heap_caps_malloc_default(size_t size);
-
-
-#ifdef __cplusplus
-}
-#endif
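
registered_heaps and get_all_caps() are internal, but a sketch of how heap_caps.c walks them may help (illustrative only, not a public API):

    #include <stdio.h>
    #include "heap_private.h"

    // Walk every registered heap, printing its address span and the
    // union of its capabilities across all priorities.
    static void dump_registered_heaps(void)
    {
        heap_t *heap;
        SLIST_FOREACH(heap, &registered_heaps, next) {
            printf("heap %p..%p caps 0x%08x\n",
                   (void *)heap->start, (void *)heap->end,
                   (unsigned)get_all_caps(heap));
        }
    }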

+ 0 - 129
components/heap/heap_task_info.c

@@ -1,129 +0,0 @@
-// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <freertos/FreeRTOS.h>
-#include <freertos/task.h>
-#include <multi_heap.h>
-#include "multi_heap_internal.h"
-#include "heap_private.h"
-#include "esp_heap_task_info.h"
-
-#ifdef CONFIG_HEAP_TASK_TRACKING
-
-/*
- * Return per-task heap allocation totals and lists of blocks.
- *
- * For each task that has allocated memory from the heap, return totals for
- * allocations within regions matching one or more sets of capabilities.
- *
- * Optionally also return an array of structs providing details about each
- * block allocated by one or more requested tasks, or by all tasks.
- *
- * Returns the number of block detail structs returned.
- */
-size_t heap_caps_get_per_task_info(heap_task_info_params_t *params)
-{
-    heap_t *reg;
-    heap_task_block_t *blocks = params->blocks;
-    size_t count = *params->num_totals;
-    size_t remaining = params->max_blocks;
-
-    // Clear out totals for any prepopulated tasks.
-    if (params->totals) {
-        for (size_t i = 0; i < count; ++i) {
-            for (size_t type = 0; type < NUM_HEAP_TASK_CAPS; ++type) {
-                params->totals[i].size[type] = 0;
-                params->totals[i].count[type] = 0;
-            }
-        }
-    }
-
-    SLIST_FOREACH(reg, &registered_heaps, next) {
-        multi_heap_handle_t heap = reg->heap;
-        if (heap == NULL) {
-            continue;
-        }
-
-        // Determine whether the capabilities of this heap region match one of the desired
-        // sets of capabilities.
-        uint32_t caps = get_all_caps(reg);
-        uint32_t type;
-        for (type = 0; type < NUM_HEAP_TASK_CAPS; ++type) {
-            if ((caps & params->mask[type]) == params->caps[type]) {
-                break;
-            }
-        }
-        if (type == NUM_HEAP_TASK_CAPS) {
-            continue;
-        }
-
-        multi_heap_block_handle_t b = multi_heap_get_first_block(heap);
-        multi_heap_internal_lock(heap);
-        for ( ; b ; b = multi_heap_get_next_block(heap, b)) {
-            if (multi_heap_is_free(b)) {
-                continue;
-            }
-            void *p = multi_heap_get_block_address(b);  // Safe, only arithmetic
-            size_t bsize = multi_heap_get_allocated_size(heap, p); // Validates
-            TaskHandle_t btask = (TaskHandle_t)multi_heap_get_block_owner(b);
-
-            // Accumulate per-task allocation totals.
-            if (params->totals) {
-                size_t i;
-                for (i = 0; i < count; ++i) {
-                    if (params->totals[i].task == btask) {
-                        break;
-                    }
-                }
-                if (i < count) {
-                    params->totals[i].size[type] += bsize;
-                    params->totals[i].count[type] += 1;
-                }
-                else {
-                    if (count < params->max_totals) {
-                        params->totals[count].task = btask;
-                        params->totals[count].size[type] = bsize;
-                        params->totals[count].count[type] = 1;
-                        ++count;
-                    }
-                }
-            }
-
-            // Return details about allocated blocks for selected tasks.
-            if (blocks && remaining > 0) {
-                if (params->tasks) {
-                    size_t i;
-                    for (i = 0; i < params->num_tasks; ++i) {
-                        if (btask == params->tasks[i]) {
-                            break;
-                        }
-                    }
-                    if (i == params->num_tasks) {
-                        continue;
-                    }
-                }
-                blocks->task = btask;
-                blocks->address = p;
-                blocks->size = bsize;
-                ++blocks;
-                --remaining;
-            }
-        }
-        multi_heap_internal_unlock(heap);
-    }
-    *params->num_totals = count;
-    return params->max_blocks - remaining;
-}
-
-#endif // CONFIG_HEAP_TASK_TRACKING
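
A sketch of how heap_caps_get_per_task_info() is typically called; the array sizes are arbitrary, and the two capability buckets mirror the mask/caps matching in the loop above:

    #include "freertos/FreeRTOS.h"
    #include "esp_heap_task_info.h"

    #define MAX_TASK_NUM  20
    #define MAX_BLOCK_NUM 20

    static size_t s_num_totals = 0;
    static heap_task_totals_t s_totals[MAX_TASK_NUM];
    static heap_task_block_t  s_blocks[MAX_BLOCK_NUM];

    static void dump_per_task_heap(void)
    {
        heap_task_info_params_t params = { 0 };
        params.caps[0] = MALLOC_CAP_8BIT;   // bucket 0: byte-accessible regions
        params.mask[0] = MALLOC_CAP_8BIT;
        params.caps[1] = MALLOC_CAP_32BIT;  // bucket 1: 32-bit-only regions
        params.mask[1] = MALLOC_CAP_32BIT;
        params.tasks = NULL;                // NULL: report blocks for all tasks
        params.num_tasks = 0;
        params.totals = s_totals;
        params.num_totals = &s_num_totals;
        params.max_totals = MAX_TASK_NUM;
        params.blocks = s_blocks;
        params.max_blocks = MAX_BLOCK_NUM;
        heap_caps_get_per_task_info(&params);
        // s_totals[0..s_num_totals) now holds per-task sizes/counts per bucket
    }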

+ 0 - 1015
components/heap/heap_tlsf.c

@@ -1,1015 +0,0 @@
-/*
-** Two Level Segregated Fit memory allocator, version 3.1.
-** Written by Matthew Conte
-**	http://tlsf.baisoku.org
-**
-** Based on the original documentation by Miguel Masmano:
-**	http://www.gii.upv.es/tlsf/main/docs
-**
-** This implementation was written to the specification
-** of the document, therefore no GPL restrictions apply.
-**
-** Copyright (c) 2006-2016, Matthew Conte
-** All rights reserved.
-**
-** Redistribution and use in source and binary forms, with or without
-** modification, are permitted provided that the following conditions are met:
-**     * Redistributions of source code must retain the above copyright
-**       notice, this list of conditions and the following disclaimer.
-**     * Redistributions in binary form must reproduce the above copyright
-**       notice, this list of conditions and the following disclaimer in the
-**       documentation and/or other materials provided with the distribution.
-**     * Neither the name of the copyright holder nor the
-**       names of its contributors may be used to endorse or promote products
-**       derived from this software without specific prior written permission.
-**
-** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
-** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include "multi_heap_config.h"
-#include "multi_heap.h"
-#include "multi_heap_internal.h"
-#include "heap_tlsf_config.h"
-#include "heap_tlsf.h"
-
-#include "esp_log.h"
-/*
-** Architecture-specific bit manipulation routines.
-**
-** TLSF achieves O(1) cost for malloc and free operations by limiting
-** the search for a free block to a free list of guaranteed size
-** adequate to fulfill the request, combined with efficient free list
-** queries using bitmasks and architecture-specific bit-manipulation
-** routines.
-**
-** Most modern processors provide instructions to count leading zeroes
-** in a word, find the lowest and highest set bit, etc. These
-** specific implementations will be used when available, falling back
-** to a reasonably efficient generic implementation.
-**
-** NOTE: TLSF spec relies on ffs/fls returning value 0..31.
-** ffs/fls return 1-32 by default, returning 0 for error.
-*/
-
-/* The TLSF control structure. */
-typedef struct control_t
-{
-	/* Empty lists point at this block to indicate they are free. */
-	block_header_t block_null;
-	
-	/* Local parameter for the pool */
-	unsigned int fl_index_count;
-	unsigned int fl_index_shift;
-	unsigned int fl_index_max;	
-	unsigned int sl_index_count;
-	unsigned int sl_index_count_log2;
-	unsigned int small_block_size;
-	size_t size;
-
-	/* Bitmaps for free lists. */
-	unsigned int fl_bitmap;
-	unsigned int *sl_bitmap;	
-
-	/* Head of free lists. */
-	block_header_t** blocks;
-} control_t;
-
-static inline __attribute__((__always_inline__)) int tlsf_ffs(unsigned int word)
-{
-	const unsigned int reverse = word & (~word + 1);
-	const int bit = 32 - __builtin_clz(reverse);
-	return bit - 1;
-}
-
-static inline __attribute__((__always_inline__)) int tlsf_fls(unsigned int word)
-{
-	const int bit = word ? 32 - __builtin_clz(word) : 0;
-	return bit - 1;
-}
-
-/*
-** Set assert macro, if it has not been provided by the user.
-*/
-#if !defined (tlsf_assert)
-#define tlsf_assert assert
-#endif
-
-/*
-** Static assertion mechanism.
-*/
-#define _tlsf_glue2(x, y) x ## y
-#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
-#define tlsf_static_assert(exp) \
-	typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]
-
-/* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
-tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
-tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
-tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);
-
-static inline __attribute__((__always_inline__)) size_t align_up(size_t x, size_t align)
-{
-	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
-	return (x + (align - 1)) & ~(align - 1);
-}
-
-static inline __attribute__((__always_inline__)) size_t align_down(size_t x, size_t align)
-{
-	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
-	return x - (x & (align - 1));
-}
-
-static inline __attribute__((__always_inline__)) void* align_ptr(const void* ptr, size_t align)
-{
-	const tlsfptr_t aligned =
-		(tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
-	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
-	return tlsf_cast(void*, aligned);
-}
-
-/*
-** Adjust an allocation size to be aligned to word size, and no smaller
-** than internal minimum.
-*/
-static inline __attribute__((__always_inline__)) size_t adjust_request_size(tlsf_t tlsf, size_t size, size_t align)
-{
-	size_t adjust = 0;
-	if (size)
-	{
-		const size_t aligned = align_up(size, align);
-
-		/* aligned size must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
-		if (aligned < tlsf_block_size_max(tlsf))
-		{
-			adjust = tlsf_max(aligned, block_size_min);
-		}
-	}
-	return adjust;
-}
-
-/*
-** TLSF utility functions. In most cases, these are direct translations of
-** the documentation found in the white paper.
-*/
-
-static inline __attribute__((__always_inline__)) void mapping_insert(control_t *control, size_t size, int* fli, int* sli)
-{
-	int fl, sl;
-	if (size < control->small_block_size)
-	{
-		/* Store small blocks in first list. */
-		fl = 0;
-		sl = tlsf_cast(int, size) >> 2;
-	}
-	else
-	{
-		fl = tlsf_fls(size);
-		sl = tlsf_cast(int, size >> (fl - control->sl_index_count_log2)) ^ (1 << control->sl_index_count_log2);
-		fl -= (control->fl_index_shift - 1);
-	}
-	*fli = fl;
-	*sli = sl;
-}
-
-/* This version rounds up to the next block size (for allocations) */
-static inline __attribute__((__always_inline__)) void mapping_search(control_t *control, size_t size, int* fli, int* sli)
-{
-	if (size >= control->small_block_size)
-	{
-		const size_t round = (1 << (tlsf_fls(size) - control->sl_index_count_log2)) - 1;
-		size += round;
-	}
-	mapping_insert(control, size, fli, sli);
-}
-
-static inline __attribute__((__always_inline__)) block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
-{
-	int fl = *fli;
-	int sl = *sli;
-
-	/*
-	** First, search for a block in the list associated with the given
-	** fl/sl index.
-	*/
-	unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
-	if (!sl_map)
-	{
-		/* No block exists. Search in the next largest first-level list. */
-		const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
-		if (!fl_map)
-		{
-			/* No free blocks available, memory has been exhausted. */
-			return 0;
-		}
-
-		fl = tlsf_ffs(fl_map);
-		*fli = fl;
-		sl_map = control->sl_bitmap[fl];
-	}
-	tlsf_assert(sl_map && "internal error - second level bitmap is null");
-	sl = tlsf_ffs(sl_map);
-	*sli = sl;
-
-	/* Return the first block in the free list. */
-	return control->blocks[fl*control->sl_index_count + sl];
-}
-
-/* Remove a free block from the free list.*/
-static inline __attribute__((__always_inline__)) void remove_free_block(control_t* control, block_header_t* block, int fl, int sl)
-{
-	block_header_t* prev = block->prev_free;
-	block_header_t* next = block->next_free;
-	tlsf_assert(prev && "prev_free field can not be null");
-	tlsf_assert(next && "next_free field can not be null");
-	next->prev_free = prev;
-	prev->next_free = next;
-
-	/* If this block is the head of the free list, set new head. */
-	if (control->blocks[fl*control->sl_index_count + sl] == block)
-	{
-		control->blocks[fl*control->sl_index_count + sl] = next;
-
-		/* If the new head is null, clear the bitmap. */
-		if (next == &control->block_null)
-		{
-			control->sl_bitmap[fl] &= ~(1 << sl);
-
-			/* If the second bitmap is now empty, clear the fl bitmap. */
-			if (!control->sl_bitmap[fl])
-			{
-				control->fl_bitmap &= ~(1 << fl);
-			}
-		}
-	}
-}
-
-/* Insert a free block into the free block list. */
-static inline __attribute__((__always_inline__)) void insert_free_block(control_t* control, block_header_t* block, int fl, int sl)
-{
-	block_header_t* current = control->blocks[fl*control->sl_index_count + sl];
-	tlsf_assert(current && "free list cannot have a null entry");
-	tlsf_assert(block && "cannot insert a null entry into the free list");
-	block->next_free = current;
-	block->prev_free = &control->block_null;
-	current->prev_free = block;
-
-	tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE)
-		&& "block not aligned properly");
-	/*
-	** Insert the new block at the head of the list, and mark the first-
-	** and second-level bitmaps appropriately.
-	*/
-	control->blocks[fl*control->sl_index_count + sl] = block;
-	control->fl_bitmap |= (1 << fl);
-	control->sl_bitmap[fl] |= (1 << sl);
-}
-
-/* Remove a given block from the free list. */
-static inline __attribute__((__always_inline__)) void block_remove(control_t* control, block_header_t* block)
-{
-	int fl, sl;
-	mapping_insert(control, block_size(block), &fl, &sl);
-	remove_free_block(control, block, fl, sl);
-}
-
-/* Insert a given block into the free list. */
-static inline __attribute__((__always_inline__)) void block_insert(control_t* control, block_header_t* block)
-{
-	int fl, sl;
-	mapping_insert(control, block_size(block), &fl, &sl);
-	insert_free_block(control, block, fl, sl);
-}
-
-static inline __attribute__((__always_inline__)) int block_can_split(block_header_t* block, size_t size)
-{
-	return block_size(block) >= sizeof(block_header_t) + size;
-}
-
-/* Split a block into two, the second of which is free. */
-static inline __attribute__((__always_inline__)) block_header_t* block_split(block_header_t* block, size_t size)
-{
-    /* Calculate the amount of space left in the remaining block.
-     * REMINDER: remaining pointer's first field is `prev_phys_block` but this field is part of the
-     * previous physical block. */
-	block_header_t* remaining =
-		offset_to_block(block_to_ptr(block), size - block_header_overhead);
-
-    /* `size` passed as an argument is the first block's new size; thus, the remaining block's size
-     * is `block_size(block) - size`. However, the block's data must be preceded by the data size.
-     * This field is NOT part of the size, so it has to be subtracted from the calculation. */
-	const size_t remain_size = block_size(block) - (size + block_header_overhead);
-
-	tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)
-		&& "remaining block not aligned properly");
-
-	tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
-	block_set_size(remaining, remain_size);
-	tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");
-
-	block_set_size(block, size);
-	block_mark_as_free(remaining);
-
-    /**
-     * Here is the final outcome of this function:
-     *
-     * block             remaining (block_ptr + size - BHO)
-     * +                                +
-     * |                                |
-     * v                                v
-     * +----------------------------------------------------------------------+
-     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
-     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
-     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
-     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
-     * +----------------------------------------------------------------------+
-     *      |    |                           |    |
-     *      +    +<------------------------->+    +<------------------------->
-     *       BHO    `size` (argument) bytes   BHO      `remain_size` bytes
-     *
-     * Where BHO = block_header_overhead,
-     * 0: part of the memory owned by a `block`'s previous neighbour,
-     * x: part of the memory owned by `block`.
-     * #: part of the memory owned by `remaining`.
-     */
-
-	return remaining;
-}
-
-/* Absorb a free block's storage into an adjacent previous free block. */
-static inline __attribute__((__always_inline__)) block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
-{
-	tlsf_assert(!block_is_last(prev) && "previous block can't be last");
-	/* Note: Leaves flags untouched. */
-	prev->size += block_size(block) + block_header_overhead;
-	block_link_next(prev);
-
-#ifdef MULTI_HEAP_POISONING_SLOW
-        /* next_block header needs to be replaced with a fill pattern */
-        multi_heap_internal_poison_fill_region(block, sizeof(block_header_t), true /* free */);
-#endif
-
-	return prev;
-}
-
-/* Merge a just-freed block with an adjacent previous free block. */
-static inline __attribute__((__always_inline__)) block_header_t* block_merge_prev(control_t* control, block_header_t* block)
-{
-	if (block_is_prev_free(block))
-	{
-		block_header_t* prev = block_prev(block);
-		tlsf_assert(prev && "prev physical block can't be null");
-		tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
-		block_remove(control, prev);
-		block = block_absorb(prev, block);
-	}
-
-	return block;
-}
-
-/* Merge a just-freed block with an adjacent free block. */
-static inline __attribute__((__always_inline__)) block_header_t* block_merge_next(control_t* control, block_header_t* block)
-{
-	block_header_t* next = block_next(block);
-	tlsf_assert(next && "next physical block can't be null");
-
-	if (block_is_free(next))
-	{
-		tlsf_assert(!block_is_last(block) && "previous block can't be last");
-		block_remove(control, next);
-		block = block_absorb(block, next);
-	}
-
-	return block;
-}
-
-/* Trim any trailing block space off the end of a block, return to pool. */
-static inline __attribute__((__always_inline__)) void block_trim_free(control_t* control, block_header_t* block, size_t size)
-{
-	tlsf_assert(block_is_free(block) && "block must be free");
-	if (block_can_split(block, size))
-	{
-		block_header_t* remaining_block = block_split(block, size);
-		block_link_next(block);
-		block_set_prev_free(remaining_block);
-		block_insert(control, remaining_block);
-	}
-}
-
-/* Trim any trailing block space off the end of a used block, return to pool. */
-static inline __attribute__((__always_inline__)) void block_trim_used(control_t* control, block_header_t* block, size_t size)
-{
-	tlsf_assert(!block_is_free(block) && "block must be used");
-	if (block_can_split(block, size))
-	{
-		/* If the next block is free, we must coalesce. */
-		block_header_t* remaining_block = block_split(block, size);
-		block_set_prev_used(remaining_block);
-
-		remaining_block = block_merge_next(control, remaining_block);
-		block_insert(control, remaining_block);
-	}
-}
-
-static inline __attribute__((__always_inline__)) block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
-{
-	block_header_t* remaining_block = block;
-	if (block_can_split(block, size))
-	{
-        /* We want to split `block` in two: the first block will be freed and the
-         * second block will be returned. */
-		remaining_block = block_split(block, size - block_header_overhead);
-
-        /* `remaining_block` is the second block, mark its predecessor (first
-         * block) as free. */
-		block_set_prev_free(remaining_block);
-
-		block_link_next(block);
-
-        /* Put back the first block into the free memory list. */
-		block_insert(control, block);
-	}
-
-	return remaining_block;
-}
-
-static inline  __attribute__((__always_inline__)) block_header_t* block_locate_free(control_t* control, size_t size)
-{
-	int fl = 0, sl = 0;
-	block_header_t* block = 0;
-
-	if (size)
-	{
-		mapping_search(control, size, &fl, &sl);
-
-		/*
-		** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up
-		** with indices that are off the end of the block array.
-		** So, we protect against that here, since this is the only callsite of mapping_search.
-		** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range.
-		*/
-		if (fl < control->fl_index_count)
-		{
-			block = search_suitable_block(control, &fl, &sl);
-		}
-	}
-
-	if (block)
-	{
-		tlsf_assert(block_size(block) >= size);
-		remove_free_block(control, block, fl, sl);
-	}
-
-	return block;
-}
-
-static inline __attribute__((__always_inline__)) void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
-{
-	void* p = 0;
-	if (block)
-	{
-		tlsf_assert(size && "size must be non-zero");
-		block_trim_free(control, block, size);
-		block_mark_as_used(block);
-		p = block_to_ptr(block);
-	}
-	return p;
-}
-
-/* Clear structure and point all empty lists at the null block. */
-static void control_construct(control_t* control, size_t bytes)
-{
-	int i, j;
-
-	control->block_null.next_free = &control->block_null;
-	control->block_null.prev_free = &control->block_null;
-
-	/* find the closest power of two for the first layer */
-	i = (bytes - 1) / (16 * 1024);
-	/* __builtin_clz(0) is undefined, so pools of 16 kB or less keep the minimum index */
-	control->fl_index_max = i ? FL_INDEX_MAX_MIN + sizeof(i) * 8 - __builtin_clz(i)
-	                          : FL_INDEX_MAX_MIN;
-
-	/* adapt second layer to the pool */
-	if (bytes <= 16 * 1024) control->sl_index_count_log2 = 3;
-	else if (bytes <= 256 * 1024) control->sl_index_count_log2 = 4;
-	else control->sl_index_count_log2 = 5;
-	
-	control->fl_index_shift = (control->sl_index_count_log2 + ALIGN_SIZE_LOG2);
-	control->sl_index_count = 1 << control->sl_index_count_log2;
-	control->fl_index_count = control->fl_index_max - control->fl_index_shift + 1;
-	control->small_block_size = 1 << control->fl_index_shift;
-	control->fl_bitmap = 0;
-	
-	control->sl_bitmap = align_ptr(control + 1, sizeof(*control->sl_bitmap));
-	control->blocks = align_ptr(control->sl_bitmap + control->fl_index_count, sizeof(*control->blocks));
-	control->size = (void*) (control->blocks + control->sl_index_count * control->fl_index_count) - (void*) control;
-	
-	ESP_EARLY_LOGW( "REMOVE", "NEW POOL of %d bytes, ctrl_size: %d sli_c:%d fli_c:%d small_b %d max_b:%d", 
-					bytes, 
-					control->size, control->sl_index_count, control->fl_index_count,
-					control->small_block_size, 1 << control->fl_index_max );		
-
-	/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
-	tlsf_assert(sizeof(unsigned int) * CHAR_BIT >= control->sl_index_count && "CHAR_BIT less than sl_index_count");
-
-	/* Ensure we've properly tuned our sizes. */
-	tlsf_assert(ALIGN_SIZE == control->small_block_size / control->sl_index_count && "ALIGN_SIZE does not match");
-	
-	for (i = 0; i < control->fl_index_count; ++i)
-	{
-		control->sl_bitmap[i] = 0;
-		for (j = 0; j < control->sl_index_count; ++j)
-		{
-			control->blocks[i*control->sl_index_count + j] = &control->block_null;
-		}
-	}
-}
-
-/*
-** Debugging utilities.
-*/
-
-typedef struct integrity_t
-{
-	int prev_status;
-	int status;
-} integrity_t;
-
-#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }
-
-static void integrity_walker(void* ptr, size_t size, int used, void* user)
-{
-	block_header_t* block = block_from_ptr(ptr);
-	integrity_t* integ = tlsf_cast(integrity_t*, user);
-	const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
-	const int this_status = block_is_free(block) ? 1 : 0;
-	const size_t this_block_size = block_size(block);
-
-	int status = 0;
-	(void)used;
-	tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
-	tlsf_insist(size == this_block_size && "block size incorrect");
-
-	integ->prev_status = this_status;
-	integ->status += status;
-}
-
-int tlsf_check(tlsf_t tlsf)
-{
-	int i, j;
-
-	control_t* control = tlsf_cast(control_t*, tlsf);
-	int status = 0;
-
-	/* Check that the free lists and bitmaps are accurate. */
-	for (i = 0; i < control->fl_index_count; ++i)
-	{
-		for (j = 0; j < control->sl_index_count; ++j)
-		{
-			const int fl_map = control->fl_bitmap & (1 << i);
-			const int sl_list = control->sl_bitmap[i];
-			const int sl_map = sl_list & (1 << j);
-			const block_header_t* block = control->blocks[i*control->sl_index_count + j];
-
-			/* Check that first- and second-level lists agree. */
-			if (!fl_map)
-			{
-				tlsf_insist(!sl_map && "second-level map must be null");
-			}
-
-			if (!sl_map)
-			{
-				tlsf_insist(block == &control->block_null && "block list must be null");
-				continue;
-			}
-
-			/* Check that there is at least one free block. */
-			tlsf_insist(sl_list && "no free blocks in second-level map");
-			tlsf_insist(block != &control->block_null && "block should not be null");
-
-			while (block != &control->block_null)
-			{
-				int fli, sli;
-				tlsf_insist(block_is_free(block) && "block should be free");
-				tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
-				tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
-				tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
-				tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");
-
-				mapping_insert(control, block_size(block), &fli, &sli);
-				tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
-				block = block->next_free;
-			}
-		}
-	}
-
-	return status;
-}
-
-#undef tlsf_insist
-
-static void default_walker(void* ptr, size_t size, int used, void* user)
-{
-	(void)user;
-	printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
-}
-
-void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
-{
-	tlsf_walker pool_walker = walker ? walker : default_walker;
-	block_header_t* block =
-		offset_to_block(pool, -(int)block_header_overhead);
-
-	while (block && !block_is_last(block))
-	{
-		pool_walker(
-			block_to_ptr(block),
-			block_size(block),
-			!block_is_free(block),
-			user);
-		block = block_next(block);
-	}
-}
-
-size_t tlsf_block_size(void* ptr)
-{
-	size_t size = 0;
-	if (ptr)
-	{
-		const block_header_t* block = block_from_ptr(ptr);
-		size = block_size(block);
-	}
-	return size;
-}
-
-int tlsf_check_pool(pool_t pool)
-{
-	/* Check that the blocks are physically correct. */
-	integrity_t integ = { 0, 0 };
-	tlsf_walk_pool(pool, integrity_walker, &integ);
-
-	return integ.status;
-}
-
-size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
-{
-	/* because TLSF is a good-fit allocator, the allocatable size is one size range lower */
-    if (size) 
-	{
-		control_t* control = tlsf_cast(control_t*, tlsf);
-        size_t sl_interval = (1 << ((sizeof(size_t) * 8 - 1) - __builtin_clz(size))) / control->sl_index_count;
-        return size & ~(sl_interval - 1);
-    }
-	
-	return 0;
-}	
-
-
-/*
-** Size of the TLSF structures in a given memory block passed to
-** tlsf_create, equal to the size of a control_t
-*/
-size_t tlsf_size(tlsf_t tlsf)
-{
-	if (tlsf) 
-	{
-		control_t* control = tlsf_cast(control_t*, tlsf);
-		return control->size;
-	}	
-	
-	/* no tlsf, we'll just return a min size */
-	return sizeof(control_t) + 
-	       sizeof(int) * SL_INDEX_COUNT_MIN + 
-	       sizeof(block_header_t*) * SL_INDEX_COUNT_MIN * FL_INDEX_COUNT_MIN;
-}
-
-size_t tlsf_align_size(void)
-{
-	return ALIGN_SIZE;
-}
-
-size_t tlsf_block_size_min(void)
-{
-	return block_size_min;
-}
-
-size_t tlsf_block_size_max(tlsf_t tlsf)
-{
-	control_t* control = tlsf_cast(control_t*, tlsf);
-	return tlsf_cast(size_t, 1) << control->fl_index_max;
-}
-
-/*
-** Overhead of the TLSF structures in a given memory block passed to
-** tlsf_add_pool, equal to the overhead of a free block and the
-** sentinel block.
-*/
-size_t tlsf_pool_overhead(void)
-{
-	return 2 * block_header_overhead;
-}
-
-size_t tlsf_alloc_overhead(void)
-{
-	return block_header_overhead;
-}
-
-pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
-{
-	block_header_t* block;
-	block_header_t* next;
-
-	const size_t pool_overhead = tlsf_pool_overhead();
-	const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);
-
-	if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)
-	{
-		printf("tlsf_add_pool: Memory must be aligned by %u bytes.\n",
-			(unsigned int)ALIGN_SIZE);
-		return 0;
-	}
-
-	if (pool_bytes < block_size_min || pool_bytes > tlsf_block_size_max(tlsf))
-	{
-#if defined (TLSF_64BIT)
-		printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
-			(unsigned int)(pool_overhead + block_size_min),
-			(unsigned int)((pool_overhead + tlsf_block_size_max(tlsf)) / 256));
-#else
-		printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
-			(unsigned int)(pool_overhead + block_size_min),
-			(unsigned int)(pool_overhead + tlsf_block_size_max(tlsf)));
-#endif
-		return 0;
-	}
-
-	/*
-	** Create the main free block. Offset the start of the block slightly
-	** so that the prev_phys_block field falls outside of the pool -
-	** it will never be used.
-	*/
-	block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
-	block_set_size(block, pool_bytes);
-	block_set_free(block);
-	block_set_prev_used(block);
-	block_insert(tlsf_cast(control_t*, tlsf), block);
-
-	/* Split the block to create a zero-size sentinel block. */
-	next = block_link_next(block);
-	block_set_size(next, 0);
-	block_set_used(next);
-	block_set_prev_free(next);
-
-	return mem;
-}
-
-void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
-{
-	control_t* control = tlsf_cast(control_t*, tlsf);
-	block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);
-
-	int fl = 0, sl = 0;
-
-	tlsf_assert(block_is_free(block) && "block should be free");
-	tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
-	tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");
-
-	mapping_insert(control, block_size(block), &fl, &sl);
-	remove_free_block(control, block, fl, sl);
-}
-
-/*
-** TLSF main interface.
-*/
-
-
-tlsf_t tlsf_create(void* mem, size_t max_bytes)
-{
-#if _DEBUG
-	if (test_ffs_fls())
-	{
-		return 0;
-	}
-#endif
-
-	if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
-	{
-		printf("tlsf_create: Memory must be aligned to %u bytes.\n",
-			(unsigned int)ALIGN_SIZE);
-		return 0;
-	}
-
-	control_construct(tlsf_cast(control_t*, mem), max_bytes);
-
-	return tlsf_cast(tlsf_t, mem);
-}
-
-pool_t tlsf_get_pool(tlsf_t tlsf)
-{
-	return tlsf_cast(pool_t, (char*)tlsf + tlsf_size(tlsf));
-}
-
-tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes)
-{
-	tlsf_t tlsf = tlsf_create(mem, max_bytes ? max_bytes : pool_bytes);
-	tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf));
-	return tlsf;
-}
-
-void* tlsf_malloc(tlsf_t tlsf, size_t size)
-{
-	control_t* control = tlsf_cast(control_t*, tlsf);
-	size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
-	block_header_t* block = block_locate_free(control, adjust);
-	return block_prepare_used(control, block, adjust);
-}
-
-/**
- * @brief Allocate memory of at least `size` bytes where byte at `data_offset` will be aligned to `alignment`.
- *
- * This function will allocate memory pointed to by `ptr`. However, the byte at `data_offset` of
- * this piece of memory (i.e., byte at `ptr` + `data_offset`) will be aligned to `alignment`.
- * This function is useful for allocating memory that will internally have a header, and the
- * usable memory following the header (i.e. `ptr` + `data_offset`) must be aligned.
- *
- * For example, a call to `tlsf_memalign_offs(tlsf, 256, 64, 20)` will return a
- * pointer `ptr` to free memory of minimum 64 bytes, where `ptr + 20` is aligned on `256`.
- * So `(ptr + 20) % 256` equals 0.
- *
- * @param tlsf TLSF structure to allocate memory from.
- * @param align Alignment for the returned pointer's offset.
- * @param size Minimum size, in bytes, of the memory to allocate INCLUDING
- *             `data_offset` bytes.
- * @param data_offset Offset to be aligned on `alignment`. This can be 0, in
- *                    this case, the returned pointer will be aligned on
- *                    `alignment`. If it is not a multiple of CPU word size,
- *                    it will be aligned up to the closest multiple of it.
- *
- * @return pointer to free memory.
- */
-void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_offset)
-{
-    control_t* control = tlsf_cast(control_t*, tlsf);
-    const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
-    const size_t off_adjust = align_up(data_offset, ALIGN_SIZE);
-
-	/*
-	** We must allocate an additional minimum block size bytes so that if
-	** our free block will leave an alignment gap which is smaller, we can
-	** trim a leading free block and release it back to the pool. We must
-	** do this because the previous physical block is in use, therefore
-	** the prev_phys_block field is not valid, and we can't simply adjust
-	** the size of that block.
-	*/
-	const size_t gap_minimum = sizeof(block_header_t) + off_adjust;
-    /* The offset is included in both `adjust` and `gap_minimum`, so we
-    ** need to subtract it once.
-    */
-	const size_t size_with_gap = adjust_request_size(tlsf, adjust + align + gap_minimum - off_adjust, align);
-
-	/*
-	** If alignment is less than or equal to the base alignment, we're done.
-	** If we requested 0 bytes, return null, as tlsf_malloc(0) does.
-	*/
-	const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
-
-	block_header_t* block = block_locate_free(control, aligned_size);
-
-	/* This can't be a static assert. */
-	tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);
-
-	if (block)
-	{
-		void* ptr = block_to_ptr(block);
-		void* aligned = align_ptr(ptr, align);
-		size_t gap = tlsf_cast(size_t,
-			tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
-
-       /*
-        ** If the gap size is too small, or if there is no gap but we need one,
-        ** offset to next aligned boundary.
-        */
-		if ((gap && gap < gap_minimum) || (!gap && off_adjust))
-		{
-			const size_t gap_remain = gap_minimum - gap;
-			const size_t offset = tlsf_max(gap_remain, align);
-			const void* next_aligned = tlsf_cast(void*,
-				tlsf_cast(tlsfptr_t, aligned) + offset);
-
-			aligned = align_ptr(next_aligned, align);
-			gap = tlsf_cast(size_t,
-				tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
-		}
-
-		if (gap)
-		{
-			tlsf_assert(gap >= gap_minimum && "gap size too small");
-			block = block_trim_free_leading(control, block, gap - off_adjust);
-		}
-	}
-
-    /* Preparing the block will also trim the trailing free memory. */
-	return block_prepare_used(control, block, adjust);
-}
-
-/**
- * @brief Same as `tlsf_memalign_offs` function but with a 0 offset.
- * The pointer returned is aligned on `align`.
- */
-void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
-{
-    return tlsf_memalign_offs(tlsf, align, size, 0);
-}
-
-
-void tlsf_free(tlsf_t tlsf, void* ptr)
-{
-	/* Don't attempt to free a NULL pointer. */
-	if (ptr)
-	{
-		control_t* control = tlsf_cast(control_t*, tlsf);
-		block_header_t* block = block_from_ptr(ptr);
-		tlsf_assert(!block_is_free(block) && "block already marked as free");
-		block_mark_as_free(block);
-		block = block_merge_prev(control, block);
-		block = block_merge_next(control, block);
-		block_insert(control, block);
-	}
-}
-
-/*
-** The TLSF block information provides us with enough information to
-** provide a reasonably intelligent implementation of realloc, growing or
-** shrinking the currently allocated block as required.
-**
-** This routine handles the somewhat esoteric edge cases of realloc:
-** - a non-zero size with a null pointer will behave like malloc
-** - a zero size with a non-null pointer will behave like free
-** - a request that cannot be satisfied will leave the original buffer
-**   untouched
-** - an extended buffer size will leave the newly-allocated area with
-**   contents undefined
-*/
-void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
-{
-	control_t* control = tlsf_cast(control_t*, tlsf);
-	void* p = 0;
-
-	/* Zero-size requests are treated as free. */
-	if (ptr && size == 0)
-	{
-		tlsf_free(tlsf, ptr);
-	}
-	/* Requests with NULL pointers are treated as malloc. */
-	else if (!ptr)
-	{
-		p = tlsf_malloc(tlsf, size);
-	}
-	else
-	{
-		block_header_t* block = block_from_ptr(ptr);
-		block_header_t* next = block_next(block);
-
-		const size_t cursize = block_size(block);
-		const size_t combined = cursize + block_size(next) + block_header_overhead;
-		const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
-
-		tlsf_assert(!block_is_free(block) && "block already marked as free");
-
-		/*
-		** If the next block is used, or when combined with the current
-		** block, does not offer enough space, we must reallocate and copy.
-		*/
-		if (adjust > cursize && (!block_is_free(next) || adjust > combined))
-		{
-			p = tlsf_malloc(tlsf, size);
-			if (p)
-			{
-				const size_t minsize = tlsf_min(cursize, size);
-				memcpy(p, ptr, minsize);
-				tlsf_free(tlsf, ptr);
-			}
-		}
-		else
-		{
-			/* Do we need to expand to the next block? */
-			if (adjust > cursize)
-			{
-				block_merge_next(control, block);
-				block_mark_as_used(block);
-			}
-
-			/* Trim the resulting block and return the original pointer. */
-			block_trim_used(control, block, adjust);
-			p = ptr;
-		}
-	}
-
-	return p;
-}
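
Outside the multi_heap wrapper, the TLSF interface deleted above can be
exercised directly; a minimal sketch against a static pool (pool size chosen
arbitrarily, and kept above 16 kB so the first-level sizing math applies):

    #include "heap_tlsf.h"

    static char pool_mem[32 * 1024] __attribute__((aligned(4)));

    void tlsf_demo(void)
    {
        /* Control structure and first pool share one buffer; max_bytes = 0
           sizes the control structure from the pool itself. */
        tlsf_t tlsf = tlsf_create_with_pool(pool_mem, sizeof(pool_mem), 0);

        void *a = tlsf_malloc(tlsf, 100);        /* plain allocation      */
        void *b = tlsf_memalign(tlsf, 64, 256);  /* 256 bytes, 64-aligned */
        a = tlsf_realloc(tlsf, a, 200);          /* grows in place when the
                                                    next block is free    */
        tlsf_free(tlsf, a);
        tlsf_free(tlsf, b);
    }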

+ 0 - 119
components/heap/heap_tlsf.h

@@ -1,119 +0,0 @@
-/*
-** Two Level Segregated Fit memory allocator, version 3.1.
-** Written by Matthew Conte
-**	http://tlsf.baisoku.org
-**
-** Based on the original documentation by Miguel Masmano:
-**	http://www.gii.upv.es/tlsf/main/docs
-**
-** This implementation was written to the specification
-** of the document, therefore no GPL restrictions apply.
-**
-** Copyright (c) 2006-2016, Matthew Conte
-** All rights reserved.
-**
-** Redistribution and use in source and binary forms, with or without
-** modification, are permitted provided that the following conditions are met:
-**     * Redistributions of source code must retain the above copyright
-**       notice, this list of conditions and the following disclaimer.
-**     * Redistributions in binary form must reproduce the above copyright
-**       notice, this list of conditions and the following disclaimer in the
-**       documentation and/or other materials provided with the distribution.
-**     * Neither the name of the copyright holder nor the
-**       names of its contributors may be used to endorse or promote products
-**       derived from this software without specific prior written permission.
-**
-** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
-** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#pragma once
-#include <assert.h>
-#include <limits.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stddef.h>
-#include "heap_tlsf_config.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/*
-** Cast and min/max macros.
-*/
-#define tlsf_cast(t, exp)	((t) (exp))
-#define tlsf_min(a, b)		((a) < (b) ? (a) : (b))
-#define tlsf_max(a, b)		((a) > (b) ? (a) : (b))
-
-/* A type used for casting when doing pointer arithmetic. */
-typedef ptrdiff_t tlsfptr_t;
-
-typedef struct block_header_t
-{
-	/* Points to the previous physical block. */
-	struct block_header_t* prev_phys_block;
-
-	/* The size of this block, excluding the block header. */
-	size_t size;
-
-	/* Next and previous free blocks. */
-	struct block_header_t* next_free;
-	struct block_header_t* prev_free;
-} block_header_t;
-
-#include "heap_tlsf_block_functions.h"
-
-/* tlsf_t: a TLSF structure. Can contain 1 to N pools. */
-/* pool_t: a block of memory that TLSF can manage. */
-typedef void* tlsf_t;
-typedef void* pool_t;
-
-/* Create/destroy a memory pool. */
-tlsf_t tlsf_create(void* mem, size_t max_bytes);
-tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes);
-pool_t tlsf_get_pool(tlsf_t tlsf);
-
-/* Add/remove memory pools. */
-pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes);
-void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
-
-/* malloc/memalign/realloc/free replacements. */
-void* tlsf_malloc(tlsf_t tlsf, size_t size);
-void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size);
-void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t offset);
-void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
-void tlsf_free(tlsf_t tlsf, void* ptr);
-
-/* Returns internal block size, not original request size */
-size_t tlsf_block_size(void* ptr);
-
-/* Overheads/limits of internal structures. */
-size_t tlsf_size(tlsf_t tlsf);
-size_t tlsf_align_size(void);
-size_t tlsf_block_size_min(void);
-size_t tlsf_block_size_max(tlsf_t tlsf);
-size_t tlsf_pool_overhead(void);
-size_t tlsf_alloc_overhead(void);
-size_t tlsf_fit_size(tlsf_t tlsf, size_t size);
-
-/* Debugging. */
-typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
-void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);
-/* Returns nonzero if any internal consistency check fails. */
-int tlsf_check(tlsf_t tlsf);
-int tlsf_check_pool(pool_t pool);
-
-#if defined(__cplusplus)
-};
-#endif
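
The walker callback declared above is the intended way to inspect a pool; a
small sketch (names local to this example) that totals free space:

    #include "heap_tlsf.h"

    static void free_space_walker(void *ptr, size_t size, int used, void *user)
    {
        (void)ptr;
        if (!used) {
            *(size_t *)user += size;  /* accumulate free block sizes */
        }
    }

    size_t pool_free_bytes(pool_t pool)
    {
        size_t free_bytes = 0;
        tlsf_walk_pool(pool, free_space_walker, &free_bytes);
        return free_bytes;
    }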

+ 0 - 174
components/heap/heap_tlsf_block_functions.h

@@ -1,174 +0,0 @@
-/*
-** Two Level Segregated Fit memory allocator, version 3.1.
-** Written by Matthew Conte
-**	http://tlsf.baisoku.org
-**
-** Based on the original documentation by Miguel Masmano:
-**	http://www.gii.upv.es/tlsf/main/docs
-**
-** This implementation was written to the specification
-** of the document, therefore no GPL restrictions apply.
-**
-** Copyright (c) 2006-2016, Matthew Conte
-** All rights reserved.
-**
-** Redistribution and use in source and binary forms, with or without
-** modification, are permitted provided that the following conditions are met:
-**     * Redistributions of source code must retain the above copyright
-**       notice, this list of conditions and the following disclaimer.
-**     * Redistributions in binary form must reproduce the above copyright
-**       notice, this list of conditions and the following disclaimer in the
-**       documentation and/or other materials provided with the distribution.
-**     * Neither the name of the copyright holder nor the
-**       names of its contributors may be used to endorse or promote products
-**       derived from this software without specific prior written permission.
-**
-** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
-** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#pragma once
-
-/*
-** Data structures and associated constants.
-*/
-
-/*
-** Since block sizes are always at least a multiple of 4, the two least
-** significant bits of the size field are used to store the block status:
-** - bit 0: whether block is busy or free
-** - bit 1: whether previous block is busy or free
-*/
-#define block_header_free_bit  (1 << 0)
-#define block_header_prev_free_bit  (1 << 1)
-
-/*
-** The size of the block header exposed to used blocks is the size field.
-** The prev_phys_block field is stored *inside* the previous free block.
-*/
-#define block_header_overhead  (sizeof(size_t))
-
-/* User data starts directly after the size field in a used block. */
-#define block_start_offset (offsetof(block_header_t, size) + sizeof(size_t))
-
-/*
-** A free block must be large enough to store its header minus the size of
-** the prev_phys_block field, and no larger than the number of addressable
-** bits for FL_INDEX.
-** The block_size_max macro returns the maximum block size for the minimum pool;
-** use tlsf_block_size_max() for a value specific to the pool.
-*/
-#define block_size_min  (sizeof(block_header_t) - sizeof(block_header_t*))
-#define block_size_max  (tlsf_cast(size_t, 1) << FL_INDEX_MAX_MIN)
-
-/*
-** block_header_t member functions.
-*/
-static inline __attribute__((__always_inline__)) size_t block_size(const block_header_t* block)
-{
-	return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
-}
-
-static inline __attribute__((__always_inline__)) void block_set_size(block_header_t* block, size_t size)
-{
-	const size_t oldsize = block->size;
-	block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
-}
-
-static inline __attribute__((__always_inline__)) int block_is_last(const block_header_t* block)
-{
-	return block_size(block) == 0;
-}
-
-static inline __attribute__((__always_inline__)) int block_is_free(const block_header_t* block)
-{
-	return tlsf_cast(int, block->size & block_header_free_bit);
-}
-
-static inline __attribute__((__always_inline__)) void block_set_free(block_header_t* block)
-{
-	block->size |= block_header_free_bit;
-}
-
-static inline __attribute__((__always_inline__)) void block_set_used(block_header_t* block)
-{
-	block->size &= ~block_header_free_bit;
-}
-
-static inline __attribute__((__always_inline__)) int block_is_prev_free(const block_header_t* block)
-{
-	return tlsf_cast(int, block->size & block_header_prev_free_bit);
-}
-
-static inline __attribute__((__always_inline__)) void block_set_prev_free(block_header_t* block)
-{
-	block->size |= block_header_prev_free_bit;
-}
-
-static inline __attribute__((__always_inline__)) void block_set_prev_used(block_header_t* block)
-{
-	block->size &= ~block_header_prev_free_bit;
-}
-
-static inline __attribute__((__always_inline__)) block_header_t* block_from_ptr(const void* ptr)
-{
-	return tlsf_cast(block_header_t*,
-		tlsf_cast(unsigned char*, ptr) - block_start_offset);
-}
-
-static inline __attribute__((__always_inline__)) void* block_to_ptr(const block_header_t* block)
-{
-	return tlsf_cast(void*,
-		tlsf_cast(unsigned char*, block) + block_start_offset);
-}
-
-/* Return location of next block after block of given size. */
-static inline __attribute__((__always_inline__)) block_header_t* offset_to_block(const void* ptr, size_t size)
-{
-	return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
-}
-
-/* Return location of previous block. */
-static inline __attribute__((__always_inline__)) block_header_t* block_prev(const block_header_t* block)
-{
-	return block->prev_phys_block;
-}
-
-/* Return location of next existing block. */
-static inline __attribute__((__always_inline__)) block_header_t* block_next(const block_header_t* block)
-{
-	block_header_t* next = offset_to_block(block_to_ptr(block),
-		block_size(block) - block_header_overhead);
-	return next;
-}
-
-/* Link a new block with its physical neighbor, return the neighbor. */
-static inline __attribute__((__always_inline__)) block_header_t* block_link_next(block_header_t* block)
-{
-	block_header_t* next = block_next(block);
-	next->prev_phys_block = block;
-	return next;
-}
-
-static inline __attribute__((__always_inline__)) void block_mark_as_free(block_header_t* block)
-{
-	/* Link the block to the next block, first. */
-	block_header_t* next = block_link_next(block);
-	block_set_prev_free(next);
-	block_set_free(block);
-}
-
-static inline __attribute__((__always_inline__)) void block_mark_as_used(block_header_t* block)
-{
-	block_header_t* next = block_next(block);
-	block_set_prev_used(next);
-	block_set_used(block);
-}
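
Because ALIGN_SIZE is 4, the two low bits of any real block size are zero,
which is what makes the flag packing above safe; a worked example:

    /* A 24-byte used block whose previous physical neighbour is free: */
    size_t stored    = 24 | block_header_prev_free_bit;          /* 0x1A */
    size_t size      = stored & ~(block_header_free_bit |
                                  block_header_prev_free_bit);   /* 24   */
    int    is_free   = stored & block_header_free_bit;           /* 0    */
    int    prev_free = stored & block_header_prev_free_bit;      /* 2    */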

+ 0 - 66
components/heap/heap_tlsf_config.h

@@ -1,66 +0,0 @@
-/*
-** Two Level Segregated Fit memory allocator, version 3.1.
-** Written by Matthew Conte
-**	http://tlsf.baisoku.org
-**
-** Based on the original documentation by Miguel Masmano:
-**	http://www.gii.upv.es/tlsf/main/docs
-**
-** This implementation was written to the specification
-** of the document, therefore no GPL restrictions apply.
-**
-** Copyright (c) 2006-2016, Matthew Conte
-** All rights reserved.
-**
-** Redistribution and use in source and binary forms, with or without
-** modification, are permitted provided that the following conditions are met:
-**     * Redistributions of source code must retain the above copyright
-**       notice, this list of conditions and the following disclaimer.
-**     * Redistributions in binary form must reproduce the above copyright
-**       notice, this list of conditions and the following disclaimer in the
-**       documentation and/or other materials provided with the distribution.
-**     * Neither the name of the copyright holder nor the
-**       names of its contributors may be used to endorse or promote products
-**       derived from this software without specific prior written permission.
-**
-** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
-** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#pragma once
-
-enum tlsf_config
-{
-	/* log2 of number of linear subdivisions of block sizes. Larger
-	** values require more memory in the control structure. Values of
-	** 4 or 5 are typical, 3 is for very small pools.
-	*/
-	SL_INDEX_COUNT_LOG2_MIN = 3, 
-
-	/* All allocation sizes and addresses are aligned to 4 bytes. */
-	ALIGN_SIZE_LOG2 = 2,
-	ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
-
-	/*
-	** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
-	** However, because we linearly subdivide the second-level lists, and
-	** our minimum size granularity is 4 bytes, it doesn't make sense to
-	** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
-	** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
-	** trying to split size ranges into more slots than we have available.
-	** Instead, we calculate the minimum threshold size, and place all
-	** blocks below that size into the 0th first-level list.
-	** Values below are the absolute minimum to accept a pool addition
-	*/
-	FL_INDEX_MAX_MIN = 14, // For a less than 16kB pool
-	SL_INDEX_COUNT_MIN = (1 << SL_INDEX_COUNT_LOG2_MIN),
-	FL_INDEX_COUNT_MIN = (FL_INDEX_MAX_MIN - (SL_INDEX_COUNT_LOG2_MIN + ALIGN_SIZE_LOG2) + 1),
-};
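
To make the derived constants concrete, these are the values the
control_construct() routine shown earlier in this commit computes for a
32 kB pool:

    /* bytes = 32 * 1024:
     *   fl_index_max        = 14 + 32 - __builtin_clz((bytes-1)/16384) = 15
     *                         -> maximum block size = 1 << 15 = 32 kB
     *   sl_index_count_log2 = 4 (pool <= 256 kB) -> sl_index_count = 16
     *   fl_index_shift      = 4 + ALIGN_SIZE_LOG2 = 6
     *   small_block_size    = 1 << 6 = 64 bytes
     *   fl_index_count      = 15 - 6 + 1 = 10
     */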

+ 0 - 255
components/heap/heap_trace_standalone.c

@@ -1,255 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include <string.h>
-#include <sdkconfig.h>
-
-#define HEAP_TRACE_SRCFILE /* don't warn on inclusion here */
-#include "esp_heap_trace.h"
-#undef HEAP_TRACE_SRCFILE
-
-#include "esp_attr.h"
-#include "freertos/FreeRTOS.h"
-#include "freertos/task.h"
-
-
-#define STACK_DEPTH CONFIG_HEAP_TRACING_STACK_DEPTH
-
-#if CONFIG_HEAP_TRACING_STANDALONE
-
-static portMUX_TYPE trace_mux = portMUX_INITIALIZER_UNLOCKED;
-static bool tracing;
-static heap_trace_mode_t mode;
-
-/* Buffer used for records, starting at offset 0
-*/
-static heap_trace_record_t *buffer;
-static size_t total_records;
-
-/* Count of entries logged in the buffer.
-
-   Maximum value is total_records.
-*/
-static size_t count;
-
-/* Actual number of allocations logged */
-static size_t total_allocations;
-
-/* Actual number of frees logged */
-static size_t total_frees;
-
-/* Has the buffer overflowed and lost trace entries? */
-static bool has_overflowed = false;
-
-esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records)
-{
-    if (tracing) {
-        return ESP_ERR_INVALID_STATE;
-    }
-    buffer = record_buffer;
-    total_records = num_records;
-    memset(buffer, 0, num_records * sizeof(heap_trace_record_t));
-    return ESP_OK;
-}
-
-esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
-{
-    if (buffer == NULL || total_records == 0) {
-        return ESP_ERR_INVALID_STATE;
-    }
-
-    portENTER_CRITICAL(&trace_mux);
-
-    tracing = false;
-    mode = mode_param;
-    count = 0;
-    total_allocations = 0;
-    total_frees = 0;
-    has_overflowed = false;
-    heap_trace_resume();
-
-    portEXIT_CRITICAL(&trace_mux);
-    return ESP_OK;
-}
-
-static esp_err_t set_tracing(bool enable)
-{
-    if (tracing == enable) {
-        return ESP_ERR_INVALID_STATE;
-    }
-    tracing = enable;
-    return ESP_OK;
-}
-
-esp_err_t heap_trace_stop(void)
-{
-    return set_tracing(false);
-}
-
-esp_err_t heap_trace_resume(void)
-{
-    return set_tracing(true);
-}
-
-size_t heap_trace_get_count(void)
-{
-    return count;
-}
-
-esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record)
-{
-    if (record == NULL) {
-        return ESP_ERR_INVALID_STATE;
-    }
-    esp_err_t result = ESP_OK;
-
-    portENTER_CRITICAL(&trace_mux);
-    if (index >= count) {
-        result = ESP_ERR_INVALID_ARG; /* out of range for 'count' */
-    } else {
-        memcpy(record, &buffer[index], sizeof(heap_trace_record_t));
-    }
-    portEXIT_CRITICAL(&trace_mux);
-    return result;
-}
-
-
-void heap_trace_dump(void)
-{
-    size_t delta_size = 0;
-    size_t delta_allocs = 0;
-    printf("%u allocations trace (%u entry buffer)\n",
-           count, total_records);
-    size_t start_count = count;
-    for (int i = 0; i < count; i++) {
-        heap_trace_record_t *rec = &buffer[i];
-
-        if (rec->address != NULL) {
-            printf("%d bytes (@ %p) allocated CPU %d ccount 0x%08x caller ",
-                   rec->size, rec->address, rec->ccount & 1, rec->ccount & ~3);
-            for (int j = 0; j < STACK_DEPTH && rec->alloced_by[j] != 0; j++) {
-                printf("%p%s", rec->alloced_by[j],
-                       (j < STACK_DEPTH - 1) ? ":" : "");
-            }
-
-            if (mode != HEAP_TRACE_ALL || STACK_DEPTH == 0 || rec->freed_by[0] == NULL) {
-                delta_size += rec->size;
-                delta_allocs++;
-                printf("\n");
-            } else {
-                printf("\nfreed by ");
-                for (int j = 0; j < STACK_DEPTH; j++) {
-                    printf("%p%s", rec->freed_by[j],
-                           (j < STACK_DEPTH - 1) ? ":" : "\n");
-                }
-            }
-        }
-    }
-    if (mode == HEAP_TRACE_ALL) {
-        printf("%u bytes alive in trace (%u/%u allocations)\n",
-               delta_size, delta_allocs, heap_trace_get_count());
-    } else {
-        printf("%u bytes 'leaked' in trace (%u allocations)\n", delta_size, delta_allocs);
-    }
-    printf("total allocations %u total frees %u\n", total_allocations, total_frees);
-    if (start_count != count) { // only a problem if trace isn't stopped before dumping
-        printf("(NB: New entries were traced while dumping, so trace dump may have duplicate entries.)\n");
-    }
-    if (has_overflowed) {
-        printf("(NB: Buffer has overflowed, so trace data is incomplete.)\n");
-    }
-}
-
-/* Add a new allocation to the heap trace records */
-static IRAM_ATTR void record_allocation(const heap_trace_record_t *record)
-{
-    if (!tracing || record->address == NULL) {
-        return;
-    }
-
-    portENTER_CRITICAL(&trace_mux);
-    if (tracing) {
-        if (count == total_records) {
-            has_overflowed = true;
-            /* Move the whole buffer back one slot.
-
-               This is a bit slow, compared to treating this buffer as a ringbuffer and rotating a head pointer.
-
-               However, ringbuffer code gets tricky when we remove elements in mid-buffer (for leak trace mode) while
-               trying to keep track of an item count that may overflow.
-            */
-            memmove(&buffer[0], &buffer[1], sizeof(heap_trace_record_t) * (total_records -1));
-            count--;
-        }
-        // Copy new record into place
-        memcpy(&buffer[count], record, sizeof(heap_trace_record_t));
-        count++;
-        total_allocations++;
-    }
-    portEXIT_CRITICAL(&trace_mux);
-}
-
-// remove a record, used when freeing
-static void remove_record(int index);
-
-/* record a free event in the heap trace log
-
-   For HEAP_TRACE_ALL, this means filling in the freed_by pointer.
-   For HEAP_TRACE_LEAKS, this means removing the record from the log.
-*/
-static IRAM_ATTR void record_free(void *p, void **callers)
-{
-    if (!tracing || p == NULL) {
-        return;
-    }
-
-    portENTER_CRITICAL(&trace_mux);
-    if (tracing && count > 0) {
-        total_frees++;
-        /* search backwards for the allocation record matching this free */
-        int i;
-        for (i = count - 1; i >= 0; i--) {
-            if (buffer[i].address == p) {
-                break;
-            }
-        }
-
-        if (i >= 0) {
-            if (mode == HEAP_TRACE_ALL) {
-                memcpy(buffer[i].freed_by, callers, sizeof(void *) * STACK_DEPTH);
-            } else { // HEAP_TRACE_LEAKS
-                // Leak trace mode, once an allocation is freed we remove it from the list
-                remove_record(i);
-            }
-        }
-    }
-    portEXIT_CRITICAL(&trace_mux);
-}
-
-/* remove the entry at 'index' from the ringbuffer of saved records */
-static IRAM_ATTR void remove_record(int index)
-{
-    if (index < count - 1) {
-        // Remove the buffer entry from the list
-        memmove(&buffer[index], &buffer[index+1],
-                sizeof(heap_trace_record_t) * (total_records - index - 1));
-    } else {
-        // For last element, just zero it out to avoid ambiguity
-        memset(&buffer[index], 0, sizeof(heap_trace_record_t));
-    }
-    count--;
-}
-
-#include "heap_trace.inc"
-
-#endif /*CONFIG_HEAP_TRACING_STANDALONE*/
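
For reference, the standalone tracer removed here is driven as below
(requires CONFIG_HEAP_TRACING_STANDALONE; the record count and the traced
function are illustrative):

    #include "esp_err.h"
    #include "esp_heap_trace.h"

    #define NUM_RECORDS 100
    static heap_trace_record_t trace_buffer[NUM_RECORDS];

    void find_leaks(void)
    {
        ESP_ERROR_CHECK(heap_trace_init_standalone(trace_buffer, NUM_RECORDS));
        ESP_ERROR_CHECK(heap_trace_start(HEAP_TRACE_LEAKS));

        code_under_test();  /* hypothetical function being checked */

        ESP_ERROR_CHECK(heap_trace_stop());
        heap_trace_dump();  /* prints allocations that were never freed */
    }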

+ 0 - 402
components/heap/include/esp_heap_caps.h

@@ -1,402 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-#include <stdint.h>
-#include <stdlib.h>
-#include "multi_heap.h"
-#include <sdkconfig.h>
-#include "esp_err.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @brief Flags to indicate the capabilities of the various memory systems
- */
-#define MALLOC_CAP_EXEC             (1<<0)  ///< Memory must be able to run executable code
-#define MALLOC_CAP_32BIT            (1<<1)  ///< Memory must allow for aligned 32-bit data accesses
-#define MALLOC_CAP_8BIT             (1<<2)  ///< Memory must allow for 8/16/...-bit data accesses
-#define MALLOC_CAP_DMA              (1<<3)  ///< Memory must be able to be accessed by DMA
-#define MALLOC_CAP_PID2             (1<<4)  ///< Memory must be mapped to PID2 memory space (PIDs are not currently used)
-#define MALLOC_CAP_PID3             (1<<5)  ///< Memory must be mapped to PID3 memory space (PIDs are not currently used)
-#define MALLOC_CAP_PID4             (1<<6)  ///< Memory must be mapped to PID4 memory space (PIDs are not currently used)
-#define MALLOC_CAP_PID5             (1<<7)  ///< Memory must be mapped to PID5 memory space (PIDs are not currently used)
-#define MALLOC_CAP_PID6             (1<<8)  ///< Memory must be mapped to PID6 memory space (PIDs are not currently used)
-#define MALLOC_CAP_PID7             (1<<9)  ///< Memory must be mapped to PID7 memory space (PIDs are not currently used)
-#define MALLOC_CAP_SPIRAM           (1<<10) ///< Memory must be in SPI RAM
-#define MALLOC_CAP_INTERNAL         (1<<11) ///< Memory must be internal; specifically it should not disappear when flash/spiram cache is switched off
-#define MALLOC_CAP_DEFAULT          (1<<12) ///< Memory can be returned in a non-capability-specific memory allocation (e.g. malloc(), calloc()) call
-#define MALLOC_CAP_IRAM_8BIT        (1<<13) ///< Memory must be in IRAM and allow unaligned access
-#define MALLOC_CAP_RETENTION        (1<<14)
-
-#define MALLOC_CAP_INVALID          (1<<31) ///< Memory can't be used / list end marker
-
-/**
- * @brief Callback invoked when an allocation operation fails, if registered
- * @param size in bytes of failed allocation
- * @param caps capabilities requested of failed allocation
- * @param function_name function which generated the failure
- */
-typedef void (*esp_alloc_failed_hook_t) (size_t size, uint32_t caps, const char * function_name);
-
-/**
- * @brief registers a callback function to be invoked if a memory allocation operation fails
- * @param callback caller defined callback to be invoked
- * @return ESP_OK if callback was registered.
- */
-esp_err_t heap_caps_register_failed_alloc_callback(esp_alloc_failed_hook_t callback);
-
-/**
- * @brief Allocate a chunk of memory which has the given capabilities
- *
- * Equivalent semantics to libc malloc(), for capability-aware memory.
- *
- * In IDF, ``malloc(p)`` is equivalent to ``heap_caps_malloc(p, MALLOC_CAP_8BIT)``.
- *
- * @param size Size, in bytes, of the amount of memory to allocate
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory to be returned
- *
- * @return A pointer to the memory allocated on success, NULL on failure
- */
-void *heap_caps_malloc(size_t size, uint32_t caps);
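
A minimal usage sketch, assuming a driver needs a DMA-capable scratch buffer (the helper name and the caller-chosen size are illustrative):

```c
#include <stdint.h>
#include <stddef.h>
#include "esp_heap_caps.h"

// Illustrative: grab a DMA-capable scratch buffer.
static uint8_t *alloc_dma_scratch(size_t len)
{
    // MALLOC_CAP_DMA restricts the allocation to memory a DMA engine can reach.
    uint8_t *buf = heap_caps_malloc(len, MALLOC_CAP_DMA | MALLOC_CAP_8BIT);
    return buf; // NULL if no suitable region had 'len' bytes free
}
```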
-
-
-/**
- * @brief Free memory previously allocated via heap_caps_malloc() or heap_caps_realloc().
- *
- * Equivalent semantics to libc free(), for capability-aware memory.
- *
- *  In IDF, ``free(p)`` is equivalent to ``heap_caps_free(p)``.
- *
- * @param ptr Pointer to memory previously returned from heap_caps_malloc() or heap_caps_realloc(). Can be NULL.
- */
-void heap_caps_free( void *ptr);
-
-/**
- * @brief Reallocate memory previously allocated via heap_caps_malloc() or heap_caps_realloc().
- *
- * Equivalent semantics to libc realloc(), for capability-aware memory.
- *
- * In IDF, ``realloc(p, s)`` is equivalent to ``heap_caps_realloc(p, s, MALLOC_CAP_8BIT)``.
- *
- * 'caps' parameter can be different to the capabilities that any original 'ptr' was allocated with. In this way,
- * realloc can be used to "move" a buffer if necessary to ensure it meets a new set of capabilities.
- *
- * @param ptr Pointer to previously allocated memory, or NULL for a new allocation.
- * @param size Size of the new buffer requested, or 0 to free the buffer.
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory desired for the new allocation.
- *
- * @return Pointer to a new buffer of size 'size' with capabilities 'caps', or NULL if allocation failed.
- */
-void *heap_caps_realloc( void *ptr, size_t size, uint32_t caps);
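
Because `caps` may differ from those of the original allocation, realloc doubles as a "move" primitive. A hedged sketch that migrates a buffer to external RAM (the SPIRAM target is an example choice):

```c
#include <stddef.h>
#include "esp_heap_caps.h"

// Illustrative: grow a buffer and migrate it to external RAM in one call.
// On failure NULL is returned and the original pointer stays valid.
static void *grow_into_spiram(void *p, size_t new_size)
{
    return heap_caps_realloc(p, new_size, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);
}
```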
-
-/**
- * @brief Allocate an aligned chunk of memory which has the given capabilities
- *
- * Equivalent semantics to libc aligned_alloc(), for capability-aware memory.
- * @param alignment  How the pointer received needs to be aligned
- *                   must be a power of two
- * @param size Size, in bytes, of the amount of memory to allocate
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory to be returned
- *
- * @return A pointer to the memory allocated on success, NULL on failure
- *
- *
- */
-void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps);
-
-/**
- * @brief Used to deallocate memory previously allocated with heap_caps_aligned_alloc
- *
- * @param ptr Pointer to the memory allocated
- * @note This function is deprecated, please consider using heap_caps_free() instead
- */
-void __attribute__((deprecated))  heap_caps_aligned_free(void *ptr);
-
-/**
- * @brief Allocate an aligned chunk of memory which has the given capabilities. The allocated memory is initialized to zero.
- *
- * @param alignment  How the pointer received needs to be aligned
- *                   must be a power of two
- * @param n    Number of contiguous chunks of memory to allocate
- * @param size Size, in bytes, of a chunk of memory to allocate
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory to be returned
- *
- * @return A pointer to the memory allocated on success, NULL on failure
- *
- */
-void *heap_caps_aligned_calloc(size_t alignment, size_t n, size_t size, uint32_t caps);
-
-
-/**
- * @brief Allocate a chunk of memory which has the given capabilities. The allocated memory is initialized to zero.
- *
- * Equivalent semantics to libc calloc(), for capability-aware memory.
- *
- * In IDF, ``calloc(n, s)`` is equivalent to ``heap_caps_calloc(n, s, MALLOC_CAP_8BIT)``.
- *
- * @param n    Number of contiguous chunks of memory to allocate
- * @param size Size, in bytes, of a chunk of memory to allocate
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory to be returned
- *
- * @return A pointer to the memory allocated on success, NULL on failure
- */
-void *heap_caps_calloc(size_t n, size_t size, uint32_t caps);
-
-/**
- * @brief Get the total size of all the regions that have the given capabilities
- *
- * This function takes all regions capable of having the given capabilities allocated in them
- * and adds up the total space they have.
- *
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory
- *
- * @return total size in bytes
- */
-
-size_t heap_caps_get_total_size(uint32_t caps);
-
-/**
- * @brief Get the total free size of all the regions that have the given capabilities
- *
- * This function takes all regions capable of having the given capabilities allocated in them
- * and adds up the free space they have.
- *
- * Note that because of heap fragmentation it is probably not possible to allocate a single block of memory
- * of this size. Use heap_caps_get_largest_free_block() for this purpose.
- *
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory
- *
- * @return Amount of free bytes in the regions
- */
-size_t heap_caps_get_free_size( uint32_t caps );
-
-
-/**
- * @brief Get the total minimum free memory of all regions with the given capabilities
- *
- * This adds all the low water marks of the regions capable of delivering the memory
- * with the given capabilities.
- *
- * Note the result may be less than the global all-time minimum available heap of this kind, as "low water marks" are
- * tracked per-region. Individual regions' heaps may have reached their "low water marks" at different points in time. However
- * this result still gives a "worst case" indication for all-time minimum free heap.
- *
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory
- *
- * @return Amount of free bytes in the regions
- */
-size_t heap_caps_get_minimum_free_size( uint32_t caps );
-
-/**
- * @brief Get the largest free block of memory able to be allocated with the given capabilities.
- *
- * Returns the largest value of ``s`` for which ``heap_caps_malloc(s, caps)`` will succeed.
- *
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory
- *
- * @return Size of largest free block in bytes.
- */
-size_t heap_caps_get_largest_free_block( uint32_t caps );
-
-
-/**
- * @brief Get heap info for all regions with the given capabilities.
- *
- * Calls multi_heap_get_info() on all heaps which share the given capabilities.  The information returned is an aggregate
- * across all matching heaps.  The meanings of fields are the same as defined for multi_heap_info_t, except that
- * ``minimum_free_bytes`` has the same caveats described in heap_caps_get_minimum_free_size().
- *
- * @param info        Pointer to a structure which will be filled with relevant
- *                    heap metadata.
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory
- *
- */
-void heap_caps_get_info( multi_heap_info_t *info, uint32_t caps );
-
-
-/**
- * @brief Print a summary of all memory with the given capabilities.
- *
- * Calls multi_heap_get_info() on all heaps which share the given capabilities, and
- * prints a two-line summary for each, then a total summary.
- *
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory
- *
- */
-void heap_caps_print_heap_info( uint32_t caps );
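
The query functions above are often combined into a single periodic health log. A minimal sketch (the capability mask is an example choice):

```c
#include <stdio.h>
#include "esp_heap_caps.h"

// Illustrative heap-health snapshot for internal 8-bit-capable memory.
static void log_heap_health(void)
{
    const uint32_t caps = MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT;
    printf("heap: free=%u min=%u largest=%u\n",
           (unsigned) heap_caps_get_free_size(caps),
           (unsigned) heap_caps_get_minimum_free_size(caps),
           (unsigned) heap_caps_get_largest_free_block(caps));
}
```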
-
-/**
- * @brief Check integrity of all heap memory in the system.
- *
- * Calls multi_heap_check on all heaps. Optionally print errors if heaps are corrupt.
- *
- * Calling this function is equivalent to calling heap_caps_check_integrity
- * with the caps argument set to MALLOC_CAP_INVALID.
- *
- * @param print_errors Print specific errors if heap corruption is found.
- *
- * @return True if all heaps are valid, False if at least one heap is corrupt.
- */
-bool heap_caps_check_integrity_all(bool print_errors);
-
-/**
- * @brief Check integrity of all heaps with the given capabilities.
- *
- * Calls multi_heap_check on all heaps which share the given capabilities. Optionally
- * print errors if the heaps are corrupt.
- *
- * See also heap_caps_check_integrity_all to check all heap memory
- * in the system and heap_caps_check_integrity_addr to check memory
- * around a single address.
- *
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory
- * @param print_errors Print specific errors if heap corruption is found.
- *
- * @return True if all heaps are valid, False if at least one heap is corrupt.
- */
-bool heap_caps_check_integrity(uint32_t caps, bool print_errors);
-
-/**
- * @brief Check integrity of heap memory around a given address.
- *
- * This function can be used to check the integrity of a single region of heap memory,
- * which contains the given address.
- *
- * This can be useful if debugging heap integrity for corruption at a known address,
- * as it has a lower overhead than checking all heap regions. Note that if the corrupt
- * address moves around between runs (due to timing or other factors) then this approach
- * won't work and you should call heap_caps_check_integrity or
- * heap_caps_check_integrity_all instead.
- *
- * @note The entire heap region around the address is checked, not only the adjacent
- * heap blocks.
- *
- * @param addr Address in memory. Check for corruption in region containing this address.
- * @param print_errors Print specific errors if heap corruption is found.
- *
- * @return True if the heap containing the specified address is valid,
- * False if at least one heap is corrupt or the address doesn't belong to a heap region.
- */
-bool heap_caps_check_integrity_addr(intptr_t addr, bool print_errors);
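
When the corrupt address is stable between runs, the targeted check slots into a debug path like this (a sketch; the `suspect` pointer is illustrative):

```c
#include <assert.h>
#include <stdint.h>
#include "esp_heap_caps.h"

// Illustrative: validate only the heap region around a known-bad address.
static void check_around(void *suspect)
{
    bool ok = heap_caps_check_integrity_addr((intptr_t) suspect, true);
    assert(ok && "heap corruption near suspect address");
}
```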
-
-/**
- * @brief Enable malloc() in external memory and set limit below which
- *        malloc() attempts are placed in internal memory.
- *
- * When external memory is in use, the allocation strategy is to initially try to
- * satisfy smaller allocation requests with internal memory and larger requests
- * with external memory. This sets the limit between the two, as well as generally
- * enabling allocation in external memory.
- *
- * @param limit       Limit, in bytes.
- */
-void heap_caps_malloc_extmem_enable(size_t limit);
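
A sketch of the intended call pattern; the 16 kB threshold is an arbitrary example value:

```c
#include "esp_heap_caps.h"

static void app_heap_policy_init(void)
{
    // After this, plain malloc() requests of 16 kB or more may be served
    // from external RAM, while smaller ones are tried internally first.
    heap_caps_malloc_extmem_enable(16 * 1024);
}
```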
-
-/**
- * @brief Allocate a chunk of memory as preference in decreasing order.
- *
- * @attention The variable parameters are bitwise OR of MALLOC_CAP_* flags indicating the type of memory.
- *            This API prefers to allocate memory with the first parameter. If failed, allocate memory with
- *            the next parameter. It will try in this order until allocating a chunk of memory successfully
- *            or fail to allocate memories with any of the parameters.
- *
- * @param size Size, in bytes, of the amount of memory to allocate
- * @param num Number of variable parameters
- *
- * @return A pointer to the memory allocated on success, NULL on failure
- */
-void *heap_caps_malloc_prefer( size_t size, size_t num, ... );
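
The variadic arguments carry `num` capability words, tried in order. A hedged sketch that prefers SPIRAM and falls back to any 8-bit-capable memory:

```c
#include <stddef.h>
#include "esp_heap_caps.h"

// Illustrative: try SPIRAM first, then fall back to any 8-bit-capable memory.
static void *alloc_big_buffer(size_t size)
{
    return heap_caps_malloc_prefer(size, 2,
                                   MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT,
                                   MALLOC_CAP_8BIT);
}
```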
-
-/**
- * @brief Allocate a chunk of memory as preference in decreasing order.
- *
- * @param ptr Pointer to previously allocated memory, or NULL for a new allocation.
- * @param size Size of the new buffer requested, or 0 to free the buffer.
- * @param num Number of variable parameters
- *
- * @return Pointer to a new buffer of size 'size', or NULL if allocation failed.
- */
-void *heap_caps_realloc_prefer( void *ptr, size_t size, size_t num, ... );
-
-/**
- * @brief Allocate a chunk of memory as preference in decreasing order.
- *
- * @param n    Number of contiguous chunks of memory to allocate
- * @param size Size, in bytes, of a chunk of memory to allocate
- * @param num  Number of variable parameters
- *
- * @return A pointer to the memory allocated on success, NULL on failure
- */
-void *heap_caps_calloc_prefer( size_t n, size_t size, size_t num, ... );
-
-/**
- * @brief Dump the full structure of all heaps with matching capabilities.
- *
- * Prints a large amount of output to serial (because of locking limitations,
- * the output bypasses stdout/stderr). For each (variable sized) block
- * in each matching heap, the following output is printed on a single line:
- *
- * - Block address (the data buffer returned by malloc is 4 bytes after this
- *   if heap debugging is set to Basic, or 8 bytes otherwise).
- * - Data size (the data size may be larger than the size requested by malloc,
- *   either due to heap fragmentation or because of heap debugging level).
- * - Address of next block in the heap.
- * - If the block is free, the address of the next free block is also printed.
- *
- * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
- *                    of memory
- */
-void heap_caps_dump(uint32_t caps);
-
-/**
- * @brief Dump the full structure of all heaps.
- *
- * Covers all registered heaps. Prints a large amount of output to serial.
- *
- * Output is the same as for heap_caps_dump.
- *
- */
-void heap_caps_dump_all(void);
-
-/**
- * @brief Return the size that a particular pointer was allocated with.
- *
- * @param ptr Pointer to currently allocated heap memory. Must be a pointer value previously
- * returned by heap_caps_malloc(), malloc(), calloc(), etc., and not yet freed.
- *
- * @note The app will crash with an assertion failure if the pointer is not valid.
- *
- * @return Size of the memory allocated at this block.
- *
- */
-size_t heap_caps_get_allocated_size( void *ptr );
-
-#ifdef __cplusplus
-}
-#endif

+ 0 - 92
components/heap/include/esp_heap_caps_init.h

@@ -1,92 +0,0 @@
-// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-#include "esp_err.h"
-#include "esp_heap_caps.h"
-#include "soc/soc_memory_layout.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @brief Initialize the capability-aware heap allocator.
- *
- * This is called once in the IDF startup code. Do not call it
- * at other times.
- */
-void heap_caps_init(void);
-
-/**
- * @brief Enable heap(s) in memory regions where the startup stacks are located.
- *
- * On startup, the pro/app CPUs have a certain memory region they use as stack, so we
- * cannot do allocations in the regions these stack frames are. When FreeRTOS is
- * completely started, they do not use that memory anymore and heap(s) there can
- * be enabled.
- */
-void heap_caps_enable_nonos_stack_heaps(void);
-
-/**
- * @brief Add a region of memory to the collection of heaps at runtime.
- *
- * Most memory regions are defined in soc_memory_layout.c for the SoC,
- * and are registered via heap_caps_init(). Some regions can't be used
- * immediately and are later enabled via heap_caps_enable_nonos_stack_heaps().
- *
- * Call this function to add a region of memory to the heap at some later time.
- *
- * This function does not consider any of the "reserved" regions or other data in soc_memory_layout; the caller needs to
- * consider these themselves.
- *
- * All memory within the region specified by start & end parameters must be otherwise unused.
- *
- * The capabilities of the newly registered memory will be determined by the start address, as looked up in the regions
- * specified in soc_memory_layout.c.
- *
- * Use heap_caps_add_region_with_caps() to register a region with custom capabilities.
- *
- * @param start Start address of new region.
- * @param end End address of new region.
- *
- * @return ESP_OK on success, ESP_ERR_INVALID_ARG if a parameter is invalid, ESP_ERR_NOT_FOUND if the
- * specified start address doesn't reside in a known region, or any error returned by heap_caps_add_region_with_caps().
- */
-esp_err_t heap_caps_add_region(intptr_t start, intptr_t end);
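
A hedged sketch of the call pattern; the address range is a placeholder, not a real memory layout:

```c
#include "esp_heap_caps_init.h"

// Illustrative: hand a freed-up memory range (placeholder addresses)
// back to the allocator once it is no longer used for anything else.
static void reclaim_region(void)
{
    esp_err_t err = heap_caps_add_region(0x3F800000, 0x3F900000);
    if (err != ESP_OK) {
        // Range not in a known region, overlapping, or too small for a heap.
    }
}
```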
-
-
-/**
- * @brief Add a region of memory to the collection of heaps at runtime, with custom capabilities.
- *
- * Similar to heap_caps_add_region(), only custom memory capabilities are specified by the caller.
- *
- * @param caps Ordered array of capability masks for the new region, in order of priority. Must have length
- * SOC_MEMORY_TYPE_NO_PRIOS. Does not need to remain valid after the call returns.
- * @param start Start address of new region.
- * @param end End address of new region.
- *
- * @return
- *         - ESP_OK on success
- *         - ESP_ERR_INVALID_ARG if a parameter is invalid
- *         - ESP_ERR_NO_MEM if no memory to register new heap.
- *         - ESP_ERR_INVALID_SIZE if the memory region is too small to fit a heap
- *         - ESP_FAIL if region overlaps the start and/or end of an existing region
- */
-esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end);
-
-
-#ifdef __cplusplus
-}
-#endif

+ 0 - 98
components/heap/include/esp_heap_task_info.h

@@ -1,98 +0,0 @@
-// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-#ifdef CONFIG_HEAP_TASK_TRACKING
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// This macro controls how much space is provided for partitioning the per-task
-// heap allocation info according to one or more sets of heap capabilities.
-#define NUM_HEAP_TASK_CAPS 4
-
-/** @brief Structure to collect per-task heap allocation totals partitioned by selected caps */
-typedef struct {
-    TaskHandle_t task;                ///< Task to which these totals belong
-    size_t size[NUM_HEAP_TASK_CAPS];  ///< Total allocations partitioned by selected caps
-    size_t count[NUM_HEAP_TASK_CAPS]; ///< Number of blocks partitioned by selected caps
-} heap_task_totals_t;
-
-/** @brief Structure providing details about a block allocated by a task */
-typedef struct {
-    TaskHandle_t task;                ///< Task that allocated the block
-    void *address;                    ///< User address of allocated block
-    uint32_t size;                    ///< Size of the allocated block
-} heap_task_block_t;
-
-/** @brief Structure to provide parameters to heap_caps_get_per_task_info
- *
- * The 'caps' and 'mask' arrays allow partitioning the per-task heap allocation
- * totals by selected sets of heap region capabilities so that totals for
- * multiple regions can be accumulated in one scan.  The capabilities flags for
- * each region ANDed with mask[i] are compared to caps[i] in order; the
- * allocations in that region are added to totals->size[i] and totals->count[i]
- * for the first i that matches.  To collect the totals without any
- * partitioning, set mask[0] and caps[0] both to zero.  The allocation totals
- * are returned in the 'totals' array of heap_task_totals_t structs.  To allow
- * easily comparing the totals array between consecutive calls, that array can
- * be left populated from one call to the next so the order of tasks is the
- * same even if some tasks have freed their blocks or have been deleted.  The
- * number of blocks prepopulated is given by num_totals, which is updated upon
- * return.  If there are more tasks with allocations than the capacity of the
- * totals array (given by max_totals), information for the excess tasks will
- * not be collected.  The totals array pointer can be NULL if the totals are
- * not desired.
- *
- * The 'tasks' array holds a list of handles for tasks whose block details are
- * to be returned in the 'blocks' array of heap_task_block_t structs.  If the
- * tasks array pointer is NULL, block details for all tasks will be returned up
- * to the capacity of the buffer array, given by max_blocks.  The function
- * return value tells the number of blocks filled into the array.  The blocks
- * array pointer can be NULL if block details are not desired, or max_blocks
- * can be set to zero.
- */
-typedef struct {
-    int32_t caps[NUM_HEAP_TASK_CAPS]; ///< Array of caps for partitioning task totals
-    int32_t mask[NUM_HEAP_TASK_CAPS]; ///< Array of masks under which caps must match
-    TaskHandle_t *tasks;              ///< Array of tasks whose block info is returned
-    size_t num_tasks;                 ///< Length of tasks array
-    heap_task_totals_t *totals;       ///< Array of structs to collect task totals
-    size_t *num_totals;               ///< Number of task structs currently in array
-    size_t max_totals;                ///< Capacity of array of task totals structs
-    heap_task_block_t *blocks;        ///< Array of task block details structs
-    size_t max_blocks;                ///< Capacity of array of task block info structs
-} heap_task_info_params_t;
-
-/**
- * @brief Return per-task heap allocation totals and lists of blocks.
- *
- * For each task that has allocated memory from the heap, return totals for
- * allocations within regions matching one or more sets of capabilities.
- *
- * Optionally also return an array of structs providing details about each
- * block allocated by one or more requested tasks, or by all tasks.
- *
- * @param params Structure to hold all the parameters for the function
- * (@see heap_task_info_params_t).
- * @return Number of block detail structs returned (@see heap_task_block_t).
- */
-extern size_t heap_caps_get_per_task_info(heap_task_info_params_t *params);
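
Given the density of the parameter description above, a minimal sketch of one unpartitioned scan may help (requires CONFIG_HEAP_TASK_TRACKING; the array size is arbitrary):

```c
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_heap_task_info.h"

#define MAX_TASKS 16

// Illustrative: collect per-task totals without capability partitioning.
static void dump_task_totals(void)
{
    static heap_task_totals_t totals[MAX_TASKS];
    size_t num_totals = 0;
    heap_task_info_params_t params = {
        .caps = { 0 },   // caps[0] == mask[0] == 0:
        .mask = { 0 },   // no partitioning, one bucket per task
        .totals = totals,
        .num_totals = &num_totals,
        .max_totals = MAX_TASKS,
        .tasks = NULL, .num_tasks = 0,     // no per-block details requested
        .blocks = NULL, .max_blocks = 0,
    };
    heap_caps_get_per_task_info(&params);
    // totals[0..num_totals-1] now hold per-task allocation sums.
}
```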
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // CONFIG_HEAP_TASK_TRACKING

+ 0 - 154
components/heap/include/esp_heap_trace.h

@@ -1,154 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-#include "sdkconfig.h"
-#include <stdint.h>
-#include <esp_err.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if !defined(CONFIG_HEAP_TRACING) && !defined(HEAP_TRACE_SRCFILE)
-#warning "esp_heap_trace.h is included but heap tracing is disabled in menuconfig, functions are no-ops"
-#endif
-
-#ifndef CONFIG_HEAP_TRACING_STACK_DEPTH
-#define CONFIG_HEAP_TRACING_STACK_DEPTH 0
-#endif
-
-typedef enum {
-    HEAP_TRACE_ALL,
-    HEAP_TRACE_LEAKS,
-} heap_trace_mode_t;
-
-/**
- * @brief Trace record data type. Stores information about an allocated region of memory.
- */
-typedef struct {
-    uint32_t ccount; ///< CCOUNT of the CPU when the allocation was made. LSB (bit value 1) is the CPU number (0 or 1).
-    void *address;   ///< Address which was allocated
-    size_t size;     ///< Size of the allocation
-    void *alloced_by[CONFIG_HEAP_TRACING_STACK_DEPTH]; ///< Call stack of the caller which allocated the memory.
-    void *freed_by[CONFIG_HEAP_TRACING_STACK_DEPTH];   ///< Call stack of the caller which freed the memory (all zero if not freed.)
-} heap_trace_record_t;
-
-/**
- * @brief Initialise heap tracing in standalone mode.
- *
- * This function must be called before any other heap tracing functions.
- *
- * To disable heap tracing and allow the buffer to be freed, stop tracing and then call heap_trace_init_standalone(NULL, 0);
- *
- * @param record_buffer Provide a buffer to use for heap trace data. Must remain valid any time heap tracing is enabled, meaning
- * it must be allocated from internal memory, not PSRAM.
- * @param num_records Size of the heap trace buffer, as number of record structures.
- * @return
- *  - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
- *  - ESP_ERR_INVALID_STATE Heap tracing is currently in progress.
- *  - ESP_OK Heap tracing initialised successfully.
- */
-esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records);
-
-/**
- * @brief Initialise heap tracing in host-based mode.
- *
- * This function must be called before any other heap tracing functions.
- *
- * @return
- *  - ESP_ERR_INVALID_STATE Heap tracing is currently in progress.
- *  - ESP_OK Heap tracing initialised successfully.
- */
-esp_err_t heap_trace_init_tohost(void);
-
-/**
- * @brief Start heap tracing. All heap allocations & frees will be traced, until heap_trace_stop() is called.
- *
- * @note heap_trace_init_standalone() must be called to provide a valid buffer, before this function is called.
- *
- * @note Calling this function while heap tracing is running will reset the heap trace state and continue tracing.
- *
- * @param mode Mode for tracing.
- * - HEAP_TRACE_ALL means all heap allocations and frees are traced.
- * - HEAP_TRACE_LEAKS means only suspected memory leaks are traced. (When memory is freed, the record is removed from the trace buffer.)
- * @return
- * - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
- * - ESP_ERR_INVALID_STATE A non-zero-length buffer has not been set via heap_trace_init_standalone().
- * - ESP_OK Tracing is started.
- */
-esp_err_t heap_trace_start(heap_trace_mode_t mode);
-
-/**
- * @brief Stop heap tracing.
- *
- * @return
- * - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
- * - ESP_ERR_INVALID_STATE Heap tracing was not in progress.
- * - ESP_OK Heap tracing stopped.
- */
-esp_err_t heap_trace_stop(void);
-
-/**
- * @brief Resume heap tracing which was previously stopped.
- *
- * Unlike heap_trace_start(), this function does not clear the
- * buffer of any pre-existing trace records.
- *
- * The heap trace mode is the same as when heap_trace_start() was
- * last called (or HEAP_TRACE_ALL if heap_trace_start() was never called).
- *
- * @return
- * - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
- * - ESP_ERR_INVALID_STATE Heap tracing was already started.
- * - ESP_OK Heap tracing resumed.
- */
-esp_err_t heap_trace_resume(void);
-
-/**
- * @brief Return number of records in the heap trace buffer
- *
- * It is safe to call this function while heap tracing is running.
- */
-size_t heap_trace_get_count(void);
-
-/**
- * @brief Return a raw record from the heap trace buffer
- *
- * @note It is safe to call this function while heap tracing is running; however, in HEAP_TRACE_LEAKS mode record indexing may
- * skip entries unless heap tracing is stopped first.
- *
- * @param index Index (zero-based) of the record to return.
- * @param[out] record Record where the heap trace record will be copied.
- * @return
- * - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
- * - ESP_ERR_INVALID_STATE Heap tracing was not initialised.
- * - ESP_ERR_INVALID_ARG Index is out of bounds for current heap trace record count.
- * - ESP_OK Record returned successfully.
- */
-esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record);
-
-/**
- * @brief Dump heap trace record data to stdout
- *
- * @note It is safe to call this function while heap tracing is running; however, in HEAP_TRACE_LEAKS mode the dump may skip
- * entries unless heap tracing is stopped first.
- *
- */
-void heap_trace_dump(void);
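
Taken together, the standalone-mode functions above are meant to bracket suspect code. A minimal sketch, assuming heap tracing is enabled in menuconfig and a 100-record buffer (both values illustrative):

```c
#include "esp_heap_trace.h"

#define NUM_RECORDS 100
// Buffer must live in internal memory for the whole tracing session.
static heap_trace_record_t trace_buf[NUM_RECORDS];

static void find_leaks_around(void (*suspect)(void))
{
    heap_trace_init_standalone(trace_buf, NUM_RECORDS);
    heap_trace_start(HEAP_TRACE_LEAKS);
    suspect();                 // run the code under suspicion
    heap_trace_stop();
    heap_trace_dump();         // whatever is still recorded was never freed
}
```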
-
-#ifdef __cplusplus
-}
-#endif

+ 0 - 200
components/heap/include/heap_trace.inc

@@ -1,200 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include <string.h>
-#include <sdkconfig.h>
-#include "soc/soc_memory_layout.h"
-#include "esp_attr.h"
-
-/* Encode the CPU ID in the LSB of the ccount value */
-inline static uint32_t get_ccount(void)
-{
-    uint32_t ccount = cpu_hal_get_cycle_count() & ~3;
-#ifndef CONFIG_FREERTOS_UNICORE
-    ccount |= xPortGetCoreID();
-#endif
-    return ccount;
-}
-
-/* Architecture-specific return value of __builtin_return_address which
- * should be interpreted as an invalid address.
- */
-#ifdef __XTENSA__
-#define HEAP_ARCH_INVALID_PC  0x40000000
-#else
-#define HEAP_ARCH_INVALID_PC  0x00000000
-#endif
-
-// Caller is 2 stack frames deeper than we care about
-#define STACK_OFFSET  2
-
-#define TEST_STACK(N) do {                                              \
-        if (STACK_DEPTH == N) {                                         \
-            return;                                                     \
-        }                                                               \
-        callers[N] = __builtin_return_address(N+STACK_OFFSET);          \
-        if (!esp_ptr_executable(callers[N])                             \
-            || callers[N] == (void*) HEAP_ARCH_INVALID_PC) {            \
-            callers[N] = 0;                                             \
-            return;                                                     \
-        }                                                               \
-    } while(0)
-
-/* Static function to read the call stack for a traced heap call.
-
-   Calls to __builtin_return_address are "unrolled" via TEST_STACK macro as gcc requires the
-   argument to be a compile-time constant.
-*/
-static IRAM_ATTR __attribute__((noinline)) void get_call_stack(void **callers)
-{
-    bzero(callers, sizeof(void *) * STACK_DEPTH);
-    TEST_STACK(0);
-    TEST_STACK(1);
-    TEST_STACK(2);
-    TEST_STACK(3);
-    TEST_STACK(4);
-    TEST_STACK(5);
-    TEST_STACK(6);
-    TEST_STACK(7);
-    TEST_STACK(8);
-    TEST_STACK(9);
-}
-
-_Static_assert(STACK_DEPTH >= 0 && STACK_DEPTH <= 10, "CONFIG_HEAP_TRACING_STACK_DEPTH must be in range 0-10");
-
-
-typedef enum {
-    TRACE_MALLOC_CAPS,
-    TRACE_MALLOC_DEFAULT
-} trace_malloc_mode_t;
-
-
-void *__real_heap_caps_malloc(size_t size, uint32_t caps);
-void *__real_heap_caps_malloc_default( size_t size );
-void *__real_heap_caps_realloc_default( void *ptr, size_t size );
-
-/* trace any 'malloc' event */
-static IRAM_ATTR __attribute__((noinline)) void *trace_malloc(size_t size, uint32_t caps, trace_malloc_mode_t mode)
-{
-    uint32_t ccount = get_ccount();
-    void *p;
-
-    if ( mode == TRACE_MALLOC_CAPS ) {
-        p = __real_heap_caps_malloc(size, caps);
-    } else { //TRACE_MALLOC_DEFAULT
-        p = __real_heap_caps_malloc_default(size);
-    }
-
-    heap_trace_record_t rec = {
-        .address = p,
-        .ccount = ccount,
-        .size = size,
-    };
-    get_call_stack(rec.alloced_by);
-    record_allocation(&rec);
-    return p;
-}
-
-void __real_heap_caps_free(void *p);
-
-/* trace any 'free' event */
-static IRAM_ATTR __attribute__((noinline)) void trace_free(void *p)
-{
-    void *callers[STACK_DEPTH];
-    get_call_stack(callers);
-    record_free(p, callers);
-
-    __real_heap_caps_free(p);
-}
-
-void * __real_heap_caps_realloc(void *p, size_t size, uint32_t caps);
-
-/* trace any 'realloc' event */
-static IRAM_ATTR __attribute__((noinline)) void *trace_realloc(void *p, size_t size, uint32_t caps, trace_malloc_mode_t mode)
-{
-    void *callers[STACK_DEPTH];
-    uint32_t ccount = get_ccount();
-    void *r;
-
-    /* trace realloc as free-then-alloc */
-    get_call_stack(callers);
-    record_free(p, callers);
-
-    if (mode == TRACE_MALLOC_CAPS ) {
-        r = __real_heap_caps_realloc(p, size, caps);
-    } else { //TRACE_MALLOC_DEFAULT
-        r = __real_heap_caps_realloc_default(p, size);
-    }
-    /* realloc with zero size is a free */
-    if (size != 0) {
-        heap_trace_record_t rec = {
-            .address = r,
-            .ccount = ccount,
-            .size = size,
-        };
-        memcpy(rec.alloced_by, callers, sizeof(void *) * STACK_DEPTH);
-        record_allocation(&rec);
-    }
-    return r;
-}
-
-/* Note: this changes the behaviour of libc malloc/realloc/free a bit,
-   as they no longer go via the libc functions in ROM, but the end result
-   is more or less the same. */
-
-IRAM_ATTR void *__wrap_malloc(size_t size)
-{
-    return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
-}
-
-IRAM_ATTR void __wrap_free(void *p)
-{
-    trace_free(p);
-}
-
-IRAM_ATTR void *__wrap_realloc(void *p, size_t size)
-{
-    return trace_realloc(p, size, 0, TRACE_MALLOC_DEFAULT);
-}
-
-IRAM_ATTR void *__wrap_calloc(size_t nmemb, size_t size)
-{
-    if (__builtin_mul_overflow(nmemb, size, &size)) {
-        return NULL; // nmemb * size would overflow size_t; calloc() must fail
-    }
-    void *result = trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
-    if (result != NULL) {
-        memset(result, 0, size);
-    }
-    return result;
-}
-
-IRAM_ATTR void *__wrap_heap_caps_malloc(size_t size, uint32_t caps)
-{
-    return trace_malloc(size, caps, TRACE_MALLOC_CAPS);
-}
-
-void __wrap_heap_caps_free(void *p) __attribute__((alias("__wrap_free")));
-
-IRAM_ATTR void *__wrap_heap_caps_realloc(void *p, size_t size, uint32_t caps)
-{
-    return trace_realloc(p, size, caps, TRACE_MALLOC_CAPS);
-}
-
-IRAM_ATTR void *__wrap_heap_caps_malloc_default( size_t size )
-{
-    return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
-}
-
-IRAM_ATTR void *__wrap_heap_caps_realloc_default( void *ptr, size_t size )
-{
-    return trace_realloc(ptr, size, 0, TRACE_MALLOC_DEFAULT);
-}
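
These `__wrap_*`/`__real_*` pairs rely on GNU ld symbol wrapping: the build passes `-Wl,--wrap=<function>` for each traced symbol, so callers resolve to the `__wrap_` version, which reaches the original through `__real_`. An indicative (not exhaustive) sketch of the flags involved:

```c
/* Build-system side (illustrative): one --wrap flag per traced function, e.g.
 *
 *   -Wl,--wrap=malloc -Wl,--wrap=free -Wl,--wrap=realloc -Wl,--wrap=calloc
 *   -Wl,--wrap=heap_caps_malloc -Wl,--wrap=heap_caps_free
 *
 * With these flags, a call to malloc() links against __wrap_malloc(),
 * and __real_malloc() refers to the original implementation. */
```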

+ 0 - 190
components/heap/include/multi_heap.h

@@ -1,190 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdbool.h>
-
-/* multi_heap is a heap implementation for handling multiple
-   heterogeneous heaps in a single program.
-
-   Any contiguous block of memory can be registered as a heap.
-*/
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @brief Opaque handle to a registered heap */
-typedef struct multi_heap_info *multi_heap_handle_t;
-
-/**
- * @brief allocate a chunk of memory with specific alignment
- *
- * @param heap  Handle to a registered heap.
- * @param size  size in bytes of memory chunk
- * @param alignment  how the memory must be aligned
- *
- * @return pointer to the memory allocated, NULL on failure
- */
-void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment);
-
-/** @brief malloc() a buffer in a given heap
- *
- * Semantics are the same as standard malloc(), only the returned buffer will be allocated in the specified heap.
- *
- * @param heap Handle to a registered heap.
- * @param size Size of desired buffer.
- *
- * @return Pointer to new memory, or NULL if allocation fails.
- */
-void *multi_heap_malloc(multi_heap_handle_t heap, size_t size);
-
-/** @brief free() an aligned buffer in a given heap.
- *
- * @param heap Handle to a registered heap.
- * @param p NULL, or a pointer previously returned from multi_heap_aligned_alloc() for the same heap.
- * @note This function is deprecated, consider using multi_heap_free() instead
- */
-void __attribute__((deprecated)) multi_heap_aligned_free(multi_heap_handle_t heap, void *p);
-
-/** @brief free() a buffer in a given heap.
- *
- * Semantics are the same as standard free(), only the argument 'p' must be NULL or have been allocated in the specified heap.
- *
- * @param heap Handle to a registered heap.
- * @param p NULL, or a pointer previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap.
- */
-void multi_heap_free(multi_heap_handle_t heap, void *p);
-
-/** @brief realloc() a buffer in a given heap.
- *
- * Semantics are the same as standard realloc(), only the argument 'p' must be NULL or have been allocated in the specified heap.
- *
- * @param heap Handle to a registered heap.
- * @param p NULL, or a pointer previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap.
- * @param size Desired new size for buffer.
- *
- * @return New buffer of 'size' containing contents of 'p', or NULL if reallocation failed.
- */
-void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size);
-
-
-/** @brief Return the size that a particular pointer was allocated with.
- *
- * @param heap Handle to a registered heap.
- * @param p Pointer, must have been previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap.
- *
- * @return Size of the memory allocated at this block. May be more than the original size argument, due
- * to padding and minimum block sizes.
- */
-size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p);
-
-
-/** @brief Register a new heap for use
- *
- * This function initialises a heap at the specified address, and returns a handle for future heap operations.
- *
- * There is no equivalent function for deregistering a heap - if all blocks in the heap are free, you can immediately start using the memory for other purposes.
- *
- * @param start Start address of the memory to use for a new heap.
- * @param size Size (in bytes) of the new heap.
- *
- * @return Handle of a new heap ready for use, or NULL if the heap region was too small to be initialised.
- */
-multi_heap_handle_t multi_heap_register(void *start, size_t size);
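
A minimal sketch of carving a heap out of an arbitrary buffer; the static array and sizes are illustrative:

```c
#include <multi_heap.h>

static char heap_mem[8 * 1024];   // any contiguous block of memory works

static void demo_multi_heap(void)
{
    multi_heap_handle_t h = multi_heap_register(heap_mem, sizeof(heap_mem));
    if (h == NULL) {
        return;                   // region too small to host a heap
    }
    void *p = multi_heap_malloc(h, 128);
    multi_heap_free(h, p);
}
```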
-
-
-/** @brief Associate a private lock pointer with a heap
- *
- * The lock argument is supplied to the MULTI_HEAP_LOCK() and MULTI_HEAP_UNLOCK() macros, defined in multi_heap_platform.h.
- *
- * The lock in question must be recursive.
- *
- * When the heap is first registered, the associated lock is NULL.
- *
- * @param heap Handle to a registered heap.
- * @param lock Optional pointer to a locking structure to associate with this heap.
- */
-void multi_heap_set_lock(multi_heap_handle_t heap, void* lock);
-
-/** @brief Dump heap information to stdout
- *
- * For debugging purposes, this function dumps information about every block in the heap to stdout.
- *
- * @param heap Handle to a registered heap.
- */
-void multi_heap_dump(multi_heap_handle_t heap);
-
-/** @brief Check heap integrity
- *
- * Walks the heap and checks all heap data structures are valid. If any errors are detected, an error-specific message
- * can be optionally printed to stderr. Print behaviour can be overridden at compile time by defining
- * MULTI_CHECK_FAIL_PRINTF in multi_heap_platform.h.
- *
- * @param heap Handle to a registered heap.
- * @param print_errors If true, errors will be printed to stderr.
- * @return true if heap is valid, false otherwise.
- */
-bool multi_heap_check(multi_heap_handle_t heap, bool print_errors);
-
-/** @brief Return free heap size
- *
- * Returns the number of bytes available in the heap.
- *
- * Equivalent to the total_free_bytes member returned by multi_heap_get_info().
- *
- * Note that the heap may be fragmented, so the actual maximum size for a single malloc() may be lower. To know this
- * size, see the largest_free_block member returned by multi_heap_get_info().
- *
- * @param heap Handle to a registered heap.
- * @return Number of free bytes.
- */
-size_t multi_heap_free_size(multi_heap_handle_t heap);
-
-/** @brief Return the lifetime minimum free heap size
- *
- * Equivalent to the minimum_free_bytes member returned by multi_heap_get_info().
- *
- * Returns the lifetime "low water mark" of possible values returned from multi_free_heap_size(), for the specified
- * heap.
- *
- * @param heap Handle to a registered heap.
- * @return Number of free bytes.
- */
-size_t multi_heap_minimum_free_size(multi_heap_handle_t heap);
-
-/** @brief Structure to access heap metadata via multi_heap_get_info */
-typedef struct {
-    size_t total_free_bytes;      ///<  Total free bytes in the heap. Equivalent to multi_heap_free_size().
-    size_t total_allocated_bytes; ///<  Total bytes allocated to data in the heap.
-    size_t largest_free_block;    ///<  Size of largest free block in the heap. This is the largest malloc-able size.
-    size_t minimum_free_bytes;    ///<  Lifetime minimum free heap size. Equivalent to multi_heap_minimum_free_size().
-    size_t allocated_blocks;      ///<  Number of (variable size) blocks allocated in the heap.
-    size_t free_blocks;           ///<  Number of (variable size) free blocks in the heap.
-    size_t total_blocks;          ///<  Total number of (variable size) blocks in the heap.
-} multi_heap_info_t;
-
-/** @brief Return metadata about a given heap
- *
- * Fills a multi_heap_info_t structure with information about the specified heap.
- *
- * @param heap Handle to a registered heap.
- * @param info Pointer to a structure to fill with heap metadata.
- */
-void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info);
-
-#ifdef __cplusplus
-}
-#endif

+ 0 - 7
components/heap/linker.lf

@@ -1,7 +0,0 @@
-[mapping:heap]
-archive: libheap.a
-entries:
-    heap_tlsf (noflash)
-    multi_heap (noflash)
-    if HEAP_POISONING_DISABLED = n:
-        multi_heap_poisoning (noflash)

+ 0 - 376
components/heap/multi_heap.c

@@ -1,376 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <assert.h>
-#include <string.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <sys/cdefs.h>
-#include "heap_tlsf.h"
-#include <multi_heap.h>
-#include "multi_heap_internal.h"
-
-/* Note: Keep platform-specific parts in this header, this source
-   file should depend on libc only */
-#include "multi_heap_platform.h"
-
-/* Defines compile-time configuration macros */
-#include "multi_heap_config.h"
-
-#ifndef MULTI_HEAP_POISONING
-/* if no heap poisoning, public API aliases directly to these implementations */
-void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
-    __attribute__((alias("multi_heap_malloc_impl")));
-
-void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment)
-    __attribute__((alias("multi_heap_aligned_alloc_impl")));
-
-void multi_heap_aligned_free(multi_heap_handle_t heap, void *p)
-    __attribute__((alias("multi_heap_free_impl")));
-
-void multi_heap_free(multi_heap_handle_t heap, void *p)
-    __attribute__((alias("multi_heap_free_impl")));
-
-void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size)
-    __attribute__((alias("multi_heap_realloc_impl")));
-
-size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p)
-    __attribute__((alias("multi_heap_get_allocated_size_impl")));
-
-multi_heap_handle_t multi_heap_register(void *start, size_t size)
-    __attribute__((alias("multi_heap_register_impl")));
-
-void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info)
-    __attribute__((alias("multi_heap_get_info_impl")));
-
-size_t multi_heap_free_size(multi_heap_handle_t heap)
-    __attribute__((alias("multi_heap_free_size_impl")));
-
-size_t multi_heap_minimum_free_size(multi_heap_handle_t heap)
-    __attribute__((alias("multi_heap_minimum_free_size_impl")));
-
-void *multi_heap_get_block_address(multi_heap_block_handle_t block)
-    __attribute__((alias("multi_heap_get_block_address_impl")));
-
-void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
-{
-    return NULL;
-}
-
-#endif
-
-#define ALIGN(X) ((X) & ~(sizeof(void *)-1))
-#define ALIGN_UP(X) ALIGN((X)+sizeof(void *)-1)
-#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
-
-
-typedef struct multi_heap_info {
-    void *lock;
-    size_t free_bytes;
-    size_t minimum_free_bytes;
-    size_t pool_size;
-    tlsf_t heap_data;
-} heap_t;
-
-/* Return true if this block is free. */
-static inline bool is_free(const block_header_t *block)
-{
-    return ((block->size & 0x01) != 0);
-}
-
-/* Data size of the block (excludes this block's header) */
-static inline size_t block_data_size(const block_header_t *block)
-{
-    return (block->size & ~0x03);
-}
-
-/* Check a block is valid for this heap. Used to verify parameters. */
-static void assert_valid_block(const heap_t *heap, const block_header_t *block)
-{
-    pool_t pool = tlsf_get_pool(heap->heap_data);
-    void *ptr = block_to_ptr(block);
-
-    MULTI_HEAP_ASSERT((ptr >= pool) &&
-                    (ptr < pool + heap->pool_size),
-                    (uintptr_t)ptr);
-}
-
-void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block)
-{
-    void *ptr = block_to_ptr(block);
-    return (ptr);
-}
-
-size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
-{
-    return tlsf_block_size(p);
-}
-
-multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
-{
-    assert(start_ptr);
-    if(size < (tlsf_size(NULL) + tlsf_block_size_min() + sizeof(heap_t))) {
-        //Region too small to be a heap.
-        return NULL;
-    }
-
-    heap_t *result = (heap_t *)start_ptr;
-    size -= sizeof(heap_t);
-
-    result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, 0);
-    if(!result->heap_data) {
-        return NULL;
-    }
-
-    result->lock = NULL;
-    result->free_bytes = size - tlsf_size(result->heap_data);
-    result->pool_size = size;
-    result->minimum_free_bytes = result->free_bytes;
-    return result;
-}
-
-void multi_heap_set_lock(multi_heap_handle_t heap, void *lock)
-{
-    heap->lock = lock;
-}
-
-void inline multi_heap_internal_lock(multi_heap_handle_t heap)
-{
-    MULTI_HEAP_LOCK(heap->lock);
-}
-
-void inline multi_heap_internal_unlock(multi_heap_handle_t heap)
-{
-    MULTI_HEAP_UNLOCK(heap->lock);
-}
-
-multi_heap_block_handle_t multi_heap_get_first_block(multi_heap_handle_t heap)
-{
-    assert(heap != NULL);
-    pool_t pool = tlsf_get_pool(heap->heap_data);
-    block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);
-
-    return (multi_heap_block_handle_t)block;
-}
-
-multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, multi_heap_block_handle_t block)
-{
-    assert(heap != NULL);
-    assert_valid_block(heap, block);
-    block_header_t* next = block_next(block);
-
-    if(block_data_size(next) == 0) {
-        //Last block:
-        return NULL;
-    } else {
-        return (multi_heap_block_handle_t)next;
-    }
-
-}
-
-bool multi_heap_is_free(multi_heap_block_handle_t block)
-{
-    return is_free(block);
-}
-
-void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size)
-{
-    if (size == 0 || heap == NULL) {
-        return NULL;
-    }
-
-    multi_heap_internal_lock(heap);
-    void *result = tlsf_malloc(heap->heap_data, size);
-    if(result) {
-        heap->free_bytes -= tlsf_block_size(result);
-        if (heap->free_bytes < heap->minimum_free_bytes) {
-            heap->minimum_free_bytes = heap->free_bytes;
-        }
-    }
-    multi_heap_internal_unlock(heap);
-
-    return result;
-}
-
-void multi_heap_free_impl(multi_heap_handle_t heap, void *p)
-{
-    if (heap == NULL || p == NULL) {
-        return;
-    }
-
-    assert_valid_block(heap, p);
-
-    multi_heap_internal_lock(heap);
-    heap->free_bytes += tlsf_block_size(p);
-    tlsf_free(heap->heap_data, p);
-    multi_heap_internal_unlock(heap);
-}
-
-void *multi_heap_realloc_impl(multi_heap_handle_t heap, void *p, size_t size)
-{
-    assert(heap != NULL);
-
-    if (heap == NULL) {
-        return NULL;
-    }
-
-    if (p == NULL) {
-        return multi_heap_malloc_impl(heap, size);
-    }
-
-    assert_valid_block(heap, p);
-
-    multi_heap_internal_lock(heap);
-    size_t previous_block_size =  tlsf_block_size(p);
-    void *result = tlsf_realloc(heap->heap_data, p, size);
-    if(result) {
-        heap->free_bytes += previous_block_size;
-        heap->free_bytes -= tlsf_block_size(result);
-        if (heap->free_bytes < heap->minimum_free_bytes) {
-            heap->minimum_free_bytes = heap->free_bytes;
-        }
-    }
-
-    multi_heap_internal_unlock(heap);
-
-    return result;
-}
-
-void *multi_heap_aligned_alloc_impl_offs(multi_heap_handle_t heap, size_t size, size_t alignment, size_t offset)
-{
-    if(heap == NULL) {
-        return NULL;
-    }
-
-    if(!size) {
-        return NULL;
-    }
-
-    //Alignment must be a power of two:
-    if (!alignment || (alignment & (alignment - 1)) != 0) {
-        return NULL;
-    }
-
-    multi_heap_internal_lock(heap);
-    void *result = tlsf_memalign_offs(heap->heap_data, alignment, size, offset);
-    if(result) {
-        heap->free_bytes -= tlsf_block_size(result);
-        if(heap->free_bytes < heap->minimum_free_bytes) {
-            heap->minimum_free_bytes = heap->free_bytes;
-        }
-    }
-    multi_heap_internal_unlock(heap);
-
-    return result;
-}
-
-
-void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment)
-{
-    return multi_heap_aligned_alloc_impl_offs(heap, size, alignment, 0);
-}
-
-bool multi_heap_check(multi_heap_handle_t heap, bool print_errors)
-{
-    (void)print_errors;
-    bool valid = true;
-    assert(heap != NULL);
-
-    multi_heap_internal_lock(heap);
-    if(tlsf_check(heap->heap_data)) {
-        valid = false;
-    }
-
-    if(tlsf_check_pool(tlsf_get_pool(heap->heap_data))) {
-        valid = false;
-    }
-
-    multi_heap_internal_unlock(heap);
-    return valid;
-}
-
-static void multi_heap_dump_tlsf(void* ptr, size_t size, int used, void* user)
-{
-    (void)user;
-    MULTI_HEAP_STDERR_PRINTF("Block %p data, size: %d bytes, Free: %s \n",
-                            (void *)ptr,
-                            size,
-                            used ? "No" : "Yes");
-}
-
-void multi_heap_dump(multi_heap_handle_t heap)
-{
-    assert(heap != NULL);
-
-    multi_heap_internal_lock(heap);
-    MULTI_HEAP_STDERR_PRINTF("Showing data for heap: %p \n", (void *)heap);
-    tlsf_walk_pool(tlsf_get_pool(heap->heap_data), multi_heap_dump_tlsf, NULL);
-    multi_heap_internal_unlock(heap);
-}
-
-size_t multi_heap_free_size_impl(multi_heap_handle_t heap)
-{
-    if (heap == NULL) {
-        return 0;
-    }
-
-    return heap->free_bytes;
-}
-
-size_t multi_heap_minimum_free_size_impl(multi_heap_handle_t heap)
-{
-    if (heap == NULL) {
-        return 0;
-    }
-
-    return heap->minimum_free_bytes;
-}
-
-static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* user)
-{
-    multi_heap_info_t *info = user;
-
-    if(used) {
-        info->allocated_blocks++;
-    } else {
-        info->free_blocks++;
-
-        if(size > info->largest_free_block ) {
-            info->largest_free_block = size;
-        }
-    }
-
-    info->total_blocks++;
-}
-
-void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
-{
-    memset(info, 0, sizeof(multi_heap_info_t));
-
-    if (heap == NULL) {
-        return;
-    }
-
-    multi_heap_internal_lock(heap);
-    tlsf_walk_pool(tlsf_get_pool(heap->heap_data), multi_heap_get_info_tlsf, info);
-    info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes;
-    info->minimum_free_bytes = heap->minimum_free_bytes;
-    info->total_free_bytes = heap->free_bytes;
-    info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
-    multi_heap_internal_unlock(heap);
-}

+ 0 - 31
components/heap/multi_heap_config.h

@@ -1,31 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-#ifdef ESP_PLATFORM
-#include "sdkconfig.h"
-#include "soc/soc.h"
-#include "soc/soc_caps.h"
-#endif
-
-/* Configuration macros for multi-heap */
-
-#ifdef CONFIG_HEAP_POISONING_LIGHT
-#define MULTI_HEAP_POISONING
-#endif
-
-#ifdef CONFIG_HEAP_POISONING_COMPREHENSIVE
-#define MULTI_HEAP_POISONING
-#define MULTI_HEAP_POISONING_SLOW
-#endif

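The header deleted above only maps Kconfig choices onto compile-time macros; downstream code keys off them. A tiny illustrative sketch of telling the three poisoning modes apart (the reporting function and its strings are ours, not from the component):

    #include <stdio.h>
    #include "multi_heap_config.h"

    void report_poisoning_mode(void)
    {
    #if defined(MULTI_HEAP_POISONING_SLOW)
        printf("heap poisoning: comprehensive\n");   /* canaries + fill patterns */
    #elif defined(MULTI_HEAP_POISONING)
        printf("heap poisoning: light\n");           /* canaries only */
    #else
        printf("heap poisoning: disabled\n");
    #endif
    }
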
+ 0 - 76
components/heap/multi_heap_internal.h

@@ -1,76 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-/* Opaque handle to a heap block */
-typedef const struct block_header_t *multi_heap_block_handle_t;
-
-/* Internal definitions for the "implementation" of the multi_heap API,
-   as defined in multi_heap.c.
-
-   If heap poisoning is disabled, these are aliased directly to the public API.
-
-   If heap poisoning is enabled, wrapper functions call each of these.
-*/
-
-void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size);
-
-/* Allocate a memory region of minimum `size` bytes, aligned on `alignment`. */
-void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment);
-
-/* Allocate a memory region of minimum `size` bytes, where memory's `offset` is aligned on `alignment`. */
-void *multi_heap_aligned_alloc_impl_offs(multi_heap_handle_t heap, size_t size, size_t alignment, size_t offset);
-
-void multi_heap_free_impl(multi_heap_handle_t heap, void *p);
-void *multi_heap_realloc_impl(multi_heap_handle_t heap, void *p, size_t size);
-multi_heap_handle_t multi_heap_register_impl(void *start, size_t size);
-void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info);
-size_t multi_heap_free_size_impl(multi_heap_handle_t heap);
-size_t multi_heap_minimum_free_size_impl(multi_heap_handle_t heap);
-size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p);
-void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block);
-
-/* Some internal functions for heap poisoning use */
-
-/* Check an allocated block's poison bytes are correct. Called by multi_heap_check(). */
-bool multi_heap_internal_check_block_poisoning(void *start, size_t size, bool is_free, bool print_errors);
-
-/* Fill a region of memory with the free or malloced pattern.
-   Called when merging blocks, to overwrite the old block header.
-*/
-void multi_heap_internal_poison_fill_region(void *start, size_t size, bool is_free);
-
-/* Allow heap poisoning to lock/unlock the heap to avoid race conditions
-   if multi_heap_check() is running concurrently.
-*/
-void multi_heap_internal_lock(multi_heap_handle_t heap);
-
-void multi_heap_internal_unlock(multi_heap_handle_t heap);
-
-/* Some internal functions for heap debugging code to use */
-
-/* Get the handle to the first (fixed free) block in a heap */
-multi_heap_block_handle_t multi_heap_get_first_block(multi_heap_handle_t heap);
-
-/* Get the handle to the next block in a heap, with validation */
-multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, multi_heap_block_handle_t block);
-
-/* Test if a heap block is free */
-bool multi_heap_is_free(const multi_heap_block_handle_t block);
-
-/* Get the data address of a heap block */
-void *multi_heap_get_block_address(multi_heap_block_handle_t block);
-
-/* Get the owner identification for a heap block */
-void *multi_heap_get_block_owner(multi_heap_block_handle_t block);

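As the header comment above says, the *_impl split lets poisoning wrappers sit in front of the real allocator. When poisoning is disabled, the public names collapse onto the implementations; a sketch of that aliasing (GCC alias-attribute syntax, shown here as one plausible way to express it):

    #ifndef MULTI_HEAP_POISONING
    /* no wrapper needed: the public symbol is the implementation */
    void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
        __attribute__((alias("multi_heap_malloc_impl")));
    void multi_heap_free(multi_heap_handle_t heap, void *p)
        __attribute__((alias("multi_heap_free_impl")));
    #endif
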
+ 0 - 108
components/heap/multi_heap_platform.h

@@ -1,108 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-#ifdef MULTI_HEAP_FREERTOS
-
-#include "freertos/FreeRTOS.h"
-
-#include "sdkconfig.h"
-#include "esp_rom_sys.h"
-#if CONFIG_IDF_TARGET_ESP32
-#include "esp32/rom/ets_sys.h" // will be removed in idf v5.0
-#elif CONFIG_IDF_TARGET_ESP32S2
-#include "esp32s2/rom/ets_sys.h"
-#endif
-#include <assert.h>
-
-typedef portMUX_TYPE multi_heap_lock_t;
-
-/* Because malloc/free can happen inside an ISR context,
-   we need to use portmux spinlocks here not RTOS mutexes */
-#define MULTI_HEAP_LOCK(PLOCK) do {                         \
-        if((PLOCK) != NULL) {                               \
-            portENTER_CRITICAL((PLOCK));                    \
-        }                                                   \
-    } while(0)
-
-
-#define MULTI_HEAP_UNLOCK(PLOCK) do {                       \
-        if ((PLOCK) != NULL) {                              \
-            portEXIT_CRITICAL((PLOCK));                     \
-        }                                                   \
-    } while(0)
-
-#define MULTI_HEAP_LOCK_INIT(PLOCK) do {                    \
-        vPortCPUInitializeMutex((PLOCK));                   \
-    } while(0)
-
-#define MULTI_HEAP_LOCK_STATIC_INITIALIZER     portMUX_INITIALIZER_UNLOCKED
-
-/* Not safe to use std i/o while in a portmux critical section,
-   can deadlock, so we use the ROM equivalent functions. */
-
-#define MULTI_HEAP_PRINTF esp_rom_printf
-#define MULTI_HEAP_STDERR_PRINTF(MSG, ...) esp_rom_printf(MSG, __VA_ARGS__)
-
-inline static void multi_heap_assert(bool condition, const char *format, int line, intptr_t address)
-{
-    /* Can't use libc assert() here as it calls printf() which can cause another malloc() for a newlib lock.
-
-       Also, it's useful to be able to print the memory address where corruption was detected.
-    */
-#ifndef NDEBUG
-    if(!condition) {
-#ifndef CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT
-        esp_rom_printf(format, line, address);
-#endif  // CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT
-        abort();
-    }
-#else // NDEBUG
-    (void) condition;
-#endif // NDEBUG
-}
-
-#define MULTI_HEAP_ASSERT(CONDITION, ADDRESS) \
-    multi_heap_assert((CONDITION), "CORRUPT HEAP: multi_heap.c:%d detected at 0x%08x\n", \
-                      __LINE__, (intptr_t)(ADDRESS))
-
-#ifdef CONFIG_HEAP_TASK_TRACKING
-#include <freertos/task.h>
-#define MULTI_HEAP_BLOCK_OWNER TaskHandle_t task;
-#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD) (HEAD)->task = xTaskGetCurrentTaskHandle()
-#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) ((HEAD)->task)
-#else
-#define MULTI_HEAP_BLOCK_OWNER
-#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD)
-#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) (NULL)
-#endif
-
-#else // MULTI_HEAP_FREERTOS
-
-#include <assert.h>
-
-#define MULTI_HEAP_PRINTF printf
-#define MULTI_HEAP_STDERR_PRINTF(MSG, ...) fprintf(stderr, MSG, __VA_ARGS__)
-#define MULTI_HEAP_LOCK(PLOCK)  (void) (PLOCK)
-#define MULTI_HEAP_UNLOCK(PLOCK)  (void) (PLOCK)
-#define MULTI_HEAP_LOCK_INIT(PLOCK)  (void) (PLOCK)
-#define MULTI_HEAP_LOCK_STATIC_INITIALIZER  0
-
-#define MULTI_HEAP_ASSERT(CONDITION, ADDRESS) assert((CONDITION) && "Heap corrupt")
-
-#define MULTI_HEAP_BLOCK_OWNER
-#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD)
-#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) (NULL)
-
-#endif // MULTI_HEAP_FREERTOS

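Since malloc/free may run from ISR context, the macros above wrap a portMUX spinlock rather than an RTOS mutex. Inside the allocator the call pattern looks roughly like this (a sketch, assuming the heap structure carries an optional lock pointer as in multi_heap.c):

    void multi_heap_internal_lock(multi_heap_handle_t heap)
    {
        MULTI_HEAP_LOCK(heap->lock);    /* portENTER_CRITICAL() when a lock is registered */
    }

    void multi_heap_internal_unlock(multi_heap_handle_t heap)
    {
        MULTI_HEAP_UNLOCK(heap->lock);  /* portEXIT_CRITICAL(); a NULL lock is a no-op */
    }
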
+ 0 - 426
components/heap/multi_heap_poisoning.c

@@ -1,426 +0,0 @@
-// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <assert.h>
-#include <string.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <sys/param.h>
-#include <multi_heap.h>
-#include "multi_heap_internal.h"
-
-/* Note: Keep platform-specific parts in this header, this source
-   file should depend on libc only */
-#include "multi_heap_platform.h"
-
-/* Defines compile-time configuration macros */
-#include "multi_heap_config.h"
-
-#ifdef MULTI_HEAP_POISONING
-
-/* Alias MULTI_HEAP_POISONING_SLOW to SLOW for better readabilty */
-#ifdef SLOW
-#error "external header has defined SLOW"
-#endif
-#ifdef MULTI_HEAP_POISONING_SLOW
-#define SLOW 1
-#endif
-
-#define MALLOC_FILL_PATTERN 0xce
-#define FREE_FILL_PATTERN 0xfe
-
-#define HEAD_CANARY_PATTERN 0xABBA1234
-#define TAIL_CANARY_PATTERN 0xBAAD5678
-
-
-#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
-
-typedef struct {
-    uint32_t head_canary;
-    MULTI_HEAP_BLOCK_OWNER
-    size_t alloc_size;
-} poison_head_t;
-
-typedef struct {
-    uint32_t tail_canary;
-} poison_tail_t;
-
-#define POISON_OVERHEAD (sizeof(poison_head_t) + sizeof(poison_tail_t))
-
-/* Given a "poisoned" region with pre-data header 'head', and actual data size 'alloc_size', fill in the head and tail
-   region checks.
-
-   Returns the pointer to the actual usable data buffer (ie after 'head')
-*/
-static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
-{
-    uint8_t *data = (uint8_t *)(&head[1]); /* start of data ie 'real' allocated buffer */
-    poison_tail_t *tail = (poison_tail_t *)(data + alloc_size);
-    head->alloc_size = alloc_size;
-    head->head_canary = HEAD_CANARY_PATTERN;
-    MULTI_HEAP_SET_BLOCK_OWNER(head);
-
-    uint32_t tail_canary = TAIL_CANARY_PATTERN;
-    if ((intptr_t)tail % sizeof(void *) == 0) {
-        tail->tail_canary = tail_canary;
-    } else {
-        /* unaligned tail_canary */
-        memcpy(&tail->tail_canary, &tail_canary, sizeof(uint32_t));
-    }
-
-    return data;
-}
-
-/* Given a pointer to some allocated data, check the head & tail poison structures (before & after it) that were
-   previously injected by poison_allocated_region().
-
-   Returns a pointer to the poison header structure, or NULL if the poison structures are corrupt.
-*/
-static poison_head_t *verify_allocated_region(void *data, bool print_errors)
-{
-    poison_head_t *head = (poison_head_t *)((intptr_t)data - sizeof(poison_head_t));
-    poison_tail_t *tail = (poison_tail_t *)((intptr_t)data + head->alloc_size);
-
-    /* check if the beginning of the data was overwritten */
-    if (head->head_canary != HEAD_CANARY_PATTERN) {
-        if (print_errors) {
-            MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Bad head at %p. Expected 0x%08x got 0x%08x\n", &head->head_canary,
-                   HEAD_CANARY_PATTERN, head->head_canary);
-        }
-        return NULL;
-    }
-
-    /* check if the end of the data was overrun */
-    uint32_t canary;
-    if ((intptr_t)tail % sizeof(void *) == 0) {
-        canary = tail->tail_canary;
-    } else {
-        /* tail is unaligned */
-        memcpy(&canary, &tail->tail_canary, sizeof(canary));
-    }
-    if (canary != TAIL_CANARY_PATTERN) {
-        if (print_errors) {
-            MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Bad tail at %p. Expected 0x%08x got 0x%08x\n", &tail->tail_canary,
-                   TAIL_CANARY_PATTERN, canary);
-        }
-        return NULL;
-    }
-
-    return head;
-}
-
-#ifdef SLOW
-/* Walk a region that should be filled with the expected fill byte pattern
-   and verify it.
-
-   if expect_free is true, expect FREE_FILL_PATTERN otherwise MALLOC_FILL_PATTERN.
-
-   if swap_pattern is true, swap patterns in the buffer (ie replace MALLOC_FILL_PATTERN with FREE_FILL_PATTERN, and vice versa.)
-
-   Returns true if verification checks out.
-*/
-static bool verify_fill_pattern(void *data, size_t size, bool print_errors, bool expect_free, bool swap_pattern)
-{
-    const uint32_t FREE_FILL_WORD = (FREE_FILL_PATTERN << 24) | (FREE_FILL_PATTERN << 16) | (FREE_FILL_PATTERN << 8) | FREE_FILL_PATTERN;
-    const uint32_t MALLOC_FILL_WORD = (MALLOC_FILL_PATTERN << 24) | (MALLOC_FILL_PATTERN << 16) | (MALLOC_FILL_PATTERN << 8) | MALLOC_FILL_PATTERN;
-
-    const uint32_t EXPECT_WORD = expect_free ? FREE_FILL_WORD : MALLOC_FILL_WORD;
-    const uint32_t REPLACE_WORD = expect_free ? MALLOC_FILL_WORD : FREE_FILL_WORD;
-    bool valid = true;
-
-    /* Use 4-byte operations as much as possible */
-    if ((intptr_t)data % 4 == 0) {
-        uint32_t *p = data;
-        while (size >= 4) {
-            if (*p != EXPECT_WORD) {
-                if (print_errors) {
-                    MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Invalid data at %p. Expected 0x%08x got 0x%08x\n", p, EXPECT_WORD, *p);
-                }
-                valid = false;
-#ifndef NDEBUG
-                /* If an assertion is going to fail as soon as we're done verifying the pattern, leave the rest of the
-                   buffer contents as-is for better post-mortem analysis
-                */
-                swap_pattern = false;
-#endif
-            }
-            if (swap_pattern) {
-                *p = REPLACE_WORD;
-            }
-            p++;
-            size -= 4;
-        }
-        data = p;
-    }
-
-    uint8_t *p = data;
-    for (size_t i = 0; i < size; i++) {
-        if (p[i] != (uint8_t)EXPECT_WORD) {
-            if (print_errors) {
-                MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Invalid data at %p. Expected 0x%02x got 0x%02x\n", &p[i], (uint8_t)EXPECT_WORD, p[i]);
-            }
-            valid = false;
-#ifndef NDEBUG
-            swap_pattern = false; // same as above
-#endif
-        }
-        if (swap_pattern) {
-            p[i] = (uint8_t)REPLACE_WORD;
-        }
-    }
-    return valid;
-}
-#endif
-
-void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment)
-{
-    if (!size) {
-        return NULL;
-    }
-
-    if (size > SIZE_MAX - POISON_OVERHEAD) {
-        return NULL;
-    }
-
-    multi_heap_internal_lock(heap);
-    poison_head_t *head = multi_heap_aligned_alloc_impl_offs(heap, size + POISON_OVERHEAD,
-                                                             alignment, sizeof(poison_head_t));
-    uint8_t *data = NULL;
-    if (head != NULL) {
-        data = poison_allocated_region(head, size);
-#ifdef SLOW
-        /* check everything we got back is FREE_FILL_PATTERN & swap for MALLOC_FILL_PATTERN */
-        bool ret = verify_fill_pattern(data, size, true, true, true);
-        assert( ret );
-#endif
-    } else {
-        multi_heap_internal_unlock(heap);
-        return NULL;
-    }
-
-    multi_heap_internal_unlock(heap);
-
-    return data;
-}
-
-void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
-{
-    if (!size) {
-        return NULL;
-    }
-
-    if(size > SIZE_MAX - POISON_OVERHEAD) {
-        return NULL;
-    }
-
-    multi_heap_internal_lock(heap);
-    poison_head_t *head = multi_heap_malloc_impl(heap, size + POISON_OVERHEAD);
-    uint8_t *data = NULL;
-    if (head != NULL) {
-        data = poison_allocated_region(head, size);
-#ifdef SLOW
-        /* check everything we got back is FREE_FILL_PATTERN & swap for MALLOC_FILL_PATTERN */
-        bool ret = verify_fill_pattern(data, size, true, true, true);
-        assert( ret );
-#endif
-    }
-
-    multi_heap_internal_unlock(heap);
-    return data;
-}
-
-void multi_heap_free(multi_heap_handle_t heap, void *p)
-{
-    if (p == NULL) {
-        return;
-    }
-    multi_heap_internal_lock(heap);
-
-    poison_head_t *head = verify_allocated_region(p, true);
-    assert(head != NULL);
-
-    #ifdef SLOW
-    /* replace everything with FREE_FILL_PATTERN, including the poison head/tail */
-    memset(head, FREE_FILL_PATTERN,
-           head->alloc_size + POISON_OVERHEAD);
-    #endif
-    multi_heap_free_impl(heap, head);
-
-    multi_heap_internal_unlock(heap);
-}
-
-void multi_heap_aligned_free(multi_heap_handle_t heap, void *p)
-{
-    multi_heap_free(heap, p);
-}
-
-void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size)
-{
-    poison_head_t *head = NULL;
-    poison_head_t *new_head;
-    void *result = NULL;
-
-    if(size > SIZE_MAX - POISON_OVERHEAD) {
-        return NULL;
-    }
-    if (p == NULL) {
-        return multi_heap_malloc(heap, size);
-    }
-    if (size == 0) {
-        multi_heap_free(heap, p);
-        return NULL;
-    }
-
-    /* p != NULL, size != 0 */
-    head = verify_allocated_region(p, true);
-    assert(head != NULL);
-
-    multi_heap_internal_lock(heap);
-
-#ifndef SLOW
-    new_head = multi_heap_realloc_impl(heap, head, size + POISON_OVERHEAD);
-    if (new_head != NULL) {
-        /* For "fast" poisoning, we only overwrite the head/tail of the new block,
-           so this is safe to do even if realloc resized the buffer in place.
-        */
-        result = poison_allocated_region(new_head, size);
-    }
-#else // SLOW
-    /* When slow poisoning is enabled, it becomes very fiddly to try and correctly fill memory when resizing in place
-       (where the buffer may be moved (including to an overlapping address with the old buffer), grown, or shrunk in
-       place.)
-
-       For now we just malloc a new buffer, copy, and free. :|
-
-       Note: If this ever changes, multi_heap defrag realloc test should be enabled.
-    */
-    size_t orig_alloc_size = head->alloc_size;
-
-    new_head = multi_heap_malloc_impl(heap, size + POISON_OVERHEAD);
-    if (new_head != NULL) {
-        result = poison_allocated_region(new_head, size);
-        memcpy(result, p, MIN(size, orig_alloc_size));
-        multi_heap_free(heap, p);
-    }
-#endif
-
-    multi_heap_internal_unlock(heap);
-
-    return result;
-}
-
-void *multi_heap_get_block_address(multi_heap_block_handle_t block)
-{
-    char *head = multi_heap_get_block_address_impl(block);
-    return head + sizeof(poison_head_t);
-}
-
-void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
-{
-    return MULTI_HEAP_GET_BLOCK_OWNER((poison_head_t*)multi_heap_get_block_address_impl(block));
-}
-
-multi_heap_handle_t multi_heap_register(void *start, size_t size)
-{
-#ifdef SLOW
-    if (start != NULL) {
-        memset(start, FREE_FILL_PATTERN, size);
-    }
-#endif
-    return multi_heap_register_impl(start, size);
-}
-
-static inline void subtract_poison_overhead(size_t *arg) {
-    if (*arg > POISON_OVERHEAD) {
-        *arg -= POISON_OVERHEAD;
-    } else {
-        *arg = 0;
-    }
-}
-
-size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p)
-{
-    poison_head_t *head = verify_allocated_region(p, true);
-    assert(head != NULL);
-    size_t result = multi_heap_get_allocated_size_impl(heap, head);
-    return result;
-}
-
-void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info)
-{
-    multi_heap_get_info_impl(heap, info);
-    /* don't count the heap poison head & tail overhead in the allocated bytes size */
-    info->total_allocated_bytes -= info->allocated_blocks * POISON_OVERHEAD;
-    /* trim largest_free_block to account for poison overhead */
-    subtract_poison_overhead(&info->largest_free_block);
-    /* similarly, trim total_free_bytes so there's no suggestion that
-       a block this big may be available. */
-    subtract_poison_overhead(&info->total_free_bytes);
-    subtract_poison_overhead(&info->minimum_free_bytes);
-}
-
-size_t multi_heap_free_size(multi_heap_handle_t heap)
-{
-    size_t r = multi_heap_free_size_impl(heap);
-    subtract_poison_overhead(&r);
-    return r;
-}
-
-size_t multi_heap_minimum_free_size(multi_heap_handle_t heap)
-{
-    size_t r = multi_heap_minimum_free_size_impl(heap);
-    subtract_poison_overhead(&r);
-    return r;
-}
-
-/* Internal hooks used by multi_heap to manage poisoning, while keeping some modularity */
-
-bool multi_heap_internal_check_block_poisoning(void *start, size_t size, bool is_free, bool print_errors)
-{
-    if (is_free) {
-#ifdef SLOW
-        return verify_fill_pattern(start, size, print_errors, true, false);
-#else
-        return true; /* can only verify empty blocks in SLOW mode */
-#endif
-    } else {
-        void *data = (void *)((intptr_t)start + sizeof(poison_head_t));
-        poison_head_t *head = verify_allocated_region(data, print_errors);
-        if (head != NULL && head->alloc_size > size - POISON_OVERHEAD) {
-            /* block can be bigger than alloc_size, for reasons of alignment & fragmentation,
-               but block can never be smaller than head->alloc_size... */
-            if (print_errors) {
-                MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Size at %p expected <=0x%08x got 0x%08x\n", &head->alloc_size,
-                       size - POISON_OVERHEAD, head->alloc_size);
-            }
-            return false;
-        }
-        return head != NULL;
-    }
-}
-
-void multi_heap_internal_poison_fill_region(void *start, size_t size, bool is_free)
-{
-    memset(start, is_free ? FREE_FILL_PATTERN : MALLOC_FILL_PATTERN, size);
-}
-
-#else // !MULTI_HEAP_POISONING
-
-#ifdef MULTI_HEAP_POISONING_SLOW
-#error "MULTI_HEAP_POISONING_SLOW requires MULTI_HEAP_POISONING"
-#endif
-
-#endif  // MULTI_HEAP_POISONING

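The layout built by poison_allocated_region() above is worth spelling out: the caller's pointer sits between a head structure and a tail canary, so every allocation grows by POISON_OVERHEAD bytes:

    /*   [ poison_head_t | user data (alloc_size bytes) | poison_tail_t ]
     *     head_canary = 0xABBA1234              tail_canary = 0xBAAD5678
     *
     *   pointer returned to the caller = (uint8_t *)head + sizeof(poison_head_t)
     *   total block size = alloc_size + POISON_OVERHEAD
     */
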
+ 0 - 3
components/heap/test/CMakeLists.txt

@@ -1,3 +0,0 @@
-idf_component_register(SRC_DIRS "."
-                    PRIV_INCLUDE_DIRS "."
-                    PRIV_REQUIRES cmock test_utils heap)

+ 0 - 5
components/heap/test/component.mk

@@ -1,5 +0,0 @@
-#
-#Component Makefile
-#
-
-COMPONENT_ADD_LDFLAGS = -Wl,--whole-archive -l$(COMPONENT_NAME) -Wl,--no-whole-archive

+ 0 - 147
components/heap/test/test_aligned_alloc_caps.c

@@ -1,147 +0,0 @@
-/*
- Tests for the capabilities-based memory allocator.
-*/
-
-#include <esp_types.h>
-#include <stdio.h>
-#include "unity.h"
-#include "esp_attr.h"
-#include "esp_heap_caps.h"
-#include "esp_spi_flash.h"
-#include <stdlib.h>
-#include <sys/param.h>
-#include <string.h>
-#include <malloc.h>
-
-TEST_CASE("Capabilities aligned allocator test", "[heap]")
-{
-    uint32_t alignments = 0;
-
-    printf("[ALIGNED_ALLOC] Allocating from default CAP: \n");
-
-    for(;alignments <= 1024; alignments++) {
-        uint8_t *buf = (uint8_t *)memalign(alignments, (alignments + 137));
-        if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
-            TEST_ASSERT( buf == NULL );
-            //printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", alignments);
-        } else {
-            TEST_ASSERT( buf != NULL );
-            printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
-            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
-            //Address of obtained block must be aligned with selected value
-            TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
-
-            //Write some data, if it corrupts memory probably the heap
-            //canary verification will fail:
-            memset(buf, 0xA5, (alignments + 137));
-
-            free(buf);
-        }
-    }
-
-    //Alloc from a non permitted area:
-    uint32_t *not_permitted_buf = (uint32_t *)heap_caps_aligned_alloc(alignments, (alignments + 137), MALLOC_CAP_EXEC | MALLOC_CAP_32BIT);
-    TEST_ASSERT( not_permitted_buf == NULL );
-
-#if CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT
-    alignments = 0;
-    printf("[ALIGNED_ALLOC] Allocating from external memory: \n");
-
-    for(;alignments <= 1024 * 1024; alignments++) {
-        //Now try to take aligned memory from IRAM:
-        uint8_t *buf = (uint8_t *)heap_caps_aligned_alloc(alignments, 10*1024, MALLOC_CAP_SPIRAM);
-        if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
-            TEST_ASSERT( buf == NULL );
-            //printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", alignments);
-        } else {
-            TEST_ASSERT( buf != NULL );
-            printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
-            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
-            //Address of obtained block must be aligned with selected value
-            TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
-
-            //Write some data, if it corrupts memory probably the heap
-            //canary verification will fail:
-            memset(buf, 0xA5, (10*1024));
-            heap_caps_free(buf);
-        }
-    }
-#endif
-
-}
-
-TEST_CASE("Capabilities aligned calloc test", "[heap]")
-{
-    uint32_t alignments = 0;
-
-    printf("[ALIGNED_ALLOC] Allocating from default CAP: \n");
-
-    for(;alignments <= 1024; alignments++) {
-        uint8_t *buf = (uint8_t *)heap_caps_aligned_calloc(alignments, 1, (alignments + 137), MALLOC_CAP_DEFAULT);
-        if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
-            TEST_ASSERT( buf == NULL );
-            //printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", alignments);
-        } else {
-            TEST_ASSERT( buf != NULL );
-            printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
-            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
-            //Address of obtained block must be aligned with selected value
-            TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
-
-            //Write some data, if it corrupts memory probably the heap
-            //canary verification will fail:
-            memset(buf, 0xA5, (alignments + 137));
-
-            heap_caps_free(buf);
-        }
-    }
-
-    //Check if memory is initialized with zero:
-    uint8_t byte_array[1024];
-    memset(&byte_array, 0, sizeof(byte_array));
-    uint8_t *buf = (uint8_t *)heap_caps_aligned_calloc(1024, 1, 1024, MALLOC_CAP_DEFAULT);
-    TEST_ASSERT(memcmp(byte_array, buf, sizeof(byte_array)) == 0);
-    heap_caps_free(buf);
-
-    //Same size, but different chunk:
-    buf = (uint8_t *)heap_caps_aligned_calloc(1024, 1024, 1, MALLOC_CAP_DEFAULT);
-    TEST_ASSERT(memcmp(byte_array, buf, sizeof(byte_array)) == 0);
-    heap_caps_free(buf);
-
-    //Alloc from a non permitted area:
-    uint32_t *not_permitted_buf = (uint32_t *)heap_caps_aligned_calloc(alignments, 1, (alignments + 137), MALLOC_CAP_32BIT);
-    TEST_ASSERT( not_permitted_buf == NULL );
-
-#if CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT
-    alignments = 0;
-    printf("[ALIGNED_ALLOC] Allocating from external memory: \n");
-
-    for(;alignments <= 1024 * 1024; alignments++) {
-        //Now try to take aligned memory from IRAM:
-        uint8_t *buf = (uint8_t *)heap_caps_aligned_calloc(alignments, 1, 10*1024, MALLOC_CAP_SPIRAM);
-        if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
-            TEST_ASSERT( buf == NULL );
-            //printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", alignments);
-        } else {
-            TEST_ASSERT( buf != NULL );
-            printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
-            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
-            //Address of obtained block must be aligned with selected value
-            TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
-
-            //Write some data, if it corrupts memory probably the heap
-            //canary verification will fail:
-            memset(buf, 0xA5, (10*1024));
-            heap_caps_free(buf);
-        }
-    }
-#endif
-
-}
-
-TEST_CASE("aligned_alloc(0) should return a NULL pointer", "[heap]")
-{
-    void *p;
-    p = heap_caps_aligned_alloc(32, 0, MALLOC_CAP_DEFAULT);
-    TEST_ASSERT(p == NULL);
-}

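The alignment loop above leans on the classic bit trick: a power of two has exactly one set bit, so a & (a - 1) clears it to zero. The predicate the test relies on, isolated (sketch):

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_valid_alignment(uint32_t a)
    {
        /* rejects 0 and any value with more than one set bit */
        return a != 0 && (a & (a - 1)) == 0;
    }
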
+ 0 - 108
components/heap/test/test_allocator_timings.c

@@ -1,108 +0,0 @@
-#include "freertos/FreeRTOS.h"
-#include <esp_types.h>
-#include <stdio.h>
-#include "unity.h"
-#include "esp_attr.h"
-#include "esp_heap_caps.h"
-#include <stdlib.h>
-#include <sys/param.h>
-#include <string.h>
-#include <test_utils.h>
-
-//This test only makes sense with poisoning (light or comprehensive) disabled
-#if !defined(CONFIG_HEAP_POISONING_COMPREHENSIVE) && !defined(CONFIG_HEAP_POISONING_LIGHT)
-
-#define NUM_POINTERS 128
-#define ITERATIONS 10000
-
-TEST_CASE("Heap many random allocations timings", "[heap]")
-{
-    void *p[NUM_POINTERS] = { 0 };
-    size_t s[NUM_POINTERS] = { 0 };
-
-    uint32_t cycles_before;
-    uint64_t alloc_time_average = 0;
-    uint64_t free_time_average = 0;
-    uint64_t realloc_time_average = 0;
-
-    for (int i = 0; i < ITERATIONS; i++) {
-        uint8_t n = esp_random() % NUM_POINTERS;
-
-        if (esp_random() % 4 == 0) {
-            /* 1 in 4 iterations, try to realloc the buffer instead
-               of using malloc/free
-            */
-            size_t new_size = esp_random() % 1024;
-
-            cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
-            void *new_p = heap_caps_realloc(p[n], new_size, MALLOC_CAP_DEFAULT);
-            realloc_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
-
-            printf("realloc %p -> %p (%zu -> %zu) time spent cycles: %lld \n", p[n], new_p, s[n], new_size, realloc_time_average);
-            heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true);
-            if (new_size == 0 || new_p != NULL) {
-                p[n] = new_p;
-                s[n] = new_size;
-                if (new_size > 0) {
-                    memset(p[n], n, new_size);
-                }
-            }
-            continue;
-        }
-
-        if (p[n] != NULL) {
-            if (s[n] > 0) {
-                /* Verify pre-existing contents of p[n] */
-                uint8_t compare[s[n]];
-                memset(compare, n, s[n]);
-                TEST_ASSERT(( memcmp(compare, p[n], s[n]) == 0 ));
-            }
-            TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true));
-
-            cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
-            heap_caps_free(p[n]);
-            free_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
-
-            printf("freed %p (%zu) time spent cycles: %lld\n", p[n], s[n], free_time_average);
-
-            if (!heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true)) {
-                printf("FAILED iteration %d after freeing %p\n", i, p[n]);
-                heap_caps_dump(MALLOC_CAP_DEFAULT);
-                TEST_ASSERT(0);
-            }
-        }
-
-        s[n] = rand() % 1024;
-        heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true);
-        cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
-        p[n] = heap_caps_malloc(s[n], MALLOC_CAP_DEFAULT);
-        alloc_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
-
-        printf("malloc %p (%zu) time spent cycles: %lld \n", p[n], s[n], alloc_time_average);
-
-        if (!heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true)) {
-            printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]);
-            heap_caps_dump(MALLOC_CAP_DEFAULT);
-            TEST_ASSERT(0);
-        }
-
-        if (p[n] != NULL) {
-            memset(p[n], n, s[n]);
-        }
-    }
-
-    for (int i = 0; i < NUM_POINTERS; i++) {
-        cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
-        heap_caps_free( p[i]);
-        free_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
-
-        if (!heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true)) {
-            printf("FAILED during cleanup after freeing %p\n", p[i]);
-            heap_caps_dump(MALLOC_CAP_DEFAULT);
-            TEST_ASSERT(0);
-        }
-    }
-
-    TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true));
-}
-#endif

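The timing test above samples the FreeRTOS run-time counter around each heap call. Isolated, the measurement idiom is (sketch; counter units depend on how the port's run-time stats clock is configured):

    #include <stdio.h>
    #include "freertos/FreeRTOS.h"
    #include "esp_heap_caps.h"

    void time_one_malloc(void)
    {
        uint32_t before = portGET_RUN_TIME_COUNTER_VALUE();
        void *p = heap_caps_malloc(128, MALLOC_CAP_DEFAULT);
        uint32_t elapsed = portGET_RUN_TIME_COUNTER_VALUE() - before;
        printf("malloc took %u counter ticks\n", (unsigned)elapsed);
        heap_caps_free(p);
    }
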
+ 0 - 74
components/heap/test/test_diram.c

@@ -1,74 +0,0 @@
-/*
- Tests for D/IRAM support in heap capability allocator
-*/
-
-#include <esp_types.h>
-#include <stdio.h>
-#include "unity.h"
-#include "esp_heap_caps.h"
-#include "soc/soc_memory_layout.h"
-
-#define ALLOC_SZ 1024
-
-static void *malloc_block_diram(uint32_t caps)
-{
-    void *attempts[256] = { 0 }; // Allocate up to 256 ALLOC_SZ blocks to exhaust all non-D/IRAM memory temporarily
-    int count = 0;
-    void *result;
-
-    while(count < sizeof(attempts)/sizeof(void *)) {
-        result = heap_caps_malloc(ALLOC_SZ, caps);
-        TEST_ASSERT_NOT_NULL_MESSAGE(result, "not enough free heap to perform test");
-
-        if (esp_ptr_in_diram_dram(result) || esp_ptr_in_diram_iram(result)) {
-            break;
-        }
-
-        attempts[count] = result;
-        result = NULL;
-        count++;
-    }
-
-    for (int i = 0; i < count; i++) {
-        free(attempts[i]);
-    }
-
-    TEST_ASSERT_NOT_NULL_MESSAGE(result, "not enough D/IRAM memory is free");
-    return result;
-}
-
-TEST_CASE("Allocate D/IRAM as DRAM", "[heap]")
-{
-    uint32_t *dram = malloc_block_diram(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
-
-    for (int i = 0; i < ALLOC_SZ / sizeof(uint32_t); i++) {
-        uint32_t v = i + 0xAAAA;
-        dram[i] = v;
-        volatile uint32_t *iram = esp_ptr_diram_dram_to_iram(dram + i);
-        TEST_ASSERT_EQUAL(v, dram[i]);
-        TEST_ASSERT_EQUAL(v, *iram);
-        *iram = UINT32_MAX;
-        TEST_ASSERT_EQUAL(UINT32_MAX, *iram);
-        TEST_ASSERT_EQUAL(UINT32_MAX, dram[i]);
-    }
-
-    free(dram);
-}
-
-TEST_CASE("Allocate D/IRAM as IRAM", "[heap]")
-{
-    uint32_t *iram = malloc_block_diram(MALLOC_CAP_EXEC);
-
-    for (int i = 0; i < ALLOC_SZ / sizeof(uint32_t); i++) {
-        uint32_t v = i + 0xEEE;
-        iram[i] = v;
-        volatile uint32_t *dram = esp_ptr_diram_iram_to_dram(iram + i);
-        TEST_ASSERT_EQUAL_HEX32(v, iram[i]);
-        TEST_ASSERT_EQUAL_HEX32(v, *dram);
-        *dram = UINT32_MAX;
-        TEST_ASSERT_EQUAL_HEX32(UINT32_MAX, *dram);
-        TEST_ASSERT_EQUAL_HEX32(UINT32_MAX, iram[i]);
-    }
-
-    free(iram);
-}

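D/IRAM is one physical RAM mapped at two bus addresses, which is exactly what these tests exercise: a write through the DRAM alias must be visible through the IRAM alias and vice versa. Condensed (a sketch reusing the removed test's malloc_block_diram() helper):

    #include <assert.h>
    #include "soc/soc_memory_layout.h"

    void diram_alias_demo(void)
    {
        uint32_t *dram = malloc_block_diram(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
        volatile uint32_t *iram = esp_ptr_diram_dram_to_iram(dram);  /* second mapping */

        dram[0] = 0x12345678;
        assert(*iram == 0x12345678);   /* same word, read through the IRAM alias */
        free(dram);
    }
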
+ 0 - 164
components/heap/test/test_heap_trace.c

@@ -1,164 +0,0 @@
-/*
- Generic test for heap tracing support
-
- Only compiled in if CONFIG_HEAP_TRACING is set
-*/
-
-#include <esp_types.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "sdkconfig.h"
-#include "unity.h"
-
-#include "freertos/FreeRTOS.h"
-#include "freertos/task.h"
-
-#ifdef CONFIG_HEAP_TRACING
-// only compile in heap tracing tests if tracing is enabled
-
-#include "esp_heap_trace.h"
-
-TEST_CASE("heap trace leak check", "[heap]")
-{
-    heap_trace_record_t recs[8];
-    heap_trace_init_standalone(recs, 8);
-
-    printf("Leak check test\n"); // Print something before trace starts, or stdout allocations skew total counts
-    fflush(stdout);
-
-    heap_trace_start(HEAP_TRACE_LEAKS);
-
-    void *a = malloc(64);
-    memset(a, '3', 64);
-
-    void *b = malloc(96);
-    memset(b, '4', 11);
-
-    printf("a.address %p vs %p b.address %p vs %p\n", a, recs[0].address, b, recs[1].address);
-
-    heap_trace_dump();
-    TEST_ASSERT_EQUAL(2, heap_trace_get_count());
-
-    heap_trace_record_t trace_a, trace_b;
-    heap_trace_get(0, &trace_a);
-    heap_trace_get(1, &trace_b);
-
-    printf("trace_a.address %p trace_b.address %p\n", trace_a.address, trace_b.address);
-
-    TEST_ASSERT_EQUAL_PTR(a, trace_a.address);
-    TEST_ASSERT_EQUAL_PTR(b, trace_b.address);
-
-    TEST_ASSERT_EQUAL_PTR(recs[0].address, trace_a.address);
-    TEST_ASSERT_EQUAL_PTR(recs[1].address, trace_b.address);
-
-    free(a);
-
-    TEST_ASSERT_EQUAL(1, heap_trace_get_count());
-
-    heap_trace_get(0, &trace_b);
-    TEST_ASSERT_EQUAL_PTR(b, trace_b.address);
-
-    /* the buffer drops trace_a when it is freed,
-       so trace_b is now at the head of the buffer */
-    TEST_ASSERT_EQUAL_PTR(recs[0].address, trace_b.address);
-
-    heap_trace_stop();
-}
-
-TEST_CASE("heap trace wrapped buffer check", "[heap]")
-{
-    const size_t N = 8;
-    heap_trace_record_t recs[N];
-    heap_trace_init_standalone(recs, N);
-
-    heap_trace_start(HEAP_TRACE_LEAKS);
-
-    void *ptrs[N+1];
-    for (int i = 0; i < N+1; i++) {
-        ptrs[i] = malloc(i*3);
-    }
-
-    // because other mallocs happen as part of this control flow,
-    // we can't guarantee all N entries of ptrs[] are in the heap trace buffer,
-    // but we should guarantee at least the last one is
-    bool saw_last_ptr = false;
-    for (int i = 0; i < N; i++) {
-        heap_trace_record_t rec;
-        heap_trace_get(i, &rec);
-        if (rec.address == ptrs[N-1]) {
-            saw_last_ptr = true;
-        }
-    }
-    TEST_ASSERT(saw_last_ptr);
-
-    void *other = malloc(6);
-
-    heap_trace_dump();
-
-    for (int i = 0; i < N+1; i++) {
-        free(ptrs[i]);
-    }
-
-    heap_trace_dump();
-
-    bool saw_other = false;
-
-    for (int i = 0; i < heap_trace_get_count(); i++) {
-        heap_trace_record_t rec;
-        heap_trace_get(i, &rec);
-
-        // none of ptr[]s should be in the heap trace any more
-        for (int j = 0; j < N+1; j++) {
-            TEST_ASSERT_NOT_EQUAL(ptrs[j], rec.address);
-        }
-        if (rec.address == other) {
-            saw_other = true;
-        }
-    }
-
-    // 'other' pointer should be somewhere in the leak dump
-    TEST_ASSERT(saw_other);
-
-    heap_trace_stop();
-}
-
-static void print_floats_task(void *ignore)
-{
-    heap_trace_start(HEAP_TRACE_ALL);
-    char buf[16] = { };
-    volatile float f = 12.3456;
-    sprintf(buf, "%.4f", f);
-    TEST_ASSERT_EQUAL_STRING("12.3456", buf);
-    heap_trace_stop();
-
-    vTaskDelete(NULL);
-}
-
-TEST_CASE("can trace allocations made by newlib", "[heap]")
-{
-    const size_t N = 8;
-    heap_trace_record_t recs[N];
-    heap_trace_init_standalone(recs, N);
-
-    /* Verifying that newlib code performs an allocation is very fiddly:
-
-       - Printing a float allocates data associated with the task, but only the
-       first time a task prints a float of this length. So we do it in a one-shot task
-       to avoid possibility it already happened.
-
-       - If newlib is updated this test may start failing if the printf() implementation
-       changes. (This version passes for both nano & regular formatting in newlib 2.2.0)
-
-       - We also do the tracing in the task so we only capture things directly related to it.
-    */
-
-    xTaskCreate(print_floats_task, "print_float", 4096, NULL, 5, NULL);
-    vTaskDelay(10);
-
-    /* has to be at least a few as newlib allocates via multiple different function calls */
-    TEST_ASSERT(heap_trace_get_count() > 3);
-}
-
-
-#endif

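For reference, the standalone tracing these tests depend on needs only a caller-supplied ring buffer and a handful of calls (sketch):

    #include "esp_heap_trace.h"

    static heap_trace_record_t recs[32];      /* ring buffer; wraps when full */

    void trace_leaks(void)
    {
        heap_trace_init_standalone(recs, 32);
        heap_trace_start(HEAP_TRACE_LEAKS);   /* keep records only for un-freed allocations */
        /* ... exercise the code under test ... */
        heap_trace_stop();
        heap_trace_dump();                    /* prints whatever is still recorded */
    }
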
+ 0 - 60
components/heap/test/test_leak.c

@@ -1,60 +0,0 @@
-/*
- Tests for a leak tag
-*/
-
-#include <stdio.h>
-#include "unity.h"
-#include "esp_heap_caps_init.h"
-#include "esp_system.h"
-#include <stdlib.h>
-
-
-static char* check_calloc(int size)
-{
-    char *arr = calloc(size, sizeof(char));
-    TEST_ASSERT_NOT_NULL(arr);
-    return arr;
-}
-
-TEST_CASE("Check for leaks (no leak)", "[heap]")
-{
-    char *arr = check_calloc(1000);
-    free(arr);
-}
-
-TEST_CASE("Check for leaks (leak)", "[heap][ignore]")
-{
-    check_calloc(1000);
-}
-
-TEST_CASE("Not check for leaks", "[heap][leaks]")
-{
-    check_calloc(1000);
-}
-
-TEST_CASE("Set a leak level = 7016", "[heap][leaks=7016]")
-{
-    check_calloc(7000);
-}
-
-static void test_fn(void)
-{
-    check_calloc(1000);
-}
-
-TEST_CASE_MULTIPLE_STAGES("Not check for leaks in MULTIPLE_STAGES mode", "[heap][leaks]", test_fn, test_fn, test_fn);
-
-TEST_CASE_MULTIPLE_STAGES("Check for leaks in MULTIPLE_STAGES mode (leak)", "[heap][ignore]", test_fn, test_fn, test_fn);
-
-static void test_fn2(void)
-{
-    check_calloc(1000);
-    esp_restart();
-}
-
-static void test_fn3(void)
-{
-    check_calloc(1000);
-}
-
-TEST_CASE_MULTIPLE_STAGES("Check for leaks in MULTIPLE_STAGES mode (manual reset)", "[heap][leaks][reset=SW_CPU_RESET, SW_CPU_RESET]", test_fn2, test_fn2, test_fn3);

+ 0 - 134
components/heap/test/test_malloc.c

@@ -1,134 +0,0 @@
-/*
- Generic test for malloc/free
-*/
-
-#include <esp_types.h>
-#include <stdio.h>
-
-#include "freertos/FreeRTOS.h"
-#include "freertos/task.h"
-#include "freertos/semphr.h"
-#include "freertos/queue.h"
-#include "unity.h"
-#include "esp_heap_caps.h"
-
-#include "sdkconfig.h"
-
-
-static int **allocatedMem;
-static int noAllocated;
-
-
-static int tryAllocMem(void) {
-    int i, j;
-    const int allocateMaxK=1024*5; //try to allocate a max of 5MiB
-
-    allocatedMem=malloc(sizeof(int *)*allocateMaxK);
-    if (!allocatedMem) return 0;
-
-    for (i=0; i<allocateMaxK; i++) {
-        allocatedMem[i]=malloc(1024);
-        if (allocatedMem[i]==NULL) break;
-        for (j=0; j<1024/4; j++) allocatedMem[i][j]=(0xdeadbeef);
-    }
-    noAllocated=i;
-    return i;
-}
-
-
-static void tryAllocMemFree(void) {
-    int i, j;
-    for (i=0; i<noAllocated; i++) {
-        for (j=0; j<1024/4; j++) {
-            TEST_ASSERT(allocatedMem[i][j]==(0xdeadbeef));
-        }
-        free(allocatedMem[i]);
-    }
-    free(allocatedMem);
-}
-
-
-TEST_CASE("Malloc/overwrite, then free all available DRAM", "[heap]")
-{
-    int m1=0, m2=0;
-    m1=tryAllocMem();
-    tryAllocMemFree();
-    m2=tryAllocMem();
-    tryAllocMemFree();
-    printf("Could allocate %dK on first try, %dK on 2nd try.\n", m1, m2);
-    TEST_ASSERT(m1==m2);
-}
-
-#if CONFIG_SPIRAM_USE_MALLOC
-
-#if (CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL > 1024)
-TEST_CASE("Check if reserved DMA pool still can allocate even when malloc()'ed memory is exhausted", "[heap]")
-{
-    char** dmaMem=malloc(sizeof(char*)*512);
-    assert(dmaMem);
-    int m=tryAllocMem();
-    int i=0;
-    for (i=0; i<512; i++) {
-        dmaMem[i]=heap_caps_malloc(1024, MALLOC_CAP_DMA);
-        if (dmaMem[i]==NULL) break;
-    }
-    for (int j=0; j<i; j++) free(dmaMem[j]);
-    free(dmaMem);
-    tryAllocMemFree();
-    printf("Could allocate %dK of DMA memory after allocating all of %dK of normal memory.\n", i, m);
-    TEST_ASSERT(i);
-}
-#endif
-
-#endif
-
-
-/* As you see, we are desperately trying to outsmart the compiler, so that it
- * doesn't warn about oversized allocations in the next two unit tests.
- * To be removed when we switch to GCC 8.2 and add
- * -Wno-alloc-size-larger-than=PTRDIFF_MAX to CFLAGS for this file.
- */
-void* (*g_test_malloc_ptr)(size_t) = &malloc;
-void* (*g_test_calloc_ptr)(size_t, size_t) = &calloc;
-
-void* test_malloc_wrapper(size_t size)
-{
-    return (*g_test_malloc_ptr)(size);
-}
-
-void* test_calloc_wrapper(size_t count, size_t size)
-{
-    return (*g_test_calloc_ptr)(count, size);
-}
-
-TEST_CASE("alloc overflows should all fail", "[heap]")
-{
-    /* the multiplication wraps to just 6 bytes if size_t overflows */
-    TEST_ASSERT_NULL(test_calloc_wrapper(SIZE_MAX / 2 + 4, 2));
-
-    /* will overflow if any poisoning is enabled
-       (should fail for sensible OOM reasons, otherwise) */
-    TEST_ASSERT_NULL(test_malloc_wrapper(SIZE_MAX - 1));
-    TEST_ASSERT_NULL(test_calloc_wrapper(SIZE_MAX - 1, 1));
-
-    /* will overflow when the size is rounded up to word align it */
-    TEST_ASSERT_NULL(heap_caps_malloc(SIZE_MAX-1, MALLOC_CAP_32BIT));
-
-    TEST_ASSERT_NULL(heap_caps_malloc(SIZE_MAX-1, MALLOC_CAP_EXEC));
-}
-
-TEST_CASE("unreasonable allocs should all fail", "[heap]")
-{
-    TEST_ASSERT_NULL(test_calloc_wrapper(16, 1024*1024));
-    TEST_ASSERT_NULL(test_malloc_wrapper(16*1024*1024));
-    TEST_ASSERT_NULL(test_malloc_wrapper(SIZE_MAX / 2));
-    TEST_ASSERT_NULL(test_malloc_wrapper(SIZE_MAX - 256));
-    TEST_ASSERT_NULL(test_malloc_wrapper(xPortGetFreeHeapSize() - 1));
-}
-
-TEST_CASE("malloc(0) should return a NULL pointer", "[heap]")
-{
-    void *p;
-    p = malloc(0);
-    TEST_ASSERT(p == NULL);
-}

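The first overflow case above deserves one line of arithmetic: with a 32-bit size_t, (SIZE_MAX / 2 + 4) * 2 wraps around to 6, so an unguarded calloc would hand back a 6-byte buffer the caller believes is roughly 2 GiB. A sketch of the guard a correct calloc performs:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    void *checked_calloc(size_t count, size_t size)
    {
        if (size != 0 && count > SIZE_MAX / size) {
            return NULL;                  /* count * size would overflow */
        }
        void *p = malloc(count * size);
        if (p != NULL) {
            memset(p, 0, count * size);
        }
        return p;
    }
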
+ 0 - 247
components/heap/test/test_malloc_caps.c

@@ -1,247 +0,0 @@
-/*
- Tests for the capabilities-based memory allocator.
-*/
-
-#include <esp_types.h>
-#include <stdio.h>
-#include "unity.h"
-#include "esp_attr.h"
-#include "esp_heap_caps.h"
-#include "esp_spi_flash.h"
-#include <stdlib.h>
-#include <sys/param.h>
-
-#ifndef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
-TEST_CASE("Capabilities allocator test", "[heap]")
-{
-    char *m1, *m2[10];
-    int x;
-    size_t free8start, free32start, free8, free32;
-
-    /* It's important we printf() something before we take the empty heap sizes,
-       as the first printf() in a task allocates heap resources... */
-    printf("Testing capabilities allocator...\n");
-
-    free8start = heap_caps_get_free_size(MALLOC_CAP_8BIT);
-    free32start = heap_caps_get_free_size(MALLOC_CAP_32BIT);
-    printf("Free 8bit-capable memory (start): %dK, 32-bit capable memory %dK\n", free8start, free32start);
-    TEST_ASSERT(free32start >= free8start);
-
-    printf("Allocating 10K of 8-bit capable RAM\n");
-    m1= heap_caps_malloc(10*1024, MALLOC_CAP_8BIT);
-    printf("--> %p\n", m1);
-    free8 = heap_caps_get_free_size(MALLOC_CAP_8BIT);
-    free32 = heap_caps_get_free_size(MALLOC_CAP_32BIT);
-    printf("Free 8bit-capable memory (both reduced): %dK, 32-bit capable memory %dK\n", free8, free32);
-    //Both should have gone down by 10K; 8bit capable ram is also 32-bit capable
-    TEST_ASSERT(free8<=(free8start-10*1024));
-    TEST_ASSERT(free32<=(free32start-10*1024));
-    //Assume we got DRAM back
-    TEST_ASSERT((((int)m1)&0xFF000000)==0x3F000000);
-    free(m1);
-
-    //The goal here is to allocate from IRAM. Since there is no external IRAM (yet)
-    //the following gives size of IRAM-only (not D/IRAM) memory.
-    size_t free_iram = heap_caps_get_free_size(MALLOC_CAP_INTERNAL) -
-                           heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
-    size_t alloc32 = MIN(free_iram / 2, 10*1024) & (~3);
-    if(free_iram) {
-        printf("Freeing; allocating %u bytes of 32-bit-capable RAM\n", alloc32);
-        m1 = heap_caps_malloc(alloc32, MALLOC_CAP_32BIT);
-        printf("--> %p\n", m1);
-        //Check that we got IRAM back
-        TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000);
-        free8 = heap_caps_get_free_size(MALLOC_CAP_8BIT);
-        free32 = heap_caps_get_free_size(MALLOC_CAP_32BIT);
-        printf("Free 8bit-capable memory (after 32-bit): %dK, 32-bit capable memory %dK\n", free8, free32);
-        //Only 32-bit should have gone down by alloc32: 32-bit isn't necessarily 8bit capable
-        TEST_ASSERT(free32<=(free32start-alloc32));
-        TEST_ASSERT(free8==free8start);
-        free(m1);
-    } else {
-        printf("This platform has no 32-bit only capable RAM, jumping to next test \n");
-    }
-
-    printf("Allocating impossible caps\n");
-    m1= heap_caps_malloc(10*1024, MALLOC_CAP_8BIT|MALLOC_CAP_EXEC);
-    printf("--> %p\n", m1);
-    TEST_ASSERT(m1==NULL);
-
-    if(free_iram) {
-        printf("Testing changeover iram -> dram\n");
-        // priorities will exhaust IRAM first, then start allocating from DRAM
-        for (x=0; x<10; x++) {
-            m2[x]= heap_caps_malloc(alloc32, MALLOC_CAP_32BIT);
-            printf("--> %p\n", m2[x]);
-        }
-        TEST_ASSERT((((int)m2[0])&0xFF000000)==0x40000000);
-        TEST_ASSERT((((int)m2[9])&0xFF000000)==0x3F000000);
-
-    } else {
-        printf("This platform has no IRAM-only so changeover will never occur, jumping to next test\n");
-    }
-
-    printf("Test if allocating executable code still gives IRAM, even with dedicated IRAM region depleted\n");
-    if(free_iram) {
-        // (the allocation should come from D/IRAM)
-        free_iram = heap_caps_get_free_size(MALLOC_CAP_EXEC);
-        m1= heap_caps_malloc(MIN(free_iram / 2, 10*1024), MALLOC_CAP_EXEC);
-        printf("--> %p\n", m1);
-        TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000);
-        for (x=0; x<10; x++) free(m2[x]);
-
-    } else {
-        // (the allocation should come from D/IRAM)
-        free_iram = heap_caps_get_free_size(MALLOC_CAP_EXEC);
-        m1= heap_caps_malloc(MIN(free_iram / 2, 10*1024), MALLOC_CAP_EXEC);
-        printf("--> %p\n", m1);
-        TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000);
-    }
-
-    free(m1);
-    printf("Done.\n");
-}
-#endif
-
-#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
-TEST_CASE("IRAM_8BIT capability test", "[heap]")
-{
-    uint8_t *ptr;
-    size_t free_size, free_size32, largest_free_size;
-
-    /* need to print something as first printf allocates some heap */
-    printf("IRAM_8BIT capability test\n");
-
-    free_size = heap_caps_get_free_size(MALLOC_CAP_IRAM_8BIT);
-    free_size32 = heap_caps_get_free_size(MALLOC_CAP_32BIT);
-
-    largest_free_size = heap_caps_get_largest_free_block(MALLOC_CAP_IRAM_8BIT);
-
-    ptr = heap_caps_malloc(largest_free_size, MALLOC_CAP_IRAM_8BIT);
-
-    TEST_ASSERT((((int)ptr)&0xFF000000)==0x40000000);
-
-    TEST_ASSERT(heap_caps_get_free_size(MALLOC_CAP_IRAM_8BIT) == (free_size - heap_caps_get_allocated_size(ptr)));
-    TEST_ASSERT(heap_caps_get_free_size(MALLOC_CAP_32BIT) == (free_size32 - heap_caps_get_allocated_size(ptr)));
-
-    free(ptr);
-}
-#endif
-
-TEST_CASE("heap_caps metadata test", "[heap]")
-{
-    /* need to print something as first printf allocates some heap */
-    printf("heap_caps metadata test\n");
-    heap_caps_print_heap_info(MALLOC_CAP_8BIT);
-
-    multi_heap_info_t original;
-    heap_caps_get_info(&original, MALLOC_CAP_8BIT);
-
-    void *b = heap_caps_malloc(original.largest_free_block, MALLOC_CAP_8BIT);
-    TEST_ASSERT_NOT_NULL(b);
-
-    printf("After allocating %d bytes:\n", original.largest_free_block);
-    heap_caps_print_heap_info(MALLOC_CAP_8BIT);
-
-    multi_heap_info_t after;
-    heap_caps_get_info(&after, MALLOC_CAP_8BIT);
-    TEST_ASSERT(after.largest_free_block <= original.largest_free_block);
-    TEST_ASSERT(after.total_free_bytes <= original.total_free_bytes);
-
-    free(b);
-    heap_caps_get_info(&after, MALLOC_CAP_8BIT);
-
-    printf("\n\n After test, heap status:\n");
-    heap_caps_print_heap_info(MALLOC_CAP_8BIT);
-
-    /* Allow some leeway here, because LWIP sometimes allocates up to 144 bytes in the background
-       as part of timer management.
-    */
-    TEST_ASSERT_INT32_WITHIN(200, after.total_free_bytes, original.total_free_bytes);
-    TEST_ASSERT_INT32_WITHIN(200, after.largest_free_block, original.largest_free_block);
-    TEST_ASSERT(after.minimum_free_bytes < original.total_free_bytes);
-}
-
-/* Small function runs from IRAM to check that malloc/free/realloc
-   all work OK when cache is disabled...
-*/
-static IRAM_ATTR __attribute__((noinline)) bool iram_malloc_test(void)
-{
-    spi_flash_guard_get()->start(); // Disables flash cache
-
-    bool result = true;
-    void *x = heap_caps_malloc(64, MALLOC_CAP_EXEC);
-    result = result && (x != NULL);
-    void *y = heap_caps_realloc(x, 32, MALLOC_CAP_EXEC);
-    result = result && (y != NULL);
-    heap_caps_free(y);
-
-    spi_flash_guard_get()->end(); // Re-enables flash cache
-
-    return result;
-}
-
-
-TEST_CASE("heap_caps_xxx functions work with flash cache disabled", "[heap]")
-{
-    TEST_ASSERT( iram_malloc_test() );
-}
-
-#ifdef CONFIG_HEAP_ABORT_WHEN_ALLOCATION_FAILS
-TEST_CASE("When enabled, allocation operation failure generates an abort", "[heap][reset=abort,SW_CPU_RESET]")
-{
-    const size_t stupid_allocation_size = (128 * 1024 * 1024);
-    void *ptr = heap_caps_malloc(stupid_allocation_size, MALLOC_CAP_DEFAULT);
-    (void)ptr;
-    TEST_FAIL_MESSAGE("should not be reached");
-}
-#endif
-
-static bool called_user_failed_hook = false;
-
-void heap_caps_alloc_failed_hook(size_t requested_size, uint32_t caps, const char *function_name)
-{
-    printf("%s was called but failed to allocate %d bytes with 0x%X capabilities. \n",function_name, requested_size, caps);
-    called_user_failed_hook = true;
-}
-
-TEST_CASE("user provided alloc failed hook must be called when allocation fails", "[heap]")
-{
-    TEST_ASSERT(heap_caps_register_failed_alloc_callback(heap_caps_alloc_failed_hook) == ESP_OK);
-
-    const size_t stupid_allocation_size = (128 * 1024 * 1024);
-    void *ptr = heap_caps_malloc(stupid_allocation_size, MALLOC_CAP_DEFAULT);
-    TEST_ASSERT(called_user_failed_hook != false);
-
-    called_user_failed_hook = false;
-    ptr = heap_caps_realloc(ptr, stupid_allocation_size, MALLOC_CAP_DEFAULT);
-    TEST_ASSERT(called_user_failed_hook != false);
-
-    called_user_failed_hook = false;
-    ptr = heap_caps_aligned_alloc(0x200, stupid_allocation_size, MALLOC_CAP_DEFAULT);
-    TEST_ASSERT(called_user_failed_hook != false);
-
-    (void)ptr;
-}
-
-TEST_CASE("allocation with invalid capability should also trigger the alloc failed hook", "[heap]")
-{
-    const size_t allocation_size = 64;
-    const uint32_t invalid_cap = MALLOC_CAP_INVALID;
-
-    TEST_ASSERT(heap_caps_register_failed_alloc_callback(heap_caps_alloc_failed_hook) == ESP_OK);
-
-    called_user_failed_hook = false;
-    void *ptr = heap_caps_malloc(allocation_size, invalid_cap);
-    TEST_ASSERT(called_user_failed_hook != false);
-
-    called_user_failed_hook = false;
-    ptr = heap_caps_realloc(ptr, allocation_size, invalid_cap);
-    TEST_ASSERT(called_user_failed_hook != false);
-
-    called_user_failed_hook = false;
-    ptr = heap_caps_aligned_alloc(0x200, allocation_size, invalid_cap);
-    TEST_ASSERT(called_user_failed_hook != false);
-
-    (void)ptr;
-}

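The capability flags exercised above compose with bitwise OR: the allocator walks memory types in priority order and returns the first block that satisfies every requested capability. Typical application-side usage (sketch):

    #include "esp_heap_caps.h"

    void caps_usage(void)
    {
        /* DMA-capable internal RAM, e.g. for a peripheral buffer */
        uint8_t *dma_buf = heap_caps_malloc(4096, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);

        /* any byte-addressable RAM, internal or external */
        char *scratch = heap_caps_malloc(1024, MALLOC_CAP_8BIT);

        heap_caps_free(scratch);
        heap_caps_free(dma_buf);
    }
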
+ 0 - 67
components/heap/test/test_realloc.c

@@ -1,67 +0,0 @@
-/*
- Generic test for realloc
-*/
-
-#include <stdlib.h>
-#include <string.h>
-#include "unity.h"
-#include "sdkconfig.h"
-#include "esp_heap_caps.h"
-#include "soc/soc_memory_layout.h"
-
-
-#ifndef CONFIG_HEAP_POISONING_COMPREHENSIVE
-/* (can't realloc in place if comprehensive is enabled) */
-
-TEST_CASE("realloc shrink buffer in place", "[heap]")
-{
-    void *x = malloc(64);
-    TEST_ASSERT(x);
-    void *y = realloc(x, 48);
-    TEST_ASSERT_EQUAL_PTR(x, y);
-}
-
-#endif
-
-#ifndef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
-TEST_CASE("realloc shrink buffer with EXEC CAPS", "[heap]")
-{
-    const size_t buffer_size = 64;
-
-    void *x = heap_caps_malloc(buffer_size, MALLOC_CAP_EXEC);
-    TEST_ASSERT(x);
-    void *y = heap_caps_realloc(x, buffer_size - 16, MALLOC_CAP_EXEC);
-    TEST_ASSERT(y);
-
-    //y needs to fall in a compatible memory area of IRAM:
-    TEST_ASSERT(esp_ptr_executable(y)|| esp_ptr_in_iram(y) || esp_ptr_in_diram_iram(y));
-
-    free(y);
-}
-
-TEST_CASE("realloc move data to a new heap type", "[heap]")
-{
-    const char *test = "I am some test content to put in the heap";
-    char buf[64];
-    memset(buf, 0xEE, 64);
-    strlcpy(buf, test, 64);
-
-    char *a = malloc(64);
-    memcpy(a, buf, 64);
-    // move data from 'a' to IRAM
-    char *b = heap_caps_realloc(a, 64, MALLOC_CAP_EXEC);
-    TEST_ASSERT_NOT_NULL(b);
-    TEST_ASSERT_NOT_EQUAL(a, b);
-    TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_INVALID, true));
-    TEST_ASSERT_EQUAL_HEX32_ARRAY(buf, b, 64 / sizeof(uint32_t));
-
-    // Move data back to DRAM
-    char *c = heap_caps_realloc(b, 48, MALLOC_CAP_8BIT);
-    TEST_ASSERT_NOT_NULL(c);
-    TEST_ASSERT_NOT_EQUAL(b, c);
-    TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_INVALID, true));
-    TEST_ASSERT_EQUAL_HEX8_ARRAY(buf, c, 48);
-
-    free(c);
-}
-#endif

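heap_caps_realloc() keeps the usual C edge cases, mirroring the multi_heap_realloc() contract shown earlier in this diff: a NULL pointer degenerates to malloc, and size 0 degenerates to free. Spelled out (sketch):

    #include <assert.h>
    #include "esp_heap_caps.h"

    void realloc_edge_cases(void)
    {
        void *p = heap_caps_realloc(NULL, 64, MALLOC_CAP_8BIT);  /* acts like malloc */
        p = heap_caps_realloc(p, 128, MALLOC_CAP_8BIT);          /* may move; contents preserved */
        p = heap_caps_realloc(p, 0, MALLOC_CAP_8BIT);            /* frees p and returns NULL */
        assert(p == NULL);
    }
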
+ 0 - 72
components/heap/test/test_runtime_heap_reg.c

@@ -1,72 +0,0 @@
-/*
- Tests for registering new heap memory at runtime
-*/
-
-#include <stdio.h>
-#include "unity.h"
-#include "esp_heap_caps_init.h"
-#include "esp_system.h"
-#include <stdlib.h>
-
-
-/* NOTE: This is not a well-formed unit test, it leaks memory */
-TEST_CASE("Allocate new heap at runtime", "[heap][ignore]")
-{
-    const size_t BUF_SZ = 1000;
-    const size_t HEAP_OVERHEAD_MAX = 200;
-    void *buffer = malloc(BUF_SZ);
-    TEST_ASSERT_NOT_NULL(buffer);
-    uint32_t before_free = esp_get_free_heap_size();
-    TEST_ESP_OK( heap_caps_add_region((intptr_t)buffer, (intptr_t)buffer + BUF_SZ) );
-    uint32_t after_free = esp_get_free_heap_size();
-    printf("Before %u after %u\n", before_free, after_free);
-    /* allow for some 'heap overhead' from accounting structures */
-    TEST_ASSERT(after_free >= before_free + BUF_SZ - HEAP_OVERHEAD_MAX);
-}
-
-/* NOTE: This is not a well-formed unit test, it leaks memory and
-   may fail if run twice in a row without a reset.
-*/
-TEST_CASE("Allocate new heap with new capability", "[heap][ignore]")
-{
-    const size_t BUF_SZ = 100;
-#ifdef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
-    const size_t ALLOC_SZ = 32;
-#else
-    const size_t ALLOC_SZ = 64; // More than half of BUF_SZ
-#endif
-    const uint32_t MALLOC_CAP_INVENTED = (1 << 30); /* this must be unused in esp_heap_caps.h */
-
-    /* no memory exists to provide this capability */
-    TEST_ASSERT_NULL( heap_caps_malloc(ALLOC_SZ, MALLOC_CAP_INVENTED) );
-
-    void *buffer = malloc(BUF_SZ);
-    TEST_ASSERT_NOT_NULL(buffer);
-    uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS] = { MALLOC_CAP_INVENTED };
-    TEST_ESP_OK( heap_caps_add_region_with_caps(caps, (intptr_t)buffer, (intptr_t)buffer + BUF_SZ) );
-
-    /* ta-da, it's now possible! */
-    TEST_ASSERT_NOT_NULL( heap_caps_malloc(ALLOC_SZ, MALLOC_CAP_INVENTED) );
-}
-
-/* NOTE: This is not a well-formed unit test.
- * If run twice without a reset, it will fail.
- */
-
-TEST_CASE("Add .bss memory to heap region runtime", "[heap][ignore]")
-{
-#define BUF_SZ 1000
-#define HEAP_OVERHEAD_MAX 200
-    static uint8_t s_buffer[BUF_SZ];
-
-    printf("s_buffer start %08x end %08x\n", (intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ);
-    uint32_t before_free = esp_get_free_heap_size();
-    TEST_ESP_OK( heap_caps_add_region((intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ) );
-    uint32_t after_free = esp_get_free_heap_size();
-    printf("Before %u after %u\n", before_free, after_free);
-    /* allow for some 'heap overhead' from accounting structures */
-    TEST_ASSERT(after_free >= before_free + BUF_SZ - HEAP_OVERHEAD_MAX);
-
-    /* Adding the same region twice must fail */
-    TEST_ASSERT( (heap_caps_add_region((intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ) != ESP_OK) );
-}

+ 0 - 54
components/heap/test_multi_heap_host/Makefile

@@ -1,54 +0,0 @@
-TEST_PROGRAM=test_multi_heap
-all: $(TEST_PROGRAM)
-
-ifneq ($(filter clean,$(MAKECMDGOALS)),)
-.NOTPARALLEL:  # prevent make clean racing the other targets
-endif
-
-SOURCE_FILES = $(abspath \
-    ../multi_heap.c \
-    ../heap_tlsf.c \
-	../multi_heap_poisoning.c \
-	test_multi_heap.cpp \
-	main.cpp \
-    )
-
-INCLUDE_FLAGS = -I../include -I../../../tools/catch
-
-GCOV ?= gcov
-
-CPPFLAGS += $(INCLUDE_FLAGS) -D CONFIG_LOG_DEFAULT_LEVEL -g -fstack-protector-all -m32  -DCONFIG_HEAP_POISONING_COMPREHENSIVE
-CFLAGS += -Wall -Werror -fprofile-arcs -ftest-coverage
-CXXFLAGS += -std=c++11 -Wall -Werror  -fprofile-arcs -ftest-coverage
-LDFLAGS += -lstdc++ -fprofile-arcs -ftest-coverage -m32
-
-OBJ_FILES = $(filter %.o, $(SOURCE_FILES:.cpp=.o) $(SOURCE_FILES:.c=.o))
-
-COVERAGE_FILES = $(OBJ_FILES:.o=.gc*)
-
-$(TEST_PROGRAM): $(OBJ_FILES)
-	g++ $(LDFLAGS) -o $(TEST_PROGRAM) $(OBJ_FILES)
-
-$(OUTPUT_DIR):
-	mkdir -p $(OUTPUT_DIR)
-
-test: $(TEST_PROGRAM)
-	./$(TEST_PROGRAM)
-
-$(COVERAGE_FILES): $(TEST_PROGRAM) test
-
-coverage.info: $(COVERAGE_FILES)
-	find ../ -name "*.gcno" -exec $(GCOV) -r -pb {} +
-	lcov --capture --directory $(abspath ../) --no-external --output-file coverage.info --gcov-tool $(GCOV)
-
-coverage_report: coverage.info
-	genhtml coverage.info --output-directory coverage_report
-	@echo "Coverage report is in coverage_report/index.html"
-
-clean:
-	rm -f $(OBJ_FILES) $(TEST_PROGRAM)
-	rm -f $(COVERAGE_FILES) *.gcov
-	rm -rf coverage_report/
-	rm -f coverage.info
-
-.PHONY: clean all test

+ 0 - 2
components/heap/test_multi_heap_host/main.cpp

@@ -1,2 +0,0 @@
-#define CATCH_CONFIG_MAIN
-#include "catch.hpp"

+ 0 - 20
components/heap/test_multi_heap_host/test_all_configs.sh

@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-#
-# Run the test suite with all configurations enabled
-#
-
-FAIL=0
-
-for FLAGS in "CONFIG_HEAP_POISONING_NONE" "CONFIG_HEAP_POISONING_LIGHT" "CONFIG_HEAP_POISONING_COMPREHENSIVE" ; do
-    echo "==== Testing with config: ${FLAGS} ===="
-    CPPFLAGS="-D${FLAGS}" make clean test || FAIL=1
-done
-
-make clean
-
-if [ $FAIL == 0 ]; then
-    echo "All configurations passed"
-else
-    echo "Some configurations failed, see log."
-    exit 1
-fi

+ 0 - 508
components/heap/test_multi_heap_host/test_multi_heap.cpp

@@ -1,508 +0,0 @@
-#include "catch.hpp"
-#include "multi_heap.h"
-
-#include "../multi_heap_config.h"
-
-#include <string.h>
-#include <assert.h>
-
-static void *__malloc__(size_t bytes) 
-{
-    return malloc(bytes);
-}
-
-static void __free__(void *ptr) 
-{
-    free(ptr);
-}
-
-/* Insurance against accidentally using libc heap functions in tests */
-#undef free
-#define free #error
-#undef malloc
-#define malloc #error
-#undef calloc
-#define calloc #error
-#undef realloc
-#define realloc #error
-
-TEST_CASE("multi_heap simple allocations", "[multi_heap]")
-{
-    uint8_t small_heap[4 * 1024];
-
-    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
-
-    size_t test_alloc_size = (multi_heap_free_size(heap) + 4) / 2;
-
-    printf("New heap:\n");
-    multi_heap_dump(heap);
-    printf("*********************\n");
-
-    uint8_t *buf = (uint8_t *)multi_heap_malloc(heap, test_alloc_size);
-
-    printf("small_heap %p buf %p\n", small_heap, buf);
-    REQUIRE( buf != NULL );
-    REQUIRE((intptr_t)buf >= (intptr_t)small_heap);
-    REQUIRE( (intptr_t)buf < (intptr_t)(small_heap + sizeof(small_heap)));
-
-    REQUIRE( multi_heap_get_allocated_size(heap, buf) >= test_alloc_size );
-    REQUIRE( multi_heap_get_allocated_size(heap, buf) < test_alloc_size + 16);
-
-    memset(buf, 0xEE, test_alloc_size);
-
-    REQUIRE( multi_heap_malloc(heap, test_alloc_size) == NULL );
-
-    multi_heap_free(heap, buf);
-
-    printf("Empty?\n");
-    multi_heap_dump(heap);
-    printf("*********************\n");
-
-    /* Now there should be space for another allocation */
-    buf = (uint8_t *)multi_heap_malloc(heap, test_alloc_size);
-    REQUIRE( buf != NULL );
-    multi_heap_free(heap, buf);
-
-    REQUIRE( multi_heap_free_size(heap) > multi_heap_minimum_free_size(heap) );
-}
-
-
-TEST_CASE("multi_heap fragmentation", "[multi_heap]")
-{
-    uint8_t small_heap[4 * 1024];
-    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
-
-    const size_t alloc_size = 128;
-
-    void *p[4];
-    for (int i = 0; i < 4; i++) {
-        multi_heap_dump(heap);
-        REQUIRE(  multi_heap_check(heap, true) );
-        p[i] = multi_heap_malloc(heap, alloc_size);
-        printf("%d = %p ****->\n", i, p[i]);
-        multi_heap_dump(heap);
-        REQUIRE( p[i] != NULL );
-    }
-
-    printf("allocated %p %p %p %p\n", p[0], p[1], p[2], p[3]);
-
-    REQUIRE( multi_heap_malloc(heap, alloc_size * 5) == NULL ); /* no room to allocate 5*alloc_size now */
-
-    printf("4 allocations:\n");
-    multi_heap_dump(heap);
-    printf("****************\n");
-
-    multi_heap_free(heap, p[0]);
-    multi_heap_free(heap, p[1]);
-    multi_heap_free(heap, p[3]);
-
-    printf("1 allocations:\n");
-    multi_heap_dump(heap);
-    printf("****************\n");
-
-    void *big = multi_heap_malloc(heap, alloc_size * 3);
-    //Blocks in TLSF are organized differently, so this check makes no sense here
-    multi_heap_free(heap, big);
-
-    multi_heap_free(heap, p[2]);
-
-    printf("0 allocations:\n");
-    multi_heap_dump(heap);
-    printf("****************\n");
-
-    big = multi_heap_malloc(heap, alloc_size * 2);
-    //Blocks in TLSF are organized differently, so this check makes no sense here
-    multi_heap_free(heap, big);
-}
-
-/* Test that malloc/free does not leave free space fragmented */
-TEST_CASE("multi_heap defrag", "[multi_heap]")
-{
-    void *p[4];
-    uint8_t small_heap[4 * 1024];
-    multi_heap_info_t info, info2;
-    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
-
-    printf("0 ---\n");
-    multi_heap_dump(heap);
-    REQUIRE( multi_heap_check(heap, true) );
-    multi_heap_get_info(heap, &info);
-    REQUIRE( 0 == info.allocated_blocks );
-    REQUIRE( 1 == info.free_blocks );
-
-    printf("1 ---\n");
-    p[0] = multi_heap_malloc(heap, 128);
-    p[1] = multi_heap_malloc(heap, 32);
-    multi_heap_dump(heap);
-    REQUIRE( multi_heap_check(heap, true) );
-
-    printf("2 ---\n");
-    multi_heap_free(heap, p[0]);
-    p[2] = multi_heap_malloc(heap, 64);
-    multi_heap_dump(heap);
-    REQUIRE( p[2] == p[0] );
-    REQUIRE( multi_heap_check(heap, true) );
-
-    printf("3 ---\n");
-    multi_heap_free(heap, p[2]);
-    p[3] = multi_heap_malloc(heap, 32);
-    multi_heap_dump(heap);
-    REQUIRE( p[3] == p[0] );
-    REQUIRE( multi_heap_check(heap, true) );
-
-    multi_heap_get_info(heap, &info2);
-    REQUIRE( 2 == info2.allocated_blocks );
-    REQUIRE( 2 == info2.free_blocks );
-
-    multi_heap_free(heap, p[0]);
-    multi_heap_free(heap, p[1]);
-    multi_heap_get_info(heap, &info2);
-    REQUIRE( 0 == info2.allocated_blocks );
-    REQUIRE( 1 == info2.free_blocks );
-    REQUIRE( info.total_free_bytes == info2.total_free_bytes );
-}
-
-/* Test that malloc/free does not leave free space fragmented
-   Note: With fancy poisoning, realloc is implemented as malloc-copy-free and this test does not apply.
- */
-#ifndef MULTI_HEAP_POISONING_SLOW
-TEST_CASE("multi_heap defrag realloc", "[multi_heap]")
-{
-    void *p[4];
-    uint8_t small_heap[4 * 1024];
-    multi_heap_info_t info, info2;
-    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
-
-    printf("0 ---\n");
-    multi_heap_dump(heap);
-    REQUIRE( multi_heap_check(heap, true) );
-    multi_heap_get_info(heap, &info);
-    REQUIRE( 0 == info.allocated_blocks );
-    REQUIRE( 1 == info.free_blocks );
-
-    printf("1 ---\n");
-    p[0] = multi_heap_malloc(heap, 128);
-    p[1] = multi_heap_malloc(heap, 32);
-    multi_heap_dump(heap);
-    REQUIRE( multi_heap_check(heap, true) );
-
-    printf("2 ---\n");
-    p[2] = multi_heap_realloc(heap, p[0], 64);
-    multi_heap_dump(heap);
-    REQUIRE( p[2] == p[0] );
-    REQUIRE( multi_heap_check(heap, true) );
-
-    printf("3 ---\n");
-    p[3] = multi_heap_realloc(heap, p[2], 32);
-    multi_heap_dump(heap);
-    REQUIRE( p[3] == p[0] );
-    REQUIRE( multi_heap_check(heap, true) );
-
-    multi_heap_get_info(heap, &info2);
-    REQUIRE( 2 == info2.allocated_blocks );
-    REQUIRE( 2 == info2.free_blocks );
-
-    multi_heap_free(heap, p[0]);
-    multi_heap_free(heap, p[1]);
-    multi_heap_get_info(heap, &info2);
-    REQUIRE( 0 == info2.allocated_blocks );
-    REQUIRE( 1 == info2.free_blocks );
-    REQUIRE( info.total_free_bytes == info2.total_free_bytes );
-}
-#endif
-
-
-void multi_heap_allocation_impl(int heap_size)
-{
-    uint8_t *big_heap = (uint8_t *) __malloc__(2*heap_size);
-    const int NUM_POINTERS = 64;
-
-    printf("Running multi-allocation test with heap_size %d...\n", heap_size);
-
-    REQUIRE( big_heap );
-    multi_heap_handle_t heap = multi_heap_register(big_heap, heap_size);
-
-    void *p[NUM_POINTERS] = { 0 };
-    size_t s[NUM_POINTERS] = { 0 };
-
-    const size_t initial_free = multi_heap_free_size(heap);
-
-    const int ITERATIONS = 10000;
-
-    for (int i = 0; i < ITERATIONS; i++) {
-        /* check all pointers allocated so far are valid inside big_heap */
-        for (int j = 0; j < NUM_POINTERS; j++) {
-            if (p[j] != NULL) {
-            }
-        }
-
-        uint8_t n = rand() % NUM_POINTERS;
-
-        if (rand() % 4 == 0) {
-            /* 1 in 4 iterations, try to realloc the buffer instead
-               of using malloc/free
-            */
-            size_t new_size = rand() % 1024;
-            void *new_p = multi_heap_realloc(heap, p[n], new_size);
-            printf("realloc %p -> %p (%zu -> %zu)\n", p[n], new_p, s[n], new_size);
-            multi_heap_check(heap, true);
-            if (new_size == 0 || new_p != NULL) {
-                p[n] = new_p;
-                s[n] = new_size;
-                if (new_size > 0) {
-                    REQUIRE( p[n] >= big_heap );
-                    REQUIRE( p[n] < big_heap + heap_size );
-                    memset(p[n], n, new_size);
-                }
-            }
-            continue;
-        }
-        if (p[n] != NULL) {
-            if (s[n] > 0) {
-                /* Verify pre-existing contents of p[n] */
-                uint8_t compare[s[n]];
-                memset(compare, n, s[n]);
-                /*REQUIRE*/assert( memcmp(compare, p[n], s[n]) == 0 );
-            }
-            REQUIRE( multi_heap_check(heap, true) );
-            multi_heap_free(heap, p[n]);
-            printf("freed %p (%zu)\n", p[n], s[n]);
-            if (!multi_heap_check(heap, true)) {
-                printf("FAILED iteration %d after freeing %p\n", i, p[n]);
-                multi_heap_dump(heap);
-                REQUIRE(0);
-            }
-        }
-
-        s[n] = rand() % 1024;
-        REQUIRE( multi_heap_check(heap, true) );
-        p[n] = multi_heap_malloc(heap, s[n]);
-        printf("malloc %p (%zu)\n", p[n], s[n]);
-        if (p[n] != NULL) {
-            REQUIRE( p[n] >= big_heap );
-            REQUIRE( p[n] < big_heap + heap_size );
-        }
-        if (!multi_heap_check(heap, true)) {
-            printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]);
-            multi_heap_dump(heap);
-            REQUIRE(0);
-        }
-        if (p[n] != NULL) {
-            memset(p[n], n, s[n]);
-        }
-    }
-
-    for (int i = 0; i < NUM_POINTERS; i++) {
-        multi_heap_free(heap, p[i]);
-        if (!multi_heap_check(heap, true)) {
-            printf("FAILED during cleanup after freeing %p\n", p[i]);
-            multi_heap_dump(heap);
-            REQUIRE(0);
-        }
-    }
-
-    REQUIRE( initial_free == multi_heap_free_size(heap) );
-    __free__(big_heap);
-}
-
-TEST_CASE("multi_heap many random allocations", "[multi_heap]")
-{
-    size_t poolsize[] = { 15, 255, 4095, 8191 };
-    for (size_t i = 0; i < sizeof(poolsize)/sizeof(size_t); i++) {
-        multi_heap_allocation_impl(poolsize[i] * 1024);
-    }	
-}
-
-TEST_CASE("multi_heap_get_info() function", "[multi_heap]")
-{
-    uint8_t heapdata[4 * 1024];
-    multi_heap_handle_t heap = multi_heap_register(heapdata, sizeof(heapdata));
-    multi_heap_info_t before, after, freed;
-
-    multi_heap_get_info(heap, &before);
-    printf("before: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n",
-           before.total_free_bytes,
-           before.total_allocated_bytes,
-           before.largest_free_block,
-           before.minimum_free_bytes,
-           before.allocated_blocks,
-           before.free_blocks,
-           before.total_blocks);
-
-    REQUIRE( 0 == before.allocated_blocks );
-    REQUIRE( 0 == before.total_allocated_bytes );
-    REQUIRE( before.total_free_bytes == before.minimum_free_bytes );
-
-    void *x = multi_heap_malloc(heap, 32);
-    multi_heap_get_info(heap, &after);
-    printf("after: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n",
-           after.total_free_bytes,
-           after.total_allocated_bytes,
-           after.largest_free_block,
-           after.minimum_free_bytes,
-           after.allocated_blocks,
-           after.free_blocks,
-           after.total_blocks);
-
-    REQUIRE( 1 == after.allocated_blocks );
-    REQUIRE( 32 == after.total_allocated_bytes );
-    REQUIRE( after.minimum_free_bytes < before.minimum_free_bytes);
-    REQUIRE( after.minimum_free_bytes > 0 );
-
-    multi_heap_free(heap, x);
-    multi_heap_get_info(heap, &freed);
-    printf("freed: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n",
-           freed.total_free_bytes,
-           freed.total_allocated_bytes,
-           freed.largest_free_block,
-           freed.minimum_free_bytes,
-           freed.allocated_blocks,
-           freed.free_blocks,
-           freed.total_blocks);
-
-    REQUIRE( 0 == freed.allocated_blocks );
-    REQUIRE( 0 == freed.total_allocated_bytes );
-    REQUIRE( before.total_free_bytes == freed.total_free_bytes );
-    REQUIRE( after.minimum_free_bytes == freed.minimum_free_bytes );
-}
-
-TEST_CASE("multi_heap minimum-size allocations", "[multi_heap]")
-{
-    uint8_t heapdata[4096];
-    void *p[sizeof(heapdata) / sizeof(void *)] = {NULL};
-    const size_t NUM_P = sizeof(p) / sizeof(void *);
-    size_t allocated_size = 0;
-    multi_heap_handle_t heap = multi_heap_register(heapdata, sizeof(heapdata));
-    size_t before_free = multi_heap_free_size(heap);
-
-    size_t i;
-    for (i = 0; i < NUM_P; i++) {
-        //TLSF minimum block size is 4 bytes
-        p[i] = multi_heap_malloc(heap, 1);
-        if (p[i] == NULL) {
-            break;
-        }
-    }
-
-    REQUIRE( i < NUM_P); // Should have run out of heap before we ran out of pointers
-    printf("Allocated %zu minimum size chunks\n", i);
-
-    REQUIRE(multi_heap_free_size(heap) < before_free);
-    multi_heap_check(heap, true);
-
-    /* Free in random order */
-    bool has_allocations = true;
-    while (has_allocations) {
-        i = rand() % NUM_P;
-        multi_heap_free(heap, p[i]);
-        p[i] = NULL;
-        multi_heap_check(heap, true);
-
-        has_allocations = false;
-        for (i = 0; i < NUM_P && !has_allocations; i++) {
-            has_allocations = (p[i] != NULL);
-        }
-    }
-
-    /* all freed! */
-    REQUIRE( before_free == multi_heap_free_size(heap) );
-}
-
-TEST_CASE("multi_heap_realloc()", "[multi_heap]")
-{
-    const uint32_t PATTERN = 0xABABDADA;
-    uint8_t small_heap[4 * 1024];
-    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
-
-    uint32_t *a = (uint32_t *)multi_heap_malloc(heap, 64);
-    uint32_t *b = (uint32_t *)multi_heap_malloc(heap, 32);
-    REQUIRE( a != NULL );
-    REQUIRE( b != NULL );
-    REQUIRE( b > a); /* 'b' takes the block after 'a' */
-
-    *a = PATTERN;
-
-    uint32_t *c = (uint32_t *)multi_heap_realloc(heap, a, 72);
-    REQUIRE( multi_heap_check(heap, true));
-    REQUIRE(  c  != NULL );
-    REQUIRE( c > b ); /* 'a' moves, 'c' takes the block after 'b' */
-    REQUIRE( *c == PATTERN );
-
-#ifndef MULTI_HEAP_POISONING_SLOW
-    // "Slow" poisoning implementation doesn't reallocate in place, so these
-    // test will fail...
-
-    uint32_t *d = (uint32_t *)multi_heap_realloc(heap, c, 36);
-    REQUIRE( multi_heap_check(heap, true) );
-    REQUIRE( c == d ); /* 'c' block should be shrunk in-place */
-    REQUIRE( *d == PATTERN);
-
-    uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 64);
-    REQUIRE( multi_heap_check(heap, true));
-    REQUIRE( a == e ); /* 'e' takes the block formerly occupied by 'a' */
-
-    multi_heap_free(heap, d);
-    uint32_t *f = (uint32_t *)multi_heap_realloc(heap, b, 64);
-    REQUIRE( multi_heap_check(heap, true) );
-    REQUIRE( f == b ); /* 'b' should be extended in-place, over space formerly occupied by 'd' */
-
-#ifdef MULTI_HEAP_POISONING
-#define TOO_MUCH 7420 + 1
-#else
-#define TOO_MUCH 7420 + 1
-#endif
-    /* not enough contiguous space left in the heap */
-    uint32_t *g = (uint32_t *)multi_heap_realloc(heap, e, TOO_MUCH);
-    REQUIRE( g == NULL );
-
-    multi_heap_free(heap, f);
-    /* try again */
-    g = (uint32_t *)multi_heap_realloc(heap, e, 128);
-    REQUIRE( multi_heap_check(heap, true) );
-    REQUIRE( e == g ); /* 'g' extends 'e' in place, into the space formerly held by 'f' */
-#endif
-}
-
-// TLSF only accepts heaps aligned to 4-byte boundary so
-// only aligned allocation tests make sense.
-TEST_CASE("multi_heap aligned allocations", "[multi_heap]")
-{
-    uint8_t test_heap[4 * 1024];
-    multi_heap_handle_t heap = multi_heap_register(test_heap, sizeof(test_heap));
-    uint32_t aligments = 0; // starts from alignment by 4-byte boundary
-    size_t old_size = multi_heap_free_size(heap);
-    size_t leakage = 1024;
-    printf("[ALIGNED_ALLOC] heap_size before: %d \n", old_size);
-
-    printf("New heap:\n");
-    multi_heap_dump(heap);
-    printf("*********************\n");
-
-    for(;aligments <= 256; aligments++) {
-
-        //Use some stupid size value to test correct alignment even in strange
-        //memory layout objects:
-        uint8_t *buf = (uint8_t *)multi_heap_aligned_alloc(heap, (aligments + 137), aligments );
-        if(((aligments & (aligments - 1)) != 0) || (!aligments)) {
-            REQUIRE( buf == NULL );
-        } else {
-            REQUIRE( buf != NULL );
-            REQUIRE((intptr_t)buf >= (intptr_t)test_heap);
-            REQUIRE((intptr_t)buf < (intptr_t)(test_heap + sizeof(test_heap)));
-
-            printf("[ALIGNED_ALLOC] alignment required: %u \n", aligments);
-            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
-            //Address of obtained block must be aligned with selected value
-            REQUIRE(((intptr_t)buf & (aligments - 1)) == 0);
-
-            //Write some data, if it corrupts memory probably the heap
-            //canary verification will fail:
-            memset(buf, 0xA5, (aligments + 137));
-
-            multi_heap_free(heap, buf);
-        }
-    }
-
-    printf("[ALIGNED_ALLOC] heap_size after: %d \n", multi_heap_free_size(heap));
-    REQUIRE((old_size - multi_heap_free_size(heap)) <= leakage);
-}

+ 3 - 3
components/raop/raop.c

@@ -124,7 +124,7 @@ struct raop_ctx_s *raop_create(uint32_t host, char *name,
 					"ss=16", "sr=44100", "vn=3", "txtvers=1",
 					NULL };
 #else
-	mdns_txt_item_t txt[] = {
+	const mdns_txt_item_t txt[] = {
 		{"am", "airesp32"},
 		{"tp", "UDP"},
 		{"sm","false"},
@@ -765,7 +765,7 @@ static void search_remote(void *args) {
 
 /*----------------------------------------------------------------------------*/
 static char *rsa_apply(unsigned char *input, int inlen, int *outlen, int mode)
 {
-	static char super_secret_key[] =
+	const static char super_secret_key[] =
 	"-----BEGIN RSA PRIVATE KEY-----\n"
 	"MIIEpQIBAAKCAQEA59dE8qLieItsH1WgjrcFRKj6eUWqi+bGLOX1HL3U3GhC/j0Qg90u3sG/1CUt\n"
 	"wC5vOYvfDmFI6oSFXi5ELabWJmT2dKHzBJKa3k9ok+8t9ucRqMd6DZHJ2YCCLlDRKSKv6kDqnw4U\n"
@@ -853,7 +853,7 @@ static char *rsa_apply(unsigned char *input, int inlen, int *outlen, int mode)
 
 #define DECODE_ERROR 0xffffffff
 
-static char base64_chars[] =
+const static char base64_chars[] =
 	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
 
 /*----------------------------------------------------------------------------*/

+ 1 - 1
components/squeezelite/output_i2s.c

@@ -401,7 +401,7 @@ void output_init_i2s(log_level level, char *device, unsigned output_buf_size, ch
 	// create task as a FreeRTOS task but uses stack in internal RAM
 	{
 		static DRAM_ATTR StaticTask_t xTaskBuffer __attribute__ ((aligned (4)));
-		static DRAM_ATTR StackType_t xStack[OUTPUT_THREAD_STACK_SIZE] __attribute__ ((aligned (4)));
+		static EXT_RAM_ATTR StackType_t xStack[OUTPUT_THREAD_STACK_SIZE] __attribute__ ((aligned (4)));
 		output_i2s_task = xTaskCreateStaticPinnedToCore( (TaskFunction_t) output_thread_i2s, "output_i2s", OUTPUT_THREAD_STACK_SIZE, 
 											  NULL, CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT + 1, xStack, &xTaskBuffer, 0 );
 	}