
big merge

Philippe G · 3 years ago
commit 898998efb0
100 changed files with 8979 additions and 398 deletions
  1. components/codecs/CMakeLists.txt (+1 -1)
  2. components/display/SSD1675.c (+1 -1)
  3. components/display/ST77xx.c (+9 -3)
  4. components/display/core/gds_font.c (+4 -2)
  5. components/display/core/gds_font.h (+3 -2)
  6. components/display/core/gds_image.c (+1 -1)
  7. components/display/core/gds_text.c (+13 -0)
  8. components/display/core/gds_text.h (+1 -0)
  9. components/display/display.c (+64 -24)
  10. components/display/fonts/font_line_1.c (+1 -1)
  11. components/driver_bt/bt_app_core.c (+1 -1)
  12. components/driver_bt/bt_app_sink.c (+1 -1)
  13. components/driver_bt/bt_app_source.c (+1 -2)
  14. components/esp_http_server/CMakeLists.txt (+10 -0)
  15. components/esp_http_server/Kconfig (+1 -0)
  16. components/esp_http_server/osal.h (+68 -0)
  17. components/heap/CMakeLists.txt (+49 -0)
  18. components/heap/Kconfig (+74 -0)
  19. components/heap/component.mk (+32 -0)
  20. components/heap/heap_caps.c (+609 -0)
  21. components/heap/heap_caps_init.c (+241 -0)
  22. components/heap/heap_private.h (+77 -0)
  23. components/heap/heap_task_info.c (+129 -0)
  24. components/heap/heap_tlsf.c (+1015 -0)
  25. components/heap/heap_tlsf.h (+119 -0)
  26. components/heap/heap_tlsf_block_functions.h (+174 -0)
  27. components/heap/heap_tlsf_config.h (+66 -0)
  28. components/heap/heap_trace_standalone.c (+255 -0)
  29. components/heap/include/esp_heap_caps.h (+402 -0)
  30. components/heap/include/esp_heap_caps_init.h (+92 -0)
  31. components/heap/include/esp_heap_task_info.h (+98 -0)
  32. components/heap/include/esp_heap_trace.h (+154 -0)
  33. components/heap/include/heap_trace.inc (+200 -0)
  34. components/heap/include/multi_heap.h (+190 -0)
  35. components/heap/linker.lf (+7 -0)
  36. components/heap/multi_heap.c (+376 -0)
  37. components/heap/multi_heap_config.h (+31 -0)
  38. components/heap/multi_heap_internal.h (+76 -0)
  39. components/heap/multi_heap_platform.h (+108 -0)
  40. components/heap/multi_heap_poisoning.c (+426 -0)
  41. components/heap/test/CMakeLists.txt (+3 -0)
  42. components/heap/test/component.mk (+5 -0)
  43. components/heap/test/test_aligned_alloc_caps.c (+147 -0)
  44. components/heap/test/test_allocator_timings.c (+108 -0)
  45. components/heap/test/test_diram.c (+74 -0)
  46. components/heap/test/test_heap_trace.c (+164 -0)
  47. components/heap/test/test_leak.c (+60 -0)
  48. components/heap/test/test_malloc.c (+134 -0)
  49. components/heap/test/test_malloc_caps.c (+247 -0)
  50. components/heap/test/test_realloc.c (+67 -0)
  51. components/heap/test/test_runtime_heap_reg.c (+72 -0)
  52. components/heap/test_multi_heap_host/Makefile (+54 -0)
  53. components/heap/test_multi_heap_host/main.cpp (+2 -0)
  54. components/heap/test_multi_heap_host/test_all_configs.sh (+20 -0)
  55. components/heap/test_multi_heap_host/test_multi_heap.cpp (+508 -0)
  56. components/platform_config/CMakeLists.txt (+1 -1)
  57. components/platform_config/nvs_utilities.c (+1 -1)
  58. components/platform_config/platform_config.c (+1 -2)
  59. components/platform_config/platform_config.h (+18 -3)
  60. components/platform_console/CMakeLists.txt (+1 -1)
  61. components/platform_console/app_squeezelite/CMakeLists.txt (+1 -1)
  62. components/platform_console/app_squeezelite/cmd_squeezelite.c (+1 -1)
  63. components/platform_console/cmd_config.c (+1 -2)
  64. components/platform_console/cmd_i2ctools.c (+1 -1)
  65. components/platform_console/cmd_nvs.c (+1 -2)
  66. components/platform_console/cmd_system.c (+1 -2)
  67. components/platform_console/platform_console.c (+121 -82)
  68. components/raop/raop.c (+7 -7)
  69. components/raop/raop.h (+2 -6)
  70. components/raop/raop_sink.c (+2 -4)
  71. components/raop/rtp.c (+18 -8)
  72. components/raop/rtp.h (+1 -0)
  73. components/raop/util.h (+20 -2)
  74. components/services/accessors.c (+124 -84)
  75. components/services/accessors.h (+3 -1)
  76. components/services/audio_controls.c (+6 -6)
  77. components/services/battery.c (+4 -5)
  78. components/services/buttons.c (+69 -52)
  79. components/services/globdefs.h (+0 -10)
  80. components/services/gpio_exp.c (+738 -0)
  81. components/services/gpio_exp.h (+61 -0)
  82. components/services/led.c (+10 -8)
  83. components/services/messaging.c (+1 -2)
  84. components/services/monitor.c (+2 -2)
  85. components/services/rotary_encoder.c (+26 -19)
  86. components/services/services.c (+32 -44)
  87. components/spotify/CMakeLists.txt (+28 -0)
  88. components/spotify/Shim.cpp (+381 -0)
  89. components/spotify/Shim.h (+50 -0)
  90. components/spotify/cspot/CMakeLists.txt (+66 -0)
  91. components/spotify/cspot/bell/.DS_Store (BIN)
  92. components/spotify/cspot/bell/.gitmodules (+7 -0)
  93. components/spotify/cspot/bell/CMakeLists.txt (+80 -0)
  94. components/spotify/cspot/bell/README.md (+9 -0)
  95. components/spotify/cspot/bell/cJSON/.editorconfig (+23 -0)
  96. components/spotify/cspot/bell/cJSON/.gitattributes (+11 -0)
  97. components/spotify/cspot/bell/cJSON/.github/CONTRIBUTING.md (+54 -0)
  98. components/spotify/cspot/bell/cJSON/.github/workflows/CI.yml (+102 -0)
  99. components/spotify/cspot/bell/cJSON/.gitignore (+20 -0)
  100. components/spotify/cspot/bell/cJSON/.travis.yml (+28 -0)

+ 1 - 1
components/codecs/CMakeLists.txt

@@ -1,5 +1,5 @@
 idf_component_register(
-	   INCLUDE_DIRS .  ./inc  inc/alac inc/FLAC  inc/helix-aac inc/mad inc/ogg inc/opus inc/opusfile inc/resample16 inc/soxr inc/vorbis
+	   INCLUDE_DIRS . ./inc inc/alac inc/FLAC inc/helix-aac inc/mad inc/ogg inc/opus inc/opusfile inc/resample16 inc/soxr inc/vorbis
 )
 
 if (DEFINED AAC_DISABLE_SBR)

+ 1 - 1
components/display/SSD1675.c

@@ -243,7 +243,7 @@ struct GDS_Device* SSD1675_Detect(char *Driver, struct GDS_Device* Device) {
 	char *p;
 	struct PrivateSpace* Private = (struct PrivateSpace*) Device->Private;
 	Private->ReadyPin = -1;
-	if ((p = strcasestr(Driver, "ready")) != NULL) Private->ReadyPin = atoi(strchr(p, '=') + 1);
+	if ((p = strcasestr(Driver, "ready")) && (p = strchr(p, '='))) Private->ReadyPin = atoi(p + 1);
 	
 	ESP_LOGI(TAG, "SSD1675 driver with ready GPIO %d", Private->ReadyPin);
 	

+ 9 - 3
components/display/ST77xx.c

@@ -199,8 +199,8 @@ static void SetLayout( struct GDS_Device* Device, bool HFlip, bool VFlip, bool R
 	WriteByte( Device, Private->MADCtl );
 	
 	if (Private->Model == ST7789) {
-		if (Rotate) Private->Offset.Width = HFlip ? 320 - Device->Width : 0;
-		else Private->Offset.Height = HFlip ? 320 - Device->Height : 0;
+		if (Rotate) Private->Offset.Width += HFlip ? 320 - Device->Width : 0;
+		else Private->Offset.Height += HFlip ? 320 - Device->Height : 0;
 	}
 
 #ifdef SHADOW_BUFFER
@@ -235,7 +235,7 @@ static bool Init( struct GDS_Device* Device ) {
 	Private->iRAM = heap_caps_malloc( (Private->PageSize + 1) * Device->Width * Depth, MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA );
 #endif
 
-	ESP_LOGI(TAG, "ST77xx with bit depth %u, page %u, iRAM %p", Device->Depth, Private->PageSize, Private->iRAM);
+	ESP_LOGI(TAG, "ST77xx with bit depth %u, offsets %hu:%hu, page %u, iRAM %p", Device->Depth, Private->Offset.Height, Private->Offset.Width, Private->PageSize, Private->iRAM);
 	
 	// Sleepout + Booster
 	Device->WriteCommand( Device, 0x11 );
@@ -283,8 +283,14 @@ struct GDS_Device* ST77xx_Detect(char *Driver, struct GDS_Device* Device) {
 		
 	*Device = ST77xx;	
 	sscanf(Driver, "%*[^:]:%u", &Depth);
+
 	struct PrivateSpace* Private = (struct PrivateSpace*) Device->Private;
 	Private->Model = Model;
+
+	if (Model == ST7735) {
+		sscanf(Driver, "%*[^:]%*[^x]%*[^=]=%hu", &Private->Offset.Height);		
+		sscanf(Driver, "%*[^:]%*[^y]%*[^=]=%hu", &Private->Offset.Width);		
+	}
 	
 	if (Depth == 18) {
 		Device->Mode = GDS_RGB666;
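
Note on the new ST7735 offsets: each sscanf skips ahead to the first 'x' (respectively 'y') in the driver string, then to the next '=', and reads the value that follows. A minimal sketch of that parsing, using a hypothetical driver string (only the format patterns come from the code above):

    #include <stdio.h>

    int main(void) {
        const char *Driver = "ST7735:16:x=2,y=3";   // hypothetical config string
        unsigned short Height = 0, Width = 0;

        sscanf(Driver, "%*[^:]%*[^x]%*[^=]=%hu", &Height);  // reads 2 (the x offset)
        sscanf(Driver, "%*[^:]%*[^y]%*[^=]=%hu", &Width);   // reads 3 (the y offset)

        printf("offsets %hu:%hu\n", Height, Width);
        return 0;
    }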

+ 4 - 2
components/display/core/gds_font.c

@@ -98,12 +98,14 @@ void GDS_FontDrawChar( struct GDS_Device* Device, char Character, int x, int y,
     }
 }
 
-bool GDS_SetFont( struct GDS_Device* Display, const struct GDS_FontDef* Font ) {
+const struct GDS_FontDef* GDS_SetFont( struct GDS_Device* Display, const struct GDS_FontDef* Font ) {
+	const struct GDS_FontDef* OldFont = Display->Font;
+
     Display->FontForceProportional = false;
     Display->FontForceMonospace = false;
     Display->Font = Font;
 
-    return true;
+    return OldFont;
 }
 
 void GDS_FontForceProportional( struct GDS_Device* Display, bool Force ) {
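
Returning the previously active font instead of a bare bool lets callers switch fonts temporarily and restore the old one afterwards, which is how the new GDS_GetTextWidth further below uses it. A minimal sketch of the save/restore pattern:

    // Measure a string in a specific font, then put the previous font back
    const struct GDS_FontDef *Saved = GDS_SetFont(Display, &Font_droid_sans_fallback_15x17);
    int Width = GDS_FontMeasureString(Display, "12:34");
    GDS_SetFont(Display, Saved);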

+ 3 - 2
components/display/core/gds_font.h

@@ -46,7 +46,7 @@ typedef enum {
     TextAnchor_Center
 } TextAnchor;
 
-bool GDS_SetFont( struct GDS_Device* Display, const struct GDS_FontDef* Font );
+const struct GDS_FontDef* GDS_SetFont( struct GDS_Device* Display, const struct GDS_FontDef* Font );
 
 void GDS_FontForceProportional( struct GDS_Device* Display, bool Force );
 void GDS_FontForceMonospace( struct GDS_Device* Display, bool Force );
@@ -59,7 +59,8 @@ int GDS_FontGetMaxCharsPerColumn( struct GDS_Device* Display );
 
 int GDS_FontGetCharWidth( struct GDS_Device* Display, char Character );
 int GDS_FontGetCharHeight( struct GDS_Device* Display );
-int GDS_FontMeasureString( struct GDS_Device* Display, const char* Text );\
+int GDS_FontMeasureString( struct GDS_Device* Display, const char* Text );
+int GDS_FontMeasureStringLine( struct GDS_Device* Display, int Line, const char* Text );
 
 void GDS_FontDrawChar( struct GDS_Device* Display, char Character, int x, int y, int Color );
 void GDS_FontDrawString( struct GDS_Device* Display, int x, int y, const char* Text, int Color );

+ 1 - 1
components/display/core/gds_image.c

@@ -15,7 +15,7 @@
 #include "gds_private.h"
 #include "gds_image.h"
 
-const char TAG[] = "ImageDec";
+const static char TAG[] = "ImageDec";
 
 #define SCRATCH_SIZE	3100
 

+ 13 - 0
components/display/core/gds_text.c

@@ -121,6 +121,19 @@ bool GDS_TextLine(struct GDS_Device* Device, int N, int Pos, int Attr, char *Tex
 	return Width + X < Device->Width;
 }
 
+/****************************************************************************************
+ * 
+ */
+int GDS_GetTextWidth(struct GDS_Device* Device, int N, int Attr, char *Text) {
+	struct GDS_FontDef *Font = GDS_SetFont( Device, Device->Lines[N-1].Font );	
+
+	if (Attr & GDS_TEXT_MONOSPACE) GDS_FontForceMonospace( Device, true );
+	int Width = GDS_FontMeasureString( Device, Text );
+	GDS_SetFont( Device, Font );
+
+	return Width;
+}
+
 /****************************************************************************************
  * Try to align string for better scrolling visual. there is probably much better to do
  */

+ 1 - 0
components/display/core/gds_text.h

@@ -31,5 +31,6 @@ struct GDS_Device;
 bool 	GDS_TextSetFontAuto(struct GDS_Device* Device, int N, int FontType, int Space);
 bool 	GDS_TextSetFont(struct GDS_Device* Device, int N, const struct GDS_FontDef *Font, int Space);
 bool 	GDS_TextLine(struct GDS_Device* Device, int N, int Pos, int Attr, char *Text);
+int		GDS_GetTextWidth(struct GDS_Device* Device, int N, int Attr, char *Text);
 int 	GDS_TextStretch(struct GDS_Device* Device, int N, char *String, int Max);
 void 	GDS_TextPos(struct GDS_Device* Device, int FontType, int Where, int Attr, char *Text, ...);

+ 64 - 24
components/display/display.c

@@ -41,7 +41,12 @@ static EXT_RAM_ATTR struct {
 	int offset, boundary;
 	char *metadata_config;
 	bool timer, refresh;
-	uint32_t elapsed, duration;
+	uint32_t elapsed;
+	struct {
+		uint32_t value;
+		char string[8]; // H:MM:SS
+		bool visible;
+	} duration;
 	TickType_t tick;
 } displayer;
 
@@ -71,11 +76,11 @@ void display_init(char *welcome) {
 	char *config = config_alloc_get_str("display_config", CONFIG_DISPLAY_CONFIG, "N/A");
 	
 	int width = -1, height = -1, backlight_pin = -1;
-	char *p, *drivername = strstr(config, "driver");
+	char *drivername = strstr(config, "driver");
 
-	if ((p = strcasestr(config, "width")) != NULL) width = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(config, "height")) != NULL) height = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(config, "back")) != NULL) backlight_pin = atoi(strchr(p, '=') + 1);	
+	PARSE_PARAM(config, "width", '=', width);
+	PARSE_PARAM(config, "height", '=', height);
+	PARSE_PARAM(config, "back", '=', backlight_pin);
 		
 	// query drivers to see if we have a match
 	ESP_LOGI(TAG, "Trying to configure display with %s", config);
@@ -89,24 +94,24 @@ void display_init(char *welcome) {
 	// so far so good
 	if (display && width > 0 && height > 0) {
 		int RST_pin = -1;
-		if ((p = strcasestr(config, "reset")) != NULL) RST_pin = atoi(strchr(p, '=') + 1);
+		PARSE_PARAM(config, "reset", '=', RST_pin);
 		
 		// Detect driver interface
-		if (strstr(config, "I2C") && i2c_system_port != -1) {
+		if (strcasestr(config, "I2C") && i2c_system_port != -1) {
 			int address = 0x3C;
 				
-			if ((p = strcasestr(config, "address")) != NULL) address = atoi(strchr(p, '=') + 1);
+			PARSE_PARAM(config, "address", '=', address);
 				
 			init = true;
 			GDS_I2CInit( i2c_system_port, -1, -1, i2c_system_speed ) ;
 			GDS_I2CAttachDevice( display, width, height, address, RST_pin, backlight_pin );
 		
 			ESP_LOGI(TAG, "Display is I2C on port %u", address);
-		} else if (strstr(config, "SPI") && spi_system_host != -1) {
+		} else if (strcasestr(config, "SPI") && spi_system_host != -1) {
 			int CS_pin = -1, speed = 0;
 		
-			if ((p = strcasestr(config, "cs")) != NULL) CS_pin = atoi(strchr(p, '=') + 1);
-			if ((p = strcasestr(config, "speed")) != NULL) speed = atoi(strchr(p, '=') + 1);
+			PARSE_PARAM(config, "cs", '=', CS_pin);
+			PARSE_PARAM(config, "speed", '=', speed);
 		
 			init = true;
 			GDS_SPIInit( spi_system_host, spi_system_dc_gpio );
@@ -126,7 +131,7 @@ void display_init(char *welcome) {
 		static DRAM_ATTR StaticTask_t xTaskBuffer __attribute__ ((aligned (4)));
 		static EXT_RAM_ATTR StackType_t xStack[DISPLAYER_STACK_SIZE] __attribute__ ((aligned (4)));
 		
-		GDS_SetLayout( display, strcasestr(config, "HFlip"), strcasestr(config, "VFlip"), strcasestr(config, "rotate"));
+		GDS_SetLayout(display, strcasestr(config, "HFlip"), strcasestr(config, "VFlip"), strcasestr(config, "rotate"));
 		GDS_SetFont(display, &Font_droid_sans_fallback_15x17 );
 		GDS_TextPos(display, GDS_FONT_MEDIUM, GDS_TEXT_CENTERED, GDS_TEXT_CLEAR | GDS_TEXT_UPDATE, welcome);
 
@@ -193,18 +198,34 @@ static void displayer_task(void *args) {
 		
 		// handler elapsed track time
 		if (displayer.timer && displayer.state == DISPLAYER_ACTIVE) {
-			char counter[16];
+			char line[19] = "-", *_line = line + 1; // [-]H:MM:SS / H:MM:SS
 			TickType_t tick = xTaskGetTickCount();
 			uint32_t elapsed = (tick - displayer.tick) * portTICK_PERIOD_MS;
-			
+
 			if (elapsed >= 1000) {
 				xSemaphoreTake(displayer.mutex, portMAX_DELAY);
 				displayer.tick = tick;
-				displayer.elapsed += elapsed / 1000;
-				xSemaphoreGive(displayer.mutex);				
-				if (displayer.elapsed < 3600) snprintf(counter, 16, "%5u:%02u", displayer.elapsed / 60, displayer.elapsed % 60);
-				else snprintf(counter, 16, "%2u:%02u:%02u", displayer.elapsed / 3600, (displayer.elapsed % 3600) / 60, displayer.elapsed % 60);
-				GDS_TextLine(display, 1, GDS_TEXT_RIGHT, (GDS_TEXT_CLEAR | GDS_TEXT_CLEAR_EOL) | GDS_TEXT_UPDATE, counter);
+				elapsed = displayer.elapsed += elapsed / 1000;
+				xSemaphoreGive(displayer.mutex);
+
+				// when we have duration but no space, display remaining time
+				if (displayer.duration.value && !displayer.duration.visible) elapsed = displayer.duration.value - elapsed;
+
+				if (elapsed < 3600) sprintf(_line, "%u:%02u", elapsed / 60, elapsed % 60);
+				else sprintf(_line, "%u:%02u:%02u", (elapsed / 3600) % 100, (elapsed % 3600) / 60, elapsed % 60);
+
+				// concatenate if we have room for elapsed / duration
+				if (displayer.duration.visible) {
+					strcat(_line, "/");
+					strcat(_line, displayer.duration.string);
+				} else if (displayer.duration.value) {
+					_line--;
+				}
+
+				// just re-write the whole line it's easier
+				GDS_TextLine(display, 1, GDS_TEXT_LEFT, GDS_TEXT_CLEAR, displayer.header);	
+				GDS_TextLine(display, 1, GDS_TEXT_RIGHT, GDS_TEXT_UPDATE, _line);
+
 				timer_sleep = 1000;
 			} else timer_sleep = max(1000 - elapsed, 0);	
 		} else timer_sleep = DEFAULT_SLEEP;
@@ -279,8 +300,8 @@ void displayer_metadata(char *artist, char *album, char *title) {
 	}
 	
 	// get optional scroll speed & pause
-	if ((p = strcasestr(displayer.metadata_config, "speed")) != NULL) sscanf(p, "%*[^=]=%d", &displayer.speed);
-	if ((p = strcasestr(displayer.metadata_config, "pause")) != NULL) sscanf(p, "%*[^=]=%d", &displayer.pause);
+	PARSE_PARAM(displayer.metadata_config, "speed", '=', displayer.speed);
+	PARSE_PARAM(displayer.metadata_config, "pause", '=', displayer.pause);
 	
 	displayer.offset = 0;	
 	utf8_decode(displayer.string);
@@ -318,9 +339,27 @@ void displayer_timer(enum displayer_time_e mode, int elapsed, int duration) {
 	
 	xSemaphoreTake(displayer.mutex, portMAX_DELAY);
 
-	if (elapsed >= 0) displayer.elapsed = elapsed / 1000;	
-	if (duration >= 0) displayer.duration = duration / 1000;
 	if (displayer.timer) displayer.tick = xTaskGetTickCount();
+	if (elapsed >= 0) displayer.elapsed = elapsed / 1000;	
+	if (duration > 0) {
+		displayer.duration.visible = true;
+		displayer.duration.value = duration / 1000;
+
+		if (displayer.duration.value > 3600) sprintf(displayer.duration.string, "%u:%02u:%02u", (displayer.duration.value / 3600) % 10,
+													(displayer.duration.value % 3600) / 60, displayer.duration.value % 60);
+		else sprintf(displayer.duration.string, "%u:%02u", displayer.duration.value / 60, displayer.duration.value % 60);
+
+		char *buf;
+		asprintf(&buf, "%s %s/%s", displayer.header, displayer.duration.string, displayer.duration.string);
+		if (GDS_GetTextWidth(display, 1, 0, buf) > GDS_GetWidth(display)) {
+			ESP_LOGW(TAG, "Can't fit duration %s (%d) on screen using elapsed only", buf, GDS_GetTextWidth(display, 1, 0, buf));
+			displayer.duration.visible = false;
+		}
+		free(buf);
+	} else if (!duration) {
+		displayer.duration.visible = false;
+		displayer.duration.value = 0;
+	}
 		
 	xSemaphoreGive(displayer.mutex);
 }	
@@ -345,7 +384,8 @@ void displayer_control(enum displayer_cmd_e cmd, ...) {
 		displayer.timer = false;
 		displayer.refresh = true;
 		displayer.string[0] = '\0';
-		displayer.elapsed = displayer.duration = 0;
+		displayer.elapsed = displayer.duration.value = 0;
+		displayer.duration.visible = false;
 		displayer.offset = displayer.boundary = 0;
 		display_bus(&displayer, DISPLAY_BUS_TAKE);
 		vTaskResume(displayer.task);
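
The repeated strcasestr/strchr/atoi parsing is now hidden behind PARSE_PARAM. The macro itself is not part of this diff (it presumably comes from tools.h); a plausible definition, shown only to make the call sites above readable, would be:

    // Hypothetical sketch - the real macro lives in tools.h, not in this commit
    #define PARSE_PARAM(S, P, C, V) do {                                   \
            char *_p;                                                      \
            if ((_p = strcasestr(S, P)) && (_p = strchr(_p, C))) {         \
                V = atoi(_p + 1);                                          \
            }                                                              \
        } while (0)

With that shape, PARSE_PARAM(config, "width", '=', width) leaves width untouched when the key or the delimiter is missing, matching the null-safe change made in SSD1675.c.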

+ 1 - 1
components/display/fonts/font_line_1.c

@@ -26,7 +26,7 @@ static const uint8_t Square721_BT11x14[] = {
         0x02, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // Code for char ,
         0x04, 0x00, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // Code for char -
         0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // Code for char .
-        0x04, 0x00, 0x0C, 0x80, 0x03, 0x70, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // Code for char /
+        0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x80, 0x03, 0x70, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // Code for char /
         0x08, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x03, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0xF0, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // Code for char 0
         0x08, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x10, 0x04, 0x08, 0x04, 0xF8, 0x07, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // Code for char 1
         0x08, 0x00, 0x00, 0x00, 0x00, 0x30, 0x07, 0x08, 0x05, 0x88, 0x04, 0x88, 0x04, 0x88, 0x04, 0x70, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // Code for char 2

+ 1 - 1
components/driver_bt/bt_app_core.c

@@ -17,7 +17,7 @@
 #include "freertos/FreeRTOS.h"
 #include "freertos/queue.h"
 #include "freertos/task.h"
-#include "globdefs.h"
+#include "tools.h"
 
 static const char * TAG = "btappcore";
 

+ 1 - 1
components/driver_bt/bt_app_sink.c

@@ -25,7 +25,7 @@
 #include "platform_config.h"
 #include "freertos/FreeRTOS.h"
 #include "freertos/task.h"
-#include "trace.h"
+#include "tools.h"
 #include "audio_controls.h"
 #include "sys/lock.h"
 #include "display.h"

+ 1 - 2
components/driver_bt/bt_app_source.c

@@ -17,10 +17,9 @@
 #include "freertos/timers.h"
 #include "argtable3/argtable3.h"
 #include "platform_config.h"
-#include "trace.h"
 #include "messaging.h"
 #include "cJSON.h"
-#include "globdefs.h"
+#include "tools.h"
 
 static const char * TAG = "bt_app_source";
 static const char * BT_RC_CT_TAG="RCCT";

+ 10 - 0
components/esp_http_server/CMakeLists.txt

@@ -0,0 +1,10 @@
+idf_build_get_property(prefix IDF_PATH)
+string(CONCAT prefix "${prefix}" "/components/esp_http_server")
+
+idf_component_register(
+		SRC_DIRS "${prefix}/src"	"${prefix}/src/util"	
+		INCLUDE_DIRS "${prefix}/include"
+                PRIV_INCLUDE_DIRS "." "${prefix}/src/port/esp32" "${prefix}/src/util"
+                REQUIRES nghttp # for http_parser.h
+                    PRIV_REQUIRES lwip mbedtls esp_timer
+) 

+ 1 - 0
components/esp_http_server/Kconfig

@@ -0,0 +1 @@
+source "$IDF_PATH/components/esp_http_server/Kconfig"

+ 68 - 0
components/esp_http_server/osal.h

@@ -0,0 +1,68 @@
+// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef _OSAL_H_
+#define _OSAL_H_
+
+#include <freertos/FreeRTOS.h>
+#include <freertos/task.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <esp_timer.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define OS_SUCCESS ESP_OK
+#define OS_FAIL    ESP_FAIL
+
+typedef TaskHandle_t othread_t;
+
+static inline int httpd_os_thread_create(othread_t *thread,
+                                 const char *name, uint16_t stacksize, int prio,
+                                 void (*thread_routine)(void *arg), void *arg,
+                                 BaseType_t core_id)
+{
+	StaticTask_t *xTaskBuffer = (StaticTask_t*) heap_caps_malloc(sizeof(StaticTask_t), (MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT));
+	StackType_t *xStack = heap_caps_malloc(stacksize,(MALLOC_CAP_SPIRAM|MALLOC_CAP_8BIT));
+
+	*thread = xTaskCreateStaticPinnedToCore(thread_routine, name, stacksize, arg, prio, xStack,xTaskBuffer,core_id);
+    if (*thread) {
+        return OS_SUCCESS;
+    }
+    return OS_FAIL;
+}
+
+/* Only self delete is supported */
+static inline void httpd_os_thread_delete(void)
+{
+    vTaskDelete(xTaskGetCurrentTaskHandle());
+}
+
+static inline void httpd_os_thread_sleep(int msecs)
+{
+    vTaskDelay(msecs / portTICK_RATE_MS);
+}
+
+static inline othread_t httpd_os_thread_handle(void)
+{
+    return xTaskGetCurrentTaskHandle();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! _OSAL_H_ */
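
This local osal.h is the point of the esp_http_server override: httpd_os_thread_create now creates the server task statically, with its control block in internal RAM and its stack allocated from SPIRAM, so a large HTTP stack no longer consumes internal heap. Application code is unchanged; a minimal usage sketch with the stock API (configuration values are illustrative):

    #include "esp_http_server.h"

    httpd_handle_t server = NULL;
    httpd_config_t config = HTTPD_DEFAULT_CONFIG();
    config.stack_size = 8 * 1024;    // stack now served from SPIRAM by the override above

    if (httpd_start(&server, &config) == ESP_OK) {
        // register URI handlers as usual
    }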

+ 49 - 0
components/heap/CMakeLists.txt

@@ -0,0 +1,49 @@
+set(srcs
+    "heap_caps.c"
+    "heap_caps_init.c"
+    "multi_heap.c"
+    "heap_tlsf.c")
+
+if(NOT CONFIG_HEAP_POISONING_DISABLED)
+    list(APPEND srcs "multi_heap_poisoning.c")
+endif()
+
+if(CONFIG_HEAP_TASK_TRACKING)
+    list(APPEND srcs "heap_task_info.c")
+endif()
+
+if(CONFIG_HEAP_TRACING_STANDALONE)
+    list(APPEND srcs "heap_trace_standalone.c")
+    set_source_files_properties(heap_trace_standalone.c
+        PROPERTIES COMPILE_FLAGS
+        -Wno-frame-address)
+endif()
+
+idf_component_register(SRCS "${srcs}"
+                    INCLUDE_DIRS include
+                    LDFRAGMENTS linker.lf
+                    PRIV_REQUIRES soc)
+
+if(CONFIG_HEAP_TRACING)
+    set(WRAP_FUNCTIONS
+        calloc
+        malloc
+        free
+        realloc
+        heap_caps_malloc
+        heap_caps_free
+        heap_caps_realloc
+        heap_caps_malloc_default
+        heap_caps_realloc_default)
+
+    foreach(wrap ${WRAP_FUNCTIONS})
+        target_link_libraries(${COMPONENT_LIB} INTERFACE "-Wl,--wrap=${wrap}")
+    endforeach()
+endif()
+
+if(NOT CMAKE_BUILD_EARLY_EXPANSION)
+    idf_build_get_property(build_components BUILD_COMPONENTS)
+    if(freertos IN_LIST build_components)
+        target_compile_options(${COMPONENT_TARGET} PRIVATE "-DMULTI_HEAP_FREERTOS")
+    endif()
+endif()
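
When CONFIG_HEAP_TRACING is set, the --wrap flags make the linker divert every call to the listed allocator entry points through tracing shims provided by this component. Illustratively (this is how GNU ld's --wrap works in general, not the component's exact code), a wrapped pair looks like:

    #include <stddef.h>

    void *__real_malloc(size_t size);        // the original implementation, resolved by the linker

    void *__wrap_malloc(size_t size)         // what callers of malloc() actually reach
    {
        void *ptr = __real_malloc(size);
        // a tracing build records ptr/size here before returning it
        return ptr;
    }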

+ 74 - 0
components/heap/Kconfig

@@ -0,0 +1,74 @@
+menu "Heap memory debugging"
+
+    choice HEAP_CORRUPTION_DETECTION
+        prompt "Heap corruption detection"
+        default HEAP_POISONING_DISABLED
+        help
+            Enable heap poisoning features to detect heap corruption caused by out-of-bounds access to heap memory.
+
+            See the "Heap Memory Debugging" page of the IDF documentation
+            for a description of each level of heap corruption detection.
+
+        config HEAP_POISONING_DISABLED
+            bool "Basic (no poisoning)"
+        config HEAP_POISONING_LIGHT
+            bool "Light impact"
+        config HEAP_POISONING_COMPREHENSIVE
+            bool "Comprehensive"
+    endchoice
+
+    choice HEAP_TRACING_DEST
+        bool "Heap tracing"
+        default HEAP_TRACING_OFF
+        help
+            Enables the heap tracing API defined in esp_heap_trace.h.
+
+            This function causes a moderate increase in IRAM code side and a minor increase in heap function
+            (malloc/free/realloc) CPU overhead, even when the tracing feature is not used.
+            So it's best to keep it disabled unless tracing is being used.
+
+        config HEAP_TRACING_OFF
+            bool "Disabled"
+        config HEAP_TRACING_STANDALONE
+            bool "Standalone"
+            select HEAP_TRACING
+        config HEAP_TRACING_TOHOST
+            bool "Host-based"
+            select HEAP_TRACING
+    endchoice
+
+    config HEAP_TRACING
+        bool
+        default F
+        help
+            Enables/disables heap tracing API.
+
+    config HEAP_TRACING_STACK_DEPTH
+        int "Heap tracing stack depth"
+        range 0 0 if IDF_TARGET_ARCH_RISCV # Disabled for RISC-V due to `__builtin_return_address` limitation
+        default 0 if IDF_TARGET_ARCH_RISCV
+        range 0 10
+        default 2
+        depends on HEAP_TRACING
+        help
+            Number of stack frames to save when tracing heap operation callers.
+
+            More stack frames uses more memory in the heap trace buffer (and slows down allocation), but
+            can provide useful information.
+
+    config HEAP_TASK_TRACKING
+        bool "Enable heap task tracking"
+        depends on !HEAP_POISONING_DISABLED
+        help
+            Enables tracking the task responsible for each heap allocation.
+
+            This function depends on heap poisoning being enabled and adds four more bytes of overhead for each block
+            allocated.
+
+    config HEAP_ABORT_WHEN_ALLOCATION_FAILS
+        bool "Abort if memory allocation fails"
+        default n
+        help
+            When enabled, if a memory allocation operation fails it will cause a system abort.
+
+endmenu
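
For reference, the standalone tracing mode selectable here is driven from application code through the esp_heap_trace.h API that this commit also brings in; a typical leak-hunting sequence (the buffer size is illustrative):

    #include "esp_heap_trace.h"

    #define TRACE_RECORDS 100
    static heap_trace_record_t trace_records[TRACE_RECORDS];

    void hunt_for_leaks(void)
    {
        heap_trace_init_standalone(trace_records, TRACE_RECORDS);   // register the record buffer once
        heap_trace_start(HEAP_TRACE_LEAKS);
        // ... run the code suspected of leaking ...
        heap_trace_stop();
        heap_trace_dump();                                          // prints allocations not yet freed
    }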

+ 32 - 0
components/heap/component.mk

@@ -0,0 +1,32 @@
+#
+# Component Makefile
+#
+
+COMPONENT_OBJS := heap_caps_init.o heap_caps.o multi_heap.o heap_tlsf.o
+
+ifndef CONFIG_HEAP_POISONING_DISABLED
+COMPONENT_OBJS += multi_heap_poisoning.o
+
+ifdef CONFIG_HEAP_TASK_TRACKING
+COMPONENT_OBJS += heap_task_info.o
+endif
+endif
+
+ifdef CONFIG_HEAP_TRACING_STANDALONE
+
+COMPONENT_OBJS += heap_trace_standalone.o
+
+endif
+
+ifdef CONFIG_HEAP_TRACING
+
+WRAP_FUNCTIONS = calloc malloc free realloc heap_caps_malloc heap_caps_free heap_caps_realloc heap_caps_malloc_default heap_caps_realloc_default
+WRAP_ARGUMENT := -Wl,--wrap=
+
+COMPONENT_ADD_LDFLAGS = -l$(COMPONENT_NAME) $(addprefix $(WRAP_ARGUMENT),$(WRAP_FUNCTIONS))
+
+endif
+
+COMPONENT_ADD_LDFRAGMENTS += linker.lf
+
+CFLAGS += -DMULTI_HEAP_FREERTOS

+ 609 - 0
components/heap/heap_caps.c

@@ -0,0 +1,609 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+#include <stdio.h>
+#include <sys/param.h>
+#include "esp_attr.h"
+#include "esp_heap_caps.h"
+#include "multi_heap.h"
+#include "esp_log.h"
+#include "heap_private.h"
+#include "esp_system.h"
+
+/*
+This file, combined with a region allocator that supports multiple heaps, solves the problem that the ESP32 has RAM
+that's slightly heterogeneous. Some RAM can be byte-accessed, some allows only 32-bit accesses, some can execute memory,
+some can be remapped by the MMU to only be accessed by a certain PID etc. In order to allow the most flexible memory
+allocation possible, this code makes it possible to request memory that has certain capabilities. The code will then use
+its knowledge of how the memory is configured along with a priority scheme to allocate that memory in the most sane way
+possible. This should optimize the amount of RAM accessible to the code without hardwiring addresses.
+*/
+
+static esp_alloc_failed_hook_t alloc_failed_callback;
+
+/*
+  This takes a memory chunk in a region that can be addressed as both DRAM as well as IRAM. It will convert it to
+  IRAM in such a way that it can be later freed. It assumes both the address as well as the length to be word-aligned.
+  It returns a region that's 1 word smaller than the region given because it stores the original Dram address there.
+*/
+IRAM_ATTR static void *dram_alloc_to_iram_addr(void *addr, size_t len)
+{
+    uintptr_t dstart = (uintptr_t)addr; //First word
+    uintptr_t dend = dstart + len - 4; //Last word
+    assert(esp_ptr_in_diram_dram((void *)dstart));
+    assert(esp_ptr_in_diram_dram((void *)dend));
+    assert((dstart & 3) == 0);
+    assert((dend & 3) == 0);
+#if SOC_DIRAM_INVERTED // We want the word before the result to hold the DRAM address
+    uint32_t *iptr = esp_ptr_diram_dram_to_iram((void *)dend);
+#else
+    uint32_t *iptr = esp_ptr_diram_dram_to_iram((void *)dstart);
+#endif
+    *iptr = dstart;
+    return iptr + 1;
+}
+
+
+static void heap_caps_alloc_failed(size_t requested_size, uint32_t caps, const char *function_name)
+{
+    if (alloc_failed_callback) {
+        alloc_failed_callback(requested_size, caps, function_name);
+    }
+
+    #ifdef CONFIG_HEAP_ABORT_WHEN_ALLOCATION_FAILS
+    esp_system_abort("Memory allocation failed");
+    #endif
+}
+
+esp_err_t heap_caps_register_failed_alloc_callback(esp_alloc_failed_hook_t callback)
+{
+    if (callback == NULL) {
+        return ESP_ERR_INVALID_ARG;
+    }
+
+    alloc_failed_callback = callback;
+
+    return ESP_OK;
+}
+
+bool heap_caps_match(const heap_t *heap, uint32_t caps)
+{
+    return heap->heap != NULL && ((get_all_caps(heap) & caps) == caps);
+}
+
+/*
+Routine to allocate a bit of memory with certain capabilities. caps is a bitfield of MALLOC_CAP_* bits.
+*/
+IRAM_ATTR void *heap_caps_malloc( size_t size, uint32_t caps )
+{
+    void *ret = NULL;
+
+    if (size > HEAP_SIZE_MAX) {
+        // Avoids int overflow when adding small numbers to size, or
+        // calculating 'end' from start+size, by limiting 'size' to the possible range
+        heap_caps_alloc_failed(size, caps, __func__);
+
+        return NULL;
+    }
+
+    if (caps & MALLOC_CAP_EXEC) {
+        //MALLOC_CAP_EXEC forces an alloc from IRAM. There is a region which has both this as well as the following
+        //caps, but the following caps are not possible for IRAM.  Thus, the combination is impossible and we return
+        //NULL directly, even although our heap capabilities (based on soc_memory_tags & soc_memory_regions) would
+        //indicate there is a tag for this.
+        if ((caps & MALLOC_CAP_8BIT) || (caps & MALLOC_CAP_DMA)) {
+            heap_caps_alloc_failed(size, caps, __func__);
+
+            return NULL;
+        }
+        caps |= MALLOC_CAP_32BIT; // IRAM is 32-bit accessible RAM
+    }
+
+    if (caps & MALLOC_CAP_32BIT) {
+        /* 32-bit accessible RAM should allocated in 4 byte aligned sizes
+         * (Future versions of ESP-IDF should possibly fail if an invalid size is requested)
+         */
+        size = (size + 3) & (~3); // int overflow checked above
+    }
+
+    for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
+        //Iterate over heaps and check capabilities at this priority
+        heap_t *heap;
+        SLIST_FOREACH(heap, &registered_heaps, next) {
+            if (heap->heap == NULL) {
+                continue;
+            }
+            if ((heap->caps[prio] & caps) != 0) {
+                //Heap has at least one of the caps requested. If caps has other bits set that this prio
+                //doesn't cover, see if they're available in other prios.
+                if ((get_all_caps(heap) & caps) == caps) {
+                    //This heap can satisfy all the requested capabilities. See if we can grab some memory using it.
+                    if ((caps & MALLOC_CAP_EXEC) && esp_ptr_in_diram_dram((void *)heap->start)) {
+                        //This is special, insofar that what we're going to get back is a DRAM address. If so,
+                        //we need to 'invert' it (lowest address in DRAM == highest address in IRAM and vice-versa) and
+                        //add a pointer to the DRAM equivalent before the address we're going to return.
+                        ret = multi_heap_malloc(heap->heap, size + 4);  // int overflow checked above
+
+                        if (ret != NULL) {
+                            return dram_alloc_to_iram_addr(ret, size + 4);  // int overflow checked above
+                        }
+                    } else {
+                        //Just try to alloc, nothing special.
+                        ret = multi_heap_malloc(heap->heap, size);
+                        if (ret != NULL) {
+                            return ret;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    heap_caps_alloc_failed(size, caps, __func__);
+
+    //Nothing usable found.
+    return NULL;
+}
+
+
+#define MALLOC_DISABLE_EXTERNAL_ALLOCS -1
+//Dual-use: -1 (=MALLOC_DISABLE_EXTERNAL_ALLOCS) disables allocations in external memory, >=0 sets the limit for allocations preferring internal memory.
+static int malloc_alwaysinternal_limit=MALLOC_DISABLE_EXTERNAL_ALLOCS;
+
+void heap_caps_malloc_extmem_enable(size_t limit)
+{
+    malloc_alwaysinternal_limit=limit;
+}
+
+/*
+ Default memory allocation implementation. Should return standard 8-bit memory. malloc() essentially resolves to this function.
+*/
+IRAM_ATTR void *heap_caps_malloc_default( size_t size )
+{
+    if (malloc_alwaysinternal_limit==MALLOC_DISABLE_EXTERNAL_ALLOCS) {
+        return heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL);
+    } else {
+        void *r;
+        if (size <= (size_t)malloc_alwaysinternal_limit) {
+            r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
+        } else {
+            r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM );
+        }
+        if (r==NULL) {
+            //try again while being less picky
+            r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT );
+        }
+        return r;
+    }
+}
+
+/*
+ Same for realloc()
+ Note: keep the logic in here the same as in heap_caps_malloc_default (or merge the two as soon as this gets more complex...)
+ */
+IRAM_ATTR void *heap_caps_realloc_default( void *ptr, size_t size )
+{
+    if (malloc_alwaysinternal_limit==MALLOC_DISABLE_EXTERNAL_ALLOCS) {
+        return heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
+    } else {
+        void *r;
+        if (size <= (size_t)malloc_alwaysinternal_limit) {
+            r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
+        } else {
+            r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM );
+        }
+        if (r==NULL && size>0) {
+            //We needed to allocate memory, but we didn't. Try again while being less picky.
+            r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT );
+        }
+        return r;
+    }
+}
+
+/*
+ Memory allocation as preference in decreasing order.
+ */
+IRAM_ATTR void *heap_caps_malloc_prefer( size_t size, size_t num, ... )
+{
+    va_list argp;
+    va_start( argp, num );
+    void *r = NULL;
+    while (num--) {
+        uint32_t caps = va_arg( argp, uint32_t );
+        r = heap_caps_malloc( size, caps );
+        if (r != NULL) {
+            break;
+        }
+    }
+    va_end( argp );
+    return r;
+}
+
+/*
+ Memory reallocation as preference in decreasing order.
+ */
+IRAM_ATTR void *heap_caps_realloc_prefer( void *ptr, size_t size, size_t num, ... )
+{
+    va_list argp;
+    va_start( argp, num );
+    void *r = NULL;
+    while (num--) {
+        uint32_t caps = va_arg( argp, uint32_t );
+        r = heap_caps_realloc( ptr, size, caps );
+        if (r != NULL || size == 0) {
+            break;
+        }
+    }
+    va_end( argp );
+    return r;
+}
+
+/*
+ Memory callocation as preference in decreasing order.
+ */
+IRAM_ATTR void *heap_caps_calloc_prefer( size_t n, size_t size, size_t num, ... )
+{
+    va_list argp;
+    va_start( argp, num );
+    void *r = NULL;
+    while (num--) {
+        uint32_t caps = va_arg( argp, uint32_t );
+        r = heap_caps_calloc( n, size, caps );
+        if (r != NULL) break;
+    }
+    va_end( argp );
+    return r;
+}
+
+/* Find the heap which belongs to ptr, or return NULL if it's
+   not in any heap.
+
+   (This confirms if ptr is inside the heap's region, doesn't confirm if 'ptr'
+   is an allocated block or is some other random address inside the heap.)
+*/
+IRAM_ATTR static heap_t *find_containing_heap(void *ptr )
+{
+    intptr_t p = (intptr_t)ptr;
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        if (heap->heap != NULL && p >= heap->start && p < heap->end) {
+            return heap;
+        }
+    }
+    return NULL;
+}
+
+IRAM_ATTR void heap_caps_free( void *ptr)
+{
+    if (ptr == NULL) {
+        return;
+    }
+
+    if (esp_ptr_in_diram_iram(ptr)) {
+        //Memory allocated here is actually allocated in the DRAM alias region and
+        //cannot be de-allocated as usual. dram_alloc_to_iram_addr stores a pointer to
+        //the equivalent DRAM address, though; free that.
+        uint32_t *dramAddrPtr = (uint32_t *)ptr;
+        ptr = (void *)dramAddrPtr[-1];
+    }
+
+    heap_t *heap = find_containing_heap(ptr);
+    assert(heap != NULL && "free() target pointer is outside heap areas");
+    multi_heap_free(heap->heap, ptr);
+}
+
+IRAM_ATTR void *heap_caps_realloc( void *ptr, size_t size, uint32_t caps)
+{
+    bool ptr_in_diram_case = false;
+    heap_t *heap = NULL;
+    void *dram_ptr = NULL;
+
+    if (ptr == NULL) {
+        return heap_caps_malloc(size, caps);
+    }
+
+    if (size == 0) {
+        heap_caps_free(ptr);
+        return NULL;
+    }
+
+    if (size > HEAP_SIZE_MAX) {
+        heap_caps_alloc_failed(size, caps, __func__);
+
+        return NULL;
+    }
+
+    //The pointer to memory may be aliased, we need to
+    //recover the corresponding address before to manage a new allocation:
+    if(esp_ptr_in_diram_iram((void *)ptr)) {
+        uint32_t *dram_addr = (uint32_t *)ptr;
+        dram_ptr  = (void *)dram_addr[-1];
+
+        heap = find_containing_heap(dram_ptr);
+        assert(heap != NULL && "realloc() pointer is outside heap areas");
+
+        //with pointers that reside on diram space, we avoid using
+        //the realloc implementation due to address translation issues,
+        //instead force a malloc/copy/free
+        ptr_in_diram_case = true;
+
+    } else {
+        heap = find_containing_heap(ptr);
+        assert(heap != NULL && "realloc() pointer is outside heap areas");
+    }
+
+    // are the existing heap's capabilities compatible with the
+    // requested ones?
+    bool compatible_caps = (caps & get_all_caps(heap)) == caps;
+
+    if (compatible_caps && !ptr_in_diram_case) {
+        // try to reallocate this memory within the same heap
+        // (which will resize the block if it can)
+        void *r = multi_heap_realloc(heap->heap, ptr, size);
+        if (r != NULL) {
+            return r;
+        }
+    }
+
+    // if we couldn't do that, try to see if we can reallocate
+    // in a different heap with requested capabilities.
+    void *new_p = heap_caps_malloc(size, caps);
+    if (new_p != NULL) {
+        size_t old_size = 0;
+
+        //If we're dealing with aliased ptr, information regarding its containing
+        //heap can only be obtained with translated address.
+        if(ptr_in_diram_case) {
+            old_size = multi_heap_get_allocated_size(heap->heap, dram_ptr);
+        } else {
+            old_size = multi_heap_get_allocated_size(heap->heap, ptr);
+        }
+
+        assert(old_size > 0);
+        memcpy(new_p, ptr, MIN(size, old_size));
+        heap_caps_free(ptr);
+        return new_p;
+    }
+
+    heap_caps_alloc_failed(size, caps, __func__);
+
+    return NULL;
+}
+
+IRAM_ATTR void *heap_caps_calloc( size_t n, size_t size, uint32_t caps)
+{
+    void *result;
+    size_t size_bytes;
+
+    if (__builtin_mul_overflow(n, size, &size_bytes)) {
+        return NULL;
+    }
+
+    result = heap_caps_malloc(size_bytes, caps);
+    if (result != NULL) {
+        bzero(result, size_bytes);
+    }
+    return result;
+}
+
+size_t heap_caps_get_total_size(uint32_t caps)
+{
+    size_t total_size = 0;
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        if (heap_caps_match(heap, caps)) {
+            total_size += (heap->end - heap->start);
+        }
+    }
+    return total_size;
+}
+
+size_t heap_caps_get_free_size( uint32_t caps )
+{
+    size_t ret = 0;
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        if (heap_caps_match(heap, caps)) {
+            ret += multi_heap_free_size(heap->heap);
+        }
+    }
+    return ret;
+}
+
+size_t heap_caps_get_minimum_free_size( uint32_t caps )
+{
+    size_t ret = 0;
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        if (heap_caps_match(heap, caps)) {
+            ret += multi_heap_minimum_free_size(heap->heap);
+        }
+    }
+    return ret;
+}
+
+size_t heap_caps_get_largest_free_block( uint32_t caps )
+{
+    multi_heap_info_t info;
+    heap_caps_get_info(&info, caps);
+    return info.largest_free_block;
+}
+
+void heap_caps_get_info( multi_heap_info_t *info, uint32_t caps )
+{
+    bzero(info, sizeof(multi_heap_info_t));
+
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        if (heap_caps_match(heap, caps)) {
+            multi_heap_info_t hinfo;
+            multi_heap_get_info(heap->heap, &hinfo);
+
+            info->total_free_bytes += hinfo.total_free_bytes;
+            info->total_allocated_bytes += hinfo.total_allocated_bytes;
+            info->largest_free_block = MAX(info->largest_free_block,
+                                           hinfo.largest_free_block);
+            info->minimum_free_bytes += hinfo.minimum_free_bytes;
+            info->allocated_blocks += hinfo.allocated_blocks;
+            info->free_blocks += hinfo.free_blocks;
+            info->total_blocks += hinfo.total_blocks;
+        }
+    }
+}
+
+void heap_caps_print_heap_info( uint32_t caps )
+{
+    multi_heap_info_t info;
+    printf("Heap summary for capabilities 0x%08X:\n", caps);
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        if (heap_caps_match(heap, caps)) {
+            multi_heap_get_info(heap->heap, &info);
+
+            printf("  At 0x%08x len %d free %d allocated %d min_free %d\n",
+                   heap->start, heap->end - heap->start, info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes);
+            printf("    largest_free_block %d alloc_blocks %d free_blocks %d total_blocks %d\n",
+                   info.largest_free_block, info.allocated_blocks,
+                   info.free_blocks, info.total_blocks);
+        }
+    }
+    printf("  Totals:\n");
+    heap_caps_get_info(&info, caps);
+
+    printf("    free %d allocated %d min_free %d largest_free_block %d\n", info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes, info.largest_free_block);
+}
+
+bool heap_caps_check_integrity(uint32_t caps, bool print_errors)
+{
+    bool all_heaps = caps & MALLOC_CAP_INVALID;
+    bool valid = true;
+
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        if (heap->heap != NULL
+            && (all_heaps || (get_all_caps(heap) & caps) == caps)) {
+            valid = multi_heap_check(heap->heap, print_errors) && valid;
+        }
+    }
+
+    return valid;
+}
+
+bool heap_caps_check_integrity_all(bool print_errors)
+{
+    return heap_caps_check_integrity(MALLOC_CAP_INVALID, print_errors);
+}
+
+bool heap_caps_check_integrity_addr(intptr_t addr, bool print_errors)
+{
+    heap_t *heap = find_containing_heap((void *)addr);
+    if (heap == NULL) {
+        return false;
+    }
+    return multi_heap_check(heap->heap, print_errors);
+}
+
+void heap_caps_dump(uint32_t caps)
+{
+    bool all_heaps = caps & MALLOC_CAP_INVALID;
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        if (heap->heap != NULL
+            && (all_heaps || (get_all_caps(heap) & caps) == caps)) {
+            multi_heap_dump(heap->heap);
+        }
+    }
+}
+
+void heap_caps_dump_all(void)
+{
+    heap_caps_dump(MALLOC_CAP_INVALID);
+}
+
+size_t heap_caps_get_allocated_size( void *ptr )
+{
+    heap_t *heap = find_containing_heap(ptr);
+    size_t size = multi_heap_get_allocated_size(heap->heap, ptr);
+    return size;
+}
+
+IRAM_ATTR void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps)
+{
+    void *ret = NULL;
+
+    if(!alignment) {
+        return NULL;
+    }
+
+    //Alignment must be a power of two:
+    if((alignment & (alignment - 1)) != 0) {
+        return NULL;
+    }
+
+    if (size > HEAP_SIZE_MAX) {
+        // Avoids int overflow when adding small numbers to size, or
+        // calculating 'end' from start+size, by limiting 'size' to the possible range
+        heap_caps_alloc_failed(size, caps, __func__);
+
+        return NULL;
+    }
+
+    for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
+        //Iterate over heaps and check capabilities at this priority
+        heap_t *heap;
+        SLIST_FOREACH(heap, &registered_heaps, next) {
+            if (heap->heap == NULL) {
+                continue;
+            }
+            if ((heap->caps[prio] & caps) != 0) {
+                //Heap has at least one of the caps requested. If caps has other bits set that this prio
+                //doesn't cover, see if they're available in other prios.
+                if ((get_all_caps(heap) & caps) == caps) {
+                    //Just try to alloc, nothing special.
+                    ret = multi_heap_aligned_alloc(heap->heap, size, alignment);
+                    if (ret != NULL) {
+                        return ret;
+                    }
+                }
+            }
+        }
+    }
+
+    heap_caps_alloc_failed(size, caps, __func__);
+
+    //Nothing usable found.
+    return NULL;
+}
+
+IRAM_ATTR void heap_caps_aligned_free(void *ptr)
+{
+    heap_caps_free(ptr);
+}
+
+void *heap_caps_aligned_calloc(size_t alignment, size_t n, size_t size, uint32_t caps)
+{
+    size_t size_bytes;
+    if (__builtin_mul_overflow(n, size, &size_bytes)) {
+        return NULL;
+    }
+
+    void *ptr = heap_caps_aligned_alloc(alignment,size_bytes, caps);
+    if(ptr != NULL) {
+        memset(ptr, 0, size_bytes);
+    }
+
+    return ptr;
+}
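
The capability bits referenced throughout this file are combined by callers to steer an allocation toward a particular kind of RAM; a few representative calls using the functions defined above:

    #include <stdint.h>
    #include "esp_heap_caps.h"

    // DMA-capable internal RAM, e.g. for an SPI or I2S buffer
    uint8_t *dma_buf = heap_caps_malloc(4096, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);

    // Large scratch buffer that is allowed to live in external SPIRAM
    uint8_t *scratch = heap_caps_malloc(128 * 1024, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);

    // Remaining free external memory
    size_t free_spiram = heap_caps_get_free_size(MALLOC_CAP_SPIRAM);

    heap_caps_free(scratch);
    heap_caps_free(dma_buf);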

+ 241 - 0
components/heap/heap_caps_init.c

@@ -0,0 +1,241 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "heap_private.h"
+#include <assert.h>
+#include <string.h>
+#include <sys/lock.h>
+
+#include "esp_log.h"
+#include "multi_heap.h"
+#include "multi_heap_platform.h"
+#include "esp_heap_caps_init.h"
+#include "soc/soc_memory_layout.h"
+
+static const char *TAG = "heap_init";
+
+/* Linked-list of registered heaps */
+struct registered_heap_ll registered_heaps;
+
+static void register_heap(heap_t *region)
+{
+    size_t heap_size = region->end - region->start;
+    assert(heap_size <= HEAP_SIZE_MAX);
+    region->heap = multi_heap_register((void *)region->start, heap_size);
+    if (region->heap != NULL) {
+        ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
+    }
+}
+
+void heap_caps_enable_nonos_stack_heaps(void)
+{
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        // Assume any not-yet-registered heap is
+        // a nonos-stack heap
+        if (heap->heap == NULL) {
+            register_heap(heap);
+            if (heap->heap != NULL) {
+                multi_heap_set_lock(heap->heap, &heap->heap_mux);
+            }
+        }
+    }
+}
+
+/* Initialize the heap allocator to use all of the memory not
+   used by static data or reserved for other purposes
+ */
+void heap_caps_init(void)
+{
+    /* Get the array of regions that we can use for heaps
+       (with reserved memory removed already.)
+     */
+    size_t num_regions = soc_get_available_memory_region_max_count();
+    soc_memory_region_t regions[num_regions];
+    num_regions = soc_get_available_memory_regions(regions);
+
+    //The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory,
+    //it's useful to coalesce adjacent regions that have the same type.
+    for (size_t i = 1; i < num_regions; i++) {
+        soc_memory_region_t *a = &regions[i - 1];
+        soc_memory_region_t *b = &regions[i];
+        if (b->start == (intptr_t)(a->start + a->size) && b->type == a->type ) {
+            a->type = -1;
+            b->start = a->start;
+            b->size += a->size;
+        }
+    }
+
+    /* Count the heaps left after merging */
+    size_t num_heaps = 0;
+    for (size_t i = 0; i < num_regions; i++) {
+        if (regions[i].type != -1) {
+            num_heaps++;
+        }
+    }
+
+    /* Start by allocating the registered heap data on the stack.
+
+       Once we have a heap to copy it to, we will copy it to a heap buffer.
+    */
+    heap_t temp_heaps[num_heaps];
+    size_t heap_idx = 0;
+
+    ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
+    for (size_t i = 0; i < num_regions; i++) {
+        soc_memory_region_t *region = &regions[i];
+        const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
+        heap_t *heap = &temp_heaps[heap_idx];
+        if (region->type == -1) {
+            continue;
+        }
+        heap_idx++;
+        assert(heap_idx <= num_heaps);
+
+        memcpy(heap->caps, type->caps, sizeof(heap->caps));
+        heap->start = region->start;
+        heap->end = region->start + region->size;
+        MULTI_HEAP_LOCK_INIT(&heap->heap_mux);
+        if (type->startup_stack) {
+            /* Will be registered when OS scheduler starts */
+            heap->heap = NULL;
+        } else {
+            register_heap(heap);
+        }
+        SLIST_NEXT(heap, next) = NULL;
+
+        ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
+                       region->start, region->size, region->size / 1024, type->name);
+    }
+
+    assert(heap_idx == num_heaps);
+
+    /* Allocate the permanent heap data that we'll use as a linked list at runtime.
+
+       Allocate this part of data contiguously, even though it's a linked list... */
+    assert(SLIST_EMPTY(&registered_heaps));
+
+    heap_t *heaps_array = NULL;
+    for (size_t i = 0; i < num_heaps; i++) {
+        if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
+            /* use the first DRAM heap which can fit the data */
+            heaps_array = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_heaps);
+            if (heaps_array != NULL) {
+                break;
+            }
+        }
+    }
+    assert(heaps_array != NULL); /* if NULL, there's not enough free startup heap space */
+
+    memcpy(heaps_array, temp_heaps, sizeof(heap_t)*num_heaps);
+
+    /* Iterate the heaps and set their locks, also add them to the linked list. */
+    for (size_t i = 0; i < num_heaps; i++) {
+        if (heaps_array[i].heap != NULL) {
+            multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
+        }
+        if (i == 0) {
+            SLIST_INSERT_HEAD(&registered_heaps, &heaps_array[0], next);
+        } else {
+            SLIST_INSERT_AFTER(&heaps_array[i-1], &heaps_array[i], next);
+        }
+    }
+}
+
+esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
+{
+    if (start == 0) {
+        return ESP_ERR_INVALID_ARG;
+    }
+
+    for (size_t i = 0; i < soc_memory_region_count; i++) {
+        const soc_memory_region_t *region = &soc_memory_regions[i];
+        // Test requested start only as 'end' may be in a different region entry, assume 'end' has same caps
+        if (region->start <= start && (intptr_t)(region->start + region->size) > start) {
+            const uint32_t *caps = soc_memory_types[region->type].caps;
+            return heap_caps_add_region_with_caps(caps, start, end);
+        }
+    }
+
+    return ESP_ERR_NOT_FOUND;
+}
+
+esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
+{
+    esp_err_t err = ESP_FAIL;
+    if (caps == NULL || start == 0 || end == 0 || end <= start) {
+        return ESP_ERR_INVALID_ARG;
+    }
+
+    //Check if region overlaps the start and/or end of an existing region. If so, the
+    //region is invalid (or maybe added twice)
+    /*
+     *  assume that in on region, start must be less than end (cannot equal to) !!
+     *  Specially, the 4th scenario can be allowed. For example, allocate memory from heap,
+     *  then change the capability and call this function to create a new region for special
+     *  application.
+     *  In the following chart, 'start = start' and 'end = end' is contained in 3rd scenario.
+     *  This all equal scenario is incorrect because the same region cannot be add twice. For example,
+     *  add the .bss memory to region twice, if not do the check, it will cause exception.
+     *
+     *  the existing heap region                                  s(tart)                e(nd)
+     *                                                            |----------------------|
+     *  1.add region  [Correct]   (s1<s && e1<=s)           |-----|
+     *  2.add region  [Incorrect] (s2<=s && s<e2<=e)        |---------------|
+     *  3.add region  [Incorrect] (s3<=s && e<e3)           |-------------------------------------|
+     *  4 add region  [Correct]   (s<s4<e && s<e4<=e)                  |-------|
+     *  5.add region  [Incorrect] (s<s5<e && e<e5)                     |----------------------------|
+     *  6.add region  [Correct]   (e<=s6 && e<e6)                                        |----|
+     */
+
+    heap_t *heap;
+    SLIST_FOREACH(heap, &registered_heaps, next) {
+        if ((start <= heap->start && end > heap->start)
+                || (start < heap->end && end > heap->end)) {
+            return ESP_FAIL;
+        }
+    }
+
+    heap_t *p_new = heap_caps_malloc(sizeof(heap_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
+    if (p_new == NULL) {
+        err = ESP_ERR_NO_MEM;
+        goto done;
+    }
+    memcpy(p_new->caps, caps, sizeof(p_new->caps));
+    p_new->start = start;
+    p_new->end = end;
+    MULTI_HEAP_LOCK_INIT(&p_new->heap_mux);
+    p_new->heap = multi_heap_register((void *)start, end - start);
+    SLIST_NEXT(p_new, next) = NULL;
+    if (p_new->heap == NULL) {
+        err = ESP_ERR_INVALID_SIZE;
+        goto done;
+    }
+    multi_heap_set_lock(p_new->heap, &p_new->heap_mux);
+
+    /* This insertion is atomic with respect to readers of registered_heaps,
+       so we only need to guard against concurrent writers,
+       not against readers. */
+    static multi_heap_lock_t registered_heaps_write_lock = MULTI_HEAP_LOCK_STATIC_INITIALIZER;
+    MULTI_HEAP_LOCK(&registered_heaps_write_lock);
+    SLIST_INSERT_HEAD(&registered_heaps, p_new, next);
+    MULTI_HEAP_UNLOCK(&registered_heaps_write_lock);
+
+    err = ESP_OK;
+
+ done:
+    if (err != ESP_OK) {
+        free(p_new);
+    }
+    return err;
+}
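+
+/*
+ * A minimal usage sketch for the two functions above. The address range and the
+ * capability set are hypothetical placeholders (a board-specific memory window
+ * would be used in practice); MALLOC_CAP_* constants come from esp_heap_caps.h:
+ *
+ *    // Inherit the capabilities of whichever soc_memory_regions entry contains 'start':
+ *    esp_err_t err = heap_caps_add_region(0x3F800000, 0x3F900000);
+ *
+ *    // Or register the range with an explicit, prioritised capability set:
+ *    const uint32_t my_caps[SOC_MEMORY_TYPE_NO_PRIOS] = {
+ *        MALLOC_CAP_8BIT | MALLOC_CAP_32BIT,   // remaining priority slots default to 0
+ *    };
+ *    err = heap_caps_add_region_with_caps(my_caps, 0x3F800000, 0x3F900000);
+ */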

+ 77 - 0
components/heap/heap_private.h

@@ -0,0 +1,77 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <soc/soc_memory_layout.h>
+#include "multi_heap.h"
+#include "multi_heap_platform.h"
+#include "sys/queue.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Some common heap registration data structures used
+   for heap_caps_init.c to share heap information with heap_caps.c
+*/
+
+#define HEAP_SIZE_MAX (SOC_MAX_CONTIGUOUS_RAM_SIZE)
+
+/* Type for describing each registered heap */
+typedef struct heap_t_ {
+    uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS]; ///< Capabilities for the type of memory in this heap (as a prioritised set). Copied from soc_memory_types so it's in RAM not flash.
+    intptr_t start;
+    intptr_t end;
+    multi_heap_lock_t heap_mux;
+    multi_heap_handle_t heap;
+    SLIST_ENTRY(heap_t_) next;
+} heap_t;
+
+/* All registered heaps.
+
+   Forms a singly linked list, even though most entries are contiguous.
+   This means that, at the expense of 4 bytes per heap, new heaps can be
+   added at runtime in a fast & thread-safe way.
+*/
+extern SLIST_HEAD(registered_heap_ll, heap_t_) registered_heaps;
+
+bool heap_caps_match(const heap_t *heap, uint32_t caps);
+
+/* return all possible capabilities (across all priorities) for a given heap */
+inline static IRAM_ATTR uint32_t get_all_caps(const heap_t *heap)
+{
+    if (heap->heap == NULL) {
+        return 0;
+    }
+    uint32_t all_caps = 0;
+    for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
+        all_caps |= heap->caps[prio];
+    }
+    return all_caps;
+}
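+
+/*
+ * Illustrative sketch of walking the registered heap list, using the sys/queue.h
+ * SLIST macros included above; this is the same pattern heap_caps.c and
+ * heap_task_info.c use internally (MALLOC_CAP_8BIT comes from esp_heap_caps.h):
+ *
+ *    heap_t *it;
+ *    SLIST_FOREACH(it, &registered_heaps, next) {
+ *        if (it->heap == NULL) {
+ *            continue;                       // region registered but not heap-managed
+ *        }
+ *        uint32_t caps = get_all_caps(it);   // union across all priority levels
+ *        if (caps & MALLOC_CAP_8BIT) {
+ *            // this heap can serve byte-addressable allocations
+ *        }
+ *    }
+ */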
+
+/*
+ Because we don't want to add _another_ known allocation method to the stack of functions to trace wrt memory tracing,
+ these are declared private. The newlib malloc()/realloc() implementation also calls these, so they are declared
+ separately in newlib/syscalls.c.
+*/
+void *heap_caps_realloc_default(void *p, size_t size);
+void *heap_caps_malloc_default(size_t size);
+
+
+#ifdef __cplusplus
+}
+#endif

+ 129 - 0
components/heap/heap_task_info.c

@@ -0,0 +1,129 @@
+// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <freertos/FreeRTOS.h>
+#include <freertos/task.h>
+#include <multi_heap.h>
+#include "multi_heap_internal.h"
+#include "heap_private.h"
+#include "esp_heap_task_info.h"
+
+#ifdef CONFIG_HEAP_TASK_TRACKING
+
+/*
+ * Return per-task heap allocation totals and lists of blocks.
+ *
+ * For each task that has allocated memory from the heap, return totals for
+ * allocations within regions matching one or more sets of capabilities.
+ *
+ * Optionally also return an array of structs providing details about each
+ * block allocated by one or more requested tasks, or by all tasks.
+ *
+ * Returns the number of block detail structs returned.
+ */
+size_t heap_caps_get_per_task_info(heap_task_info_params_t *params)
+{
+    heap_t *reg;
+    heap_task_block_t *blocks = params->blocks;
+    size_t count = *params->num_totals;
+    size_t remaining = params->max_blocks;
+
+    // Clear out totals for any prepopulated tasks.
+    if (params->totals) {
+        for (size_t i = 0; i < count; ++i) {
+            for (size_t type = 0; type < NUM_HEAP_TASK_CAPS; ++type) {
+                params->totals[i].size[type] = 0;
+                params->totals[i].count[type] = 0;
+            }
+        }
+    }
+
+    SLIST_FOREACH(reg, &registered_heaps, next) {
+        multi_heap_handle_t heap = reg->heap;
+        if (heap == NULL) {
+            continue;
+        }
+
+        // Check whether the capabilities of this heap region match one of the
+        // desired sets of capabilities.
+        uint32_t caps = get_all_caps(reg);
+        uint32_t type;
+        for (type = 0; type < NUM_HEAP_TASK_CAPS; ++type) {
+            if ((caps & params->mask[type]) == params->caps[type]) {
+                break;
+            }
+        }
+        if (type == NUM_HEAP_TASK_CAPS) {
+            continue;
+        }
+
+        multi_heap_block_handle_t b = multi_heap_get_first_block(heap);
+        multi_heap_internal_lock(heap);
+        for ( ; b ; b = multi_heap_get_next_block(heap, b)) {
+            if (multi_heap_is_free(b)) {
+                continue;
+            }
+            void *p = multi_heap_get_block_address(b);  // Safe, only arithmetic
+            size_t bsize = multi_heap_get_allocated_size(heap, p); // Validates
+            TaskHandle_t btask = (TaskHandle_t)multi_heap_get_block_owner(b);
+
+            // Accumulate per-task allocation totals.
+            if (params->totals) {
+                size_t i;
+                for (i = 0; i < count; ++i) {
+                    if (params->totals[i].task == btask) {
+                        break;
+                    }
+                }
+                if (i < count) {
+                    params->totals[i].size[type] += bsize;
+                    params->totals[i].count[type] += 1;
+                }
+                else {
+                    if (count < params->max_totals) {
+                        params->totals[count].task = btask;
+                        // Zero every cap-type slot for this newly seen task first, so
+                        // stale memory is never reported for the other types.
+                        for (size_t t = 0; t < NUM_HEAP_TASK_CAPS; ++t) {
+                            params->totals[count].size[t] = 0;
+                            params->totals[count].count[t] = 0;
+                        }
+                        params->totals[count].size[type] = bsize;
+                        params->totals[count].count[type] = 1;
+                        ++count;
+                    }
+                }
+            }
+
+            // Return details about allocated blocks for selected tasks.
+            if (blocks && remaining > 0) {
+                if (params->tasks) {
+                    size_t i;
+                    for (i = 0; i < params->num_tasks; ++i) {
+                        if (btask == params->tasks[i]) {
+                            break;
+                        }
+                    }
+                    if (i == params->num_tasks) {
+                        continue;
+                    }
+                }
+                blocks->task = btask;
+                blocks->address = p;
+                blocks->size = bsize;
+                ++blocks;
+                --remaining;
+            }
+        }
+        multi_heap_internal_unlock(heap);
+    }
+    *params->num_totals = count;
+    return params->max_blocks - remaining;
+}
+
+#endif // CONFIG_HEAP_TASK_TRACKING
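+
+/*
+ * A minimal usage sketch. Field names follow their use in the function above; the
+ * heap_task_totals_t / heap_task_block_t type names are assumed from esp_heap_task_info.h.
+ * Slot 0 tracks internal 8-bit capable memory; the zeroed slot acts as a catch-all:
+ *
+ *    #define MAX_TASKS  16
+ *    #define MAX_BLOCKS 64
+ *
+ *    static heap_task_totals_t s_totals[MAX_TASKS];
+ *    static heap_task_block_t  s_blocks[MAX_BLOCKS];
+ *    static size_t s_num_totals = 0;
+ *
+ *    heap_task_info_params_t params = {
+ *        .caps       = { MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL, 0 },
+ *        .mask       = { MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL, 0 },
+ *        .tasks      = NULL,            // NULL: report blocks for all tasks
+ *        .num_tasks  = 0,
+ *        .totals     = s_totals,
+ *        .num_totals = &s_num_totals,
+ *        .max_totals = MAX_TASKS,
+ *        .blocks     = s_blocks,
+ *        .max_blocks = MAX_BLOCKS,
+ *    };
+ *    size_t num_blocks = heap_caps_get_per_task_info(&params);
+ */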

+ 1015 - 0
components/heap/heap_tlsf.c

@@ -0,0 +1,1015 @@
+/*
+** Two Level Segregated Fit memory allocator, version 3.1.
+** Written by Matthew Conte
+**	http://tlsf.baisoku.org
+**
+** Based on the original documentation by Miguel Masmano:
+**	http://www.gii.upv.es/tlsf/main/docs
+**
+** This implementation was written to the specification
+** of the document, therefore no GPL restrictions apply.
+**
+** Copyright (c) 2006-2016, Matthew Conte
+** All rights reserved.
+**
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions are met:
+**     * Redistributions of source code must retain the above copyright
+**       notice, this list of conditions and the following disclaimer.
+**     * Redistributions in binary form must reproduce the above copyright
+**       notice, this list of conditions and the following disclaimer in the
+**       documentation and/or other materials provided with the distribution.
+**     * Neither the name of the copyright holder nor the
+**       names of its contributors may be used to endorse or promote products
+**       derived from this software without specific prior written permission.
+**
+** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
+** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "multi_heap_config.h"
+#include "multi_heap.h"
+#include "multi_heap_internal.h"
+#include "heap_tlsf_config.h"
+#include "heap_tlsf.h"
+
+#include "esp_log.h"
+/*
+** Architecture-specific bit manipulation routines.
+**
+** TLSF achieves O(1) cost for malloc and free operations by limiting
+** the search for a free block to a free list of guaranteed size
+** adequate to fulfill the request, combined with efficient free list
+** queries using bitmasks and architecture-specific bit-manipulation
+** routines.
+**
+** Most modern processors provide instructions to count leading zeroes
+** in a word, find the lowest and highest set bit, etc. These
+** specific implementations will be used when available, falling back
+** to a reasonably efficient generic implementation.
+**
+** NOTE: TLSF spec relies on ffs/fls returning value 0..31.
+** ffs/fls return 1-32 by default, returning 0 for error.
+*/
+
+/* The TLSF control structure. */
+typedef struct control_t
+{
+	/* Empty lists point at this block to indicate they are free. */
+	block_header_t block_null;
+	
+	/* Local parameter for the pool */
+	unsigned int fl_index_count;
+	unsigned int fl_index_shift;
+	unsigned int fl_index_max;	
+	unsigned int sl_index_count;
+	unsigned int sl_index_count_log2;
+	unsigned int small_block_size;
+	size_t size;
+
+	/* Bitmaps for free lists. */
+	unsigned int fl_bitmap;
+	unsigned int *sl_bitmap;	
+
+	/* Head of free lists. */
+	block_header_t** blocks;
+} control_t;
+
+static inline __attribute__((__always_inline__)) int tlsf_ffs(unsigned int word)
+{
+	const unsigned int reverse = word & (~word + 1);
+	const int bit = 32 - __builtin_clz(reverse);
+	return bit - 1;
+}
+
+static inline __attribute__((__always_inline__)) int tlsf_fls(unsigned int word)
+{
+	const int bit = word ? 32 - __builtin_clz(word) : 0;
+	return bit - 1;
+}
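+
+/*
+ * Worked example for the two helpers above: for word = 360 (binary 1_0110_1000),
+ * tlsf_ffs() isolates the lowest set bit and returns 3, while tlsf_fls() returns
+ * the index of the highest set bit, 8. tlsf_fls(0) returns -1; tlsf_ffs() is only
+ * called on bitmaps already known to be non-zero (see search_suitable_block()).
+ */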
+
+/*
+** Set assert macro, if it has not been provided by the user.
+*/
+#if !defined (tlsf_assert)
+#define tlsf_assert assert
+#endif
+
+/*
+** Static assertion mechanism.
+*/
+#define _tlsf_glue2(x, y) x ## y
+#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
+#define tlsf_static_assert(exp) \
+	typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]
+
+/* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
+tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
+tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
+tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);
+
+static inline __attribute__((__always_inline__)) size_t align_up(size_t x, size_t align)
+{
+	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
+	return (x + (align - 1)) & ~(align - 1);
+}
+
+static inline __attribute__((__always_inline__)) size_t align_down(size_t x, size_t align)
+{
+	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
+	return x - (x & (align - 1));
+}
+
+static inline __attribute__((__always_inline__)) void* align_ptr(const void* ptr, size_t align)
+{
+	const tlsfptr_t aligned =
+		(tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
+	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
+	return tlsf_cast(void*, aligned);
+}
+
+/*
+** Adjust an allocation size to be aligned to word size, and no smaller
+** than internal minimum.
+*/
+static inline __attribute__((__always_inline__)) size_t adjust_request_size(tlsf_t tlsf, size_t size, size_t align)
+{
+	size_t adjust = 0;
+	if (size)
+	{
+		const size_t aligned = align_up(size, align);
+
+		/* aligned size must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
+		if (aligned < tlsf_block_size_max(tlsf))
+		{
+			adjust = tlsf_max(aligned, block_size_min);
+		}
+	}
+	return adjust;
+}
+
+/*
+** TLSF utility functions. In most cases, these are direct translations of
+** the documentation found in the white paper.
+*/
+
+static inline __attribute__((__always_inline__)) void mapping_insert(control_t *control, size_t size, int* fli, int* sli)
+{
+	int fl, sl;
+	if (size < control->small_block_size)
+	{
+		/* Store small blocks in first list. */
+		fl = 0;
+		sl = tlsf_cast(int, size) >> 2;
+	}
+	else
+	{
+		fl = tlsf_fls(size);
+		sl = tlsf_cast(int, size >> (fl - control->sl_index_count_log2)) ^ (1 << control->sl_index_count_log2);
+		fl -= (control->fl_index_shift - 1);
+	}
+	*fli = fl;
+	*sli = sl;
+}
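+
+/*
+ * Worked example, assuming a pool configured with sl_index_count_log2 = 5
+ * (so fl_index_shift = 7 and small_block_size = 128; see control_construct() below):
+ * for size = 1000, tlsf_fls(1000) = 9, so
+ *     sl = (1000 >> (9 - 5)) ^ (1 << 5) = 62 ^ 32 = 30
+ *     fl = 9 - (7 - 1)                  = 3
+ * i.e. the block maps to first-level list 3, second-level slot 30. Sizes below
+ * 128 bytes all map to fl = 0 with sl = size >> 2.
+ */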
+
+/* This version rounds up to the next block size (for allocations) */
+static inline __attribute__((__always_inline__)) void mapping_search(control_t *control, size_t size, int* fli, int* sli)
+{
+	if (size >= control->small_block_size)
+	{
+		const size_t round = (1 << (tlsf_fls(size) - control->sl_index_count_log2)) - 1;
+		size += round;
+	}
+	mapping_insert(control, size, fli, sli);
+}
+
+static inline __attribute__((__always_inline__)) block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
+{
+	int fl = *fli;
+	int sl = *sli;
+
+	/*
+	** First, search for a block in the list associated with the given
+	** fl/sl index.
+	*/
+	unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
+	if (!sl_map)
+	{
+		/* No block exists. Search in the next largest first-level list. */
+		const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
+		if (!fl_map)
+		{
+			/* No free blocks available, memory has been exhausted. */
+			return 0;
+		}
+
+		fl = tlsf_ffs(fl_map);
+		*fli = fl;
+		sl_map = control->sl_bitmap[fl];
+	}
+	tlsf_assert(sl_map && "internal error - second level bitmap is null");
+	sl = tlsf_ffs(sl_map);
+	*sli = sl;
+
+	/* Return the first block in the free list. */
+	return control->blocks[fl*control->sl_index_count + sl];
+}
+
+/* Remove a free block from the free list.*/
+static inline __attribute__((__always_inline__)) void remove_free_block(control_t* control, block_header_t* block, int fl, int sl)
+{
+	block_header_t* prev = block->prev_free;
+	block_header_t* next = block->next_free;
+	tlsf_assert(prev && "prev_free field can not be null");
+	tlsf_assert(next && "next_free field can not be null");
+	next->prev_free = prev;
+	prev->next_free = next;
+
+	/* If this block is the head of the free list, set new head. */
+	if (control->blocks[fl*control->sl_index_count + sl] == block)
+	{
+		control->blocks[fl*control->sl_index_count + sl] = next;
+
+		/* If the new head is null, clear the bitmap. */
+		if (next == &control->block_null)
+		{
+			control->sl_bitmap[fl] &= ~(1 << sl);
+
+			/* If the second bitmap is now empty, clear the fl bitmap. */
+			if (!control->sl_bitmap[fl])
+			{
+				control->fl_bitmap &= ~(1 << fl);
+			}
+		}
+	}
+}
+
+/* Insert a free block into the free block list. */
+static inline __attribute__((__always_inline__)) void insert_free_block(control_t* control, block_header_t* block, int fl, int sl)
+{
+	block_header_t* current = control->blocks[fl*control->sl_index_count + sl];
+	tlsf_assert(current && "free list cannot have a null entry");
+	tlsf_assert(block && "cannot insert a null entry into the free list");
+	block->next_free = current;
+	block->prev_free = &control->block_null;
+	current->prev_free = block;
+
+	tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE)
+		&& "block not aligned properly");
+	/*
+	** Insert the new block at the head of the list, and mark the first-
+	** and second-level bitmaps appropriately.
+	*/
+	control->blocks[fl*control->sl_index_count + sl] = block;
+	control->fl_bitmap |= (1 << fl);
+	control->sl_bitmap[fl] |= (1 << sl);
+}
+
+/* Remove a given block from the free list. */
+static inline __attribute__((__always_inline__)) void block_remove(control_t* control, block_header_t* block)
+{
+	int fl, sl;
+	mapping_insert(control, block_size(block), &fl, &sl);
+	remove_free_block(control, block, fl, sl);
+}
+
+/* Insert a given block into the free list. */
+static inline __attribute__((__always_inline__)) void block_insert(control_t* control, block_header_t* block)
+{
+	int fl, sl;
+	mapping_insert(control, block_size(block), &fl, &sl);
+	insert_free_block(control, block, fl, sl);
+}
+
+static inline __attribute__((__always_inline__)) int block_can_split(block_header_t* block, size_t size)
+{
+	return block_size(block) >= sizeof(block_header_t) + size;
+}
+
+/* Split a block into two, the second of which is free. */
+static inline __attribute__((__always_inline__)) block_header_t* block_split(block_header_t* block, size_t size)
+{
+    /* Calculate the amount of space left in the remaining block.
+     * REMINDER: remaining pointer's first field is `prev_phys_block` but this field is part of the
+     * previous physical block. */
+	block_header_t* remaining =
+		offset_to_block(block_to_ptr(block), size - block_header_overhead);
+
+    /* `size` passed as an argument is the first block's new size; thus, the remaining block's size
+     * is `block_size(block) - size`. However, the block's data must be preceded by the data size.
+     * This field is NOT part of the size, so it has to be subtracted from the calculation. */
+	const size_t remain_size = block_size(block) - (size + block_header_overhead);
+
+	tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)
+		&& "remaining block not aligned properly");
+
+	tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
+	block_set_size(remaining, remain_size);
+	tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");
+
+	block_set_size(block, size);
+	block_mark_as_free(remaining);
+
+    /**
+     * Here is the final outcome of this function:
+     *
+     * block             remaining (block_ptr + size - BHO)
+     * +                                +
+     * |                                |
+     * v                                v
+     * +----------------------------------------------------------------------+
+     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
+     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
+     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
+     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
+     * +----------------------------------------------------------------------+
+     *      |    |                           |    |
+     *      +    +<------------------------->+    +<------------------------->
+     *       BHO    `size` (argument) bytes   BHO      `remain_size` bytes
+     *
+     * Where BHO = block_header_overhead,
+     * 0: part of the memory owned by a `block`'s previous neighbour,
+     * x: part of the memory owned by `block`.
+     * #: part of the memory owned by `remaining`.
+     */
+
+	return remaining;
+}
+
+/* Absorb a free block's storage into an adjacent previous free block. */
+static inline __attribute__((__always_inline__)) block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
+{
+	tlsf_assert(!block_is_last(prev) && "previous block can't be last");
+	/* Note: Leaves flags untouched. */
+	prev->size += block_size(block) + block_header_overhead;
+	block_link_next(prev);
+
+#ifdef MULTI_HEAP_POISONING_SLOW
+        /* next_block header needs to be replaced with a fill pattern */
+        multi_heap_internal_poison_fill_region(block, sizeof(block_header_t), true /* free */);
+#endif
+
+	return prev;
+}
+
+/* Merge a just-freed block with an adjacent previous free block. */
+static inline __attribute__((__always_inline__)) block_header_t* block_merge_prev(control_t* control, block_header_t* block)
+{
+	if (block_is_prev_free(block))
+	{
+		block_header_t* prev = block_prev(block);
+		tlsf_assert(prev && "prev physical block can't be null");
+		tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
+		block_remove(control, prev);
+		block = block_absorb(prev, block);
+	}
+
+	return block;
+}
+
+/* Merge a just-freed block with an adjacent free block. */
+static inline __attribute__((__always_inline__)) block_header_t* block_merge_next(control_t* control, block_header_t* block)
+{
+	block_header_t* next = block_next(block);
+	tlsf_assert(next && "next physical block can't be null");
+
+	if (block_is_free(next))
+	{
+		tlsf_assert(!block_is_last(block) && "previous block can't be last");
+		block_remove(control, next);
+		block = block_absorb(block, next);
+	}
+
+	return block;
+}
+
+/* Trim any trailing block space off the end of a block, return to pool. */
+static inline __attribute__((__always_inline__)) void block_trim_free(control_t* control, block_header_t* block, size_t size)
+{
+	tlsf_assert(block_is_free(block) && "block must be free");
+	if (block_can_split(block, size))
+	{
+		block_header_t* remaining_block = block_split(block, size);
+		block_link_next(block);
+		block_set_prev_free(remaining_block);
+		block_insert(control, remaining_block);
+	}
+}
+
+/* Trim any trailing block space off the end of a used block, return to pool. */
+static inline __attribute__((__always_inline__)) void block_trim_used(control_t* control, block_header_t* block, size_t size)
+{
+	tlsf_assert(!block_is_free(block) && "block must be used");
+	if (block_can_split(block, size))
+	{
+		/* If the next block is free, we must coalesce. */
+		block_header_t* remaining_block = block_split(block, size);
+		block_set_prev_used(remaining_block);
+
+		remaining_block = block_merge_next(control, remaining_block);
+		block_insert(control, remaining_block);
+	}
+}
+
+static inline __attribute__((__always_inline__)) block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
+{
+	block_header_t* remaining_block = block;
+	if (block_can_split(block, size))
+	{
+        /* We want to split `block` in two: the first block will be freed and the
+         * second block will be returned. */
+		remaining_block = block_split(block, size - block_header_overhead);
+
+        /* `remaining_block` is the second block, mark its predecessor (first
+         * block) as free. */
+		block_set_prev_free(remaining_block);
+
+		block_link_next(block);
+
+        /* Put back the first block into the free memory list. */
+		block_insert(control, block);
+	}
+
+	return remaining_block;
+}
+
+static inline  __attribute__((__always_inline__)) block_header_t* block_locate_free(control_t* control, size_t size)
+{
+	int fl = 0, sl = 0;
+	block_header_t* block = 0;
+
+	if (size)
+	{
+		mapping_search(control, size, &fl, &sl);
+
+		/*
+		** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up
+		** with indices that are off the end of the block array.
+		** So, we protect against that here, since this is the only callsite of mapping_search.
+		** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range.
+		*/
+		if (fl < control->fl_index_count)
+		{
+			block = search_suitable_block(control, &fl, &sl);
+		}
+	}
+
+	if (block)
+	{
+		tlsf_assert(block_size(block) >= size);
+		remove_free_block(control, block, fl, sl);
+	}
+
+	return block;
+}
+
+static inline __attribute__((__always_inline__)) void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
+{
+	void* p = 0;
+	if (block)
+	{
+		tlsf_assert(size && "size must be non-zero");
+		block_trim_free(control, block, size);
+		block_mark_as_used(block);
+		p = block_to_ptr(block);
+	}
+	return p;
+}
+
+/* Clear structure and point all empty lists at the null block. */
+static void control_construct(control_t* control, size_t bytes)
+{
+	int i, j;
+
+	control->block_null.next_free = &control->block_null;
+	control->block_null.prev_free = &control->block_null;
+
+	/* find the closest ^2 for first layer */
+	i = (bytes - 1) / (16 * 1024);
+	control->fl_index_max = FL_INDEX_MAX_MIN + sizeof(i) * 8 - __builtin_clz(i);
+
+	/* adapt second layer to the pool */
+	if (bytes <= 16 * 1024) control->sl_index_count_log2 = 3;
+	else if (bytes <= 256 * 1024) control->sl_index_count_log2 = 4;
+	else control->sl_index_count_log2 = 5;
+	
+	control->fl_index_shift = (control->sl_index_count_log2 + ALIGN_SIZE_LOG2);
+	control->sl_index_count = 1 << control->sl_index_count_log2;
+	control->fl_index_count = control->fl_index_max - control->fl_index_shift + 1;
+	control->small_block_size = 1 << control->fl_index_shift;
+	control->fl_bitmap = 0;
+	
+	control->sl_bitmap = align_ptr(control + 1, sizeof(*control->sl_bitmap));
+	control->blocks = align_ptr(control->sl_bitmap + control->fl_index_count, sizeof(*control->blocks));
+	control->size = (void*) (control->blocks + control->sl_index_count * control->fl_index_count) - (void*) control;
+	
+	ESP_EARLY_LOGW( "REMOVE", "NEW POOL of %d bytes, ctrl_size: %d sli_c:%d fli_c:%d small_b %d max_b:%d", 
+					bytes, 
+					control->size, control->sl_index_count, control->fl_index_count,
+					control->small_block_size, 1 << control->fl_index_max );		
+
+	/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
+	tlsf_assert(sizeof(unsigned int) * CHAR_BIT >= control->sl_index_count && "CHAR_BIT less than sl_index_count");
+
+	/* Ensure we've properly tuned our sizes. */
+	tlsf_assert(ALIGN_SIZE == control->small_block_size / control->sl_index_count && "ALIGN_SIZE does not match");
+	
+	for (i = 0; i < control->fl_index_count; ++i)
+	{
+		control->sl_bitmap[i] = 0;
+		for (j = 0; j < control->sl_index_count; ++j)
+		{
+			control->blocks[i*control->sl_index_count + j] = &control->block_null;
+		}
+	}
+}
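+
+/*
+ * Worked sizing example for the constructor above, for a hypothetical 300 KiB pool
+ * (bytes = 307200):
+ *     i = (307200 - 1) / 16384 = 18, so fl_index_max = 14 + 32 - __builtin_clz(18) = 19
+ *     (largest serviceable block: 1 << 19 = 512 KiB)
+ *     sl_index_count_log2 = 5 (pool > 256 KiB), fl_index_shift = 7,
+ *     sl_index_count = 32, fl_index_count = 19 - 7 + 1 = 13, small_block_size = 128.
+ * The sl_bitmap and blocks arrays are laid out immediately after the control_t itself,
+ * which is what control->size accounts for.
+ */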
+
+/*
+** Debugging utilities.
+*/
+
+typedef struct integrity_t
+{
+	int prev_status;
+	int status;
+} integrity_t;
+
+#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }
+
+static void integrity_walker(void* ptr, size_t size, int used, void* user)
+{
+	block_header_t* block = block_from_ptr(ptr);
+	integrity_t* integ = tlsf_cast(integrity_t*, user);
+	const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
+	const int this_status = block_is_free(block) ? 1 : 0;
+	const size_t this_block_size = block_size(block);
+
+	int status = 0;
+	(void)used;
+	tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
+	tlsf_insist(size == this_block_size && "block size incorrect");
+
+	integ->prev_status = this_status;
+	integ->status += status;
+}
+
+int tlsf_check(tlsf_t tlsf)
+{
+	int i, j;
+
+	control_t* control = tlsf_cast(control_t*, tlsf);
+	int status = 0;
+
+	/* Check that the free lists and bitmaps are accurate. */
+	for (i = 0; i < control->fl_index_count; ++i)
+	{
+		for (j = 0; j < control->sl_index_count; ++j)
+		{
+			const int fl_map = control->fl_bitmap & (1 << i);
+			const int sl_list = control->sl_bitmap[i];
+			const int sl_map = sl_list & (1 << j);
+			const block_header_t* block = control->blocks[i*control->sl_index_count + j];
+
+			/* Check that first- and second-level lists agree. */
+			if (!fl_map)
+			{
+				tlsf_insist(!sl_map && "second-level map must be null");
+			}
+
+			if (!sl_map)
+			{
+				tlsf_insist(block == &control->block_null && "block list must be null");
+				continue;
+			}
+
+			/* Check that there is at least one free block. */
+			tlsf_insist(sl_list && "no free blocks in second-level map");
+			tlsf_insist(block != &control->block_null && "block should not be null");
+
+			while (block != &control->block_null)
+			{
+				int fli, sli;
+				tlsf_insist(block_is_free(block) && "block should be free");
+				tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
+				tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
+				tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
+				tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");
+
+				mapping_insert(control, block_size(block), &fli, &sli);
+				tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
+				block = block->next_free;
+			}
+		}
+	}
+
+	return status;
+}
+
+#undef tlsf_insist
+
+static void default_walker(void* ptr, size_t size, int used, void* user)
+{
+	(void)user;
+	printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
+}
+
+void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
+{
+	tlsf_walker pool_walker = walker ? walker : default_walker;
+	block_header_t* block =
+		offset_to_block(pool, -(int)block_header_overhead);
+
+	while (block && !block_is_last(block))
+	{
+		pool_walker(
+			block_to_ptr(block),
+			block_size(block),
+			!block_is_free(block),
+			user);
+		block = block_next(block);
+	}
+}
+
+size_t tlsf_block_size(void* ptr)
+{
+	size_t size = 0;
+	if (ptr)
+	{
+		const block_header_t* block = block_from_ptr(ptr);
+		size = block_size(block);
+	}
+	return size;
+}
+
+int tlsf_check_pool(pool_t pool)
+{
+	/* Check that the blocks are physically correct. */
+	integrity_t integ = { 0, 0 };
+	tlsf_walk_pool(pool, integrity_walker, &integ);
+
+	return integ.status;
+}
+
+size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
+{
+	/* because TLSF is a good-fit allocator, the allocatable size is one size-class range lower */
+    if (size) 
+	{
+		control_t* control = tlsf_cast(control_t*, tlsf);
+        size_t sl_interval = (1 << ((sizeof(size_t) * 8 - 1) - __builtin_clz(size))) / control->sl_index_count;
+        return size & ~(sl_interval - 1);
+    }
+	
+	return 0;
+}	
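+
+/*
+ * Example of the rounding above, assuming sl_index_count = 32: for size = 1000 the
+ * enclosing power-of-two range is [512, 1024), so sl_interval = 512 / 32 = 16 and
+ * tlsf_fit_size() returns 1000 & ~15 = 992, i.e. the largest size in the same
+ * second-level class that is guaranteed to be allocatable.
+ */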
+
+
+/*
+** Size of the TLSF structures in a given memory block passed to
+** tlsf_create, equal to the size of a control_t
+*/
+size_t tlsf_size(tlsf_t tlsf)
+{
+	if (tlsf) 
+	{
+		control_t* control = tlsf_cast(control_t*, tlsf);
+		return control->size;
+	}	
+	
+	/* no tlsf, we'll just return a min size */
+	return sizeof(control_t) + 
+	       sizeof(int) * SL_INDEX_COUNT_MIN + 
+	       sizeof(block_header_t*) * SL_INDEX_COUNT_MIN * FL_INDEX_COUNT_MIN;
+}
+
+size_t tlsf_align_size(void)
+{
+	return ALIGN_SIZE;
+}
+
+size_t tlsf_block_size_min(void)
+{
+	return block_size_min;
+}
+
+size_t tlsf_block_size_max(tlsf_t tlsf)
+{
+	control_t* control = tlsf_cast(control_t*, tlsf);
+	return tlsf_cast(size_t, 1) << control->fl_index_max;
+}
+
+/*
+** Overhead of the TLSF structures in a given memory block passed to
+** tlsf_add_pool, equal to the overhead of a free block and the
+** sentinel block.
+*/
+size_t tlsf_pool_overhead(void)
+{
+	return 2 * block_header_overhead;
+}
+
+size_t tlsf_alloc_overhead(void)
+{
+	return block_header_overhead;
+}
+
+pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
+{
+	block_header_t* block;
+	block_header_t* next;
+
+	const size_t pool_overhead = tlsf_pool_overhead();
+	const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);
+
+	if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)
+	{
+		printf("tlsf_add_pool: Memory must be aligned by %u bytes.\n",
+			(unsigned int)ALIGN_SIZE);
+		return 0;
+	}
+
+	if (pool_bytes < block_size_min || pool_bytes > tlsf_block_size_max(tlsf))
+	{
+#if defined (TLSF_64BIT)
+		printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
+			(unsigned int)(pool_overhead + block_size_min),
+			(unsigned int)((pool_overhead + tlsf_block_size_max(tlsf)) / 256));
+#else
+		printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
+			(unsigned int)(pool_overhead + block_size_min),
+			(unsigned int)(pool_overhead + tlsf_block_size_max(tlsf)));
+#endif
+		return 0;
+	}
+
+	/*
+	** Create the main free block. Offset the start of the block slightly
+	** so that the prev_phys_block field falls outside of the pool -
+	** it will never be used.
+	*/
+	block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
+	block_set_size(block, pool_bytes);
+	block_set_free(block);
+	block_set_prev_used(block);
+	block_insert(tlsf_cast(control_t*, tlsf), block);
+
+	/* Split the block to create a zero-size sentinel block. */
+	next = block_link_next(block);
+	block_set_size(next, 0);
+	block_set_used(next);
+	block_set_prev_free(next);
+
+	return mem;
+}
+
+void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
+{
+	control_t* control = tlsf_cast(control_t*, tlsf);
+	block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);
+
+	int fl = 0, sl = 0;
+
+	tlsf_assert(block_is_free(block) && "block should be free");
+	tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
+	tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");
+
+	mapping_insert(control, block_size(block), &fl, &sl);
+	remove_free_block(control, block, fl, sl);
+}
+
+/*
+** TLSF main interface.
+*/
+
+
+tlsf_t tlsf_create(void* mem, size_t max_bytes)
+{
+#if _DEBUG
+	if (test_ffs_fls())
+	{
+		return 0;
+	}
+#endif
+
+	if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
+	{
+		printf("tlsf_create: Memory must be aligned to %u bytes.\n",
+			(unsigned int)ALIGN_SIZE);
+		return 0;
+	}
+
+	control_construct(tlsf_cast(control_t*, mem), max_bytes);
+
+	return tlsf_cast(tlsf_t, mem);
+}
+
+pool_t tlsf_get_pool(tlsf_t tlsf)
+{
+	return tlsf_cast(pool_t, (char*)tlsf + tlsf_size(tlsf));
+}
+
+tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes)
+{
+	tlsf_t tlsf = tlsf_create(mem, max_bytes ? max_bytes : pool_bytes);
+	tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf));
+	return tlsf;
+}
+
+void* tlsf_malloc(tlsf_t tlsf, size_t size)
+{
+	control_t* control = tlsf_cast(control_t*, tlsf);
+	size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
+	block_header_t* block = block_locate_free(control, adjust);
+	return block_prepare_used(control, block, adjust);
+}
+
+/**
+ * @brief Allocate memory of at least `size` bytes where the byte at `data_offset` will be aligned to `align`.
+ *
+ * This function allocates a piece of memory and returns a pointer `ptr` to it. The byte at
+ * `data_offset` within that memory (i.e., `ptr + data_offset`) will be aligned to `align`.
+ * This is useful for allocating memory that will internally carry a header, where the
+ * usable memory following the header (i.e. `ptr + data_offset`) must be aligned.
+ *
+ * For example, a call to `tlsf_memalign_offs(tlsf, 256, 64, 20)` will return a
+ * pointer `ptr` to free memory of at least 64 bytes, where `ptr + 20` is aligned on `256`.
+ * So `(ptr + 20) % 256` equals 0.
+ *
+ * @param tlsf TLSF structure to allocate memory from.
+ * @param align Alignment for the returned pointer's offset.
+ * @param size Minimum size, in bytes, of the memory to allocate INCLUDING
+ *             `data_offset` bytes.
+ * @param data_offset Offset to be aligned on `align`. This can be 0; in
+ *                    that case, the returned pointer will be aligned on
+ *                    `align`. If it is not a multiple of the CPU word size,
+ *                    it will be aligned up to the closest multiple of it.
+ *
+ * @return pointer to free memory.
+ */
+void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_offset)
+{
+    control_t* control = tlsf_cast(control_t*, tlsf);
+    const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
+    const size_t off_adjust = align_up(data_offset, ALIGN_SIZE);
+
+	/*
+	** We must allocate an additional minimum block size bytes so that if
+	** our free block will leave an alignment gap which is smaller, we can
+	** trim a leading free block and release it back to the pool. We must
+	** do this because the previous physical block is in use, therefore
+	** the prev_phys_block field is not valid, and we can't simply adjust
+	** the size of that block.
+	*/
+	const size_t gap_minimum = sizeof(block_header_t) + off_adjust;
+    /* The offset is included in both `adjust` and `gap_minimum`, so we
+    ** need to subtract it once.
+    */
+	const size_t size_with_gap = adjust_request_size(tlsf, adjust + align + gap_minimum - off_adjust, align);
+
+	/*
+	** If the alignment is less than or equal to the base alignment, we're done.
+	** If we requested 0 bytes, return null, as tlsf_malloc(0) does.
+	*/
+	const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
+
+	block_header_t* block = block_locate_free(control, aligned_size);
+
+	/* This can't be a static assert. */
+	tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);
+
+	if (block)
+	{
+		void* ptr = block_to_ptr(block);
+		void* aligned = align_ptr(ptr, align);
+		size_t gap = tlsf_cast(size_t,
+			tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
+
+       /*
+        ** If the gap size is too small, or if there is no gap but we need one,
+        ** offset to next aligned boundary.
+        */
+		if ((gap && gap < gap_minimum) || (!gap && off_adjust))
+		{
+			const size_t gap_remain = gap_minimum - gap;
+			const size_t offset = tlsf_max(gap_remain, align);
+			const void* next_aligned = tlsf_cast(void*,
+				tlsf_cast(tlsfptr_t, aligned) + offset);
+
+			aligned = align_ptr(next_aligned, align);
+			gap = tlsf_cast(size_t,
+				tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
+		}
+
+		if (gap)
+		{
+			tlsf_assert(gap >= gap_minimum && "gap size too small");
+			block = block_trim_free_leading(control, block, gap - off_adjust);
+		}
+	}
+
+    /* Preparing the block will also trim the trailing free memory. */
+	return block_prepare_used(control, block, adjust);
+}
+
+/**
+ * @brief Same as `tlsf_memalign_offs` function but with a 0 offset.
+ * The pointer returned is aligned on `align`.
+ */
+void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
+{
+    return tlsf_memalign_offs(tlsf, align, size, 0);
+}
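+
+/*
+ * A minimal usage sketch for the allocation API above; the pool buffer and sizes are
+ * hypothetical placeholders (32 KiB backing store, word-aligned):
+ *
+ *    static char s_pool[32 * 1024] __attribute__((aligned(ALIGN_SIZE)));
+ *
+ *    tlsf_t tlsf = tlsf_create_with_pool(s_pool, sizeof(s_pool), 0);
+ *    void *a = tlsf_malloc(tlsf, 100);                // plain allocation
+ *    void *b = tlsf_memalign(tlsf, 64, 200);          // pointer aligned to 64 bytes
+ *    void *c = tlsf_memalign_offs(tlsf, 32, 80, 8);   // (c + 8) aligned to 32 bytes
+ *    tlsf_free(tlsf, a);
+ *    tlsf_free(tlsf, b);
+ *    tlsf_free(tlsf, c);
+ */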
+
+
+void tlsf_free(tlsf_t tlsf, void* ptr)
+{
+	/* Don't attempt to free a NULL pointer. */
+	if (ptr)
+	{
+		control_t* control = tlsf_cast(control_t*, tlsf);
+		block_header_t* block = block_from_ptr(ptr);
+		tlsf_assert(!block_is_free(block) && "block already marked as free");
+		block_mark_as_free(block);
+		block = block_merge_prev(control, block);
+		block = block_merge_next(control, block);
+		block_insert(control, block);
+	}
+}
+
+/*
+** The TLSF block information provides us with enough information to
+** provide a reasonably intelligent implementation of realloc, growing or
+** shrinking the currently allocated block as required.
+**
+** This routine handles the somewhat esoteric edge cases of realloc:
+** - a non-zero size with a null pointer will behave like malloc
+** - a zero size with a non-null pointer will behave like free
+** - a request that cannot be satisfied will leave the original buffer
+**   untouched
+** - an extended buffer size will leave the newly-allocated area with
+**   contents undefined
+*/
+void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
+{
+	control_t* control = tlsf_cast(control_t*, tlsf);
+	void* p = 0;
+
+	/* Zero-size requests are treated as free. */
+	if (ptr && size == 0)
+	{
+		tlsf_free(tlsf, ptr);
+	}
+	/* Requests with NULL pointers are treated as malloc. */
+	else if (!ptr)
+	{
+		p = tlsf_malloc(tlsf, size);
+	}
+	else
+	{
+		block_header_t* block = block_from_ptr(ptr);
+		block_header_t* next = block_next(block);
+
+		const size_t cursize = block_size(block);
+		const size_t combined = cursize + block_size(next) + block_header_overhead;
+		const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
+
+		tlsf_assert(!block_is_free(block) && "block already marked as free");
+
+		/*
+		** If the next block is used, or when combined with the current
+		** block, does not offer enough space, we must reallocate and copy.
+		*/
+		if (adjust > cursize && (!block_is_free(next) || adjust > combined))
+		{
+			p = tlsf_malloc(tlsf, size);
+			if (p)
+			{
+				const size_t minsize = tlsf_min(cursize, size);
+				memcpy(p, ptr, minsize);
+				tlsf_free(tlsf, ptr);
+			}
+		}
+		else
+		{
+			/* Do we need to expand to the next block? */
+			if (adjust > cursize)
+			{
+				block_merge_next(control, block);
+				block_mark_as_used(block);
+			}
+
+			/* Trim the resulting block and return the original pointer. */
+			block_trim_used(control, block, adjust);
+			p = ptr;
+		}
+	}
+
+	return p;
+}

+ 119 - 0
components/heap/heap_tlsf.h

@@ -0,0 +1,119 @@
+/*
+** Two Level Segregated Fit memory allocator, version 3.1.
+** Written by Matthew Conte
+**	http://tlsf.baisoku.org
+**
+** Based on the original documentation by Miguel Masmano:
+**	http://www.gii.upv.es/tlsf/main/docs
+**
+** This implementation was written to the specification
+** of the document, therefore no GPL restrictions apply.
+**
+** Copyright (c) 2006-2016, Matthew Conte
+** All rights reserved.
+**
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions are met:
+**     * Redistributions of source code must retain the above copyright
+**       notice, this list of conditions and the following disclaimer.
+**     * Redistributions in binary form must reproduce the above copyright
+**       notice, this list of conditions and the following disclaimer in the
+**       documentation and/or other materials provided with the distribution.
+**     * Neither the name of the copyright holder nor the
+**       names of its contributors may be used to endorse or promote products
+**       derived from this software without specific prior written permission.
+**
+** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
+** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#pragma once
+#include <assert.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stddef.h>
+#include "heap_tlsf_config.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+** Cast and min/max macros.
+*/
+#define tlsf_cast(t, exp)	((t) (exp))
+#define tlsf_min(a, b)		((a) < (b) ? (a) : (b))
+#define tlsf_max(a, b)		((a) > (b) ? (a) : (b))
+
+/* A type used for casting when doing pointer arithmetic. */
+typedef ptrdiff_t tlsfptr_t;
+
+typedef struct block_header_t
+{
+	/* Points to the previous physical block. */
+	struct block_header_t* prev_phys_block;
+
+	/* The size of this block, excluding the block header. */
+	size_t size;
+
+	/* Next and previous free blocks. */
+	struct block_header_t* next_free;
+	struct block_header_t* prev_free;
+} block_header_t;
+
+#include "heap_tlsf_block_functions.h"
+
+/* tlsf_t: a TLSF structure. Can contain 1 to N pools. */
+/* pool_t: a block of memory that TLSF can manage. */
+typedef void* tlsf_t;
+typedef void* pool_t;
+
+/* Create/destroy a memory pool. */
+tlsf_t tlsf_create(void* mem, size_t max_bytes);
+tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes);
+pool_t tlsf_get_pool(tlsf_t tlsf);
+
+/* Add/remove memory pools. */
+pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes);
+void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
+
+/* malloc/memalign/realloc/free replacements. */
+void* tlsf_malloc(tlsf_t tlsf, size_t size);
+void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size);
+void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t offset);
+void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
+void tlsf_free(tlsf_t tlsf, void* ptr);
+
+/* Returns internal block size, not original request size */
+size_t tlsf_block_size(void* ptr);
+
+/* Overheads/limits of internal structures. */
+size_t tlsf_size(tlsf_t tlsf);
+size_t tlsf_align_size(void);
+size_t tlsf_block_size_min(void);
+size_t tlsf_block_size_max(tlsf_t tlsf);
+size_t tlsf_pool_overhead(void);
+size_t tlsf_alloc_overhead(void);
+size_t tlsf_fit_size(tlsf_t tlsf, size_t size);
+
+/* Debugging. */
+typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
+void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);
+/* Returns nonzero if any internal consistency check fails. */
+int tlsf_check(tlsf_t tlsf);
+int tlsf_check_pool(pool_t pool);
+
+#if defined(__cplusplus)
+};
+#endif

+ 174 - 0
components/heap/heap_tlsf_block_functions.h

@@ -0,0 +1,174 @@
+/*
+** Two Level Segregated Fit memory allocator, version 3.1.
+** Written by Matthew Conte
+**	http://tlsf.baisoku.org
+**
+** Based on the original documentation by Miguel Masmano:
+**	http://www.gii.upv.es/tlsf/main/docs
+**
+** This implementation was written to the specification
+** of the document, therefore no GPL restrictions apply.
+**
+** Copyright (c) 2006-2016, Matthew Conte
+** All rights reserved.
+**
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions are met:
+**     * Redistributions of source code must retain the above copyright
+**       notice, this list of conditions and the following disclaimer.
+**     * Redistributions in binary form must reproduce the above copyright
+**       notice, this list of conditions and the following disclaimer in the
+**       documentation and/or other materials provided with the distribution.
+**     * Neither the name of the copyright holder nor the
+**       names of its contributors may be used to endorse or promote products
+**       derived from this software without specific prior written permission.
+**
+** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
+** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#pragma once
+
+/*
+** Data structures and associated constants.
+*/
+
+/*
+** Since block sizes are always at least a multiple of 4, the two least
+** significant bits of the size field are used to store the block status:
+** - bit 0: whether block is busy or free
+** - bit 1: whether previous block is busy or free
+*/
+#define block_header_free_bit  (1 << 0)
+#define block_header_prev_free_bit  (1 << 1)
+
+/*
+** The size of the block header exposed to used blocks is the size field.
+** The prev_phys_block field is stored *inside* the previous free block.
+*/
+#define block_header_overhead  (sizeof(size_t))
+
+/* User data starts directly after the size field in a used block. */
+#define block_start_offset (offsetof(block_header_t, size) + sizeof(size_t))
+
+/*
+** A free block must be large enough to store its header minus the size of
+** the prev_phys_block field, and no larger than the number of addressable
+** bits for FL_INDEX.
+** The block_size_max macro returns the maximum block for the minimum pool
+** use tlsf_block_size_max for a value specific to the pool
+*/
+#define block_size_min  (sizeof(block_header_t) - sizeof(block_header_t*))
+#define block_size_max  (tlsf_cast(size_t, 1) << FL_INDEX_MAX_MIN)
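+
+/*
+ * On a 32-bit target with 4-byte pointers, sizeof(block_header_t) is 16 bytes, so
+ * block_size_min works out to 16 - 4 = 12 bytes, while block_size_max for the
+ * minimum configuration is 1 << FL_INDEX_MAX_MIN = 16 KiB.
+ */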
+
+/*
+** block_header_t member functions.
+*/
+static inline __attribute__((__always_inline__)) size_t block_size(const block_header_t* block)
+{
+	return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
+}
+
+static inline __attribute__((__always_inline__)) void block_set_size(block_header_t* block, size_t size)
+{
+	const size_t oldsize = block->size;
+	block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
+}
+
+static inline __attribute__((__always_inline__)) int block_is_last(const block_header_t* block)
+{
+	return block_size(block) == 0;
+}
+
+static inline __attribute__((__always_inline__)) int block_is_free(const block_header_t* block)
+{
+	return tlsf_cast(int, block->size & block_header_free_bit);
+}
+
+static inline __attribute__((__always_inline__)) void block_set_free(block_header_t* block)
+{
+	block->size |= block_header_free_bit;
+}
+
+static inline __attribute__((__always_inline__)) void block_set_used(block_header_t* block)
+{
+	block->size &= ~block_header_free_bit;
+}
+
+static inline __attribute__((__always_inline__)) int block_is_prev_free(const block_header_t* block)
+{
+	return tlsf_cast(int, block->size & block_header_prev_free_bit);
+}
+
+static inline __attribute__((__always_inline__)) void block_set_prev_free(block_header_t* block)
+{
+	block->size |= block_header_prev_free_bit;
+}
+
+static inline __attribute__((__always_inline__)) void block_set_prev_used(block_header_t* block)
+{
+	block->size &= ~block_header_prev_free_bit;
+}
+
+static inline __attribute__((__always_inline__)) block_header_t* block_from_ptr(const void* ptr)
+{
+	return tlsf_cast(block_header_t*,
+		tlsf_cast(unsigned char*, ptr) - block_start_offset);
+}
+
+static inline __attribute__((__always_inline__)) void* block_to_ptr(const block_header_t* block)
+{
+	return tlsf_cast(void*,
+		tlsf_cast(unsigned char*, block) + block_start_offset);
+}
+
+/* Return location of next block after block of given size. */
+static inline __attribute__((__always_inline__)) block_header_t* offset_to_block(const void* ptr, size_t size)
+{
+	return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
+}
+
+/* Return location of previous block. */
+static inline __attribute__((__always_inline__)) block_header_t* block_prev(const block_header_t* block)
+{
+	return block->prev_phys_block;
+}
+
+/* Return location of next existing block. */
+static inline __attribute__((__always_inline__)) block_header_t* block_next(const block_header_t* block)
+{
+	block_header_t* next = offset_to_block(block_to_ptr(block),
+		block_size(block) - block_header_overhead);
+	return next;
+}
+
+/* Link a new block with its physical neighbor, return the neighbor. */
+static inline __attribute__((__always_inline__)) block_header_t* block_link_next(block_header_t* block)
+{
+	block_header_t* next = block_next(block);
+	next->prev_phys_block = block;
+	return next;
+}
+
+static inline __attribute__((__always_inline__)) void block_mark_as_free(block_header_t* block)
+{
+	/* Link the block to the next block, first. */
+	block_header_t* next = block_link_next(block);
+	block_set_prev_free(next);
+	block_set_free(block);
+}
+
+static inline __attribute__((__always_inline__)) void block_mark_as_used(block_header_t* block)
+{
+	block_header_t* next = block_next(block);
+	block_set_prev_used(next);
+	block_set_used(block);
+}

+ 66 - 0
components/heap/heap_tlsf_config.h

@@ -0,0 +1,66 @@
+/*
+** Two Level Segregated Fit memory allocator, version 3.1.
+** Written by Matthew Conte
+**	http://tlsf.baisoku.org
+**
+** Based on the original documentation by Miguel Masmano:
+**	http://www.gii.upv.es/tlsf/main/docs
+**
+** This implementation was written to the specification
+** of the document, therefore no GPL restrictions apply.
+**
+** Copyright (c) 2006-2016, Matthew Conte
+** All rights reserved.
+**
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions are met:
+**     * Redistributions of source code must retain the above copyright
+**       notice, this list of conditions and the following disclaimer.
+**     * Redistributions in binary form must reproduce the above copyright
+**       notice, this list of conditions and the following disclaimer in the
+**       documentation and/or other materials provided with the distribution.
+**     * Neither the name of the copyright holder nor the
+**       names of its contributors may be used to endorse or promote products
+**       derived from this software without specific prior written permission.
+**
+** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
+** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#pragma once
+
+enum tlsf_config
+{
+	/* log2 of number of linear subdivisions of block sizes. Larger
+	** values require more memory in the control structure. Values of
+	** 4 or 5 are typical, 3 is for very small pools.
+	*/
+	SL_INDEX_COUNT_LOG2_MIN = 3, 
+
+	/* All allocation sizes and addresses are aligned to 4 bytes. */
+	ALIGN_SIZE_LOG2 = 2,
+	ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
+
+	/*
+	** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
+	** However, because we linearly subdivide the second-level lists, and
+	** our minimum size granularity is 4 bytes, it doesn't make sense to
+	** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
+	** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
+	** trying to split size ranges into more slots than we have available.
+	** Instead, we calculate the minimum threshold size, and place all
+	** blocks below that size into the 0th first-level list.
+	** The values below are the absolute minimum required to accept a pool addition.
+	*/
+	FL_INDEX_MAX_MIN = 14, // For a pool smaller than 16 kB
+	SL_INDEX_COUNT_MIN = (1 << SL_INDEX_COUNT_LOG2_MIN),
+	FL_INDEX_COUNT_MIN = (FL_INDEX_MAX_MIN - (SL_INDEX_COUNT_LOG2_MIN + ALIGN_SIZE_LOG2) + 1),
+};
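
As a sanity check on these minima, a small compile-time sketch of the derived values (pure arithmetic on the constants above):

#include "heap_tlsf_config.h"

/* SL_INDEX_COUNT_MIN = 1 << 3 = 8 second-level lists per first-level bucket,
   FL_INDEX_COUNT_MIN = 14 - (3 + 2) + 1 = 10 first-level buckets: the smallest
   control structure accepted when adding a pool of less than 16 kB. */
_Static_assert(SL_INDEX_COUNT_MIN == 8, "eight second-level subdivisions");
_Static_assert(FL_INDEX_COUNT_MIN == 10, "ten first-level buckets");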

+ 255 - 0
components/heap/heap_trace_standalone.c

@@ -0,0 +1,255 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include <string.h>
+#include <sdkconfig.h>
+
+#define HEAP_TRACE_SRCFILE /* don't warn on inclusion here */
+#include "esp_heap_trace.h"
+#undef HEAP_TRACE_SRCFILE
+
+#include "esp_attr.h"
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+
+
+#define STACK_DEPTH CONFIG_HEAP_TRACING_STACK_DEPTH
+
+#if CONFIG_HEAP_TRACING_STANDALONE
+
+static portMUX_TYPE trace_mux = portMUX_INITIALIZER_UNLOCKED;
+static bool tracing;
+static heap_trace_mode_t mode;
+
+/* Buffer used for records, starting at offset 0
+*/
+static heap_trace_record_t *buffer;
+static size_t total_records;
+
+/* Count of entries logged in the buffer.
+
+   Never exceeds total_records.
+*/
+static size_t count;
+
+/* Actual number of allocations logged */
+static size_t total_allocations;
+
+/* Actual number of frees logged */
+static size_t total_frees;
+
+/* Has the buffer overflowed and lost trace entries? */
+static bool has_overflowed = false;
+
+esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records)
+{
+    if (tracing) {
+        return ESP_ERR_INVALID_STATE;
+    }
+    buffer = record_buffer;
+    total_records = num_records;
+    memset(buffer, 0, num_records * sizeof(heap_trace_record_t));
+    return ESP_OK;
+}
+
+esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
+{
+    if (buffer == NULL || total_records == 0) {
+        return ESP_ERR_INVALID_STATE;
+    }
+
+    portENTER_CRITICAL(&trace_mux);
+
+    tracing = false;
+    mode = mode_param;
+    count = 0;
+    total_allocations = 0;
+    total_frees = 0;
+    has_overflowed = false;
+    heap_trace_resume();
+
+    portEXIT_CRITICAL(&trace_mux);
+    return ESP_OK;
+}
+
+static esp_err_t set_tracing(bool enable)
+{
+    if (tracing == enable) {
+        return ESP_ERR_INVALID_STATE;
+    }
+    tracing = enable;
+    return ESP_OK;
+}
+
+esp_err_t heap_trace_stop(void)
+{
+    return set_tracing(false);
+}
+
+esp_err_t heap_trace_resume(void)
+{
+    return set_tracing(true);
+}
+
+size_t heap_trace_get_count(void)
+{
+    return count;
+}
+
+esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record)
+{
+    if (record == NULL) {
+        return ESP_ERR_INVALID_STATE;
+    }
+    esp_err_t result = ESP_OK;
+
+    portENTER_CRITICAL(&trace_mux);
+    if (index >= count) {
+        result = ESP_ERR_INVALID_ARG; /* out of range for 'count' */
+    } else {
+        memcpy(record, &buffer[index], sizeof(heap_trace_record_t));
+    }
+    portEXIT_CRITICAL(&trace_mux);
+    return result;
+}
+
+
+void heap_trace_dump(void)
+{
+    size_t delta_size = 0;
+    size_t delta_allocs = 0;
+    printf("%u allocations trace (%u entry buffer)\n",
+           count, total_records);
+    size_t start_count = count;
+    for (int i = 0; i < count; i++) {
+        heap_trace_record_t *rec = &buffer[i];
+
+        if (rec->address != NULL) {
+            printf("%d bytes (@ %p) allocated CPU %d ccount 0x%08x caller ",
+                   rec->size, rec->address, rec->ccount & 1, rec->ccount & ~3);
+            for (int j = 0; j < STACK_DEPTH && rec->alloced_by[j] != 0; j++) {
+                printf("%p%s", rec->alloced_by[j],
+                       (j < STACK_DEPTH - 1) ? ":" : "");
+            }
+
+            if (mode != HEAP_TRACE_ALL || STACK_DEPTH == 0 || rec->freed_by[0] == NULL) {
+                delta_size += rec->size;
+                delta_allocs++;
+                printf("\n");
+            } else {
+                printf("\nfreed by ");
+                for (int j = 0; j < STACK_DEPTH; j++) {
+                    printf("%p%s", rec->freed_by[j],
+                           (j < STACK_DEPTH - 1) ? ":" : "\n");
+                }
+            }
+        }
+    }
+    if (mode == HEAP_TRACE_ALL) {
+        printf("%u bytes alive in trace (%u/%u allocations)\n",
+               delta_size, delta_allocs, heap_trace_get_count());
+    } else {
+        printf("%u bytes 'leaked' in trace (%u allocations)\n", delta_size, delta_allocs);
+    }
+    printf("total allocations %u total frees %u\n", total_allocations, total_frees);
+    if (start_count != count) { // only a problem if trace isn't stopped before dumping
+        printf("(NB: New entries were traced while dumping, so trace dump may have duplicate entries.)\n");
+    }
+    if (has_overflowed) {
+        printf("(NB: Buffer has overflowed, so trace data is incomplete.)\n");
+    }
+}
+
+/* Add a new allocation to the heap trace records */
+static IRAM_ATTR void record_allocation(const heap_trace_record_t *record)
+{
+    if (!tracing || record->address == NULL) {
+        return;
+    }
+
+    portENTER_CRITICAL(&trace_mux);
+    if (tracing) {
+        if (count == total_records) {
+            has_overflowed = true;
+            /* Move the whole buffer back one slot.
+
+               This is a bit slow, compared to treating this buffer as a ringbuffer and rotating a head pointer.
+
+               However, ringbuffer code gets tricky when we remove elements in mid-buffer (for leak trace mode) while
+               trying to keep track of an item count that may overflow.
+            */
+            memmove(&buffer[0], &buffer[1], sizeof(heap_trace_record_t) * (total_records -1));
+            count--;
+        }
+        // Copy new record into place
+        memcpy(&buffer[count], record, sizeof(heap_trace_record_t));
+        count++;
+        total_allocations++;
+    }
+    portEXIT_CRITICAL(&trace_mux);
+}
+
+// remove a record, used when freeing
+static void remove_record(int index);
+
+/* record a free event in the heap trace log
+
+   For HEAP_TRACE_ALL, this means filling in the freed_by pointer.
+   For HEAP_TRACE_LEAKS, this means removing the record from the log.
+*/
+static IRAM_ATTR void record_free(void *p, void **callers)
+{
+    if (!tracing || p == NULL) {
+        return;
+    }
+
+    portENTER_CRITICAL(&trace_mux);
+    if (tracing && count > 0) {
+        total_frees++;
+        /* search backwards for the allocation record matching this free */
+        int i;
+        for (i = count - 1; i >= 0; i--) {
+            if (buffer[i].address == p) {
+                break;
+            }
+        }
+
+        if (i >= 0) {
+            if (mode == HEAP_TRACE_ALL) {
+                memcpy(buffer[i].freed_by, callers, sizeof(void *) * STACK_DEPTH);
+            } else { // HEAP_TRACE_LEAKS
+                // Leak trace mode, once an allocation is freed we remove it from the list
+                remove_record(i);
+            }
+        }
+    }
+    portEXIT_CRITICAL(&trace_mux);
+}
+
+/* remove the entry at 'index' from the ringbuffer of saved records */
+static IRAM_ATTR void remove_record(int index)
+{
+    if (index < count - 1) {
+        // Remove the buffer entry from the list
+        memmove(&buffer[index], &buffer[index+1],
+                sizeof(heap_trace_record_t) * (total_records - index - 1));
+    } else {
+        // For last element, just zero it out to avoid ambiguity
+        memset(&buffer[index], 0, sizeof(heap_trace_record_t));
+    }
+    count--;
+}
+
+#include "heap_trace.inc"
+
+#endif /*CONFIG_HEAP_TRACING_STANDALONE*/
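
A minimal usage sketch of this standalone tracer, assuming CONFIG_HEAP_TRACING_STANDALONE is enabled and using the public API declared in esp_heap_trace.h later in this merge (the record count of 64 is arbitrary):

#include "esp_heap_trace.h"
#include "esp_err.h"

#define NUM_RECORDS 64
/* The buffer must stay valid while tracing is enabled, so keep it static
   and in internal memory. */
static heap_trace_record_t trace_records[NUM_RECORDS];

void example_trace_leaks(void)
{
    ESP_ERROR_CHECK(heap_trace_init_standalone(trace_records, NUM_RECORDS));
    ESP_ERROR_CHECK(heap_trace_start(HEAP_TRACE_LEAKS));

    /* ... run the code suspected of leaking memory ... */

    ESP_ERROR_CHECK(heap_trace_stop());
    heap_trace_dump();   /* prints records still held, i.e. suspected leaks */
}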

+ 402 - 0
components/heap/include/esp_heap_caps.h

@@ -0,0 +1,402 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <stdint.h>
+#include <stdlib.h>
+#include "multi_heap.h"
+#include <sdkconfig.h>
+#include "esp_err.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Flags to indicate the capabilities of the various memory systems
+ */
+#define MALLOC_CAP_EXEC             (1<<0)  ///< Memory must be able to run executable code
+#define MALLOC_CAP_32BIT            (1<<1)  ///< Memory must allow for aligned 32-bit data accesses
+#define MALLOC_CAP_8BIT             (1<<2)  ///< Memory must allow for 8/16/...-bit data accesses
+#define MALLOC_CAP_DMA              (1<<3)  ///< Memory must be able to be accessed by DMA
+#define MALLOC_CAP_PID2             (1<<4)  ///< Memory must be mapped to PID2 memory space (PIDs are not currently used)
+#define MALLOC_CAP_PID3             (1<<5)  ///< Memory must be mapped to PID3 memory space (PIDs are not currently used)
+#define MALLOC_CAP_PID4             (1<<6)  ///< Memory must be mapped to PID4 memory space (PIDs are not currently used)
+#define MALLOC_CAP_PID5             (1<<7)  ///< Memory must be mapped to PID5 memory space (PIDs are not currently used)
+#define MALLOC_CAP_PID6             (1<<8)  ///< Memory must be mapped to PID6 memory space (PIDs are not currently used)
+#define MALLOC_CAP_PID7             (1<<9)  ///< Memory must be mapped to PID7 memory space (PIDs are not currently used)
+#define MALLOC_CAP_SPIRAM           (1<<10) ///< Memory must be in SPI RAM
+#define MALLOC_CAP_INTERNAL         (1<<11) ///< Memory must be internal; specifically it should not disappear when flash/spiram cache is switched off
+#define MALLOC_CAP_DEFAULT          (1<<12) ///< Memory can be returned in a non-capability-specific memory allocation (e.g. malloc(), calloc()) call
+#define MALLOC_CAP_IRAM_8BIT        (1<<13) ///< Memory must be in IRAM and allow unaligned access
+#define MALLOC_CAP_RETENTION        (1<<14)
+
+#define MALLOC_CAP_INVALID          (1<<31) ///< Memory can't be used / list end marker
+
+/**
+ * @brief Callback invoked when an allocation operation fails, if registered
+ * @param size Size, in bytes, of the failed allocation
+ * @param caps Capabilities requested by the failed allocation
+ * @param function_name Function which generated the failure
+ */
+typedef void (*esp_alloc_failed_hook_t) (size_t size, uint32_t caps, const char * function_name);
+
+/**
+ * @brief registers a callback function to be invoked if a memory allocation operation fails
+ * @param callback caller defined callback to be invoked
+ * @return ESP_OK if callback was registered.
+ */
+esp_err_t heap_caps_register_failed_alloc_callback(esp_alloc_failed_hook_t callback);
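
A small sketch of registering such a hook, following the esp_alloc_failed_hook_t signature above; the function names here are illustrative only:

#include <stdio.h>
#include "esp_heap_caps.h"

static void on_alloc_failed(size_t size, uint32_t caps, const char *function_name)
{
    /* Invoked when an allocation fails; keep the handler lightweight. */
    printf("alloc of %u bytes (caps 0x%x) failed in %s\n",
           (unsigned) size, (unsigned) caps, function_name);
}

void example_register_hook(void)
{
    ESP_ERROR_CHECK(heap_caps_register_failed_alloc_callback(on_alloc_failed));
}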
+
+/**
+ * @brief Allocate a chunk of memory which has the given capabilities
+ *
+ * Equivalent semantics to libc malloc(), for capability-aware memory.
+ *
+ * In IDF, ``malloc(s)`` is equivalent to ``heap_caps_malloc(s, MALLOC_CAP_8BIT)``.
+ *
+ * @param size Size, in bytes, of the amount of memory to allocate
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory to be returned
+ *
+ * @return A pointer to the memory allocated on success, NULL on failure
+ */
+void *heap_caps_malloc(size_t size, uint32_t caps);
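
For example, a driver that needs a DMA-capable buffer could combine capability flags like this (a sketch; the buffer size is arbitrary):

#include "esp_heap_caps.h"

void example_dma_buffer(void)
{
    /* DMA-capable memory also needs to be internal, since on most targets
       external RAM cannot be used for DMA. */
    uint8_t *buf = heap_caps_malloc(1024, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
    if (buf == NULL) {
        /* allocation failed, handle the error */
        return;
    }
    /* ... use the buffer ... */
    heap_caps_free(buf);
}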
+
+
+/**
+ * @brief Free memory previously allocated via heap_caps_malloc() or heap_caps_realloc().
+ *
+ * Equivalent semantics to libc free(), for capability-aware memory.
+ *
+ *  In IDF, ``free(p)`` is equivalent to ``heap_caps_free(p)``.
+ *
+ * @param ptr Pointer to memory previously returned from heap_caps_malloc() or heap_caps_realloc(). Can be NULL.
+ */
+void heap_caps_free( void *ptr);
+
+/**
+ * @brief Reallocate memory previously allocated via heap_caps_malloc() or heap_caps_realloc().
+ *
+ * Equivalent semantics to libc realloc(), for capability-aware memory.
+ *
+ * In IDF, ``realloc(p, s)`` is equivalent to ``heap_caps_realloc(p, s, MALLOC_CAP_8BIT)``.
+ *
+ * 'caps' parameter can be different to the capabilities that any original 'ptr' was allocated with. In this way,
+ * realloc can be used to "move" a buffer if necessary to ensure it meets a new set of capabilities.
+ *
+ * @param ptr Pointer to previously allocated memory, or NULL for a new allocation.
+ * @param size Size of the new buffer requested, or 0 to free the buffer.
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory desired for the new allocation.
+ *
+ * @return Pointer to a new buffer of size 'size' with capabilities 'caps', or NULL if allocation failed.
+ */
+void *heap_caps_realloc( void *ptr, size_t size, uint32_t caps);
+
+/**
+ * @brief Allocate an aligned chunk of memory which has the given capabilities
+ *
+ * Equivalent semantics to libc aligned_alloc(), for capability-aware memory.
+ * @param alignment  Alignment of the returned pointer;
+ *                   must be a power of two
+ * @param size Size, in bytes, of the amount of memory to allocate
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory to be returned
+ *
+ * @return A pointer to the memory allocated on success, NULL on failure
+ *
+ *
+ */
+void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps);
+
+/**
+ * @brief Used to deallocate memory previously allocated with heap_caps_aligned_alloc
+ *
+ * @param ptr Pointer to the memory allocated
+ * @note This function is deprecated, please consider using heap_caps_free() instead
+ */
+void __attribute__((deprecated))  heap_caps_aligned_free(void *ptr);
+
+/**
+ * @brief Allocate an aligned chunk of memory which has the given capabilities. The allocated memory is zero-initialized.
+ *
+ * @param alignment  Alignment of the returned pointer;
+ *                   must be a power of two
+ * @param n    Number of contiguous chunks of memory to allocate
+ * @param size Size, in bytes, of each chunk of memory to allocate
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory to be returned
+ *
+ * @return A pointer to the memory allocated on success, NULL on failure
+ *
+ */
+void *heap_caps_aligned_calloc(size_t alignment, size_t n, size_t size, uint32_t caps);
+
+
+/**
+ * @brief Allocate a chunk of memory which has the given capabilities. The allocated memory is zero-initialized.
+ *
+ * Equivalent semantics to libc calloc(), for capability-aware memory.
+ *
+ * In IDF, ``calloc(n, s)`` is equivalent to ``heap_caps_calloc(n, s, MALLOC_CAP_8BIT)``.
+ *
+ * @param n    Number of contiguous chunks of memory to allocate
+ * @param size Size, in bytes, of each chunk of memory to allocate
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory to be returned
+ *
+ * @return A pointer to the memory allocated on success, NULL on failure
+ */
+void *heap_caps_calloc(size_t n, size_t size, uint32_t caps);
+
+/**
+ * @brief Get the total size of all the regions that have the given capabilities
+ *
+ * This function takes all regions capable of having the given capabilities allocated in them
+ * and adds up the total space they have.
+ *
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory
+ *
+ * @return total size in bytes
+ */
+
+size_t heap_caps_get_total_size(uint32_t caps);
+
+/**
+ * @brief Get the total free size of all the regions that have the given capabilities
+ *
+ * This function takes all regions capable of having the given capabilities allocated in them
+ * and adds up the free space they have.
+ *
+ * Note that because of heap fragmentation it is probably not possible to allocate a single block of memory
+ * of this size. Use heap_caps_get_largest_free_block() for this purpose.
+
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory
+ *
+ * @return Amount of free bytes in the regions
+ */
+size_t heap_caps_get_free_size( uint32_t caps );
+
+
+/**
+ * @brief Get the total minimum free memory of all regions with the given capabilities
+ *
+ * This adds all the low water marks of the regions capable of delivering the memory
+ * with the given capabilities.
+ *
+ * Note the result may be less than the global all-time minimum available heap of this kind, as "low water marks" are
+ * tracked per-region. Individual regions' heaps may have reached their "low water marks" at different points in time. However
+ * this result still gives a "worst case" indication for all-time minimum free heap.
+ *
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory
+ *
+ * @return Amount of free bytes in the regions
+ */
+size_t heap_caps_get_minimum_free_size( uint32_t caps );
+
+/**
+ * @brief Get the largest free block of memory able to be allocated with the given capabilities.
+ *
+ * Returns the largest value of ``s`` for which ``heap_caps_malloc(s, caps)`` will succeed.
+ *
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory
+ *
+ * @return Size of largest free block in bytes.
+ */
+size_t heap_caps_get_largest_free_block( uint32_t caps );
+
+
+/**
+ * @brief Get heap info for all regions with the given capabilities.
+ *
+ * Calls multi_heap_info() on all heaps which share the given capabilities.  The information returned is an aggregate
+ * across all matching heaps.  The meanings of fields are the same as defined for multi_heap_info_t, except that
+ * ``minimum_free_bytes`` has the same caveats described in heap_caps_get_minimum_free_size().
+ *
+ * @param info        Pointer to a structure which will be filled with relevant
+ *                    heap metadata.
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory
+ *
+ */
+void heap_caps_get_info( multi_heap_info_t *info, uint32_t caps );
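
A short sketch of querying aggregate heap info for one capability set and printing a couple of the multi_heap_info_t fields defined in multi_heap.h later in this merge:

#include <stdio.h>
#include "esp_heap_caps.h"

void example_print_internal_heap(void)
{
    multi_heap_info_t info;
    heap_caps_get_info(&info, MALLOC_CAP_INTERNAL);
    printf("internal heap: %u bytes free, largest free block %u bytes\n",
           (unsigned) info.total_free_bytes, (unsigned) info.largest_free_block);
}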
+
+
+/**
+ * @brief Print a summary of all memory with the given capabilities.
+ *
+ * Calls multi_heap_info on all heaps which share the given capabilities, and
+ * prints a two-line summary for each, then a total summary.
+ *
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory
+ *
+ */
+void heap_caps_print_heap_info( uint32_t caps );
+
+/**
+ * @brief Check integrity of all heap memory in the system.
+ *
+ * Calls multi_heap_check on all heaps. Optionally print errors if heaps are corrupt.
+ *
+ * Calling this function is equivalent to calling heap_caps_check_integrity
+ * with the caps argument set to MALLOC_CAP_INVALID.
+ *
+ * @param print_errors Print specific errors if heap corruption is found.
+ *
+ * @return True if all heaps are valid, False if at least one heap is corrupt.
+ */
+bool heap_caps_check_integrity_all(bool print_errors);
+
+/**
+ * @brief Check integrity of all heaps with the given capabilities.
+ *
+ * Calls multi_heap_check on all heaps which share the given capabilities. Optionally
+ * print errors if the heaps are corrupt.
+ *
+ * See also heap_caps_check_integrity_all to check all heap memory
+ * in the system and heap_caps_check_integrity_addr to check memory
+ * around a single address.
+ *
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory
+ * @param print_errors Print specific errors if heap corruption is found.
+ *
+ * @return True if all heaps are valid, False if at least one heap is corrupt.
+ */
+bool heap_caps_check_integrity(uint32_t caps, bool print_errors);
+
+/**
+ * @brief Check integrity of heap memory around a given address.
+ *
+ * This function can be used to check the integrity of a single region of heap memory,
+ * which contains the given address.
+ *
+ * This can be useful if debugging heap integrity for corruption at a known address,
+ * as it has a lower overhead than checking all heap regions. Note that if the corrupt
+ * address moves around between runs (due to timing or other factors) then this approach
+ * won't work and you should call heap_caps_check_integrity or
+ * heap_caps_check_integrity_all instead.
+ *
+ * @note The entire heap region around the address is checked, not only the adjacent
+ * heap blocks.
+ *
+ * @param addr Address in memory. Check for corruption in region containing this address.
+ * @param print_errors Print specific errors if heap corruption is found.
+ *
+ * @return True if the heap containing the specified address is valid,
+ * False if at least one heap is corrupt or the address doesn't belong to a heap region.
+ */
+bool heap_caps_check_integrity_addr(intptr_t addr, bool print_errors);
+
+/**
+ * @brief Enable malloc() in external memory and set limit below which
+ *        malloc() attempts are placed in internal memory.
+ *
+ * When external memory is in use, the allocation strategy is to initially try to
+ * satisfy smaller allocation requests with internal memory and larger requests
+ * with external memory. This sets the limit between the two, as well as generally
+ * enabling allocation in external memory.
+ *
+ * @param limit       Limit, in bytes.
+ */
+void heap_caps_malloc_extmem_enable(size_t limit);
+
+/**
+ * @brief Allocate a chunk of memory, trying the given capability sets in decreasing order of preference.
+ *
+ * @attention The variable parameters are bitwise OR of MALLOC_CAP_* flags indicating the type of memory.
+ *            This API tries to allocate memory with the first capability set; if that fails, it tries the
+ *            next one, and so on, until an allocation succeeds or all of the capability sets have failed.
+ *
+ * @param size Size, in bytes, of the amount of memory to allocate
+ * @param num Number of variable parameters
+ *
+ * @return A pointer to the memory allocated on success, NULL on failure
+ */
+void *heap_caps_malloc_prefer( size_t size, size_t num, ... );
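
For instance, a large buffer could prefer external SPIRAM and fall back to internal memory (a sketch; the size is arbitrary):

#include "esp_heap_caps.h"

void example_prefer_spiram(void)
{
    /* Two capability sets, tried in order: external SPIRAM first, then internal RAM. */
    void *buf = heap_caps_malloc_prefer(32 * 1024, 2,
                                        MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT,
                                        MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    if (buf != NULL) {
        /* ... use the buffer ... */
        heap_caps_free(buf);
    }
}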
+
+/**
+ * @brief Reallocate a chunk of memory, trying the given capability sets in decreasing order of preference.
+ *
+ * @param ptr Pointer to previously allocated memory, or NULL for a new allocation.
+ * @param size Size of the new buffer requested, or 0 to free the buffer.
+ * @param num Number of variable parameters
+ *
+ * @return Pointer to a new buffer of size 'size', or NULL if allocation failed.
+ */
+void *heap_caps_realloc_prefer( void *ptr, size_t size, size_t num, ... );
+
+/**
+ * @brief Allocate and zero-initialize a chunk of memory, trying the given capability sets in decreasing order of preference.
+ *
+ * @param n    Number of contiguous chunks of memory to allocate
+ * @param size Size, in bytes, of each chunk of memory to allocate
+ * @param num  Number of variable parameters
+ *
+ * @return A pointer to the memory allocated on success, NULL on failure
+ */
+void *heap_caps_calloc_prefer( size_t n, size_t size, size_t num, ... );
+
+/**
+ * @brief Dump the full structure of all heaps with matching capabilities.
+ *
+ * Prints a large amount of output to serial (because of locking limitations,
+ * the output bypasses stdout/stderr). For each (variable sized) block
+ * in each matching heap, the following output is printed on a single line:
+ *
+ * - Block address (the data buffer returned by malloc is 4 bytes after this
+ *   if heap debugging is set to Basic, or 8 bytes otherwise).
+ * - Data size (the data size may be larger than the size requested by malloc,
+ *   either due to heap fragmentation or because of heap debugging level).
+ * - Address of next block in the heap.
+ * - If the block is free, the address of the next free block is also printed.
+ *
+ * @param caps        Bitwise OR of MALLOC_CAP_* flags indicating the type
+ *                    of memory
+ */
+void heap_caps_dump(uint32_t caps);
+
+/**
+ * @brief Dump the full structure of all heaps.
+ *
+ * Covers all registered heaps. Prints a large amount of output to serial.
+ *
+ * Output is the same as for heap_caps_dump.
+ *
+ */
+void heap_caps_dump_all(void);
+
+/**
+ * @brief Return the size that a particular pointer was allocated with.
+ *
+ * @param ptr Pointer to currently allocated heap memory. Must be a pointer value previously
+ * returned by heap_caps_malloc(), malloc(), calloc(), etc. and not yet freed.
+ *
+ * @note The app will crash with an assertion failure if the pointer is not valid.
+ *
+ * @return Size of the memory allocated at this block.
+ *
+ */
+size_t heap_caps_get_allocated_size( void *ptr );
+
+#ifdef __cplusplus
+}
+#endif

+ 92 - 0
components/heap/include/esp_heap_caps_init.h

@@ -0,0 +1,92 @@
+// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include "esp_err.h"
+#include "esp_heap_caps.h"
+#include "soc/soc_memory_layout.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Initialize the capability-aware heap allocator.
+ *
+ * This is called once in the IDF startup code. Do not call it
+ * at other times.
+ */
+void heap_caps_init(void);
+
+/**
+ * @brief Enable heap(s) in memory regions where the startup stacks are located.
+ *
+ * On startup, the pro/app CPUs have a certain memory region they use as stack, so we
+ * cannot do allocations in the regions these stack frames are. When FreeRTOS is
+ * completely started, they do not use that memory anymore and heap(s) there can
+ * be enabled.
+ */
+void heap_caps_enable_nonos_stack_heaps(void);
+
+/**
+ * @brief Add a region of memory to the collection of heaps at runtime.
+ *
+ * Most memory regions are defined in soc_memory_layout.c for the SoC,
+ * and are registered via heap_caps_init(). Some regions can't be used
+ * immediately and are later enabled via heap_caps_enable_nonos_stack_heaps().
+ *
+ * Call this function to add a region of memory to the heap at some later time.
+ *
+ * This function does not consider any of the "reserved" regions or other data in soc_memory_layout; the caller needs to
+ * account for this themselves.
+ *
+ * All memory within the region specified by start & end parameters must be otherwise unused.
+ *
+ * The capabilities of the newly registered memory will be determined by the start address, as looked up in the regions
+ * specified in soc_memory_layout.c.
+ *
+ * Use heap_caps_add_region_with_caps() to register a region with custom capabilities.
+ *
+ * @param start Start address of new region.
+ * @param end End address of new region.
+ *
+ * @return ESP_OK on success, ESP_ERR_INVALID_ARG if a parameter is invalid, ESP_ERR_NOT_FOUND if the
+ * specified start address doesn't reside in a known region, or any error returned by heap_caps_add_region_with_caps().
+ */
+esp_err_t heap_caps_add_region(intptr_t start, intptr_t end);
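
A hedged sketch of registering extra memory at runtime; the start and end addresses here are placeholders and must describe otherwise-unused memory that falls inside a region known to soc_memory_layout.c:

#include "esp_heap_caps_init.h"
#include "esp_log.h"

void example_add_region(intptr_t region_start, intptr_t region_end)
{
    /* region_start/region_end are hypothetical; supply real, unused addresses. */
    esp_err_t err = heap_caps_add_region(region_start, region_end);
    if (err != ESP_OK) {
        ESP_LOGW("heap", "heap_caps_add_region failed: %s", esp_err_to_name(err));
    }
}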
+
+
+/**
+ * @brief Add a region of memory to the collection of heaps at runtime, with custom capabilities.
+ *
+ * Similar to heap_caps_add_region(), only custom memory capabilities are specified by the caller.
+ *
+ * @param caps Ordered array of capability masks for the new region, in order of priority. Must have length
+ * SOC_MEMORY_TYPE_NO_PRIOS. Does not need to remain valid after the call returns.
+ * @param start Start address of new region.
+ * @param end End address of new region.
+ *
+ * @return
+ *         - ESP_OK on success
+ *         - ESP_ERR_INVALID_ARG if a parameter is invalid
+ *         - ESP_ERR_NO_MEM if no memory to register new heap.
+ *         - ESP_ERR_INVALID_SIZE if the memory region is too small to fit a heap
+ *         - ESP_FAIL if region overlaps the start and/or end of an existing region
+ */
+esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end);
+
+
+#ifdef __cplusplus
+}
+#endif

+ 98 - 0
components/heap/include/esp_heap_task_info.h

@@ -0,0 +1,98 @@
+// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#ifdef CONFIG_HEAP_TASK_TRACKING
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// This macro controls how much space is provided for partitioning the per-task
+// heap allocation info according to one or more sets of heap capabilities.
+#define NUM_HEAP_TASK_CAPS 4
+
+/** @brief Structure to collect per-task heap allocation totals partitioned by selected caps */
+typedef struct {
+    TaskHandle_t task;                ///< Task to which these totals belong
+    size_t size[NUM_HEAP_TASK_CAPS];  ///< Total allocations partitioned by selected caps
+    size_t count[NUM_HEAP_TASK_CAPS]; ///< Number of blocks partitioned by selected caps
+} heap_task_totals_t;
+
+/** @brief Structure providing details about a block allocated by a task */
+typedef struct {
+    TaskHandle_t task;                ///< Task that allocated the block
+    void *address;                    ///< User address of allocated block
+    uint32_t size;                    ///< Size of the allocated block
+} heap_task_block_t;
+
+/** @brief Structure to provide parameters to heap_caps_get_per_task_info
+ *
+ * The 'caps' and 'mask' arrays allow partitioning the per-task heap allocation
+ * totals by selected sets of heap region capabilities so that totals for
+ * multiple regions can be accumulated in one scan.  The capabilities flags for
+ * each region ANDed with mask[i] are compared to caps[i] in order; the
+ * allocations in that region are added to totals->size[i] and totals->count[i]
+ * for the first i that matches.  To collect the totals without any
+ * partitioning, set mask[0] and caps[0] both to zero.  The allocation totals
+ * are returned in the 'totals' array of heap_task_totals_t structs.  To allow
+ * easily comparing the totals array between consecutive calls, that array can
+ * be left populated from one call to the next so the order of tasks is the
+ * same even if some tasks have freed their blocks or have been deleted.  The
+ * number of blocks prepopulated is given by num_totals, which is updated upon
+ * return.  If there are more tasks with allocations than the capacity of the
+ * totals array (given by max_totals), information for the excess tasks will
+ * not be collected.  The totals array pointer can be NULL if the totals are
+ * not desired.
+ *
+ * The 'tasks' array holds a list of handles for tasks whose block details are
+ * to be returned in the 'blocks' array of heap_task_block_t structs.  If the
+ * tasks array pointer is NULL, block details for all tasks will be returned up
+ * to the capacity of the buffer array, given by max_blocks.  The function
+ * return value tells the number of blocks filled into the array.  The blocks
+ * array pointer can be NULL if block details are not desired, or max_blocks
+ * can be set to zero.
+ */
+typedef struct {
+    int32_t caps[NUM_HEAP_TASK_CAPS]; ///< Array of caps for partitioning task totals
+    int32_t mask[NUM_HEAP_TASK_CAPS]; ///< Array of masks under which caps must match
+    TaskHandle_t *tasks;              ///< Array of tasks whose block info is returned
+    size_t num_tasks;                 ///< Length of tasks array
+    heap_task_totals_t *totals;       ///< Array of structs to collect task totals
+    size_t *num_totals;               ///< Number of task structs currently in array
+    size_t max_totals;                ///< Capacity of array of task totals structs
+    heap_task_block_t *blocks;        ///< Array of task block details structs
+    size_t max_blocks;                ///< Capacity of array of task block info structs
+} heap_task_info_params_t;
+
+/**
+ * @brief Return per-task heap allocation totals and lists of blocks.
+ *
+ * For each task that has allocated memory from the heap, return totals for
+ * allocations within regions matching one or more sets of capabilities.
+ *
+ * Optionally also return an array of structs providing details about each
+ * block allocated by one or more requested tasks, or by all tasks.
+ *
+ * @param params Structure to hold all the parameters for the function
+ * (@see heap_task_info_params_t).
+ * @return Number of block detail structs returned (@see heap_task_block_t).
+ */
+extern size_t heap_caps_get_per_task_info(heap_task_info_params_t *params);
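
A sketch of one way to fill in heap_task_info_params_t, assuming CONFIG_HEAP_TASK_TRACKING is enabled: totals are partitioned into "8-bit capable" versus "32-bit only" memory for all tasks, and the array sizes are arbitrary:

#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_heap_caps.h"
#include "esp_heap_task_info.h"

#define MAX_TASK_NUM  20
#define MAX_BLOCK_NUM 20

static heap_task_totals_t s_totals[MAX_TASK_NUM];
static size_t s_num_totals;
static heap_task_block_t s_blocks[MAX_BLOCK_NUM];

void example_dump_per_task_heap(void)
{
    heap_task_info_params_t params = { 0 };
    params.caps[0] = MALLOC_CAP_8BIT;  params.mask[0] = MALLOC_CAP_8BIT;                    /* slot 0: 8-bit capable */
    params.caps[1] = MALLOC_CAP_32BIT; params.mask[1] = MALLOC_CAP_8BIT | MALLOC_CAP_32BIT; /* slot 1: 32-bit only   */
    params.tasks = NULL;               /* NULL: report blocks for all tasks */
    params.num_tasks = 0;
    params.totals = s_totals;
    params.num_totals = &s_num_totals;
    params.max_totals = MAX_TASK_NUM;
    params.blocks = s_blocks;
    params.max_blocks = MAX_BLOCK_NUM;

    heap_caps_get_per_task_info(&params);

    for (size_t i = 0; i < s_num_totals; i++) {
        printf("task %p: %u bytes 8-bit capable, %u bytes 32-bit only\n",
               (void *) s_totals[i].task,
               (unsigned) s_totals[i].size[0], (unsigned) s_totals[i].size[1]);
    }
}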
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // CONFIG_HEAP_TASK_TRACKING

+ 154 - 0
components/heap/include/esp_heap_trace.h

@@ -0,0 +1,154 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include "sdkconfig.h"
+#include <stdint.h>
+#include <esp_err.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(CONFIG_HEAP_TRACING) && !defined(HEAP_TRACE_SRCFILE)
+#warning "esp_heap_trace.h is included but heap tracing is disabled in menuconfig, functions are no-ops"
+#endif
+
+#ifndef CONFIG_HEAP_TRACING_STACK_DEPTH
+#define CONFIG_HEAP_TRACING_STACK_DEPTH 0
+#endif
+
+typedef enum {
+    HEAP_TRACE_ALL,
+    HEAP_TRACE_LEAKS,
+} heap_trace_mode_t;
+
+/**
+ * @brief Trace record data type. Stores information about an allocated region of memory.
+ */
+typedef struct {
+    uint32_t ccount; ///< CCOUNT of the CPU when the allocation was made. LSB (bit value 1) is the CPU number (0 or 1).
+    void *address;   ///< Address which was allocated
+    size_t size;     ///< Size of the allocation
+    void *alloced_by[CONFIG_HEAP_TRACING_STACK_DEPTH]; ///< Call stack of the caller which allocated the memory.
+    void *freed_by[CONFIG_HEAP_TRACING_STACK_DEPTH];   ///< Call stack of the caller which freed the memory (all zero if not freed.)
+} heap_trace_record_t;
+
+/**
+ * @brief Initialise heap tracing in standalone mode.
+ *
+ * This function must be called before any other heap tracing functions.
+ *
+ * To disable heap tracing and allow the buffer to be freed, stop tracing and then call heap_trace_init_standalone(NULL, 0);
+ *
+ * @param record_buffer Provide a buffer to use for heap trace data. Must remain valid any time heap tracing is enabled, meaning
+ * it must be allocated from internal memory, not from PSRAM.
+ * @param num_records Size of the heap trace buffer, as number of record structures.
+ * @return
+ *  - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
+ *  - ESP_ERR_INVALID_STATE Heap tracing is currently in progress.
+ *  - ESP_OK Heap tracing initialised successfully.
+ */
+esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records);
+
+/**
+ * @brief Initialise heap tracing in host-based mode.
+ *
+ * This function must be called before any other heap tracing functions.
+ *
+ * @return
+ *  - ESP_ERR_INVALID_STATE Heap tracing is currently in progress.
+ *  - ESP_OK Heap tracing initialised successfully.
+ */
+esp_err_t heap_trace_init_tohost(void);
+
+/**
+ * @brief Start heap tracing. All heap allocations & frees will be traced, until heap_trace_stop() is called.
+ *
+ * @note heap_trace_init_standalone() must be called to provide a valid buffer, before this function is called.
+ *
+ * @note Calling this function while heap tracing is running will reset the heap trace state and continue tracing.
+ *
+ * @param mode Mode for tracing.
+ * - HEAP_TRACE_ALL means all heap allocations and frees are traced.
+ * - HEAP_TRACE_LEAKS means only suspected memory leaks are traced. (When memory is freed, the record is removed from the trace buffer.)
+ * @return
+ * - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
+ * - ESP_ERR_INVALID_STATE A non-zero-length buffer has not been set via heap_trace_init_standalone().
+ * - ESP_OK Tracing is started.
+ */
+esp_err_t heap_trace_start(heap_trace_mode_t mode);
+
+/**
+ * @brief Stop heap tracing.
+ *
+ * @return
+ * - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
+ * - ESP_ERR_INVALID_STATE Heap tracing was not in progress.
+ * - ESP_OK Heap tracing stopped.
+ */
+esp_err_t heap_trace_stop(void);
+
+/**
+ * @brief Resume heap tracing which was previously stopped.
+ *
+ * Unlike heap_trace_start(), this function does not clear the
+ * buffer of any pre-existing trace records.
+ *
+ * The heap trace mode is the same as when heap_trace_start() was
+ * last called (or HEAP_TRACE_ALL if heap_trace_start() was never called).
+ *
+ * @return
+ * - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
+ * - ESP_ERR_INVALID_STATE Heap tracing was already started.
+ * - ESP_OK Heap tracing resumed.
+ */
+esp_err_t heap_trace_resume(void);
+
+/**
+ * @brief Return number of records in the heap trace buffer
+ *
+ * It is safe to call this function while heap tracing is running.
+ */
+size_t heap_trace_get_count(void);
+
+/**
+ * @brief Return a raw record from the heap trace buffer
+ *
+ * @note It is safe to call this function while heap tracing is running; however, in HEAP_TRACE_LEAKS mode record indexing may
+ * skip entries unless heap tracing is stopped first.
+ *
+ * @param index Index (zero-based) of the record to return.
+ * @param[out] record Record where the heap trace record will be copied.
+ * @return
+ * - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
+ * - ESP_ERR_INVALID_STATE Heap tracing was not initialised.
+ * - ESP_ERR_INVALID_ARG Index is out of bounds for current heap trace record count.
+ * - ESP_OK Record returned successfully.
+ */
+esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record);
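
For example, the records can be walked manually instead of using heap_trace_dump() (a small sketch):

#include <stdio.h>
#include "esp_heap_trace.h"

void example_walk_trace(void)
{
    size_t n = heap_trace_get_count();
    for (size_t i = 0; i < n; i++) {
        heap_trace_record_t rec;
        if (heap_trace_get(i, &rec) == ESP_OK && rec.address != NULL) {
            printf("record %u: %p, %u bytes\n",
                   (unsigned) i, rec.address, (unsigned) rec.size);
        }
    }
}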
+
+/**
+ * @brief Dump heap trace record data to stdout
+ *
+ * @note It is safe to call this function while heap tracing is running; however, in HEAP_TRACE_LEAKS mode the dump may skip
+ * entries unless heap tracing is stopped first.
+ *
+ */
+void heap_trace_dump(void);
+
+#ifdef __cplusplus
+}
+#endif

+ 200 - 0
components/heap/include/heap_trace.inc

@@ -0,0 +1,200 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include <string.h>
+#include <sdkconfig.h>
+#include "soc/soc_memory_layout.h"
+#include "esp_attr.h"
+
+/* Encode the CPU ID in the LSB of the ccount value */
+inline static uint32_t get_ccount(void)
+{
+    uint32_t ccount = cpu_hal_get_cycle_count() & ~3;
+#ifndef CONFIG_FREERTOS_UNICORE
+    ccount |= xPortGetCoreID();
+#endif
+    return ccount;
+}
+
+/* Architecture-specific return value of __builtin_return_address which
+ * should be interpreted as an invalid address.
+ */
+#ifdef __XTENSA__
+#define HEAP_ARCH_INVALID_PC  0x40000000
+#else
+#define HEAP_ARCH_INVALID_PC  0x00000000
+#endif
+
+// Caller is 2 stack frames deeper than we care about
+#define STACK_OFFSET  2
+
+#define TEST_STACK(N) do {                                              \
+        if (STACK_DEPTH == N) {                                         \
+            return;                                                     \
+        }                                                               \
+        callers[N] = __builtin_return_address(N+STACK_OFFSET);          \
+        if (!esp_ptr_executable(callers[N])                             \
+            || callers[N] == (void*) HEAP_ARCH_INVALID_PC) {            \
+            callers[N] = 0;                                             \
+            return;                                                     \
+        }                                                               \
+    } while(0)
+
+/* Static function to read the call stack for a traced heap call.
+
+   Calls to __builtin_return_address are "unrolled" via TEST_STACK macro as gcc requires the
+   argument to be a compile-time constant.
+*/
+static IRAM_ATTR __attribute__((noinline)) void get_call_stack(void **callers)
+{
+    bzero(callers, sizeof(void *) * STACK_DEPTH);
+    TEST_STACK(0);
+    TEST_STACK(1);
+    TEST_STACK(2);
+    TEST_STACK(3);
+    TEST_STACK(4);
+    TEST_STACK(5);
+    TEST_STACK(6);
+    TEST_STACK(7);
+    TEST_STACK(8);
+    TEST_STACK(9);
+}
+
+_Static_assert(STACK_DEPTH >= 0 && STACK_DEPTH <= 10, "CONFIG_HEAP_TRACING_STACK_DEPTH must be in range 0-10");
+
+
+typedef enum {
+    TRACE_MALLOC_CAPS,
+    TRACE_MALLOC_DEFAULT
+} trace_malloc_mode_t;
+
+
+void *__real_heap_caps_malloc(size_t size, uint32_t caps);
+void *__real_heap_caps_malloc_default( size_t size );
+void *__real_heap_caps_realloc_default( void *ptr, size_t size );
+
+/* trace any 'malloc' event */
+static IRAM_ATTR __attribute__((noinline)) void *trace_malloc(size_t size, uint32_t caps, trace_malloc_mode_t mode)
+{
+    uint32_t ccount = get_ccount();
+    void *p;
+
+    if ( mode == TRACE_MALLOC_CAPS ) {
+        p = __real_heap_caps_malloc(size, caps);
+    } else { //TRACE_MALLOC_DEFAULT
+        p = __real_heap_caps_malloc_default(size);
+    }
+
+    heap_trace_record_t rec = {
+        .address = p,
+        .ccount = ccount,
+        .size = size,
+    };
+    get_call_stack(rec.alloced_by);
+    record_allocation(&rec);
+    return p;
+}
+
+void __real_heap_caps_free(void *p);
+
+/* trace any 'free' event */
+static IRAM_ATTR __attribute__((noinline)) void trace_free(void *p)
+{
+    void *callers[STACK_DEPTH];
+    get_call_stack(callers);
+    record_free(p, callers);
+
+    __real_heap_caps_free(p);
+}
+
+void * __real_heap_caps_realloc(void *p, size_t size, uint32_t caps);
+
+/* trace any 'realloc' event */
+static IRAM_ATTR __attribute__((noinline)) void *trace_realloc(void *p, size_t size, uint32_t caps, trace_malloc_mode_t mode)
+{
+    void *callers[STACK_DEPTH];
+    uint32_t ccount = get_ccount();
+    void *r;
+
+    /* trace realloc as free-then-alloc */
+    get_call_stack(callers);
+    record_free(p, callers);
+
+    if (mode == TRACE_MALLOC_CAPS ) {
+        r = __real_heap_caps_realloc(p, size, caps);
+    } else { //TRACE_MALLOC_DEFAULT
+        r = __real_heap_caps_realloc_default(p, size);
+    }
+    /* realloc with zero size is a free */
+    if (size != 0) {
+        heap_trace_record_t rec = {
+            .address = r,
+            .ccount = ccount,
+            .size = size,
+        };
+        memcpy(rec.alloced_by, callers, sizeof(void *) * STACK_DEPTH);
+        record_allocation(&rec);
+    }
+    return r;
+}
+
+/* Note: this changes the behaviour of libc malloc/realloc/free a bit,
+   as they no longer go via the libc functions in ROM, but the end result
+   is more or less the same. */
+
+IRAM_ATTR void *__wrap_malloc(size_t size)
+{
+    return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
+}
+
+IRAM_ATTR void __wrap_free(void *p)
+{
+    trace_free(p);
+}
+
+IRAM_ATTR void *__wrap_realloc(void *p, size_t size)
+{
+    return trace_realloc(p, size, 0, TRACE_MALLOC_DEFAULT);
+}
+
+IRAM_ATTR void *__wrap_calloc(size_t nmemb, size_t size)
+{
+    size = size * nmemb;
+    void *result = trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
+    if (result != NULL) {
+        memset(result, 0, size);
+    }
+    return result;
+}
+
+IRAM_ATTR void *__wrap_heap_caps_malloc(size_t size, uint32_t caps)
+{
+    return trace_malloc(size, caps, TRACE_MALLOC_CAPS);
+}
+
+void __wrap_heap_caps_free(void *p) __attribute__((alias("__wrap_free")));
+
+IRAM_ATTR void *__wrap_heap_caps_realloc(void *p, size_t size, uint32_t caps)
+{
+    return trace_realloc(p, size, caps, TRACE_MALLOC_CAPS);
+}
+
+IRAM_ATTR void *__wrap_heap_caps_malloc_default( size_t size )
+{
+    return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
+}
+
+IRAM_ATTR void *__wrap_heap_caps_realloc_default( void *ptr, size_t size )
+{
+    return trace_realloc(ptr, size, 0, TRACE_MALLOC_DEFAULT);
+}
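
These __wrap_/__real_ pairs rely on the GNU linker's symbol wrapping; presumably the component's build files pass the corresponding options when heap tracing is enabled, along the lines of:

-Wl,--wrap=malloc -Wl,--wrap=free -Wl,--wrap=calloc -Wl,--wrap=realloc
-Wl,--wrap=heap_caps_malloc -Wl,--wrap=heap_caps_free -Wl,--wrap=heap_caps_realloc

With --wrap=malloc, calls to malloc() resolve to __wrap_malloc(), and the original implementation stays reachable as __real_malloc(); the exact flag list here is an assumption, not taken from this merge.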

+ 190 - 0
components/heap/include/multi_heap.h

@@ -0,0 +1,190 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+/* multi_heap is a heap implementation for handling multiple
+   heterogeneous heaps in a single program.
+
+   Any contiguous block of memory can be registered as a heap.
+*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief Opaque handle to a registered heap */
+typedef struct multi_heap_info *multi_heap_handle_t;
+
+/**
+ * @brief allocate a chunk of memory with specific alignment
+ *
+ * @param heap  Handle to a registered heap.
+ * @param size  size in bytes of memory chunk
+ * @param alignment  how the memory must be aligned
+ *
+ * @return pointer to the memory allocated, NULL on failure
+ */
+void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment);
+
+/** @brief malloc() a buffer in a given heap
+ *
+ * Semantics are the same as standard malloc(), only the returned buffer will be allocated in the specified heap.
+ *
+ * @param heap Handle to a registered heap.
+ * @param size Size of desired buffer.
+ *
+ * @return Pointer to new memory, or NULL if allocation fails.
+ */
+void *multi_heap_malloc(multi_heap_handle_t heap, size_t size);
+
+/** @brief free() an aligned buffer in a given heap.
+ *
+ * @param heap Handle to a registered heap.
+ * @param p NULL, or a pointer previously returned from multi_heap_aligned_alloc() for the same heap.
+ * @note This function is deprecated, consider using  multi_heap_free() instead
+ */
+void __attribute__((deprecated)) multi_heap_aligned_free(multi_heap_handle_t heap, void *p);
+
+/** @brief free() a buffer in a given heap.
+ *
+ * Semantics are the same as standard free(), only the argument 'p' must be NULL or have been allocated in the specified heap.
+ *
+ * @param heap Handle to a registered heap.
+ * @param p NULL, or a pointer previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap.
+ */
+void multi_heap_free(multi_heap_handle_t heap, void *p);
+
+/** @brief realloc() a buffer in a given heap.
+ *
+ * Semantics are the same as standard realloc(), only the argument 'p' must be NULL or have been allocated in the specified heap.
+ *
+ * @param heap Handle to a registered heap.
+ * @param p NULL, or a pointer previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap.
+ * @param size Desired new size for buffer.
+ *
+ * @return New buffer of 'size' containing contents of 'p', or NULL if reallocation failed.
+ */
+void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size);
+
+
+/** @brief Return the size that a particular pointer was allocated with.
+ *
+ * @param heap Handle to a registered heap.
+ * @param p Pointer, must have been previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap.
+ *
+ * @return Size of the memory allocated at this block. May be more than the original size argument, due
+ * to padding and minimum block sizes.
+ */
+size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p);
+
+
+/** @brief Register a new heap for use
+ *
+ * This function initialises a heap at the specified address, and returns a handle for future heap operations.
+ *
+ * There is no equivalent function for deregistering a heap - if all blocks in the heap are free, you can immediately start using the memory for other purposes.
+ *
+ * @param start Start address of the memory to use for a new heap.
+ * @param size Size (in bytes) of the new heap.
+ *
+ * @return Handle of a new heap ready for use, or NULL if the heap region was too small to be initialised.
+ */
+multi_heap_handle_t multi_heap_register(void *start, size_t size);
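
A minimal sketch of registering a standalone heap over a static buffer and allocating from it (the pool size is arbitrary):

#include <assert.h>
#include "multi_heap.h"

static uint8_t s_pool[8192];

void example_multi_heap(void)
{
    multi_heap_handle_t heap = multi_heap_register(s_pool, sizeof(s_pool));
    assert(heap != NULL);   /* NULL means the region was too small */

    void *p = multi_heap_malloc(heap, 128);
    if (p != NULL) {
        multi_heap_free(heap, p);
    }
}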
+
+
+/** @brief Associate a private lock pointer with a heap
+ *
+ * The lock argument is supplied to the MULTI_HEAP_LOCK() and MULTI_HEAP_UNLOCK() macros, defined in multi_heap_platform.h.
+ *
+ * The lock in question must be recursive.
+ *
+ * When the heap is first registered, the associated lock is NULL.
+ *
+ * @param heap Handle to a registered heap.
+ * @param lock Optional pointer to a locking structure to associate with this heap.
+ */
+void multi_heap_set_lock(multi_heap_handle_t heap, void* lock);
+
+/** @brief Dump heap information to stdout
+ *
+ * For debugging purposes, this function dumps information about every block in the heap to stdout.
+ *
+ * @param heap Handle to a registered heap.
+ */
+void multi_heap_dump(multi_heap_handle_t heap);
+
+/** @brief Check heap integrity
+ *
+ * Walks the heap and checks all heap data structures are valid. If any errors are detected, an error-specific message
+ * can be optionally printed to stderr. Print behaviour can be overridden at compile time by defining
+ * MULTI_CHECK_FAIL_PRINTF in multi_heap_platform.h.
+ *
+ * @param heap Handle to a registered heap.
+ * @param print_errors If true, errors will be printed to stderr.
+ * @return true if heap is valid, false otherwise.
+ */
+bool multi_heap_check(multi_heap_handle_t heap, bool print_errors);
+
+/** @brief Return free heap size
+ *
+ * Returns the number of bytes available in the heap.
+ *
+ * Equivalent to the total_free_bytes member returned by multi_heap_get_info().
+ *
+ * Note that the heap may be fragmented, so the actual maximum size for a single malloc() may be lower. To know this
+ * size, see the largest_free_block member returned by multi_heap_get_info().
+ *
+ * @param heap Handle to a registered heap.
+ * @return Number of free bytes.
+ */
+size_t multi_heap_free_size(multi_heap_handle_t heap);
+
+/** @brief Return the lifetime minimum free heap size
+ *
+ * Equivalent to the minimum_free_bytes member returned by multi_heap_get_info().
+ *
+ * Returns the lifetime "low water mark" of possible values returned from multi_heap_free_size(), for the specified
+ * heap.
+ *
+ * @param heap Handle to a registered heap.
+ * @return Number of free bytes.
+ */
+size_t multi_heap_minimum_free_size(multi_heap_handle_t heap);
+
+/** @brief Structure to access heap metadata via multi_heap_get_info */
+typedef struct {
+    size_t total_free_bytes;      ///<  Total free bytes in the heap. Equivalent to multi_heap_free_size().
+    size_t total_allocated_bytes; ///<  Total bytes allocated to data in the heap.
+    size_t largest_free_block;    ///<  Size of largest free block in the heap. This is the largest malloc-able size.
+    size_t minimum_free_bytes;    ///<  Lifetime minimum free heap size. Equivalent to multi_heap_minimum_free_size().
+    size_t allocated_blocks;      ///<  Number of (variable size) blocks allocated in the heap.
+    size_t free_blocks;           ///<  Number of (variable size) free blocks in the heap.
+    size_t total_blocks;          ///<  Total number of (variable size) blocks in the heap.
+} multi_heap_info_t;
+
+/** @brief Return metadata about a given heap
+ *
+ * Fills a multi_heap_info_t structure with information about the specified heap.
+ *
+ * @param heap Handle to a registered heap.
+ * @param info Pointer to a structure to fill with heap metadata.
+ */
+void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info);
+
+#ifdef __cplusplus
+}
+#endif

+ 7 - 0
components/heap/linker.lf

@@ -0,0 +1,7 @@
+[mapping:heap]
+archive: libheap.a
+entries:
+    heap_tlsf (noflash)
+    multi_heap (noflash)
+    if HEAP_POISONING_DISABLED = n:
+        multi_heap_poisoning (noflash)

+ 376 - 0
components/heap/multi_heap.c

@@ -0,0 +1,376 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <string.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <sys/cdefs.h>
+#include "heap_tlsf.h"
+#include <multi_heap.h>
+#include "multi_heap_internal.h"
+
+/* Note: Keep platform-specific parts in that header; this source
+   file should depend on libc only */
+#include "multi_heap_platform.h"
+
+/* Defines compile-time configuration macros */
+#include "multi_heap_config.h"
+
+#ifndef MULTI_HEAP_POISONING
+/* if no heap poisoning, public API aliases directly to these implementations */
+void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
+    __attribute__((alias("multi_heap_malloc_impl")));
+
+void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment)
+    __attribute__((alias("multi_heap_aligned_alloc_impl")));
+
+void multi_heap_aligned_free(multi_heap_handle_t heap, void *p)
+    __attribute__((alias("multi_heap_free_impl")));
+
+void multi_heap_free(multi_heap_handle_t heap, void *p)
+    __attribute__((alias("multi_heap_free_impl")));
+
+void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size)
+    __attribute__((alias("multi_heap_realloc_impl")));
+
+size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p)
+    __attribute__((alias("multi_heap_get_allocated_size_impl")));
+
+multi_heap_handle_t multi_heap_register(void *start, size_t size)
+    __attribute__((alias("multi_heap_register_impl")));
+
+void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info)
+    __attribute__((alias("multi_heap_get_info_impl")));
+
+size_t multi_heap_free_size(multi_heap_handle_t heap)
+    __attribute__((alias("multi_heap_free_size_impl")));
+
+size_t multi_heap_minimum_free_size(multi_heap_handle_t heap)
+    __attribute__((alias("multi_heap_minimum_free_size_impl")));
+
+void *multi_heap_get_block_address(multi_heap_block_handle_t block)
+    __attribute__((alias("multi_heap_get_block_address_impl")));
+
+void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
+{
+    return NULL;
+}
+
+#endif
+
+#define ALIGN(X) ((X) & ~(sizeof(void *)-1))
+#define ALIGN_UP(X) ALIGN((X)+sizeof(void *)-1)
+#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
+
+
+typedef struct multi_heap_info {
+    void *lock;
+    size_t free_bytes;
+    size_t minimum_free_bytes;
+    size_t pool_size;
+    tlsf_t heap_data;
+} heap_t;
+
+/* Return true if this block is free. */
+static inline bool is_free(const block_header_t *block)
+{
+    return ((block->size & 0x01) != 0);
+}
+
+/* Data size of the block (excludes this block's header) */
+static inline size_t block_data_size(const block_header_t *block)
+{
+    return (block->size & ~0x03);
+}
+
+/* Check a block is valid for this heap. Used to verify parameters. */
+static void assert_valid_block(const heap_t *heap, const block_header_t *block)
+{
+    pool_t pool = tlsf_get_pool(heap->heap_data);
+    void *ptr = block_to_ptr(block);
+
+    MULTI_HEAP_ASSERT((ptr >= pool) &&
+                    (ptr < pool + heap->pool_size),
+                    (uintptr_t)ptr);
+}
+
+void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block)
+{
+    void *ptr = block_to_ptr(block);
+    return (ptr);
+}
+
+size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
+{
+    return tlsf_block_size(p);
+}
+
+multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
+{
+    assert(start_ptr);
+    if(size < (tlsf_size(NULL) + tlsf_block_size_min() + sizeof(heap_t))) {
+        //Region too small to be a heap.
+        return NULL;
+    }
+
+    heap_t *result = (heap_t *)start_ptr;
+    size -= sizeof(heap_t);
+
+    result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, 0);
+    if(!result->heap_data) {
+        return NULL;
+    }
+
+    result->lock = NULL;
+    result->free_bytes = size - tlsf_size(result->heap_data);
+    result->pool_size = size;
+    result->minimum_free_bytes = result->free_bytes;
+    return result;
+}
+
+void multi_heap_set_lock(multi_heap_handle_t heap, void *lock)
+{
+    heap->lock = lock;
+}
+
+void inline multi_heap_internal_lock(multi_heap_handle_t heap)
+{
+    MULTI_HEAP_LOCK(heap->lock);
+}
+
+void inline multi_heap_internal_unlock(multi_heap_handle_t heap)
+{
+    MULTI_HEAP_UNLOCK(heap->lock);
+}
+
+multi_heap_block_handle_t multi_heap_get_first_block(multi_heap_handle_t heap)
+{
+    assert(heap != NULL);
+    pool_t pool = tlsf_get_pool(heap->heap_data);
+    block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);
+
+    return (multi_heap_block_handle_t)block;
+}
+
+multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, multi_heap_block_handle_t block)
+{
+    assert(heap != NULL);
+    assert_valid_block(heap, block);
+    block_header_t* next = block_next(block);
+
+    if(block_data_size(next) == 0) {
+        //Last block:
+        return NULL;
+    } else {
+        return (multi_heap_block_handle_t)next;
+    }
+
+}
+
+bool multi_heap_is_free(multi_heap_block_handle_t block)
+{
+    return is_free(block);
+}
+
+void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size)
+{
+    if (size == 0 || heap == NULL) {
+        return NULL;
+    }
+
+    multi_heap_internal_lock(heap);
+    void *result = tlsf_malloc(heap->heap_data, size);
+    if(result) {
+        heap->free_bytes -= tlsf_block_size(result);
+        if (heap->free_bytes < heap->minimum_free_bytes) {
+            heap->minimum_free_bytes = heap->free_bytes;
+        }
+    }
+    multi_heap_internal_unlock(heap);
+
+    return result;
+}
+
+void multi_heap_free_impl(multi_heap_handle_t heap, void *p)
+{
+    if (heap == NULL || p == NULL) {
+        return;
+    }
+
+    assert_valid_block(heap, p);
+
+    multi_heap_internal_lock(heap);
+    heap->free_bytes += tlsf_block_size(p);
+    tlsf_free(heap->heap_data, p);
+    multi_heap_internal_unlock(heap);
+}
+
+void *multi_heap_realloc_impl(multi_heap_handle_t heap, void *p, size_t size)
+{
+    assert(heap != NULL);
+
+    if (p == NULL) {
+        return multi_heap_malloc_impl(heap, size);
+    }
+
+    assert_valid_block(heap, p);
+
+    if (heap == NULL) {
+        return NULL;
+    }
+
+    multi_heap_internal_lock(heap);
+    size_t previous_block_size =  tlsf_block_size(p);
+    void *result = tlsf_realloc(heap->heap_data, p, size);
+    if(result) {
+        heap->free_bytes += previous_block_size;
+        heap->free_bytes -= tlsf_block_size(result);
+        if (heap->free_bytes < heap->minimum_free_bytes) {
+            heap->minimum_free_bytes = heap->free_bytes;
+        }
+    }
+
+    multi_heap_internal_unlock(heap);
+
+    return result;
+}
+
+void *multi_heap_aligned_alloc_impl_offs(multi_heap_handle_t heap, size_t size, size_t alignment, size_t offset)
+{
+    if(heap == NULL) {
+        return NULL;
+    }
+
+    if(!size) {
+        return NULL;
+    }
+
+    //Alignment must be a power of two:
+    if(((alignment & (alignment - 1)) != 0) ||(!alignment)) {
+        return NULL;
+    }
+
+    multi_heap_internal_lock(heap);
+    void *result = tlsf_memalign_offs(heap->heap_data, alignment, size, offset);
+    if(result) {
+        heap->free_bytes -= tlsf_block_size(result);
+        if(heap->free_bytes < heap->minimum_free_bytes) {
+            heap->minimum_free_bytes = heap->free_bytes;
+        }
+    }
+    multi_heap_internal_unlock(heap);
+
+    return result;
+}
+
+
+void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment)
+{
+    return multi_heap_aligned_alloc_impl_offs(heap, size, alignment, 0);
+}
+
+bool multi_heap_check(multi_heap_handle_t heap, bool print_errors)
+{
+    (void)print_errors;
+    bool valid = true;
+    assert(heap != NULL);
+
+    multi_heap_internal_lock(heap);
+    if(tlsf_check(heap->heap_data)) {
+        valid = false;
+    }
+
+    if(tlsf_check_pool(tlsf_get_pool(heap->heap_data))) {
+        valid = false;
+    }
+
+    multi_heap_internal_unlock(heap);
+    return valid;
+}
+
+static void multi_heap_dump_tlsf(void* ptr, size_t size, int used, void* user)
+{
+    (void)user;
+    MULTI_HEAP_STDERR_PRINTF("Block %p data, size: %d bytes, Free: %s \n",
+                            (void *)ptr,
+                            size,
+                            used ? "No" : "Yes");
+}
+
+void multi_heap_dump(multi_heap_handle_t heap)
+{
+    assert(heap != NULL);
+
+    multi_heap_internal_lock(heap);
+    MULTI_HEAP_STDERR_PRINTF("Showing data for heap: %p \n", (void *)heap);
+    tlsf_walk_pool(tlsf_get_pool(heap->heap_data), multi_heap_dump_tlsf, NULL);
+    multi_heap_internal_unlock(heap);
+}
+
+size_t multi_heap_free_size_impl(multi_heap_handle_t heap)
+{
+    if (heap == NULL) {
+        return 0;
+    }
+
+    return heap->free_bytes;
+}
+
+size_t multi_heap_minimum_free_size_impl(multi_heap_handle_t heap)
+{
+    if (heap == NULL) {
+        return 0;
+    }
+
+    return heap->minimum_free_bytes;
+}
+
+static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* user)
+{
+    multi_heap_info_t *info = user;
+
+    if(used) {
+        info->allocated_blocks++;
+    } else {
+        info->free_blocks++;
+
+        if(size > info->largest_free_block ) {
+            info->largest_free_block = size;
+        }
+    }
+
+    info->total_blocks++;
+}
+
+void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
+{
+    memset(info, 0, sizeof(multi_heap_info_t));
+
+    if (heap == NULL) {
+        return;
+    }
+
+    multi_heap_internal_lock(heap);
+    tlsf_walk_pool(tlsf_get_pool(heap->heap_data), multi_heap_get_info_tlsf, info);
+    info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes;
+    info->minimum_free_bytes = heap->minimum_free_bytes;
+    info->total_free_bytes = heap->free_bytes;
+    info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
+    multi_heap_internal_unlock(heap);
+}
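
The public entry points aliased at the top of this file can be exercised end to end over a plain static buffer. A minimal sketch (the buffer name and 8 KiB size are illustrative; the size only has to exceed the TLSF overhead checked in multi_heap_register_impl()):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <multi_heap.h>

    static uint8_t s_heap_buf[8 * 1024];    /* region handed over to the allocator */

    void example_standalone_heap(void)
    {
        multi_heap_handle_t heap = multi_heap_register(s_heap_buf, sizeof(s_heap_buf));
        assert(heap != NULL);                /* NULL means the region was too small */

        void *p = multi_heap_malloc(heap, 128);
        assert(p != NULL);

        /* free_bytes shrinks by tlsf_block_size() of the allocation, as tracked above */
        size_t remaining = multi_heap_free_size(heap);
        assert(remaining < sizeof(s_heap_buf));

        multi_heap_free(heap, p);
    }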

+ 31 - 0
components/heap/multi_heap_config.h

@@ -0,0 +1,31 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#ifdef ESP_PLATFORM
+#include "sdkconfig.h"
+#include "soc/soc.h"
+#include "soc/soc_caps.h"
+#endif
+
+/* Configuration macros for multi-heap */
+
+#ifdef CONFIG_HEAP_POISONING_LIGHT
+#define MULTI_HEAP_POISONING
+#endif
+
+#ifdef CONFIG_HEAP_POISONING_COMPREHENSIVE
+#define MULTI_HEAP_POISONING
+#define MULTI_HEAP_POISONING_SLOW
+#endif

+ 76 - 0
components/heap/multi_heap_internal.h

@@ -0,0 +1,76 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+/* Opaque handle to a heap block */
+typedef const struct block_header_t *multi_heap_block_handle_t;
+
+/* Internal definitions for the "implementation" of the multi_heap API,
+   as defined in multi_heap.c.
+
+   If heap poisoning is disabled, these are aliased directly to the public API.
+
+   If heap poisoning is enabled, wrapper functions call each of these.
+*/
+
+void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size);
+
+/* Allocate a memory region of minimum `size` bytes, aligned on `alignment`. */
+void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment);
+
+/* Allocate a memory region of minimum `size` bytes, where memory's `offset` is aligned on `alignment`. */
+void *multi_heap_aligned_alloc_impl_offs(multi_heap_handle_t heap, size_t size, size_t alignment, size_t offset);
+
+void multi_heap_free_impl(multi_heap_handle_t heap, void *p);
+void *multi_heap_realloc_impl(multi_heap_handle_t heap, void *p, size_t size);
+multi_heap_handle_t multi_heap_register_impl(void *start, size_t size);
+void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info);
+size_t multi_heap_free_size_impl(multi_heap_handle_t heap);
+size_t multi_heap_minimum_free_size_impl(multi_heap_handle_t heap);
+size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p);
+void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block);
+
+/* Some internal functions for heap poisoning use */
+
+/* Check an allocated block's poison bytes are correct. Called by multi_heap_check(). */
+bool multi_heap_internal_check_block_poisoning(void *start, size_t size, bool is_free, bool print_errors);
+
+/* Fill a region of memory with the free or malloced pattern.
+   Called when merging blocks, to overwrite the old block header.
+*/
+void multi_heap_internal_poison_fill_region(void *start, size_t size, bool is_free);
+
+/* Allow heap poisoning to lock/unlock the heap to avoid race conditions
+   if multi_heap_check() is running concurrently.
+*/
+void multi_heap_internal_lock(multi_heap_handle_t heap);
+
+void multi_heap_internal_unlock(multi_heap_handle_t heap);
+
+/* Some internal functions for heap debugging code to use */
+
+/* Get the handle to the first (fixed free) block in a heap */
+multi_heap_block_handle_t multi_heap_get_first_block(multi_heap_handle_t heap);
+
+/* Get the handle to the next block in a heap, with validation */
+multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, multi_heap_block_handle_t block);
+
+/* Test if a heap block is free */
+bool multi_heap_is_free(const multi_heap_block_handle_t block);
+
+/* Get the data address of a heap block */
+void *multi_heap_get_block_address(multi_heap_block_handle_t block);
+
+/* Get the owner identification for a heap block */
+void *multi_heap_get_block_owner(multi_heap_block_handle_t block);
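
A minimal sketch of the debugging walk these declarations allow, assuming `heap` is a registered handle and that no other task allocates while the walk runs (take the heap lock otherwise):

    #include <stdio.h>
    #include <multi_heap.h>
    #include "multi_heap_internal.h"

    static void walk_heap_blocks(multi_heap_handle_t heap)
    {
        multi_heap_block_handle_t b = multi_heap_get_first_block(heap);
        while (b != NULL) {
            printf("block data @ %p  %s\n",
                   multi_heap_get_block_address(b),
                   multi_heap_is_free(b) ? "free" : "allocated");
            b = multi_heap_get_next_block(heap, b);   /* NULL once the last block is reached */
        }
    }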

+ 108 - 0
components/heap/multi_heap_platform.h

@@ -0,0 +1,108 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#ifdef MULTI_HEAP_FREERTOS
+
+#include "freertos/FreeRTOS.h"
+
+#include "sdkconfig.h"
+#include "esp_rom_sys.h"
+#if CONFIG_IDF_TARGET_ESP32
+#include "esp32/rom/ets_sys.h" // will be removed in idf v5.0
+#elif CONFIG_IDF_TARGET_ESP32S2
+#include "esp32s2/rom/ets_sys.h"
+#endif
+#include <assert.h>
+
+typedef portMUX_TYPE multi_heap_lock_t;
+
+/* Because malloc/free can happen inside an ISR context,
+   we need to use portmux spinlocks here not RTOS mutexes */
+#define MULTI_HEAP_LOCK(PLOCK) do {                         \
+        if((PLOCK) != NULL) {                               \
+            portENTER_CRITICAL((PLOCK));                    \
+        }                                                   \
+    } while(0)
+
+
+#define MULTI_HEAP_UNLOCK(PLOCK) do {                       \
+        if ((PLOCK) != NULL) {                              \
+            portEXIT_CRITICAL((PLOCK));                     \
+        }                                                   \
+    } while(0)
+
+#define MULTI_HEAP_LOCK_INIT(PLOCK) do {                    \
+        vPortCPUInitializeMutex((PLOCK));                   \
+    } while(0)
+
+#define MULTI_HEAP_LOCK_STATIC_INITIALIZER     portMUX_INITIALIZER_UNLOCKED
+
+/* It is not safe to use stdio while inside a portmux critical section
+   (it can deadlock), so we use the ROM equivalent functions. */
+
+#define MULTI_HEAP_PRINTF esp_rom_printf
+#define MULTI_HEAP_STDERR_PRINTF(MSG, ...) esp_rom_printf(MSG, __VA_ARGS__)
+
+inline static void multi_heap_assert(bool condition, const char *format, int line, intptr_t address)
+{
+    /* Can't use libc assert() here as it calls printf() which can cause another malloc() for a newlib lock.
+
+       Also, it's useful to be able to print the memory address where corruption was detected.
+    */
+#ifndef NDEBUG
+    if(!condition) {
+#ifndef CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT
+        esp_rom_printf(format, line, address);
+#endif  // CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT
+        abort();
+    }
+#else // NDEBUG
+    (void) condition;
+#endif // NDEBUG
+}
+
+#define MULTI_HEAP_ASSERT(CONDITION, ADDRESS) \
+    multi_heap_assert((CONDITION), "CORRUPT HEAP: multi_heap.c:%d detected at 0x%08x\n", \
+                      __LINE__, (intptr_t)(ADDRESS))
+
+#ifdef CONFIG_HEAP_TASK_TRACKING
+#include <freertos/task.h>
+#define MULTI_HEAP_BLOCK_OWNER TaskHandle_t task;
+#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD) (HEAD)->task = xTaskGetCurrentTaskHandle()
+#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) ((HEAD)->task)
+#else
+#define MULTI_HEAP_BLOCK_OWNER
+#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD)
+#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) (NULL)
+#endif
+
+#else // MULTI_HEAP_FREERTOS
+
+#include <assert.h>
+
+#define MULTI_HEAP_PRINTF printf
+#define MULTI_HEAP_STDERR_PRINTF(MSG, ...) fprintf(stderr, MSG, __VA_ARGS__)
+#define MULTI_HEAP_LOCK(PLOCK)  (void) (PLOCK)
+#define MULTI_HEAP_UNLOCK(PLOCK)  (void) (PLOCK)
+#define MULTI_HEAP_LOCK_INIT(PLOCK)  (void) (PLOCK)
+#define MULTI_HEAP_LOCK_STATIC_INITIALIZER  0
+
+#define MULTI_HEAP_ASSERT(CONDITION, ADDRESS) assert((CONDITION) && "Heap corrupt")
+
+#define MULTI_HEAP_BLOCK_OWNER
+#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD)
+#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) (NULL)
+
+#endif // MULTI_HEAP_FREERTOS
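
A minimal sketch of wiring a lock into a heap on the MULTI_HEAP_FREERTOS path, using the static initializer defined above; it assumes the file is built with MULTI_HEAP_FREERTOS defined and that `heap` came from multi_heap_register():

    #include <multi_heap.h>
    #include "multi_heap_platform.h"

    /* One spinlock per heap; MULTI_HEAP_LOCK()/MULTI_HEAP_UNLOCK() take it around
       every allocator operation, which keeps malloc/free usable from ISR context. */
    static multi_heap_lock_t s_heap_lock = MULTI_HEAP_LOCK_STATIC_INITIALIZER;

    void example_attach_lock(multi_heap_handle_t heap)
    {
        multi_heap_set_lock(heap, &s_heap_lock);
    }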

+ 426 - 0
components/heap/multi_heap_poisoning.c

@@ -0,0 +1,426 @@
+// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <string.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <sys/param.h>
+#include <multi_heap.h>
+#include "multi_heap_internal.h"
+
+/* Note: Keep platform-specific parts in this header, this source
+   file should depend on libc only */
+#include "multi_heap_platform.h"
+
+/* Defines compile-time configuration macros */
+#include "multi_heap_config.h"
+
+#ifdef MULTI_HEAP_POISONING
+
+/* Alias MULTI_HEAP_POISONING_SLOW to SLOW for better readability */
+#ifdef SLOW
+#error "external header has defined SLOW"
+#endif
+#ifdef MULTI_HEAP_POISONING_SLOW
+#define SLOW 1
+#endif
+
+#define MALLOC_FILL_PATTERN 0xce
+#define FREE_FILL_PATTERN 0xfe
+
+#define HEAD_CANARY_PATTERN 0xABBA1234
+#define TAIL_CANARY_PATTERN 0xBAAD5678
+
+
+#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
+
+typedef struct {
+    uint32_t head_canary;
+    MULTI_HEAP_BLOCK_OWNER
+    size_t alloc_size;
+} poison_head_t;
+
+typedef struct {
+    uint32_t tail_canary;
+} poison_tail_t;
+
+#define POISON_OVERHEAD (sizeof(poison_head_t) + sizeof(poison_tail_t))
+
+/* Given a "poisoned" region with pre-data header 'head', and actual data size 'alloc_size', fill in the head and tail
+   region checks.
+
+   Returns the pointer to the actual usable data buffer (ie after 'head')
+*/
+static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
+{
+    uint8_t *data = (uint8_t *)(&head[1]); /* start of data ie 'real' allocated buffer */
+    poison_tail_t *tail = (poison_tail_t *)(data + alloc_size);
+    head->alloc_size = alloc_size;
+    head->head_canary = HEAD_CANARY_PATTERN;
+    MULTI_HEAP_SET_BLOCK_OWNER(head);
+
+    uint32_t tail_canary = TAIL_CANARY_PATTERN;
+    if ((intptr_t)tail % sizeof(void *) == 0) {
+        tail->tail_canary = tail_canary;
+    } else {
+        /* unaligned tail_canary */
+        memcpy(&tail->tail_canary, &tail_canary, sizeof(uint32_t));
+    }
+
+    return data;
+}
+
+/* Given a pointer to some allocated data, check the head & tail poison structures (before & after it) that were
+   previously injected by poison_allocated_region().
+
+   Returns a pointer to the poison header structure, or NULL if the poison structures are corrupt.
+*/
+static poison_head_t *verify_allocated_region(void *data, bool print_errors)
+{
+    poison_head_t *head = (poison_head_t *)((intptr_t)data - sizeof(poison_head_t));
+    poison_tail_t *tail = (poison_tail_t *)((intptr_t)data + head->alloc_size);
+
+    /* check if the beginning of the data was overwritten */
+    if (head->head_canary != HEAD_CANARY_PATTERN) {
+        if (print_errors) {
+            MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Bad head at %p. Expected 0x%08x got 0x%08x\n", &head->head_canary,
+                   HEAD_CANARY_PATTERN, head->head_canary);
+        }
+        return NULL;
+    }
+
+    /* check if the end of the data was overrun */
+    uint32_t canary;
+    if ((intptr_t)tail % sizeof(void *) == 0) {
+        canary = tail->tail_canary;
+    } else {
+        /* tail is unaligned */
+        memcpy(&canary, &tail->tail_canary, sizeof(canary));
+    }
+    if (canary != TAIL_CANARY_PATTERN) {
+        if (print_errors) {
+            MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Bad tail at %p. Expected 0x%08x got 0x%08x\n", &tail->tail_canary,
+                   TAIL_CANARY_PATTERN, canary);
+        }
+        return NULL;
+    }
+
+    return head;
+}
+
+#ifdef SLOW
+/* Go through a region that should be filled entirely with the expected
+   fill byte pattern and verify it.
+
+   if expect_free is true, expect FREE_FILL_PATTERN otherwise MALLOC_FILL_PATTERN.
+
+   if swap_pattern is true, swap patterns in the buffer (ie replace MALLOC_FILL_PATTERN with FREE_FILL_PATTERN, and vice versa.)
+
+   Returns true if verification checks out.
+*/
+static bool verify_fill_pattern(void *data, size_t size, bool print_errors, bool expect_free, bool swap_pattern)
+{
+    const uint32_t FREE_FILL_WORD = (FREE_FILL_PATTERN << 24) | (FREE_FILL_PATTERN << 16) | (FREE_FILL_PATTERN << 8) | FREE_FILL_PATTERN;
+    const uint32_t MALLOC_FILL_WORD = (MALLOC_FILL_PATTERN << 24) | (MALLOC_FILL_PATTERN << 16) | (MALLOC_FILL_PATTERN << 8) | MALLOC_FILL_PATTERN;
+
+    const uint32_t EXPECT_WORD = expect_free ? FREE_FILL_WORD : MALLOC_FILL_WORD;
+    const uint32_t REPLACE_WORD = expect_free ? MALLOC_FILL_WORD : FREE_FILL_WORD;
+    bool valid = true;
+
+    /* Use 4-byte operations as much as possible */
+    if ((intptr_t)data % 4 == 0) {
+        uint32_t *p = data;
+        while (size >= 4) {
+            if (*p != EXPECT_WORD) {
+                if (print_errors) {
+                    MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Invalid data at %p. Expected 0x%08x got 0x%08x\n", p, EXPECT_WORD, *p);
+                }
+                valid = false;
+#ifndef NDEBUG
+                /* If an assertion is going to fail as soon as we're done verifying the pattern, leave the rest of the
+                   buffer contents as-is for better post-mortem analysis
+                */
+                swap_pattern = false;
+#endif
+            }
+            if (swap_pattern) {
+                *p = REPLACE_WORD;
+            }
+            p++;
+            size -= 4;
+        }
+        data = p;
+    }
+
+    uint8_t *p = data;
+    for (size_t i = 0; i < size; i++) {
+        if (p[i] != (uint8_t)EXPECT_WORD) {
+            if (print_errors) {
+                MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Invalid data at %p. Expected 0x%02x got 0x%02x\n", p, (uint8_t)EXPECT_WORD, *p);
+            }
+            valid = false;
+#ifndef NDEBUG
+            swap_pattern = false; // same as above
+#endif
+        }
+        if (swap_pattern) {
+            p[i] = (uint8_t)REPLACE_WORD;
+        }
+    }
+    return valid;
+}
+#endif
+
+void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment)
+{
+    if (!size) {
+        return NULL;
+    }
+
+    if (size > SIZE_MAX - POISON_OVERHEAD) {
+        return NULL;
+    }
+
+    multi_heap_internal_lock(heap);
+    poison_head_t *head = multi_heap_aligned_alloc_impl_offs(heap, size + POISON_OVERHEAD,
+                                                             alignment, sizeof(poison_head_t));
+    uint8_t *data = NULL;
+    if (head != NULL) {
+        data = poison_allocated_region(head, size);
+#ifdef SLOW
+        /* check everything we got back is FREE_FILL_PATTERN & swap for MALLOC_FILL_PATTERN */
+        bool ret = verify_fill_pattern(data, size, true, true, true);
+        assert( ret );
+#endif
+    } else {
+        multi_heap_internal_unlock(heap);
+        return NULL;
+    }
+
+    multi_heap_internal_unlock(heap);
+
+    return data;
+}
+
+void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
+{
+    if (!size) {
+        return NULL;
+    }
+
+    if(size > SIZE_MAX - POISON_OVERHEAD) {
+        return NULL;
+    }
+
+    multi_heap_internal_lock(heap);
+    poison_head_t *head = multi_heap_malloc_impl(heap, size + POISON_OVERHEAD);
+    uint8_t *data = NULL;
+    if (head != NULL) {
+        data = poison_allocated_region(head, size);
+#ifdef SLOW
+        /* check everything we got back is FREE_FILL_PATTERN & swap for MALLOC_FILL_PATTERN */
+        bool ret = verify_fill_pattern(data, size, true, true, true);
+        assert( ret );
+#endif
+    }
+
+    multi_heap_internal_unlock(heap);
+    return data;
+}
+
+void multi_heap_free(multi_heap_handle_t heap, void *p)
+{
+    if (p == NULL) {
+        return;
+    }
+    multi_heap_internal_lock(heap);
+
+    poison_head_t *head = verify_allocated_region(p, true);
+    assert(head != NULL);
+
+    #ifdef SLOW
+    /* replace everything with FREE_FILL_PATTERN, including the poison head/tail */
+    memset(head, FREE_FILL_PATTERN,
+           head->alloc_size + POISON_OVERHEAD);
+    #endif
+    multi_heap_free_impl(heap, head);
+
+    multi_heap_internal_unlock(heap);
+}
+
+void multi_heap_aligned_free(multi_heap_handle_t heap, void *p)
+{
+    multi_heap_free(heap, p);
+}
+
+void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size)
+{
+    poison_head_t *head = NULL;
+    poison_head_t *new_head;
+    void *result = NULL;
+
+    if(size > SIZE_MAX - POISON_OVERHEAD) {
+        return NULL;
+    }
+    if (p == NULL) {
+        return multi_heap_malloc(heap, size);
+    }
+    if (size == 0) {
+        multi_heap_free(heap, p);
+        return NULL;
+    }
+
+    /* p != NULL, size != 0 */
+    head = verify_allocated_region(p, true);
+    assert(head != NULL);
+
+    multi_heap_internal_lock(heap);
+
+#ifndef SLOW
+    new_head = multi_heap_realloc_impl(heap, head, size + POISON_OVERHEAD);
+    if (new_head != NULL) {
+        /* For "fast" poisoning, we only overwrite the head/tail of the new block so it's safe
+           to poison, so no problem doing this even if realloc resized in place.
+        */
+        result = poison_allocated_region(new_head, size);
+    }
+#else // SLOW
+    /* When slow poisoning is enabled, it becomes very fiddly to fill memory correctly on
+       realloc: the buffer may be moved (possibly to an address overlapping the old buffer),
+       grown, or shrunk in place.
+
+       For now we just malloc a new buffer, copy, and free. :|
+
+       Note: If this ever changes, multi_heap defrag realloc test should be enabled.
+    */
+    size_t orig_alloc_size = head->alloc_size;
+
+    new_head = multi_heap_malloc_impl(heap, size + POISON_OVERHEAD);
+    if (new_head != NULL) {
+        result = poison_allocated_region(new_head, size);
+        memcpy(result, p, MIN(size, orig_alloc_size));
+        multi_heap_free(heap, p);
+    }
+#endif
+
+    multi_heap_internal_unlock(heap);
+
+    return result;
+}
+
+void *multi_heap_get_block_address(multi_heap_block_handle_t block)
+{
+    char *head = multi_heap_get_block_address_impl(block);
+    return head + sizeof(poison_head_t);
+}
+
+void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
+{
+    return MULTI_HEAP_GET_BLOCK_OWNER((poison_head_t*)multi_heap_get_block_address_impl(block));
+}
+
+multi_heap_handle_t multi_heap_register(void *start, size_t size)
+{
+#ifdef SLOW
+    if (start != NULL) {
+        memset(start, FREE_FILL_PATTERN, size);
+    }
+#endif
+    return multi_heap_register_impl(start, size);
+}
+
+static inline void subtract_poison_overhead(size_t *arg) {
+    if (*arg > POISON_OVERHEAD) {
+        *arg -= POISON_OVERHEAD;
+    } else {
+        *arg = 0;
+    }
+}
+
+size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p)
+{
+    poison_head_t *head = verify_allocated_region(p, true);
+    assert(head != NULL);
+    size_t result = multi_heap_get_allocated_size_impl(heap, head);
+    return result;
+}
+
+void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info)
+{
+    multi_heap_get_info_impl(heap, info);
+    /* don't count the heap poison head & tail overhead in the allocated bytes size */
+    info->total_allocated_bytes -= info->allocated_blocks * POISON_OVERHEAD;
+    /* trim largest_free_block to account for poison overhead */
+    subtract_poison_overhead(&info->largest_free_block);
+    /* similarly, trim total_free_bytes so there's no suggestion that
+       a block this big may be available. */
+    subtract_poison_overhead(&info->total_free_bytes);
+    subtract_poison_overhead(&info->minimum_free_bytes);
+}
+
+size_t multi_heap_free_size(multi_heap_handle_t heap)
+{
+    size_t r = multi_heap_free_size_impl(heap);
+    subtract_poison_overhead(&r);
+    return r;
+}
+
+size_t multi_heap_minimum_free_size(multi_heap_handle_t heap)
+{
+    size_t r = multi_heap_minimum_free_size_impl(heap);
+    subtract_poison_overhead(&r);
+    return r;
+}
+
+/* Internal hooks used by multi_heap to manage poisoning, while keeping some modularity */
+
+bool multi_heap_internal_check_block_poisoning(void *start, size_t size, bool is_free, bool print_errors)
+{
+    if (is_free) {
+#ifdef SLOW
+        return verify_fill_pattern(start, size, print_errors, true, false);
+#else
+        return true; /* can only verify empty blocks in SLOW mode */
+#endif
+    } else {
+        void *data = (void *)((intptr_t)start + sizeof(poison_head_t));
+        poison_head_t *head = verify_allocated_region(data, print_errors);
+        if (head != NULL && head->alloc_size > size - POISON_OVERHEAD) {
+            /* block can be bigger than alloc_size, for reasons of alignment & fragmentation,
+               but block can never be smaller than head->alloc_size... */
+            if (print_errors) {
+                MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Size at %p expected <=0x%08x got 0x%08x\n", &head->alloc_size,
+                       size - POISON_OVERHEAD, head->alloc_size);
+            }
+            return false;
+        }
+        return head != NULL;
+    }
+}
+
+void multi_heap_internal_poison_fill_region(void *start, size_t size, bool is_free)
+{
+    memset(start, is_free ? FREE_FILL_PATTERN : MALLOC_FILL_PATTERN, size);
+}
+
+#else // !MULTI_HEAP_POISONING
+
+#ifdef MULTI_HEAP_POISONING_SLOW
+#error "MULTI_HEAP_POISONING_SLOW requires MULTI_HEAP_POISONING"
+#endif
+
+#endif  // MULTI_HEAP_POISONING
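
To make the head/tail bookkeeping concrete, a rough sketch of how the poisoning wrapper behaves from the caller's side; the request size is illustrative, and it assumes no other task touches the heap in between:

    #include <assert.h>
    #include <string.h>
    #include <multi_heap.h>

    static void poisoning_accounting_sketch(multi_heap_handle_t heap)
    {
        multi_heap_info_t before, after;
        multi_heap_get_info(heap, &before);

        /* The wrapper asks the inner heap for 100 + POISON_OVERHEAD bytes and
           returns a pointer just past the poison_head_t. */
        void *p = multi_heap_malloc(heap, 100);
        assert(p != NULL);
        memset(p, 0xA5, 100);   /* writing past byte 100 would clobber the tail canary */

        multi_heap_get_info(heap, &after);
        /* total_allocated_bytes already excludes the per-block poison overhead,
           so the delta reflects the 100-byte request plus TLSF rounding only. */
        assert(after.total_allocated_bytes >= before.total_allocated_bytes + 100);

        multi_heap_free(heap, p);   /* verifies both canaries before releasing the block */
    }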

+ 3 - 0
components/heap/test/CMakeLists.txt

@@ -0,0 +1,3 @@
+idf_component_register(SRC_DIRS "."
+                    PRIV_INCLUDE_DIRS "."
+                    PRIV_REQUIRES cmock test_utils heap)

+ 5 - 0
components/heap/test/component.mk

@@ -0,0 +1,5 @@
+#
+#Component Makefile
+#
+
+COMPONENT_ADD_LDFLAGS = -Wl,--whole-archive -l$(COMPONENT_NAME) -Wl,--no-whole-archive

+ 147 - 0
components/heap/test/test_aligned_alloc_caps.c

@@ -0,0 +1,147 @@
+/*
+ Tests for the capabilities-based memory allocator.
+*/
+
+#include <esp_types.h>
+#include <stdio.h>
+#include "unity.h"
+#include "esp_attr.h"
+#include "esp_heap_caps.h"
+#include "esp_spi_flash.h"
+#include <stdlib.h>
+#include <sys/param.h>
+#include <string.h>
+#include <malloc.h>
+
+TEST_CASE("Capabilities aligned allocator test", "[heap]")
+{
+    uint32_t alignments = 0;
+
+    printf("[ALIGNED_ALLOC] Allocating from default CAP: \n");
+
+    for(;alignments <= 1024; alignments++) {
+        uint8_t *buf = (uint8_t *)memalign(alignments, (alignments + 137));
+        if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
+            TEST_ASSERT( buf == NULL );
+            //printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
+        } else {
+            TEST_ASSERT( buf != NULL );
+            printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
+            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
+            //Address of obtained block must be aligned with selected value
+            TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
+
+            //Write some data, if it corrupts memory probably the heap
+            //canary verification will fail:
+            memset(buf, 0xA5, (alignments + 137));
+
+            free(buf);
+        }
+    }
+
+    //Alloc from a non permitted area:
+    uint32_t *not_permitted_buf = (uint32_t *)heap_caps_aligned_alloc(alignments, (alignments + 137), MALLOC_CAP_EXEC | MALLOC_CAP_32BIT);
+    TEST_ASSERT( not_permitted_buf == NULL );
+
+#if CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT
+    alignments = 0;
+    printf("[ALIGNED_ALLOC] Allocating from external memory: \n");
+
+    for(;alignments <= 1024 * 1024; alignments++) {
+        //Now try to take aligned memory from IRAM:
+        uint8_t *buf = (uint8_t *)heap_caps_aligned_alloc(alignments, 10*1024, MALLOC_CAP_SPIRAM);
+        if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
+            TEST_ASSERT( buf == NULL );
+            //printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
+        } else {
+            TEST_ASSERT( buf != NULL );
+            printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
+            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
+            //Address of obtained block must be aligned with selected value
+            TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
+
+            //Write some data, if it corrupts memory probably the heap
+            //canary verification will fail:
+            memset(buf, 0xA5, (10*1024));
+            heap_caps_free(buf);
+        }
+    }
+#endif
+
+}
+
+TEST_CASE("Capabilities aligned calloc test", "[heap]")
+{
+    uint32_t alignments = 0;
+
+    printf("[ALIGNED_ALLOC] Allocating from default CAP: \n");
+
+    for(;alignments <= 1024; alignments++) {
+        uint8_t *buf = (uint8_t *)heap_caps_aligned_calloc(alignments, 1, (alignments + 137), MALLOC_CAP_DEFAULT);
+        if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
+            TEST_ASSERT( buf == NULL );
+            //printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
+        } else {
+            TEST_ASSERT( buf != NULL );
+            printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
+            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
+            //Address of obtained block must be aligned with selected value
+            TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
+
+            //Write some data, if it corrupts memory probably the heap
+            //canary verification will fail:
+            memset(buf, 0xA5, (alignments + 137));
+
+            heap_caps_free(buf);
+        }
+    }
+
+    //Check if memory is initialized with zero:
+    uint8_t byte_array[1024];
+    memset(&byte_array, 0, sizeof(byte_array));
+    uint8_t *buf = (uint8_t *)heap_caps_aligned_calloc(1024, 1, 1024, MALLOC_CAP_DEFAULT);
+    TEST_ASSERT(memcmp(byte_array, buf, sizeof(byte_array)) == 0);
+    heap_caps_free(buf);
+
+    //Same size, but different chunk:
+    buf = (uint8_t *)heap_caps_aligned_calloc(1024, 1024, 1, MALLOC_CAP_DEFAULT);
+    TEST_ASSERT(memcmp(byte_array, buf, sizeof(byte_array)) == 0);
+    heap_caps_free(buf);
+
+    //Alloc from a non permitted area:
+    uint32_t *not_permitted_buf = (uint32_t *)heap_caps_aligned_calloc(alignments, 1, (alignments + 137), MALLOC_CAP_32BIT);
+    TEST_ASSERT( not_permitted_buf == NULL );
+
+#if CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT
+    alignments = 0;
+    printf("[ALIGNED_ALLOC] Allocating from external memory: \n");
+
+    for(;alignments <= 1024 * 1024; alignments++) {
+        //Now try to take aligned memory from IRAM:
+        uint8_t *buf = (uint8_t *)heap_caps_aligned_calloc(alignments, 1, 10*1024, MALLOC_CAP_SPIRAM);
+        if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
+            TEST_ASSERT( buf == NULL );
+            //printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
+        } else {
+            TEST_ASSERT( buf != NULL );
+            printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
+            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
+            //Address of obtained block must be aligned with selected value
+            TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
+
+            //Write some data, if it corrupts memory probably the heap
+            //canary verification will fail:
+            memset(buf, 0xA5, (10*1024));
+            heap_caps_free(buf);
+        }
+    }
+#endif
+
+}
+
+TEST_CASE("aligned_alloc(0) should return a NULL pointer", "[heap]")
+{
+    void *p;
+    p = heap_caps_aligned_alloc(32, 0, MALLOC_CAP_DEFAULT);
+    TEST_ASSERT(p == NULL);
+}

+ 108 - 0
components/heap/test/test_allocator_timings.c

@@ -0,0 +1,108 @@
+#include "freertos/FreeRTOS.h"
+#include <esp_types.h>
+#include <stdio.h>
+#include "unity.h"
+#include "esp_attr.h"
+#include "esp_heap_caps.h"
+#include <stdlib.h>
+#include <sys/param.h>
+#include <string.h>
+#include <test_utils.h>
+
+//This test only makes sense with heap poisoning disabled (neither light nor comprehensive enabled)
+#if !defined(CONFIG_HEAP_POISONING_COMPREHENSIVE) && !defined(CONFIG_HEAP_POISONING_LIGHT)
+
+#define NUM_POINTERS 128
+#define ITERATIONS 10000
+
+TEST_CASE("Heap many random allocations timings", "[heap]")
+{
+    void *p[NUM_POINTERS] = { 0 };
+    size_t s[NUM_POINTERS] = { 0 };
+
+    uint32_t cycles_before;
+    uint64_t alloc_time_average = 0;
+    uint64_t free_time_average = 0;
+    uint64_t realloc_time_average = 0;
+
+    for (int i = 0; i < ITERATIONS; i++) {
+        uint8_t n = esp_random() % NUM_POINTERS;
+
+        if (esp_random() % 4 == 0) {
+            /* 1 in 4 iterations, try to realloc the buffer instead
+               of using malloc/free
+            */
+            size_t new_size = esp_random() % 1024;
+
+            cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
+            void *new_p = heap_caps_realloc(p[n], new_size, MALLOC_CAP_DEFAULT);
+            realloc_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
+
+            printf("realloc %p -> %p (%zu -> %zu) time spent cycles: %lld \n", p[n], new_p, s[n], new_size, realloc_time_average);
+            heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true);
+            if (new_size == 0 || new_p != NULL) {
+                p[n] = new_p;
+                s[n] = new_size;
+                if (new_size > 0) {
+                    memset(p[n], n, new_size);
+                }
+            }
+            continue;
+        }
+
+        if (p[n] != NULL) {
+            if (s[n] > 0) {
+                /* Verify pre-existing contents of p[n] */
+                uint8_t compare[s[n]];
+                memset(compare, n, s[n]);
+                TEST_ASSERT(( memcmp(compare, p[n], s[n]) == 0 ));
+            }
+            TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true));
+
+            cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
+            heap_caps_free(p[n]);
+            free_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
+
+            printf("freed %p (%zu) time spent cycles: %lld\n", p[n], s[n], free_time_average);
+
+            if (!heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true)) {
+                printf("FAILED iteration %d after freeing %p\n", i, p[n]);
+                heap_caps_dump(MALLOC_CAP_DEFAULT);
+                TEST_ASSERT(0);
+            }
+        }
+
+        s[n] = rand() % 1024;
+        heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true);
+        cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
+        p[n] = heap_caps_malloc(s[n], MALLOC_CAP_DEFAULT);
+        alloc_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
+
+        printf("malloc %p (%zu) time spent cycles: %lld \n", p[n], s[n], alloc_time_average);
+
+        if (!heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true)) {
+            printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]);
+            heap_caps_dump(MALLOC_CAP_DEFAULT);
+            TEST_ASSERT(0);
+        }
+
+        if (p[n] != NULL) {
+            memset(p[n], n, s[n]);
+        }
+    }
+
+    for (int i = 0; i < NUM_POINTERS; i++) {
+        cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
+        heap_caps_free( p[i]);
+        free_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
+
+        if (!heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true)) {
+            printf("FAILED during cleanup after freeing %p\n", p[i]);
+            heap_caps_dump(MALLOC_CAP_DEFAULT);
+            TEST_ASSERT(0);
+        }
+    }
+
+    TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true));
+}
+#endif

+ 74 - 0
components/heap/test/test_diram.c

@@ -0,0 +1,74 @@
+/*
+ Tests for D/IRAM support in heap capability allocator
+*/
+
+#include <esp_types.h>
+#include <stdio.h>
+#include "unity.h"
+#include "esp_heap_caps.h"
+#include "soc/soc_memory_layout.h"
+
+#define ALLOC_SZ 1024
+
+static void *malloc_block_diram(uint32_t caps)
+{
+    void *attempts[256] = { 0 }; // Allocate up to 256 ALLOC_SZ blocks to exhaust all non-D/IRAM memory temporarily
+    int count = 0;
+    void *result;
+
+    while(count < sizeof(attempts)/sizeof(void *)) {
+        result = heap_caps_malloc(ALLOC_SZ, caps);
+        TEST_ASSERT_NOT_NULL_MESSAGE(result, "not enough free heap to perform test");
+
+        if (esp_ptr_in_diram_dram(result) || esp_ptr_in_diram_iram(result)) {
+            break;
+        }
+
+        attempts[count] = result;
+        result = NULL;
+        count++;
+    }
+
+    for (int i = 0; i < count; i++) {
+        free(attempts[i]);
+    }
+
+    TEST_ASSERT_NOT_NULL_MESSAGE(result, "not enough D/IRAM memory is free");
+    return result;
+}
+
+TEST_CASE("Allocate D/IRAM as DRAM", "[heap]")
+{
+    uint32_t *dram = malloc_block_diram(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
+
+    for (int i = 0; i < ALLOC_SZ / sizeof(uint32_t); i++) {
+        uint32_t v = i + 0xAAAA;
+        dram[i] = v;
+        volatile uint32_t *iram = esp_ptr_diram_dram_to_iram(dram + i);
+        TEST_ASSERT_EQUAL(v, dram[i]);
+        TEST_ASSERT_EQUAL(v, *iram);
+        *iram = UINT32_MAX;
+        TEST_ASSERT_EQUAL(UINT32_MAX, *iram);
+        TEST_ASSERT_EQUAL(UINT32_MAX, dram[i]);
+    }
+
+    free(dram);
+}
+
+TEST_CASE("Allocate D/IRAM as IRAM", "[heap]")
+{
+    uint32_t *iram = malloc_block_diram(MALLOC_CAP_EXEC);
+
+    for (int i = 0; i < ALLOC_SZ / sizeof(uint32_t); i++) {
+        uint32_t v = i + 0xEEE;
+        iram[i] = v;
+        volatile uint32_t *dram = esp_ptr_diram_iram_to_dram(iram + i);
+        TEST_ASSERT_EQUAL_HEX32(v, iram[i]);
+        TEST_ASSERT_EQUAL_HEX32(v, *dram);
+        *dram = UINT32_MAX;
+        TEST_ASSERT_EQUAL_HEX32(UINT32_MAX, *dram);
+        TEST_ASSERT_EQUAL_HEX32(UINT32_MAX, iram[i]);
+    }
+
+    free(iram);
+}

+ 164 - 0
components/heap/test/test_heap_trace.c

@@ -0,0 +1,164 @@
+/*
+ Generic test for heap tracing support
+
+ Only compiled in if CONFIG_HEAP_TRACING is set
+*/
+
+#include <esp_types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "sdkconfig.h"
+#include "unity.h"
+
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+
+#ifdef CONFIG_HEAP_TRACING
+// only compile in heap tracing tests if tracing is enabled
+
+#include "esp_heap_trace.h"
+
+TEST_CASE("heap trace leak check", "[heap]")
+{
+    heap_trace_record_t recs[8];
+    heap_trace_init_standalone(recs, 8);
+
+    printf("Leak check test\n"); // Print something before trace starts, or stdout allocations skew total counts
+    fflush(stdout);
+
+    heap_trace_start(HEAP_TRACE_LEAKS);
+
+    void *a = malloc(64);
+    memset(a, '3', 64);
+
+    void *b = malloc(96);
+    memset(b, '4', 11);
+
+    printf("a.address %p vs %p b.address %p vs %p\n", a, recs[0].address, b, recs[1].address);
+
+    heap_trace_dump();
+    TEST_ASSERT_EQUAL(2, heap_trace_get_count());
+
+    heap_trace_record_t trace_a, trace_b;
+    heap_trace_get(0, &trace_a);
+    heap_trace_get(1, &trace_b);
+
+    printf("trace_a.address %p trace_bb.address %p\n", trace_a.address, trace_b.address);
+
+    TEST_ASSERT_EQUAL_PTR(a, trace_a.address);
+    TEST_ASSERT_EQUAL_PTR(b, trace_b.address);
+
+    TEST_ASSERT_EQUAL_PTR(recs[0].address, trace_a.address);
+    TEST_ASSERT_EQUAL_PTR(recs[1].address, trace_b.address);
+
+    free(a);
+
+    TEST_ASSERT_EQUAL(1, heap_trace_get_count());
+
+    heap_trace_get(0, &trace_b);
+    TEST_ASSERT_EQUAL_PTR(b, trace_b.address);
+
+    /* the trace buffer drops the record for trace_a when it is freed,
+       so trace_b is now at the head of the buffer */
+    TEST_ASSERT_EQUAL_PTR(recs[0].address, trace_b.address);
+
+    heap_trace_stop();
+}
+
+TEST_CASE("heap trace wrapped buffer check", "[heap]")
+{
+    const size_t N = 8;
+    heap_trace_record_t recs[N];
+    heap_trace_init_standalone(recs, N);
+
+    heap_trace_start(HEAP_TRACE_LEAKS);
+
+    void *ptrs[N+1];
+    for (int i = 0; i < N+1; i++) {
+        ptrs[i] = malloc(i*3);
+    }
+
+    // because other mallocs happen as part of this control flow,
+    // we can't guarantee all N entries of ptrs[] are in the heap trace buffer,
+    // but we should guarantee at least the last one is
+    bool saw_last_ptr = false;
+    for (int i = 0; i < N; i++) {
+        heap_trace_record_t rec;
+        heap_trace_get(i, &rec);
+        if (rec.address == ptrs[N-1]) {
+            saw_last_ptr = true;
+        }
+    }
+    TEST_ASSERT(saw_last_ptr);
+
+    void *other = malloc(6);
+
+    heap_trace_dump();
+
+    for (int i = 0; i < N+1; i++) {
+        free(ptrs[i]);
+    }
+
+    heap_trace_dump();
+
+    bool saw_other = false;
+
+    for (int i = 0; i < heap_trace_get_count(); i++) {
+        heap_trace_record_t rec;
+        heap_trace_get(i, &rec);
+
+        // none of ptr[]s should be in the heap trace any more
+        for (int j = 0; j < N+1; j++) {
+            TEST_ASSERT_NOT_EQUAL(ptrs[j], rec.address);
+        }
+        if (rec.address == other) {
+            saw_other = true;
+        }
+    }
+
+    // 'other' pointer should be somewhere in the leak dump
+    TEST_ASSERT(saw_other);
+
+    heap_trace_stop();
+}
+
+static void print_floats_task(void *ignore)
+{
+    heap_trace_start(HEAP_TRACE_ALL);
+    char buf[16] = { };
+    volatile float f = 12.3456;
+    sprintf(buf, "%.4f", f);
+    TEST_ASSERT_EQUAL_STRING("12.3456", buf);
+    heap_trace_stop();
+
+    vTaskDelete(NULL);
+}
+
+TEST_CASE("can trace allocations made by newlib", "[heap]")
+{
+    const size_t N = 8;
+    heap_trace_record_t recs[N];
+    heap_trace_init_standalone(recs, N);
+
+    /* Verifying that newlib code performs an allocation is very fiddly:
+
+       - Printing a float allocates data associated with the task, but only the
+       first time a task prints a float of this length. So we do it in a one-shot task
+       to avoid the possibility that it has already happened.
+
+       - If newlib is updated this test may start failing if the printf() implementation
+       changes. (This version passes for both nano & regular formatting in newlib 2.2.0)
+
+       - We also do the tracing in the task so we only capture things directly related to it.
+    */
+
+    xTaskCreate(print_floats_task, "print_float", 4096, NULL, 5, NULL);
+    vTaskDelay(10);
+
+    /* has to be at least a few as newlib allocates via multiple different function calls */
+    TEST_ASSERT(heap_trace_get_count() > 3);
+}
+
+
+#endif

+ 60 - 0
components/heap/test/test_leak.c

@@ -0,0 +1,60 @@
+/*
+ Tests for a leak tag
+*/
+
+#include <stdio.h>
+#include "unity.h"
+#include "esp_heap_caps_init.h"
+#include "esp_system.h"
+#include <stdlib.h>
+
+
+static char* check_calloc(int size)
+{
+    char *arr = calloc(size, sizeof(char));
+    TEST_ASSERT_NOT_NULL(arr);
+    return arr;
+}
+
+TEST_CASE("Check for leaks (no leak)", "[heap]")
+{
+    char *arr = check_calloc(1000);
+    free(arr);
+}
+
+TEST_CASE("Check for leaks (leak)", "[heap][ignore]")
+{
+    check_calloc(1000);
+}
+
+TEST_CASE("Not check for leaks", "[heap][leaks]")
+{
+    check_calloc(1000);
+}
+
+TEST_CASE("Set a leak level = 7016", "[heap][leaks=7016]")
+{
+    check_calloc(7000);
+}
+
+static void test_fn(void)
+{
+    check_calloc(1000);
+}
+
+TEST_CASE_MULTIPLE_STAGES("Not check for leaks in MULTIPLE_STAGES mode", "[heap][leaks]", test_fn, test_fn, test_fn);
+
+TEST_CASE_MULTIPLE_STAGES("Check for leaks in MULTIPLE_STAGES mode (leak)", "[heap][ignore]", test_fn, test_fn, test_fn);
+
+static void test_fn2(void)
+{
+    check_calloc(1000);
+    esp_restart();
+}
+
+static void test_fn3(void)
+{
+    check_calloc(1000);
+}
+
+TEST_CASE_MULTIPLE_STAGES("Check for leaks in MULTIPLE_STAGES mode (manual reset)", "[heap][leaks][reset=SW_CPU_RESET, SW_CPU_RESET]", test_fn2, test_fn2, test_fn3);

+ 134 - 0
components/heap/test/test_malloc.c

@@ -0,0 +1,134 @@
+/*
+ Generic test for malloc/free
+*/
+
+#include <esp_types.h>
+#include <stdio.h>
+
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+#include "freertos/semphr.h"
+#include "freertos/queue.h"
+#include "unity.h"
+#include "esp_heap_caps.h"
+
+#include "sdkconfig.h"
+
+
+static int **allocatedMem;
+static int noAllocated;
+
+
+static int tryAllocMem(void) {
+    int i, j;
+    const int allocateMaxK=1024*5; //try to allocate a max of 5MiB
+
+    allocatedMem=malloc(sizeof(int *)*allocateMaxK);
+    if (!allocatedMem) return 0;
+
+    for (i=0; i<allocateMaxK; i++) {
+        allocatedMem[i]=malloc(1024);
+        if (allocatedMem[i]==NULL) break;
+        for (j=0; j<1024/4; j++) allocatedMem[i][j]=(0xdeadbeef);
+    }
+    noAllocated=i;
+    return i;
+}
+
+
+static void tryAllocMemFree(void) {
+    int i, j;
+    for (i=0; i<noAllocated; i++) {
+        for (j=0; j<1024/4; j++) {
+            TEST_ASSERT(allocatedMem[i][j]==(0xdeadbeef));
+        }
+        free(allocatedMem[i]);
+    }
+    free(allocatedMem);
+}
+
+
+TEST_CASE("Malloc/overwrite, then free all available DRAM", "[heap]")
+{
+    int m1=0, m2=0;
+    m1=tryAllocMem();
+    tryAllocMemFree();
+    m2=tryAllocMem();
+    tryAllocMemFree();
+    printf("Could allocate %dK on first try, %dK on 2nd try.\n", m1, m2);
+    TEST_ASSERT(m1==m2);
+}
+
+#if CONFIG_SPIRAM_USE_MALLOC
+
+#if (CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL > 1024)
+TEST_CASE("Check if reserved DMA pool still can allocate even when malloc()'ed memory is exhausted", "[heap]")
+{
+    char** dmaMem=malloc(sizeof(char*)*512);
+    assert(dmaMem);
+    int m=tryAllocMem();
+    int i=0;
+    for (i=0; i<512; i++) {
+        dmaMem[i]=heap_caps_malloc(1024, MALLOC_CAP_DMA);
+        if (dmaMem[i]==NULL) break;
+    }
+    for (int j=0; j<i; j++) free(dmaMem[j]);
+    free(dmaMem);
+    tryAllocMemFree();
+    printf("Could allocate %dK of DMA memory after allocating all of %dK of normal memory.\n", i, m);
+    TEST_ASSERT(i);
+}
+#endif
+
+#endif
+
+
+/* As you see, we are desperately trying to outsmart the compiler, so that it
+ * doesn't warn about oversized allocations in the next two unit tests.
+ * To be removed when we switch to GCC 8.2 and add
+ * -Wno-alloc-size-larger-than=PTRDIFF_MAX to CFLAGS for this file.
+ */
+void* (*g_test_malloc_ptr)(size_t) = &malloc;
+void* (*g_test_calloc_ptr)(size_t, size_t) = &calloc;
+
+void* test_malloc_wrapper(size_t size)
+{
+    return (*g_test_malloc_ptr)(size);
+}
+
+void* test_calloc_wrapper(size_t count, size_t size)
+{
+    return (*g_test_calloc_ptr)(count, size);
+}
+
+TEST_CASE("alloc overflows should all fail", "[heap]")
+{
+    /* allocates 8 bytes if size_t overflows */
+    TEST_ASSERT_NULL(test_calloc_wrapper(SIZE_MAX / 2 + 4, 2));
+
+    /* will overflow if any poisoning is enabled
+       (should fail for sensible OOM reasons, otherwise) */
+    TEST_ASSERT_NULL(test_malloc_wrapper(SIZE_MAX - 1));
+    TEST_ASSERT_NULL(test_calloc_wrapper(SIZE_MAX - 1, 1));
+
+    /* will overflow when the size is rounded up to word align it */
+    TEST_ASSERT_NULL(heap_caps_malloc(SIZE_MAX-1, MALLOC_CAP_32BIT));
+
+    TEST_ASSERT_NULL(heap_caps_malloc(SIZE_MAX-1, MALLOC_CAP_EXEC));
+}
+
+TEST_CASE("unreasonable allocs should all fail", "[heap]")
+{
+    TEST_ASSERT_NULL(test_calloc_wrapper(16, 1024*1024));
+    TEST_ASSERT_NULL(test_malloc_wrapper(16*1024*1024));
+    TEST_ASSERT_NULL(test_malloc_wrapper(SIZE_MAX / 2));
+    TEST_ASSERT_NULL(test_malloc_wrapper(SIZE_MAX - 256));
+    TEST_ASSERT_NULL(test_malloc_wrapper(xPortGetFreeHeapSize() - 1));
+}
+
+TEST_CASE("malloc(0) should return a NULL pointer", "[heap]")
+{
+    void *p;
+    p = malloc(0);
+    TEST_ASSERT(p == NULL);
+}

+ 247 - 0
components/heap/test/test_malloc_caps.c

@@ -0,0 +1,247 @@
+/*
+ Tests for the capabilities-based memory allocator.
+*/
+
+#include <esp_types.h>
+#include <stdio.h>
+#include "unity.h"
+#include "esp_attr.h"
+#include "esp_heap_caps.h"
+#include "esp_spi_flash.h"
+#include <stdlib.h>
+#include <sys/param.h>
+
+#ifndef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
+TEST_CASE("Capabilities allocator test", "[heap]")
+{
+    char *m1, *m2[10];
+    int x;
+    size_t free8start, free32start, free8, free32;
+
+    /* It's important we printf() something before we take the empty heap sizes,
+       as the first printf() in a task allocates heap resources... */
+    printf("Testing capabilities allocator...\n");
+
+    free8start = heap_caps_get_free_size(MALLOC_CAP_8BIT);
+    free32start = heap_caps_get_free_size(MALLOC_CAP_32BIT);
+    printf("Free 8bit-capable memory (start): %dK, 32-bit capable memory %dK\n", free8start, free32start);
+    TEST_ASSERT(free32start >= free8start);
+
+    printf("Allocating 10K of 8-bit capable RAM\n");
+    m1= heap_caps_malloc(10*1024, MALLOC_CAP_8BIT);
+    printf("--> %p\n", m1);
+    free8 = heap_caps_get_free_size(MALLOC_CAP_8BIT);
+    free32 = heap_caps_get_free_size(MALLOC_CAP_32BIT);
+    printf("Free 8bit-capable memory (both reduced): %dK, 32-bit capable memory %dK\n", free8, free32);
+    //Both should have gone down by 10K; 8bit capable ram is also 32-bit capable
+    TEST_ASSERT(free8<=(free8start-10*1024));
+    TEST_ASSERT(free32<=(free32start-10*1024));
+    //Assume we got DRAM back
+    TEST_ASSERT((((int)m1)&0xFF000000)==0x3F000000);
+    free(m1);
+
+    //The goal here is to allocate from IRAM. Since there is no external IRAM (yet)
+    //the following gives size of IRAM-only (not D/IRAM) memory.
+    size_t free_iram = heap_caps_get_free_size(MALLOC_CAP_INTERNAL) -
+                           heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
+    size_t alloc32 = MIN(free_iram / 2, 10*1024) & (~3);
+    if(free_iram) {
+        printf("Freeing; allocating %u bytes of 32K-capable RAM\n", alloc32);
+        m1 = heap_caps_malloc(alloc32, MALLOC_CAP_32BIT);
+        printf("--> %p\n", m1);
+        //Check that we got IRAM back
+        TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000);
+        free8 = heap_caps_get_free_size(MALLOC_CAP_8BIT);
+        free32 = heap_caps_get_free_size(MALLOC_CAP_32BIT);
+        printf("Free 8bit-capable memory (after 32-bit): %dK, 32-bit capable memory %dK\n", free8, free32);
+        //Only 32-bit should have gone down by alloc32: 32-bit isn't necessarily 8bit capable
+        TEST_ASSERT(free32<=(free32start-alloc32));
+        TEST_ASSERT(free8==free8start);
+        free(m1);
+    } else {
+        printf("This platform has no 32-bit only capable RAM, jumping to next test \n");
+    }
+
+    printf("Allocating impossible caps\n");
+    m1= heap_caps_malloc(10*1024, MALLOC_CAP_8BIT|MALLOC_CAP_EXEC);
+    printf("--> %p\n", m1);
+    TEST_ASSERT(m1==NULL);
+
+    if(free_iram) {
+        printf("Testing changeover iram -> dram");
+        // priorities will exhaust IRAM first, then start allocating from DRAM
+        for (x=0; x<10; x++) {
+            m2[x]= heap_caps_malloc(alloc32, MALLOC_CAP_32BIT);
+            printf("--> %p\n", m2[x]);
+        }
+        TEST_ASSERT((((int)m2[0])&0xFF000000)==0x40000000);
+        TEST_ASSERT((((int)m2[9])&0xFF000000)==0x3F000000);
+
+    } else {
+        printf("This platform has no IRAM-only so changeover will never occur, jumping to next test\n");
+    }
+
+    printf("Test if allocating executable code still gives IRAM, even with dedicated IRAM region depleted\n");
+    if(free_iram) {
+        // (the allocation should come from D/IRAM)
+        free_iram = heap_caps_get_free_size(MALLOC_CAP_EXEC);
+        m1= heap_caps_malloc(MIN(free_iram / 2, 10*1024), MALLOC_CAP_EXEC);
+        printf("--> %p\n", m1);
+        TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000);
+        for (x=0; x<10; x++) free(m2[x]);
+
+    } else {
+        // (the allocation should come from D/IRAM)
+        free_iram = heap_caps_get_free_size(MALLOC_CAP_EXEC);
+        m1= heap_caps_malloc(MIN(free_iram / 2, 10*1024), MALLOC_CAP_EXEC);
+        printf("--> %p\n", m1);
+        TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000);
+    }
+
+    free(m1);
+    printf("Done.\n");
+}
+#endif
+
+#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
+TEST_CASE("IRAM_8BIT capability test", "[heap]")
+{
+    uint8_t *ptr;
+    size_t free_size, free_size32, largest_free_size;
+
+    /* need to print something as first printf allocates some heap */
+    printf("IRAM_8BIT capability test\n");
+
+    free_size = heap_caps_get_free_size(MALLOC_CAP_IRAM_8BIT);
+    free_size32 = heap_caps_get_free_size(MALLOC_CAP_32BIT);
+
+    largest_free_size = heap_caps_get_largest_free_block(MALLOC_CAP_IRAM_8BIT);
+
+    ptr = heap_caps_malloc(largest_free_size, MALLOC_CAP_IRAM_8BIT);
+
+    TEST_ASSERT((((int)ptr)&0xFF000000)==0x40000000);
+
+    TEST_ASSERT(heap_caps_get_free_size(MALLOC_CAP_IRAM_8BIT) == (free_size - heap_caps_get_allocated_size(ptr)));
+    TEST_ASSERT(heap_caps_get_free_size(MALLOC_CAP_32BIT) == (free_size32 - heap_caps_get_allocated_size(ptr)));
+
+    free(ptr);
+}
+#endif
+
+TEST_CASE("heap_caps metadata test", "[heap]")
+{
+    /* need to print something as first printf allocates some heap */
+    printf("heap_caps metadata test\n");
+    heap_caps_print_heap_info(MALLOC_CAP_8BIT);
+
+    multi_heap_info_t original;
+    heap_caps_get_info(&original, MALLOC_CAP_8BIT);
+
+    void *b = heap_caps_malloc(original.largest_free_block, MALLOC_CAP_8BIT);
+    TEST_ASSERT_NOT_NULL(b);
+
+    printf("After allocating %d bytes:\n", original.largest_free_block);
+    heap_caps_print_heap_info(MALLOC_CAP_8BIT);
+
+    multi_heap_info_t after;
+    heap_caps_get_info(&after, MALLOC_CAP_8BIT);
+    TEST_ASSERT(after.largest_free_block <= original.largest_free_block);
+    TEST_ASSERT(after.total_free_bytes <= original.total_free_bytes);
+
+    free(b);
+    heap_caps_get_info(&after, MALLOC_CAP_8BIT);
+
+    printf("\n\n After test, heap status:\n");
+    heap_caps_print_heap_info(MALLOC_CAP_8BIT);
+
+    /* Allow some leeway here, because LWIP sometimes allocates up to 144 bytes in the background
+       as part of timer management.
+    */
+    TEST_ASSERT_INT32_WITHIN(200, after.total_free_bytes, original.total_free_bytes);
+    TEST_ASSERT_INT32_WITHIN(200, after.largest_free_block, original.largest_free_block);
+    TEST_ASSERT(after.minimum_free_bytes < original.total_free_bytes);
+}
+
+/* Small function runs from IRAM to check that malloc/free/realloc
+   all work OK when cache is disabled...
+*/
+static IRAM_ATTR __attribute__((noinline)) bool iram_malloc_test(void)
+{
+    spi_flash_guard_get()->start(); // Disables flash cache
+
+    bool result = true;
+    void *x = heap_caps_malloc(64, MALLOC_CAP_EXEC);
+    result = result && (x != NULL);
+    void *y = heap_caps_realloc(x, 32, MALLOC_CAP_EXEC);
+    result = result && (y != NULL);
+    heap_caps_free(y);
+
+    spi_flash_guard_get()->end(); // Re-enables flash cache
+
+    return result;
+}
+
+
+TEST_CASE("heap_caps_xxx functions work with flash cache disabled", "[heap]")
+{
+    TEST_ASSERT( iram_malloc_test() );
+}
+
+#ifdef CONFIG_HEAP_ABORT_WHEN_ALLOCATION_FAILS
+TEST_CASE("When enabled, allocation operation failure generates an abort", "[heap][reset=abort,SW_CPU_RESET]")
+{
+    const size_t stupid_allocation_size = (128 * 1024 * 1024);
+    void *ptr = heap_caps_malloc(stupid_allocation_size, MALLOC_CAP_DEFAULT);
+    (void)ptr;
+    TEST_FAIL_MESSAGE("should not be reached");
+}
+#endif
+
+static bool called_user_failed_hook = false;
+
+void heap_caps_alloc_failed_hook(size_t requested_size, uint32_t caps, const char *function_name)
+{
+    printf("%s was called but failed to allocate %d bytes with 0x%X capabilities. \n",function_name, requested_size, caps);
+    called_user_failed_hook = true;
+}
+
+TEST_CASE("user provided alloc failed hook must be called when allocation fails", "[heap]")
+{
+    TEST_ASSERT(heap_caps_register_failed_alloc_callback(heap_caps_alloc_failed_hook) == ESP_OK);
+
+    const size_t stupid_allocation_size = (128 * 1024 * 1024);
+    void *ptr = heap_caps_malloc(stupid_allocation_size, MALLOC_CAP_DEFAULT);
+    TEST_ASSERT(called_user_failed_hook != false);
+
+    called_user_failed_hook = false;
+    ptr = heap_caps_realloc(ptr, stupid_allocation_size, MALLOC_CAP_DEFAULT);
+    TEST_ASSERT(called_user_failed_hook != false);
+
+    called_user_failed_hook = false;
+    ptr = heap_caps_aligned_alloc(0x200, stupid_allocation_size, MALLOC_CAP_DEFAULT);
+    TEST_ASSERT(called_user_failed_hook != false);
+
+    (void)ptr;
+}
+
+TEST_CASE("allocation with invalid capability should also trigger the alloc failed hook", "[heap]")
+{
+    const size_t allocation_size = 64;
+    const uint32_t invalid_cap = MALLOC_CAP_INVALID;
+
+    TEST_ASSERT(heap_caps_register_failed_alloc_callback(heap_caps_alloc_failed_hook) == ESP_OK);
+
+    called_user_failed_hook = false;
+    void *ptr = heap_caps_malloc(allocation_size, invalid_cap);
+    TEST_ASSERT(called_user_failed_hook != false);
+
+    called_user_failed_hook = false;
+    ptr = heap_caps_realloc(ptr, allocation_size, invalid_cap);
+    TEST_ASSERT(called_user_failed_hook != false);
+
+    called_user_failed_hook = false;
+    ptr = heap_caps_aligned_alloc(0x200, allocation_size, invalid_cap);
+    TEST_ASSERT(called_user_failed_hook != false);
+
+    (void)ptr;
+}

+ 67 - 0
components/heap/test/test_realloc.c

@@ -0,0 +1,67 @@
+/*
+ Generic test for realloc
+*/
+
+#include <stdlib.h>
+#include <string.h>
+#include "unity.h"
+#include "sdkconfig.h"
+#include "esp_heap_caps.h"
+#include "soc/soc_memory_layout.h"
+
+
+#ifndef CONFIG_HEAP_POISONING_COMPREHENSIVE
+/* (can't realloc in place if comprehensive is enabled) */
+
+TEST_CASE("realloc shrink buffer in place", "[heap]")
+{
+    void *x = malloc(64);
+    TEST_ASSERT(x);
+    void *y = realloc(x, 48);
+    TEST_ASSERT_EQUAL_PTR(x, y);
+}
+
+#endif
+
+#ifndef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
+TEST_CASE("realloc shrink buffer with EXEC CAPS", "[heap]")
+{
+    const size_t buffer_size = 64;
+
+    void *x = heap_caps_malloc(buffer_size, MALLOC_CAP_EXEC);
+    TEST_ASSERT(x);
+    void *y = heap_caps_realloc(x, buffer_size - 16, MALLOC_CAP_EXEC);
+    TEST_ASSERT(y);
+
+    //y needs to fall in a compatible memory area of IRAM:
+    TEST_ASSERT(esp_ptr_executable(y)|| esp_ptr_in_iram(y) || esp_ptr_in_diram_iram(y));
+
+    free(y);
+}
+
+TEST_CASE("realloc move data to a new heap type", "[heap]")
+{
+    const char *test = "I am some test content to put in the heap";
+    char buf[64];
+    memset(buf, 0xEE, 64);
+    strlcpy(buf, test, 64);
+
+    char *a = malloc(64);
+    memcpy(a, buf, 64);
+    // move data from 'a' to IRAM
+    char *b = heap_caps_realloc(a, 64, MALLOC_CAP_EXEC);
+    TEST_ASSERT_NOT_NULL(b);
+    TEST_ASSERT_NOT_EQUAL(a, b);
+    TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_INVALID, true));
+    TEST_ASSERT_EQUAL_HEX32_ARRAY(buf, b, 64 / sizeof(uint32_t));
+
+    // Move data back to DRAM
+    char *c = heap_caps_realloc(b, 48, MALLOC_CAP_8BIT);
+    TEST_ASSERT_NOT_NULL(c);
+    TEST_ASSERT_NOT_EQUAL(b, c);
+    TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_INVALID, true));
+    TEST_ASSERT_EQUAL_HEX8_ARRAY(buf, c, 48);
+
+    free(c);
+}
+#endif

+ 72 - 0
components/heap/test/test_runtime_heap_reg.c

@@ -0,0 +1,72 @@
+/*
+ Tests for registering new heap memory at runtime
+*/
+
+#include <stdio.h>
+#include "unity.h"
+#include "esp_heap_caps_init.h"
+#include "esp_system.h"
+#include <stdlib.h>
+
+
+/* NOTE: This is not a well-formed unit test, it leaks memory */
+TEST_CASE("Allocate new heap at runtime", "[heap][ignore]")
+{
+    const size_t BUF_SZ = 1000;
+    const size_t HEAP_OVERHEAD_MAX = 200;
+    void *buffer = malloc(BUF_SZ);
+    TEST_ASSERT_NOT_NULL(buffer);
+    uint32_t before_free = esp_get_free_heap_size();
+    TEST_ESP_OK( heap_caps_add_region((intptr_t)buffer, (intptr_t)buffer + BUF_SZ) );
+    uint32_t after_free = esp_get_free_heap_size();
+    printf("Before %u after %u\n", before_free, after_free);
+    /* allow for some 'heap overhead' from accounting structures */
+    TEST_ASSERT(after_free >= before_free + BUF_SZ - HEAP_OVERHEAD_MAX);
+}
+
+/* NOTE: This is not a well-formed unit test, it leaks memory and
+   may fail if run twice in a row without a reset.
+*/
+TEST_CASE("Allocate new heap with new capability", "[heap][ignore]")
+{
+    const size_t BUF_SZ = 100;
+#ifdef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
+    const size_t ALLOC_SZ = 32;
+#else
+    const size_t ALLOC_SZ = 64; // More than half of BUF_SZ
+#endif
+    const uint32_t MALLOC_CAP_INVENTED = (1 << 30); /* this must be unused in esp_heap_caps.h */
+
+    /* no memory exists to provide this capability */
+    TEST_ASSERT_NULL( heap_caps_malloc(ALLOC_SZ, MALLOC_CAP_INVENTED) );
+
+    void *buffer = malloc(BUF_SZ);
+    TEST_ASSERT_NOT_NULL(buffer);
+    uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS] = { MALLOC_CAP_INVENTED };
+    TEST_ESP_OK( heap_caps_add_region_with_caps(caps, (intptr_t)buffer, (intptr_t)buffer + BUF_SZ) );
+
+    /* ta-da, it's now possible! */
+    TEST_ASSERT_NOT_NULL( heap_caps_malloc(ALLOC_SZ, MALLOC_CAP_INVENTED) );
+}
+
+/* NOTE: This is not a well-formed unit test.
+ * If run twice without a reset, it will fail.
+ */
+
+TEST_CASE("Add .bss memory to heap region runtime", "[heap][ignore]")
+{
+#define BUF_SZ 1000
+#define HEAP_OVERHEAD_MAX 200
+    static uint8_t s_buffer[BUF_SZ];
+
+    printf("s_buffer start %08x end %08x\n", (intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ);
+    uint32_t before_free = esp_get_free_heap_size();
+    TEST_ESP_OK( heap_caps_add_region((intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ) );
+    uint32_t after_free = esp_get_free_heap_size();
+    printf("Before %u after %u\n", before_free, after_free);
+    /* allow for some 'heap overhead' from accounting structures */
+    TEST_ASSERT(after_free >= before_free + BUF_SZ - HEAP_OVERHEAD_MAX);
+
+    /* Adding the same region twice must fail */
+    TEST_ASSERT( (heap_caps_add_region((intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ) != ESP_OK) );
+}

+ 54 - 0
components/heap/test_multi_heap_host/Makefile

@@ -0,0 +1,54 @@
+TEST_PROGRAM=test_multi_heap
+all: $(TEST_PROGRAM)
+
+ifneq ($(filter clean,$(MAKECMDGOALS)),)
+.NOTPARALLEL:  # prevent make clean racing the other targets
+endif
+
+SOURCE_FILES = $(abspath \
+    ../multi_heap.c \
+    ../heap_tlsf.c \
+	../multi_heap_poisoning.c \
+	test_multi_heap.cpp \
+	main.cpp \
+    )
+
+INCLUDE_FLAGS = -I../include -I../../../tools/catch
+
+GCOV ?= gcov
+
+CPPFLAGS += $(INCLUDE_FLAGS) -D CONFIG_LOG_DEFAULT_LEVEL -g -fstack-protector-all -m32  -DCONFIG_HEAP_POISONING_COMPREHENSIVE
+CFLAGS += -Wall -Werror -fprofile-arcs -ftest-coverage
+CXXFLAGS += -std=c++11 -Wall -Werror  -fprofile-arcs -ftest-coverage
+LDFLAGS += -lstdc++ -fprofile-arcs -ftest-coverage -m32
+
+OBJ_FILES = $(filter %.o, $(SOURCE_FILES:.cpp=.o) $(SOURCE_FILES:.c=.o))
+
+COVERAGE_FILES = $(OBJ_FILES:.o=.gc*)
+
+$(TEST_PROGRAM): $(OBJ_FILES)
+	g++ $(LDFLAGS) -o $(TEST_PROGRAM) $(OBJ_FILES)
+
+$(OUTPUT_DIR):
+	mkdir -p $(OUTPUT_DIR)
+
+test: $(TEST_PROGRAM)
+	./$(TEST_PROGRAM)
+
+$(COVERAGE_FILES): $(TEST_PROGRAM) test
+
+coverage.info: $(COVERAGE_FILES)
+	find ../ -name "*.gcno" -exec $(GCOV) -r -pb {} +
+	lcov --capture --directory $(abspath ../) --no-external --output-file coverage.info --gcov-tool $(GCOV)
+
+coverage_report: coverage.info
+	genhtml coverage.info --output-directory coverage_report
+	@echo "Coverage report is in coverage_report/index.html"
+
+clean:
+	rm -f $(OBJ_FILES) $(TEST_PROGRAM)
+	rm -f $(COVERAGE_FILES) *.gcov
+	rm -rf coverage_report/
+	rm -f coverage.info
+
+.PHONY: clean all test

+ 2 - 0
components/heap/test_multi_heap_host/main.cpp

@@ -0,0 +1,2 @@
+#define CATCH_CONFIG_MAIN
+#include "catch.hpp"

+ 20 - 0
components/heap/test_multi_heap_host/test_all_configs.sh

@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# Run the test suite with all configurations enabled
+#
+
+FAIL=0
+
+for FLAGS in "CONFIG_HEAP_POISONING_NONE" "CONFIG_HEAP_POISONING_LIGHT" "CONFIG_HEAP_POISONING_COMPREHENSIVE" ; do
+    echo "==== Testing with config: ${FLAGS} ===="
+    CPPFLAGS="-D${FLAGS}" make clean test || FAIL=1
+done
+
+make clean
+
+if [ $FAIL == 0 ]; then
+    echo "All configurations passed"
+else
+    echo "Some configurations failed, see log."
+    exit 1
+fi

+ 508 - 0
components/heap/test_multi_heap_host/test_multi_heap.cpp

@@ -0,0 +1,508 @@
+#include "catch.hpp"
+#include "multi_heap.h"
+
+#include "../multi_heap_config.h"
+
+#include <string.h>
+#include <assert.h>
+
+static void *__malloc__(size_t bytes) 
+{
+    return malloc(bytes);
+}
+
+static void __free__(void *ptr) 
+{
+    free(ptr);
+}
+
+/* Insurance against accidentally using libc heap functions in tests */
+#undef free
+#define free #error
+#undef malloc
+#define malloc #error
+#undef calloc
+#define calloc #error
+#undef realloc
+#define realloc #error
+
+TEST_CASE("multi_heap simple allocations", "[multi_heap]")
+{
+    uint8_t small_heap[4 * 1024];
+
+    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
+
+    size_t test_alloc_size = (multi_heap_free_size(heap) + 4) / 2;
+
+    printf("New heap:\n");
+    multi_heap_dump(heap);
+    printf("*********************\n");
+
+    uint8_t *buf = (uint8_t *)multi_heap_malloc(heap, test_alloc_size);
+
+    printf("small_heap %p buf %p\n", small_heap, buf);
+    REQUIRE( buf != NULL );
+    REQUIRE((intptr_t)buf >= (intptr_t)small_heap);
+    REQUIRE( (intptr_t)buf < (intptr_t)(small_heap + sizeof(small_heap)));
+
+    REQUIRE( multi_heap_get_allocated_size(heap, buf) >= test_alloc_size );
+    REQUIRE( multi_heap_get_allocated_size(heap, buf) < test_alloc_size + 16);
+
+    memset(buf, 0xEE, test_alloc_size);
+
+    REQUIRE( multi_heap_malloc(heap, test_alloc_size) == NULL );
+
+    multi_heap_free(heap, buf);
+
+    printf("Empty?\n");
+    multi_heap_dump(heap);
+    printf("*********************\n");
+
+    /* Now there should be space for another allocation */
+    buf = (uint8_t *)multi_heap_malloc(heap, test_alloc_size);
+    REQUIRE( buf != NULL );
+    multi_heap_free(heap, buf);
+
+    REQUIRE( multi_heap_free_size(heap) > multi_heap_minimum_free_size(heap) );
+}
+
+
+TEST_CASE("multi_heap fragmentation", "[multi_heap]")
+{
+    uint8_t small_heap[4 * 1024];
+    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
+
+    const size_t alloc_size = 128;
+
+    void *p[4];
+    for (int i = 0; i < 4; i++) {
+        multi_heap_dump(heap);
+        REQUIRE(  multi_heap_check(heap, true) );
+        p[i] = multi_heap_malloc(heap, alloc_size);
+        printf("%d = %p ****->\n", i, p[i]);
+        multi_heap_dump(heap);
+        REQUIRE( p[i] != NULL );
+    }
+
+    printf("allocated %p %p %p %p\n", p[0], p[1], p[2], p[3]);
+
+    REQUIRE( multi_heap_malloc(heap, alloc_size * 5) == NULL ); /* no room to allocate 5*alloc_size now */
+
+    printf("4 allocations:\n");
+    multi_heap_dump(heap);
+    printf("****************\n");
+
+    multi_heap_free(heap, p[0]);
+    multi_heap_free(heap, p[1]);
+    multi_heap_free(heap, p[3]);
+
+    printf("1 allocations:\n");
+    multi_heap_dump(heap);
+    printf("****************\n");
+
+    void *big = multi_heap_malloc(heap, alloc_size * 3);
+    //Blocks in TLSF are organized differently, so this check no longer applies
+    multi_heap_free(heap, big);
+
+    multi_heap_free(heap, p[2]);
+
+    printf("0 allocations:\n");
+    multi_heap_dump(heap);
+    printf("****************\n");
+
+    big = multi_heap_malloc(heap, alloc_size * 2);
+    //Blocks in TLSF are organized differently, so this check no longer applies
+    multi_heap_free(heap, big);
+}
+
+/* Test that malloc/free does not leave free space fragmented */
+TEST_CASE("multi_heap defrag", "[multi_heap]")
+{
+    void *p[4];
+    uint8_t small_heap[4 * 1024];
+    multi_heap_info_t info, info2;
+    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
+
+    printf("0 ---\n");
+    multi_heap_dump(heap);
+    REQUIRE( multi_heap_check(heap, true) );
+    multi_heap_get_info(heap, &info);
+    REQUIRE( 0 == info.allocated_blocks );
+    REQUIRE( 1 == info.free_blocks );
+
+    printf("1 ---\n");
+    p[0] = multi_heap_malloc(heap, 128);
+    p[1] = multi_heap_malloc(heap, 32);
+    multi_heap_dump(heap);
+    REQUIRE( multi_heap_check(heap, true) );
+
+    printf("2 ---\n");
+    multi_heap_free(heap, p[0]);
+    p[2] = multi_heap_malloc(heap, 64);
+    multi_heap_dump(heap);
+    REQUIRE( p[2] == p[0] );
+    REQUIRE( multi_heap_check(heap, true) );
+
+    printf("3 ---\n");
+    multi_heap_free(heap, p[2]);
+    p[3] = multi_heap_malloc(heap, 32);
+    multi_heap_dump(heap);
+    REQUIRE( p[3] == p[0] );
+    REQUIRE( multi_heap_check(heap, true) );
+
+    multi_heap_get_info(heap, &info2);
+    REQUIRE( 2 == info2.allocated_blocks );
+    REQUIRE( 2 == info2.free_blocks );
+
+    multi_heap_free(heap, p[0]);
+    multi_heap_free(heap, p[1]);
+    multi_heap_get_info(heap, &info2);
+    REQUIRE( 0 == info2.allocated_blocks );
+    REQUIRE( 1 == info2.free_blocks );
+    REQUIRE( info.total_free_bytes == info2.total_free_bytes );
+}
+
+/* Test that realloc does not leave free space fragmented
+   Note: With fancy poisoning, realloc is implemented as malloc-copy-free and this test does not apply.
+ */
+#ifndef MULTI_HEAP_POISONING_SLOW
+TEST_CASE("multi_heap defrag realloc", "[multi_heap]")
+{
+    void *p[4];
+    uint8_t small_heap[4 * 1024];
+    multi_heap_info_t info, info2;
+    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
+
+    printf("0 ---\n");
+    multi_heap_dump(heap);
+    REQUIRE( multi_heap_check(heap, true) );
+    multi_heap_get_info(heap, &info);
+    REQUIRE( 0 == info.allocated_blocks );
+    REQUIRE( 1 == info.free_blocks );
+
+    printf("1 ---\n");
+    p[0] = multi_heap_malloc(heap, 128);
+    p[1] = multi_heap_malloc(heap, 32);
+    multi_heap_dump(heap);
+    REQUIRE( multi_heap_check(heap, true) );
+
+    printf("2 ---\n");
+    p[2] = multi_heap_realloc(heap, p[0], 64);
+    multi_heap_dump(heap);
+    REQUIRE( p[2] == p[0] );
+    REQUIRE( multi_heap_check(heap, true) );
+
+    printf("3 ---\n");
+    p[3] = multi_heap_realloc(heap, p[2], 32);
+    multi_heap_dump(heap);
+    REQUIRE( p[3] == p[0] );
+    REQUIRE( multi_heap_check(heap, true) );
+
+    multi_heap_get_info(heap, &info2);
+    REQUIRE( 2 == info2.allocated_blocks );
+    REQUIRE( 2 == info2.free_blocks );
+
+    multi_heap_free(heap, p[0]);
+    multi_heap_free(heap, p[1]);
+    multi_heap_get_info(heap, &info2);
+    REQUIRE( 0 == info2.allocated_blocks );
+    REQUIRE( 1 == info2.free_blocks );
+    REQUIRE( info.total_free_bytes == info2.total_free_bytes );
+}
+#endif
+
+
+void multi_heap_allocation_impl(int heap_size)
+{
+    uint8_t *big_heap = (uint8_t *) __malloc__(2*heap_size);
+    const int NUM_POINTERS = 64;
+
+    printf("Running multi-allocation test with heap_size %d...\n", heap_size);
+
+    REQUIRE( big_heap );
+    multi_heap_handle_t heap = multi_heap_register(big_heap, heap_size);
+
+    void *p[NUM_POINTERS] = { 0 };
+    size_t s[NUM_POINTERS] = { 0 };
+
+    const size_t initial_free = multi_heap_free_size(heap);
+
+    const int ITERATIONS = 10000;
+
+    for (int i = 0; i < ITERATIONS; i++) {
+        /* check all pointers allocated so far are valid inside big_heap */
+        for (int j = 0; j < NUM_POINTERS; j++) {
+            if (p[j] != NULL) {
+                REQUIRE( p[j] >= big_heap );
+                REQUIRE( p[j] < big_heap + heap_size );
+            }
+        }
+
+        uint8_t n = rand() % NUM_POINTERS;
+
+        if (rand() % 4 == 0) {
+            /* 1 in 4 iterations, try to realloc the buffer instead
+               of using malloc/free
+            */
+            size_t new_size = rand() % 1024;
+            void *new_p = multi_heap_realloc(heap, p[n], new_size);
+            printf("realloc %p -> %p (%zu -> %zu)\n", p[n], new_p, s[n], new_size);
+            multi_heap_check(heap, true);
+            if (new_size == 0 || new_p != NULL) {
+                p[n] = new_p;
+                s[n] = new_size;
+                if (new_size > 0) {
+                    REQUIRE( p[n] >= big_heap );
+                    REQUIRE( p[n] < big_heap + heap_size );
+                    memset(p[n], n, new_size);
+                }
+            }
+            continue;
+        }
+        if (p[n] != NULL) {
+            if (s[n] > 0) {
+                /* Verify pre-existing contents of p[n] */
+                uint8_t compare[s[n]];
+                memset(compare, n, s[n]);
+                /*REQUIRE*/assert( memcmp(compare, p[n], s[n]) == 0 );
+            }
+            REQUIRE( multi_heap_check(heap, true) );
+            multi_heap_free(heap, p[n]);
+            printf("freed %p (%zu)\n", p[n], s[n]);
+            if (!multi_heap_check(heap, true)) {
+                printf("FAILED iteration %d after freeing %p\n", i, p[n]);
+                multi_heap_dump(heap);
+                REQUIRE(0);
+            }
+        }
+
+        s[n] = rand() % 1024;
+        REQUIRE( multi_heap_check(heap, true) );
+        p[n] = multi_heap_malloc(heap, s[n]);
+        printf("malloc %p (%zu)\n", p[n], s[n]);
+        if (p[n] != NULL) {
+            REQUIRE( p[n] >= big_heap );
+            REQUIRE( p[n] < big_heap + heap_size );
+        }
+        if (!multi_heap_check(heap, true)) {
+            printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]);
+            multi_heap_dump(heap);
+            REQUIRE(0);
+        }
+        if (p[n] != NULL) {
+            memset(p[n], n, s[n]);
+        }
+    }
+
+    for (int i = 0; i < NUM_POINTERS; i++) {
+        multi_heap_free(heap, p[i]);
+        if (!multi_heap_check(heap, true)) {
+            printf("FAILED during cleanup after freeing %p\n", p[i]);
+            multi_heap_dump(heap);
+            REQUIRE(0);
+        }
+    }
+
+    REQUIRE( initial_free == multi_heap_free_size(heap) );
+    __free__(big_heap);
+}
+
+TEST_CASE("multi_heap many random allocations", "[multi_heap]")
+{
+    size_t poolsize[] = { 15, 255, 4095, 8191 };
+    for (size_t i = 0; i < sizeof(poolsize)/sizeof(size_t); i++) {
+        multi_heap_allocation_impl(poolsize[i] * 1024);
+    }	
+}
+
+TEST_CASE("multi_heap_get_info() function", "[multi_heap]")
+{
+    uint8_t heapdata[4 * 1024];
+    multi_heap_handle_t heap = multi_heap_register(heapdata, sizeof(heapdata));
+    multi_heap_info_t before, after, freed;
+
+    multi_heap_get_info(heap, &before);
+    printf("before: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n",
+           before.total_free_bytes,
+           before.total_allocated_bytes,
+           before.largest_free_block,
+           before.minimum_free_bytes,
+           before.allocated_blocks,
+           before.free_blocks,
+           before.total_blocks);
+
+    REQUIRE( 0 == before.allocated_blocks );
+    REQUIRE( 0 == before.total_allocated_bytes );
+    REQUIRE( before.total_free_bytes == before.minimum_free_bytes );
+
+    void *x = multi_heap_malloc(heap, 32);
+    multi_heap_get_info(heap, &after);
+    printf("after: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n",
+           after.total_free_bytes,
+           after.total_allocated_bytes,
+           after.largest_free_block,
+           after.minimum_free_bytes,
+           after.allocated_blocks,
+           after.free_blocks,
+           after.total_blocks);
+
+    REQUIRE( 1 == after.allocated_blocks );
+    REQUIRE( 32 == after.total_allocated_bytes );
+    REQUIRE( after.minimum_free_bytes < before.minimum_free_bytes);
+    REQUIRE( after.minimum_free_bytes > 0 );
+
+    multi_heap_free(heap, x);
+    multi_heap_get_info(heap, &freed);
+    printf("freed: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n",
+           freed.total_free_bytes,
+           freed.total_allocated_bytes,
+           freed.largest_free_block,
+           freed.minimum_free_bytes,
+           freed.allocated_blocks,
+           freed.free_blocks,
+           freed.total_blocks);
+
+    REQUIRE( 0 == freed.allocated_blocks );
+    REQUIRE( 0 == freed.total_allocated_bytes );
+    REQUIRE( before.total_free_bytes == freed.total_free_bytes );
+    REQUIRE( after.minimum_free_bytes == freed.minimum_free_bytes );
+}
+
+TEST_CASE("multi_heap minimum-size allocations", "[multi_heap]")
+{
+    uint8_t heapdata[4096];
+    void *p[sizeof(heapdata) / sizeof(void *)] = {NULL};
+    const size_t NUM_P = sizeof(p) / sizeof(void *);
+    size_t allocated_size = 0;
+    multi_heap_handle_t heap = multi_heap_register(heapdata, sizeof(heapdata));
+    size_t before_free = multi_heap_free_size(heap);
+
+    size_t i;
+    for (i = 0; i < NUM_P; i++) {
+        //TLSF minimum block size is 4 bytes
+        p[i] = multi_heap_malloc(heap, 1);
+        if (p[i] == NULL) {
+            break;
+        }
+    }
+
+    REQUIRE( i < NUM_P); // Should have run out of heap before we ran out of pointers
+    printf("Allocated %zu minimum size chunks\n", i);
+
+    REQUIRE(multi_heap_free_size(heap) < before_free);
+    multi_heap_check(heap, true);
+
+    /* Free in random order */
+    bool has_allocations = true;
+    while (has_allocations) {
+        i = rand() % NUM_P;
+        multi_heap_free(heap, p[i]);
+        p[i] = NULL;
+        multi_heap_check(heap, true);
+
+        has_allocations = false;
+        for (i = 0; i < NUM_P && !has_allocations; i++) {
+            has_allocations = (p[i] != NULL);
+        }
+    }
+
+    /* all freed! */
+    REQUIRE( before_free == multi_heap_free_size(heap) );
+}
+
+TEST_CASE("multi_heap_realloc()", "[multi_heap]")
+{
+    const uint32_t PATTERN = 0xABABDADA;
+    uint8_t small_heap[4 * 1024];
+    multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
+
+    uint32_t *a = (uint32_t *)multi_heap_malloc(heap, 64);
+    uint32_t *b = (uint32_t *)multi_heap_malloc(heap, 32);
+    REQUIRE( a != NULL );
+    REQUIRE( b != NULL );
+    REQUIRE( b > a); /* 'b' takes the block after 'a' */
+
+    *a = PATTERN;
+
+    uint32_t *c = (uint32_t *)multi_heap_realloc(heap, a, 72);
+    REQUIRE( multi_heap_check(heap, true));
+    REQUIRE(  c  != NULL );
+    REQUIRE( c > b ); /* 'a' moves, 'c' takes the block after 'b' */
+    REQUIRE( *c == PATTERN );
+
+#ifndef MULTI_HEAP_POISONING_SLOW
+    // "Slow" poisoning implementation doesn't reallocate in place, so these
+    // tests will fail...
+
+    uint32_t *d = (uint32_t *)multi_heap_realloc(heap, c, 36);
+    REQUIRE( multi_heap_check(heap, true) );
+    REQUIRE( c == d ); /* 'c' block should be shrunk in-place */
+    REQUIRE( *d == PATTERN);
+
+    uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 64);
+    REQUIRE( multi_heap_check(heap, true));
+    REQUIRE( a == e ); /* 'e' takes the block formerly occupied by 'a' */
+
+    multi_heap_free(heap, d);
+    uint32_t *f = (uint32_t *)multi_heap_realloc(heap, b, 64);
+    REQUIRE( multi_heap_check(heap, true) );
+    REQUIRE( f == b ); /* 'b' should be extended in-place, over space formerly occupied by 'd' */
+
+#ifdef MULTI_HEAP_POISONING
+#define TOO_MUCH 7420 + 1
+#else
+#define TOO_MUCH 7420 + 1
+#endif
+    /* not enough contiguous space left in the heap */
+    uint32_t *g = (uint32_t *)multi_heap_realloc(heap, e, TOO_MUCH);
+    REQUIRE( g == NULL );
+
+    multi_heap_free(heap, f);
+    /* try again */
+    g = (uint32_t *)multi_heap_realloc(heap, e, 128);
+    REQUIRE( multi_heap_check(heap, true) );
+    REQUIRE( e == g ); /* 'g' extends 'e' in place, into the space formerly held by 'f' */
+#endif
+}
+
+// TLSF only accepts heaps aligned to 4-byte boundary so
+// only aligned allocation tests make sense.
+TEST_CASE("multi_heap aligned allocations", "[multi_heap]")
+{
+    uint8_t test_heap[4 * 1024];
+    multi_heap_handle_t heap = multi_heap_register(test_heap, sizeof(test_heap));
+    uint32_t aligments = 0; // starts from alignment by 4-byte boundary
+    size_t old_size = multi_heap_free_size(heap);
+    size_t leakage = 1024;
+    printf("[ALIGNED_ALLOC] heap_size before: %d \n", old_size);
+
+    printf("New heap:\n");
+    multi_heap_dump(heap);
+    printf("*********************\n");
+
+    for(;aligments <= 256; aligments++) {
+
+        //Use some stupid size value to test correct alignment even in strange
+        //memory layout objects:
+        uint8_t *buf = (uint8_t *)multi_heap_aligned_alloc(heap, (aligments + 137), aligments );
+        if(((aligments & (aligments - 1)) != 0) || (!aligments)) {
+            REQUIRE( buf == NULL );
+        } else {
+            REQUIRE( buf != NULL );
+            REQUIRE((intptr_t)buf >= (intptr_t)test_heap);
+            REQUIRE((intptr_t)buf < (intptr_t)(test_heap + sizeof(test_heap)));
+
+            printf("[ALIGNED_ALLOC] alignment required: %u \n", aligments);
+            printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
+            //Address of obtained block must be aligned with selected value
+            REQUIRE(((intptr_t)buf & (aligments - 1)) == 0);
+
+            //Write some data, if it corrupts memory probably the heap
+            //canary verification will fail:
+            memset(buf, 0xA5, (aligments + 137));
+
+            multi_heap_free(heap, buf);
+        }
+    }
+
+    printf("[ALIGNED_ALLOC] heap_size after: %d \n", multi_heap_free_size(heap));
+    REQUIRE((old_size - multi_heap_free_size(heap)) <= leakage);
+}

+ 1 - 1
components/platform_config/CMakeLists.txt

@@ -1,6 +1,6 @@
 idf_component_register(	SRC_DIRS . 
 						INCLUDE_DIRS . 
-						PRIV_REQUIRES   tools newlib  console esp_common freertos  services
+						PRIV_REQUIRES   tools newlib  console esp_common freertos tools
 						REQUIRES nvs_flash json
 )
 

+ 1 - 1
components/platform_config/nvs_utilities.c

@@ -14,7 +14,7 @@
 #include "nvs_flash.h"
 #include "nvs_utilities.h"
 #include "platform_config.h"
-#include "globdefs.h"
+#include "tools.h"
 
 const char current_namespace[] = "config";
 const char settings_partition[] = "settings";

+ 1 - 2
components/platform_config/platform_config.c

@@ -21,7 +21,6 @@
 #include "platform_config.h"
 #include "nvs_utilities.h"
 #include "platform_esp32.h"
-#include "trace.h"
 #include <stdio.h>
 #include <string.h>
 #include "esp_system.h"
@@ -38,7 +37,7 @@
 #include "cJSON.h"
 #include "freertos/timers.h"
 #include "freertos/event_groups.h"
-#include "globdefs.h"
+#include "tools.h"
 
 #define CONFIG_COMMIT_DELAY 1000
 #define LOCK_MAX_WAIT 20*CONFIG_COMMIT_DELAY

+ 18 - 3
components/platform_config/platform_config.h

@@ -8,9 +8,20 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-#ifdef __cplusplus
-}
-#endif
+
+#define PARSE_PARAM(S,P,C,V) do {												\
+	char *__p;																	\
+	if ((__p = strcasestr(S, P)) && (__p = strchr(__p, C))) V = atoi(__p+1); 	\
+} while (0)
+
+#define PARSE_PARAM_STR(S,P,C,V,I) do {						\
+	char *__p;                                              \
+	if ((__p = strstr(S, P)) && (__p = strchr(__p, C))) {	\
+		while (*++__p == ' ');								\
+		sscanf(__p,"%" #I "[^,]", V);						\
+	}   													\
+} while (0)
+
 #define DECLARE_SET_DEFAULT(t) void config_set_default_## t (const char *key, t  value);
 #define DECLARE_GET_NUM(t) esp_err_t config_get_## t (const char *key, t *  value);
 #ifndef FREE_RESET
@@ -45,3 +56,7 @@ esp_err_t config_set_value(nvs_type_t nvs_type, const char *key, const void * va
 nvs_type_t  config_get_item_type(cJSON * entry);
 void * config_safe_alloc_get_entry_value(nvs_type_t nvs_type, cJSON * entry);
 
+#ifdef __cplusplus
+}
+#endif
+
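The PARSE_PARAM and PARSE_PARAM_STR macros added above scan a comma-separated configuration string for a "name=value" (or "name:value") pair and, when the key is present, write the value into the supplied variable; the accessors.c hunk further down uses them for the DAC and Ethernet settings. A minimal usage sketch in C, with a purely hypothetical config string (the model name and pin numbers are made up for illustration):

	#include <stdio.h>
	#include <string.h>
	#include "platform_config.h"	/* PARSE_PARAM, PARSE_PARAM_STR */

	static void parse_example(void) {
		char cfg[] = "model=TAS57xx,bck=33,ws=25,do=32,sda=27,scl=26,i2c=53";
		int bck = -1, sda = -1, i2c = -1;	/* defaults kept if a key is missing */
		char model[32] = "";

		PARSE_PARAM(cfg, "bck", '=', bck);				/* bck   -> 33 */
		PARSE_PARAM(cfg, "sda", '=', sda);				/* sda   -> 27 */
		PARSE_PARAM(cfg, "i2c", '=', i2c);				/* i2c   -> 53 */
		PARSE_PARAM_STR(cfg, "model", '=', model, 31);	/* model -> "TAS57xx", stops at ',' */

		printf("model=%s bck=%d sda=%d i2c=%d\n", model, bck, sda, i2c);
	}

Because the macros leave the target untouched when the key is absent, callers pre-fill defaults first; config_get_i2s_from_str() in accessors.c does the same with its memset(..., 0xff, ...) before parsing.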

+ 1 - 1
components/platform_console/CMakeLists.txt

@@ -8,7 +8,7 @@ idf_component_register( SRCS
 							cmd_config.c
 						INCLUDE_DIRS .   
 						REQUIRES nvs_flash 
-						PRIV_REQUIRES console app_update tools services spi_flash  platform_config vfs pthread wifi-manager platform_config newlib  telnet display squeezelite services)
+						PRIV_REQUIRES console app_update tools services spi_flash  platform_config vfs pthread wifi-manager platform_config newlib  telnet display squeezelite tools)
 target_link_libraries(${COMPONENT_LIB}   "-Wl,--undefined=GDS_DrawPixelFast")
 target_link_libraries(${COMPONENT_LIB} ${build_dir}/esp-idf/$<TARGET_PROPERTY:RECOVERY_PREFIX>/lib$<TARGET_PROPERTY:RECOVERY_PREFIX>.a 	)
 target_add_binary_data( __idf_platform_console presets.json BINARY)

+ 1 - 1
components/platform_console/app_squeezelite/CMakeLists.txt

@@ -1,7 +1,7 @@
 idf_build_get_property(idf_path IDF_PATH)
 idf_component_register( SRCS cmd_squeezelite.c 
 						INCLUDE_DIRS . 
-						PRIV_REQUIRES spi_flash bootloader_support  partition_table bootloader_support console codecs squeezelite newlib pthread tools platform_config display services)
+						PRIV_REQUIRES spi_flash bootloader_support  partition_table bootloader_support console codecs squeezelite newlib pthread tools platform_config display tools)
 						
 
 target_link_libraries(${COMPONENT_LIB} INTERFACE "-Wl,--undefined=feof")

+ 1 - 1
components/platform_console/app_squeezelite/cmd_squeezelite.c

@@ -12,7 +12,7 @@
 #include "platform_esp32.h"
 #include "platform_config.h"
 #include "esp_app_format.h"
-#include "globdefs.h"
+#include "tools.h"
 
 extern esp_err_t process_recovery_ota(const char * bin_url, char * bin_buffer, uint32_t length);
 static const char * TAG = "squeezelite_cmd";

+ 1 - 2
components/platform_console/cmd_config.c

@@ -14,11 +14,10 @@
 #include "string.h"
 #include "stdio.h"
 #include "platform_config.h"
-#include "trace.h"
 #include "messaging.h"
 #include "accessors.h"
 #include "adac.h"
-#include "globdefs.h"
+#include "tools.h"
 #include "cJSON.h"
 #include "cmd_i2ctools.h"
 

+ 1 - 1
components/platform_console/cmd_i2ctools.c

@@ -20,7 +20,7 @@
 #include "messaging.h"
 #include "display.h"
 #include "config.h"
-#include "globdefs.h"
+#include "tools.h"
 #include "adac.h"
 
 #define I2C_MASTER_TX_BUF_DISABLE 0 /*!< I2C master doesn't need buffer */

+ 1 - 2
components/platform_console/cmd_nvs.c

@@ -26,8 +26,7 @@ extern "C" {
 #include "nvs_utilities.h"
 #include "platform_console.h"
 #include "messaging.h"
-#include "globdefs.h"
-#include "trace.h"
+#include "tools.h"
 
 extern esp_err_t network_wifi_erase_legacy();
 extern esp_err_t network_wifi_erase_known_ap();

+ 1 - 2
components/platform_console/cmd_system.c

@@ -31,8 +31,7 @@
 #include "driver/uart.h"            // for the uart driver access
 #include "messaging.h"				  
 #include "platform_console.h"
-#include "trace.h"
-#include "globdefs.h"
+#include "tools.h"
 
 #ifdef CONFIG_FREERTOS_GENERATE_RUN_TIME_STATS
 #pragma message("Runtime stats enabled")

+ 121 - 82
components/platform_console/platform_console.c

@@ -26,17 +26,26 @@
 #include "trace.h"
 #include "platform_config.h"
 #include "telnet.h" 
+#include "tools.h"
 
 #include "messaging.h"
 
 #include "config.h"
-pthread_t thread_console;
+static pthread_t thread_console;
 static void * console_thread();
 void console_start();
 static const char * TAG = "console";
 extern bool bypass_network_manager;
 extern void register_squeezelite();
 
+static EXT_RAM_ATTR QueueHandle_t uart_queue;
+static EXT_RAM_ATTR struct {
+		uint8_t _buf[128];
+		StaticRingbuffer_t _ringbuf;
+		RingbufHandle_t handle;
+		QueueSetHandle_t queue_set;
+} stdin_redir;	
+
 /* Prompt to be printed before each line.
  * This can be customized, made dynamic, etc.
  */
@@ -50,7 +59,7 @@ const char* recovery_prompt = LOG_COLOR_E "recovery-squeezelite-esp32> " LOG_RES
 
 #define MOUNT_PATH "/data"
 #define HISTORY_PATH MOUNT_PATH "/history.txt"
-esp_err_t run_command(char * line);
+static esp_err_t run_command(char * line);
 #define ADD_TO_JSON(o,t,n) if (t->n) cJSON_AddStringToObject(o,QUOTE(n),t->n);
 #define ADD_PARMS_TO_CMD(o,t,n) { cJSON * parms = ParmsToJSON(&t.n->hdr); if(parms) cJSON_AddItemToObject(o,QUOTE(n),parms); }
 cJSON * cmdList;
@@ -230,17 +239,43 @@ void process_autoexec(){
 	}
 }
 
+static ssize_t stdin_read(int fd, void* data, size_t size) {
+	size_t bytes = -1;
+	
+	while (1) {
+		QueueSetMemberHandle_t activated = xQueueSelectFromSet(stdin_redir.queue_set, portMAX_DELAY);
+	
+		if (activated == uart_queue) {
+			uart_event_t event;
+			
+			xQueueReceive(uart_queue, &event, 0);
+	
+			if (event.type == UART_DATA) {
+				bytes = uart_read_bytes(CONFIG_ESP_CONSOLE_UART_NUM, data, size < event.size ? size : event.size, 0);
+				// we have to do our own line ending translation here 
+				for (int i = 0; i < bytes; i++) if (((char*)data)[i] == '\r') ((char*)data)[i] = '\n';
+				break;
+			}	
+		} else if (xRingbufferCanRead(stdin_redir.handle, activated)) {
+			char *p = xRingbufferReceiveUpTo(stdin_redir.handle, &bytes, 0, size);
+			// we might receive strings, replace null by \n
+			for (int i = 0; i < bytes; i++) if (p[i] == '\0' || p[i] == '\r') p[i] = '\n';						
+			memcpy(data, p, bytes);
+			vRingbufferReturnItem(stdin_redir.handle, p);
+			break;
+		}
+	}	
+	
+	return bytes;
+}
 
-void initialize_console() {
-
-	/* Disable buffering on stdin */
-	setvbuf(stdin, NULL, _IONBF, 0);
-
-/* Minicom, screen, idf_monitor send CR when ENTER key is pressed */
-    esp_vfs_dev_uart_port_set_rx_line_endings(CONFIG_ESP_CONSOLE_UART_NUM, ESP_LINE_ENDINGS_CR);
-    /* Move the caret to the beginning of the next line on '\n' */
-    esp_vfs_dev_uart_port_set_tx_line_endings(CONFIG_ESP_CONSOLE_UART_NUM, ESP_LINE_ENDINGS_CRLF);
+static int stdin_dummy(const char * path, int flags, int mode) {	return 0; }
 
+void initialize_console() {
+	/* Minicom, screen, idf_monitor send CR when ENTER key is pressed (unused if we redirect stdin) */
+	esp_vfs_dev_uart_set_rx_line_endings(ESP_LINE_ENDINGS_CR);
+	/* Move the caret to the beginning of the next line on '\n' */
+	esp_vfs_dev_uart_set_tx_line_endings(ESP_LINE_ENDINGS_CRLF);
 
 	/* Configure UART. Note that REF_TICK is used so that the baud rate remains
 	 * correct while APB frequency is changing in light sleep mode.
@@ -252,10 +287,28 @@ void initialize_console() {
 	ESP_ERROR_CHECK(uart_param_config(CONFIG_ESP_CONSOLE_UART_NUM, &uart_config));
 
 	/* Install UART driver for interrupt-driven reads and writes */
-	ESP_ERROR_CHECK( uart_driver_install(CONFIG_ESP_CONSOLE_UART_NUM, 256, 0, 0, NULL, 0));
-
+	ESP_ERROR_CHECK( uart_driver_install(CONFIG_ESP_CONSOLE_UART_NUM, 256, 0, 3, &uart_queue, 0));
+	
 	/* Tell VFS to use UART driver */
 	esp_vfs_dev_uart_use_driver(CONFIG_ESP_CONSOLE_UART_NUM);
+		
+	/* re-direct stdin to our own driver so we can gather data from various sources */
+	stdin_redir.queue_set = xQueueCreateSet(2);
+	stdin_redir.handle = xRingbufferCreateStatic(sizeof(stdin_redir._buf), RINGBUF_TYPE_BYTEBUF, stdin_redir._buf, &stdin_redir._ringbuf);
+	xRingbufferAddToQueueSetRead(stdin_redir.handle, stdin_redir.queue_set);
+	xQueueAddToSet(uart_queue, stdin_redir.queue_set);
+	
+	const esp_vfs_t vfs = {
+			.flags = ESP_VFS_FLAG_DEFAULT,
+			.open = stdin_dummy,
+			.read = stdin_read,
+	};
+
+	ESP_ERROR_CHECK(esp_vfs_register("/dev/console", &vfs, NULL));
+	freopen("/dev/console", "r", stdin);
+
+	/* Disable buffering on stdin */
+	setvbuf(stdin, NULL, _IONBF, 0);
 
 	/* Initialize the console */
 	esp_console_config_t console_config = { .max_cmdline_args = 28,
@@ -283,20 +336,14 @@ void initialize_console() {
 	//linenoiseHistoryLoad(HISTORY_PATH);
 }
 
+bool console_push(const char *data, size_t size) {
+	return xRingbufferSend(stdin_redir.handle, data, size, pdMS_TO_TICKS(100)) == pdPASS;
+}	
+
 void console_start() {
-	if(!is_serial_suppressed()){
-		initialize_console();
-	}
-	else {
-		/* Initialize the console */
-		esp_console_config_t console_config = { .max_cmdline_args = 28,
-				.max_cmdline_length = 600,
-	#if CONFIG_LOG_COLORS
-				.hint_color = atoi(LOG_COLOR_CYAN)
-	#endif
-				};
-		ESP_ERROR_CHECK(esp_console_init(&console_config));
-	}
+	/* we always run console b/c telnet sends commands to stdin */
+	initialize_console();
+
 	/* Register commands */
 	MEMTRACE_PRINT_DELTA_MESSAGE("Registering help command");
 	esp_console_register_help_command();
@@ -320,67 +367,58 @@ void console_start() {
 	MEMTRACE_PRINT_DELTA_MESSAGE("Registering i2c commands");
 	register_i2ctools();
 	
-	if(!is_serial_suppressed()){
-		printf("\n");
-		if(is_recovery_running){
-			printf("****************************************************************\n"
-			"RECOVERY APPLICATION\n"
-			"This mode is used to flash Squeezelite into the OTA partition\n"
-			"****\n\n");
-		}
-		printf("Type 'help' to get the list of commands.\n"
-		"Use UP/DOWN arrows to navigate through command history.\n"
-		"Press TAB when typing command name to auto-complete.\n"
-		"\n");
-		if(!is_recovery_running){
-			printf("To automatically execute lines at startup:\n"
-					"\tSet NVS variable autoexec (U8) = 1 to enable, 0 to disable automatic execution.\n"
-					"\tSet NVS variable autoexec[1~9] (string)to a command that should be executed automatically\n");
-		}
-		printf("\n\n");
-
-		/* Figure out if the terminal supports escape sequences */
-		int probe_status = linenoiseProbe();
-		if (probe_status) { /* zero indicates success */
-			printf("\n****************************\n"
-					"Your terminal application does not support escape sequences.\n"
-					"Line editing and history features are disabled.\n"
-					"On Windows, try using Putty instead.\n"
-					"****************************\n");
-			linenoiseSetDumbMode(1);
-	#if CONFIG_LOG_COLORS
-			/* Since the terminal doesn't support escape sequences,
-			 * don't use color codes in the prompt.
-			 */
-			if(is_recovery_running){
-				recovery_prompt=  "recovery-squeezelite-esp32>";
-			}
-			prompt = "squeezelite-esp32> ";
-
-	#endif //CONFIG_LOG_COLORS
-		}
-		esp_pthread_cfg_t cfg = esp_pthread_get_default_config();
-		cfg.thread_name= "console";
-		cfg.inherit_cfg = true;
+	printf("\n");
+	if(is_recovery_running){
+		printf("****************************************************************\n"
+		"RECOVERY APPLICATION\n"
+		"This mode is used to flash Squeezelite into the OTA partition\n"
+		"****\n\n");
+	}
+	printf("Type 'help' to get the list of commands.\n"
+	"Use UP/DOWN arrows to navigate through command history.\n"
+	"Press TAB when typing command name to auto-complete.\n"
+	"\n");
+	if(!is_recovery_running){
+		printf("To automatically execute lines at startup:\n"
+				"\tSet NVS variable autoexec (U8) = 1 to enable, 0 to disable automatic execution.\n"
+				"\tSet NVS variable autoexec[1~9] (string)to a command that should be executed automatically\n");
+	}
+	printf("\n\n");
+
+	/* Figure out if the terminal supports escape sequences */
+	int probe_status = linenoiseProbe();
+	if (probe_status) { /* zero indicates success */
+		printf("\n****************************\n"
+				"Your terminal application does not support escape sequences.\n"
+				"Line editing and history features are disabled.\n"
+				"On Windows, try using Putty instead.\n"
+				"****************************\n");
+		linenoiseSetDumbMode(1);
+#if CONFIG_LOG_COLORS
+		/* Since the terminal doesn't support escape sequences,
+		 * don't use color codes in the prompt.
+		 */
 		if(is_recovery_running){
-			prompt = recovery_prompt;
-			cfg.stack_size = 4096 ;
+			recovery_prompt=  "recovery-squeezelite-esp32>";
 		}
-		MEMTRACE_PRINT_DELTA_MESSAGE("Creating console thread with stack size of 4096 bytes");
-		esp_pthread_set_cfg(&cfg);
-		pthread_attr_t attr;
-		pthread_attr_init(&attr);
-		pthread_create(&thread_console, &attr, console_thread, NULL);
-		pthread_attr_destroy(&attr);   	
-		MEMTRACE_PRINT_DELTA_MESSAGE("Console thread created");
-	} 
-	else if(!is_recovery_running){
-		MEMTRACE_PRINT_DELTA_MESSAGE("Running autoexec");
-		process_autoexec();
+		prompt = "squeezelite-esp32> ";
+#endif //CONFIG_LOG_COLORS
 	}
+	esp_pthread_cfg_t cfg = esp_pthread_get_default_config();
+	cfg.thread_name= "console";
+	cfg.inherit_cfg = true;
+	if(is_recovery_running){
+		prompt = recovery_prompt;
+		cfg.stack_size = 4096 ;
+	}
+		MEMTRACE_PRINT_DELTA_MESSAGE("Creating console thread with stack size of 4096 bytes");
+	esp_pthread_set_cfg(&cfg);
+	pthread_create(&thread_console, NULL, console_thread, NULL);
+	MEMTRACE_PRINT_DELTA_MESSAGE("Console thread created");
 
 }
-esp_err_t run_command(char * line){
+
+static esp_err_t run_command(char * line){
 	/* Try to run the command */
 	int ret;
 	esp_err_t err = esp_console_run(line, &ret);
@@ -400,6 +438,7 @@ esp_err_t run_command(char * line){
 	}
 	return err;
 }
+
 static void * console_thread() {
 	if(!is_recovery_running){
 		MEMTRACE_PRINT_DELTA_MESSAGE("Running autoexec");
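With this rework, stdin is no longer tied straight to the UART: a small VFS driver is registered on "/dev/console" and stdin_read() multiplexes two sources through a queue set, the UART event queue and a static ring buffer. console_push() is the entry point other components (telnet in particular) can use to drop characters into that ring buffer, and stdin_read() rewrites '\0' and '\r' as '\n' so linenoise sees complete lines. A minimal caller sketch in C (the prototype is repeated locally; the surrounding telnet plumbing is assumed and not part of this hunk):

	#include <string.h>
	#include <stdbool.h>

	extern bool console_push(const char *data, size_t size);

	/* Inject one command line received out of band (e.g. from a telnet session)
	   into the console's stdin ring buffer. Sending the terminating '\0' along
	   is enough: stdin_read() turns it into '\n', which completes the line. */
	static bool inject_command(const char *line) {
		return console_push(line, strlen(line) + 1);
	}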

+ 7 - 7
components/raop/raop.c

@@ -255,7 +255,7 @@ void raop_delete(struct raop_ctx_s *ctx) {
 	vTaskDelay(100 / portTICK_PERIOD_MS);
 	ulTaskNotifyTake(pdFALSE, portMAX_DELAY);
 	vTaskDelete(ctx->thread);
-	heap_caps_free(ctx->xTaskBuffer);
+	SAFE_PTR_FREE(ctx->xTaskBuffer);
 
 	// cleanup all session-created items
 	cleanup_rtsp(ctx, true);
@@ -525,15 +525,17 @@ static bool handle_rtsp(raop_ctx_t *ctx, int sock)
 		char *p;
 		rtp_resp_t rtp = { 0 };
 		short unsigned tport = 0, cport = 0;
+		uint8_t *buffer = NULL;
+		size_t size = 0;
 
-		// we are about to stream, do something if needed
-		success = ctx->cmd_cb(RAOP_SETUP);
+		// we are about to stream, do something if needed and optionally give buffers to play with
+		success = ctx->cmd_cb(RAOP_SETUP, &buffer, &size);
 
 		if ((p = strcasestr(buf, "timing_port")) != NULL) sscanf(p, "%*[^=]=%hu", &tport);
 		if ((p = strcasestr(buf, "control_port")) != NULL) sscanf(p, "%*[^=]=%hu", &cport);
 
 		rtp = rtp_init(ctx->peer, ctx->latency,	ctx->rtsp.aeskey, ctx->rtsp.aesiv,
-					   ctx->rtsp.fmtp, cport, tport, ctx->cmd_cb, ctx->data_cb);
+					   ctx->rtsp.fmtp, cport, tport, buffer, size, ctx->cmd_cb, ctx->data_cb);
 						
 		ctx->rtp = rtp.ctx;
 		
@@ -605,7 +607,6 @@ static bool handle_rtsp(raop_ctx_t *ctx, int sock)
 			sscanf(p, "%*[^:]:%u/%u/%u", &start, &current, &stop);
 			current = ((current - start) / 44100) * 1000;
 			if (stop) stop = ((stop - start) / 44100) * 1000;
-			else stop = -1;
 			LOG_INFO("[%p]: SET PARAMETER progress %d/%u %s", ctx, current, stop, p);
 			success = ctx->cmd_cb(RAOP_PROGRESS, max(current, 0), stop);
 		} else if (body && ((p = kd_lookup(headers, "Content-Type")) != NULL) && !strcasecmp(p, "application/x-dmap-tagged")) {
@@ -672,9 +673,8 @@ void cleanup_rtsp(raop_ctx_t *ctx, bool abort) {
 		ctx->active_remote.running = false;
 		xSemaphoreTake(ctx->active_remote.destroy_mutex, portMAX_DELAY);
 		vTaskDelete(ctx->active_remote.thread);
+		SAFE_PTR_FREE(ctx->active_remote.xTaskBuffer);
 		vSemaphoreDelete(ctx->active_remote.thread);
-
-		heap_caps_free(ctx->active_remote.xTaskBuffer);
 #endif
 		memset(&ctx->active_remote, 0, sizeof(ctx->active_remote));
 		LOG_INFO("[%p]: Remote search thread aborted", ctx);

+ 2 - 6
components/raop/raop.h

@@ -1,15 +1,12 @@
 /*
- *  AirCast: Chromecast to AirPlay
- *
- * (c) Philippe 2016-2017, philippe_44@outlook.com
+ *  (c) Philippe 2020, philippe_44@outlook.com
  *
  * This software is released under the MIT License.
  * https://opensource.org/licenses/MIT
  *
  */
 
-#ifndef __RAOP_H
-#define __RAOP_H
+#pragma once
 
 #include "platform.h"
 #include "raop_sink.h"
@@ -20,4 +17,3 @@ void  		  raop_delete(struct raop_ctx_s *ctx);
 void		  raop_abort(struct raop_ctx_s *ctx);
 bool		  raop_cmd(struct raop_ctx_s *ctx, raop_event_t event, void *param);
 
-#endif

+ 2 - 4
components/raop/raop_sink.c

@@ -16,9 +16,7 @@
 #include "audio_controls.h"
 #include "display.h"
 #include "accessors.h"
-
 #include "log_util.h"
-#include "trace.h"
 
 #ifndef CONFIG_AIRPLAY_NAME
 #define CONFIG_AIRPLAY_NAME		"ESP32-AirPlay"
@@ -184,7 +182,7 @@ static bool raop_sink_start(raop_cmd_vcb_t cmd_cb, raop_data_cb_t data_cb) {
     ESP_ERROR_CHECK( mdns_hostname_set(hostname) );
         
     char * sink_name_buffer= (char *)config_alloc_get(NVS_TYPE_STR,"airplay_name");
-    if(sink_name_buffer != NULL){
+    if (sink_name_buffer != NULL){
     	memset(sink_name, 0x00, sizeof(sink_name));
     	strncpy(sink_name,sink_name_buffer,sizeof(sink_name)-1 );
     	free(sink_name_buffer);
@@ -219,7 +217,7 @@ void raop_sink_init(raop_cmd_vcb_t cmd_cb, raop_data_cb_t data_cb) {
 		raop_cbs.data = data_cb;
 		TimerHandle_t timer = xTimerCreate("raopStart", 5000 / portTICK_RATE_MS, pdTRUE, NULL, raop_start_handler);
 		xTimerStart(timer, portMAX_DELAY);
-		LOG_INFO( "delaying AirPlay start");		
+		LOG_INFO( "Delaying AirPlay start");		
 	}	
 }
 

+ 18 - 8
components/raop/rtp.c

@@ -65,8 +65,8 @@
 #define MS2TS(ms, rate) ((((u64_t) (ms)) * (rate)) / 1000)
 #define TS2MS(ts, rate) NTP2MS(TS2NTP(ts,rate))
 
-extern log_level 	raop_loglevel;
-static log_level 	*loglevel = &raop_loglevel;
+extern log_level 	raop_loglevel;
+static log_level 	*loglevel = &raop_loglevel;
 
 //#define __RTP_STORE
 
@@ -93,6 +93,7 @@ typedef struct audio_buffer_entry {   // decoded audio packets
 	u32_t rtptime, last_resend;
 	s16_t *data;
 	int len;
+	bool allocated;
 } abuf_t;
 
 typedef struct rtp_s {
@@ -152,7 +153,7 @@ typedef struct rtp_s {
 
 
 #define BUFIDX(seqno) ((seq_t)(seqno) % BUFFER_FRAMES)
-static void 	buffer_alloc(abuf_t *audio_buffer, int size);
+static void 	buffer_alloc(abuf_t *audio_buffer, int size, uint8_t *buf, size_t buf_size);
 static void 	buffer_release(abuf_t *audio_buffer);
 static void 	buffer_reset(abuf_t *audio_buffer);
 static void 	buffer_push_packet(rtp_t *ctx);
@@ -208,6 +209,7 @@ static struct alac_codec_s* alac_init(int fmtp[32]) {
 /*---------------------------------------------------------------------------*/
 rtp_resp_t rtp_init(struct in_addr host, int latency, char *aeskey, char *aesiv, char *fmtpstr,
 								short unsigned pCtrlPort, short unsigned pTimingPort,
+								uint8_t *buffer, size_t size,
 								raop_cmd_cb_t cmd_cb, raop_data_cb_t data_cb)
 {
 	int i = 0;
@@ -260,7 +262,7 @@ rtp_resp_t rtp_init(struct in_addr host, int latency, char *aeskey, char *aesiv,
 	ctx->alac_codec = alac_init(fmtp);
 	rc &= ctx->alac_codec != NULL;
 
-	buffer_alloc(ctx->audio_buffer, ctx->frame_size*4);
+	buffer_alloc(ctx->audio_buffer, ctx->frame_size*4, buffer, size);
 
 	// create rtp ports
 	for (i = 0; i < 3; i++) {
@@ -311,7 +313,7 @@ void rtp_end(rtp_t *ctx)
 #else
 		ulTaskNotifyTake(pdFALSE, portMAX_DELAY);
 		vTaskDelete(ctx->thread);
-		heap_caps_free(ctx->xTaskBuffer);
+		SAFE_PTR_FREE(ctx->xTaskBuffer);
 #endif
 	}
 	
@@ -369,10 +371,18 @@ void rtp_record(rtp_t *ctx, unsigned short seqno, unsigned rtptime) {
 }
 
 /*---------------------------------------------------------------------------*/
-static void buffer_alloc(abuf_t *audio_buffer, int size) {
+static void buffer_alloc(abuf_t *audio_buffer, int size, uint8_t *buf, size_t buf_size) {
 	int i;
 	for (i = 0; i < BUFFER_FRAMES; i++) {
-		audio_buffer[i].data = malloc(size);
+		if (buf && buf_size >= size) {
+			audio_buffer[i].data = (s16_t*) buf;
+			audio_buffer[i].allocated = false;
+			buf += size;
+			buf_size -= size;
+		} else {
+			audio_buffer[i].allocated = true;
+			audio_buffer[i].data = malloc(size);
+		}
 		audio_buffer[i].ready = 0;
 	}
 }
@@ -381,7 +391,7 @@ static void buffer_alloc(abuf_t *audio_buffer, int size) {
 static void buffer_release(abuf_t *audio_buffer) {
 	int i;
 	for (i = 0; i < BUFFER_FRAMES; i++) {
-		free(audio_buffer[i].data);
+		if (audio_buffer[i].allocated) free(audio_buffer[i].data);
 	}
 }
 

+ 1 - 0
components/raop/rtp.h

@@ -12,6 +12,7 @@ typedef struct {
 rtp_resp_t 			rtp_init(struct in_addr host, int latency,
 							char *aeskey, char *aesiv, char *fmtpstr,
 							short unsigned pCtrlPort, short unsigned pTimingPort,
+							uint8_t *buffer, size_t size, 
 							raop_cmd_cb_t cmd_cb, raop_data_cb_t data_cb);
 void			 	rtp_end(struct rtp_s *ctx);
 bool 				rtp_flush(struct rtp_s *ctx, unsigned short seqno, unsigned rtptime, bool exit_locked);

+ 20 - 2
components/raop/util.h

@@ -18,6 +18,11 @@
 #include "platform.h"
 #include "pthread.h"
 
+#ifndef WIN32
+#include "freertos/FreeRTOS.h"
+#include "freertos/timers.h"
+#endif
+
 #define NFREE(p) if (p) { free(p); p = NULL; }
 
 typedef struct metadata_s {
@@ -46,9 +51,21 @@ char 		*strndup(const char *s, size_t n);
 int 		asprintf(char **strp, const char *fmt, ...);
 void 		winsock_init(void);
 void 		winsock_close(void);
-
+#define		SAFE_PTR_FREE(P) free(P)
 #else
 char 		*strlwr(char *str);
+
+// reason is that TCB might be cleanup in idle task
+#define SAFE_PTR_FREE(P)							\
+	do {											\
+		TimerHandle_t timer = xTimerCreate("cleanup", pdMS_TO_TICKS(5000), pdFALSE, P, _delayed_free);	\
+		xTimerStart(timer, portMAX_DELAY);			\
+	} while (0)				
+static void inline _delayed_free(TimerHandle_t xTimer) {
+	free(pvTimerGetTimerID(xTimer));
+	xTimerDelete(xTimer, portMAX_DELAY);
+}	
 #endif
+
 char* 		strextract(char *s1, char *beg, char *end);
 in_addr_t 	get_localhost(char **name);
 void 		get_mac(u8_t mac[]);
@@ -67,5 +84,6 @@ char* 		kd_dump(key_data_t *kd);
 void 		kd_free(key_data_t *kd);
 
 int 		_fprintf(FILE *file, ...);
-
 #endif
+
+#endif
 

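SAFE_PTR_FREE above replaces the immediate heap_caps_free() calls that used to follow vTaskDelete() in the raop.c and rtp.c hunks: a deleted task's TCB is only reclaimed later by the idle task, so freeing the block that backs it right away risks a use-after-free. On the esp32 build the macro therefore hands the pointer to a one-shot 5-second FreeRTOS timer and frees it from the timer callback (on WIN32 it degrades to a plain free()). A minimal sketch of the pattern in C; names and sizes are hypothetical, not taken from the commit:

	#include "freertos/FreeRTOS.h"
	#include "freertos/task.h"
	#include "esp_heap_caps.h"
	#include "util.h"				/* SAFE_PTR_FREE */

	#define WORKER_STACK	4096	/* bytes: ESP-IDF task stack sizes are given in bytes */

	static void worker(void *arg) {
		for (;;) vTaskDelay(portMAX_DELAY);
	}

	static void start_then_stop(void) {
		/* for the sketch, TCB and stack share one heap block */
		StaticTask_t *task_buf = heap_caps_malloc(sizeof(StaticTask_t) + WORKER_STACK, MALLOC_CAP_INTERNAL);
		if (!task_buf) return;
		TaskHandle_t th = xTaskCreateStatic(worker, "worker", WORKER_STACK, NULL,
											tskIDLE_PRIORITY + 1,
											(StackType_t *) (task_buf + 1), task_buf);
		/* ... later, once the worker is no longer needed ... */
		vTaskDelete(th);
		SAFE_PTR_FREE(task_buf);	/* freed ~5 s later in the timer callback, not here */
	}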
+ 124 - 84
components/services/accessors.c

@@ -29,12 +29,11 @@
 #include "driver/spi_common_internal.h"
 #include "esp32/rom/efuse.h"
 #include "adac.h"
-#include "trace.h"
+#include "tools.h"
 #include "monitor.h"
 #include "messaging.h"
 #include "network_ethernet.h"
 
-
 static const char *TAG = "services";
 const char *i2c_name_type="I2C";
 const char *spi_name_type="SPI";
@@ -63,7 +62,6 @@ static char * config_spdif_get_string(){
 											  ",ws=" STR(CONFIG_SPDIF_WS_IO) ",do=" STR(CONFIG_SPDIF_DO_IO));
 }
 
-
 /****************************************************************************************
  * 
  */
@@ -110,9 +108,9 @@ bool is_spdif_config_locked(){
 static void set_i2s_pin(char *config, i2s_pin_config_t *pin_config) {
 	char *p;
 	pin_config->bck_io_num = pin_config->ws_io_num = pin_config->data_out_num = pin_config->data_in_num = -1; 				
-	if ((p = strcasestr(config, "bck")) != NULL) pin_config->bck_io_num = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(config, "ws")) != NULL) pin_config->ws_io_num = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(config, "do")) != NULL) pin_config->data_out_num = atoi(strchr(p, '=') + 1);
+	if ((p = strcasestr(config, "bck"))) sscanf(p, "bck%*[^=]=%d", &pin_config->bck_io_num);
+	if ((p = strcasestr(config, "ws"))) sscanf(p, "ws%*[^=]=%d", &pin_config->ws_io_num);
+	if ((p = strcasestr(config, "do"))) sscanf(p, "do%*[^=]=%d", &pin_config->data_out_num);
 }
 
 /****************************************************************************************
@@ -120,19 +118,20 @@ static void set_i2s_pin(char *config, i2s_pin_config_t *pin_config) {
  */
 const i2s_platform_config_t * config_get_i2s_from_str(char * dac_config ){
 	static EXT_RAM_ATTR i2s_platform_config_t i2s_dac_pin;
-	memset(&i2s_dac_pin, 0xFF, sizeof(i2s_dac_pin));
+	memset(&i2s_dac_pin, 0xff, sizeof(i2s_dac_pin));
 	set_i2s_pin(dac_config, &i2s_dac_pin.pin);
 	strcpy(i2s_dac_pin.model, "i2s");
 	char * p=NULL;
-	if ((p = strcasestr(dac_config, "i2c")) != NULL) i2s_dac_pin.i2c_addr = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(dac_config, "sda")) != NULL) i2s_dac_pin.sda = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(dac_config, "scl")) != NULL) i2s_dac_pin.scl = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(dac_config, "model")) != NULL) sscanf(p, "%*[^=]=%31[^,]", i2s_dac_pin.model);
-	if ((p = strcasestr(dac_config, "mute")) != NULL) {
+
+	PARSE_PARAM(dac_config, "i2c", '=', i2s_dac_pin.i2c_addr);
+	PARSE_PARAM(dac_config, "sda", '=', i2s_dac_pin.sda);
+	PARSE_PARAM(dac_config, "scl", '=', i2s_dac_pin.scl);
+	PARSE_PARAM_STR(dac_config, "model", '=', i2s_dac_pin.model, 31);
+	if ((p = strcasestr(dac_config, "mute"))) {
 		char mute[8] = "";
 		sscanf(p, "%*[^=]=%7[^,]", mute);
 		i2s_dac_pin.mute_gpio = atoi(mute);
-		if ((p = strchr(mute, ':')) != NULL) i2s_dac_pin.mute_level = atoi(p + 1);
+		PARSE_PARAM(p, "mute", ':', i2s_dac_pin.mute_level);
 	}	
 	return &i2s_dac_pin;
 }
@@ -140,52 +139,56 @@ const i2s_platform_config_t * config_get_i2s_from_str(char * dac_config ){
 /****************************************************************************************
  * Get eth config structure from config string
  */
-const eth_config_t * config_get_eth_from_str(char * eth_config ){
-	char * p=NULL;
-	static EXT_RAM_ATTR eth_config_t eth_pin; 
-	memset(&eth_pin, 0xFF, sizeof(eth_pin));
-	memset(&eth_pin.model, 0x00, sizeof(eth_pin.model));
-	eth_pin.valid = true;
-
-	if ((p = strcasestr(eth_config, "model")) != NULL) sscanf(p, "%*[^=]=%15[^,]", eth_pin.model);
-	if ((p = strcasestr(eth_config, "mdc")) != NULL) eth_pin.mdc = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(eth_config, "mdio")) != NULL) eth_pin.mdio = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(eth_config, "rst")) != NULL) eth_pin.rst = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(eth_config, "mosi")) != NULL) eth_pin.mosi = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(eth_config, "miso")) != NULL) eth_pin.miso = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(eth_config, "intr")) != NULL) eth_pin.intr = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(eth_config, "cs")) != NULL) eth_pin.cs = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(eth_config, "speed")) != NULL) eth_pin.speed = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(eth_config, "clk")) != NULL) eth_pin.clk = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(eth_config, "host")) != NULL) eth_pin.host = atoi(strchr(p, '=') + 1);
-
-	if(!eth_pin.model || strlen(eth_pin.model)==0){
-		eth_pin.valid = false;
-		return &eth_pin;
+const eth_config_t * config_get_eth_from_str(char* config ){
+	static EXT_RAM_ATTR eth_config_t eth_config; 
+	memset(&eth_config, 0xff, sizeof(eth_config));
+	memset(&eth_config.model, 0x00, sizeof(eth_config.model));
+	eth_config.valid = true;
+
+	PARSE_PARAM_STR(config, "model", '=', eth_config.model, 15);
+	PARSE_PARAM(config, "mdc", '=', eth_config.mdc);
+	PARSE_PARAM(config, "mdio", '=', eth_config.mdio);
+	PARSE_PARAM(config, "rst", '=', eth_config.rst);
+	PARSE_PARAM(config, "mosi", '=', eth_config.mosi);
+	PARSE_PARAM(config, "miso", '=', eth_config.miso);
+	PARSE_PARAM(config, "intr", '=', eth_config.intr);
+	PARSE_PARAM(config, "cs", '=', eth_config.cs);
+	PARSE_PARAM(config, "speed", '=', eth_config.speed);
+	PARSE_PARAM(config, "clk", '=', eth_config.clk);
+
+	// only system host is available
+	eth_config.host = spi_system_host;
+
+	if(!eth_config.model || strlen(eth_config.model)==0){
+		eth_config.valid = false;
+		return &eth_config;
 	}
-	 network_ethernet_driver_t* network_driver = network_ethernet_driver_autodetect(eth_pin.model);
+	
+	network_ethernet_driver_t* network_driver = network_ethernet_driver_autodetect(eth_config.model);
+	
 	if(!network_driver || !network_driver->valid){
-		messaging_post_message(MESSAGING_ERROR,MESSAGING_CLASS_SYSTEM,"Ethernet config invalid: model %s %s",eth_pin.model,network_driver?"was not compiled in":"was not found"); 
-		eth_pin.valid = false;
+		messaging_post_message(MESSAGING_ERROR,MESSAGING_CLASS_SYSTEM,"Ethernet config invalid: model %s %s",eth_config.model,network_driver?"was not compiled in":"was not found"); 
+		eth_config.valid = false;
 	}
+	
 	if(network_driver){
-		eth_pin.rmii = network_driver->rmii;
-		eth_pin.spi = network_driver->spi;
+		eth_config.rmii = network_driver->rmii;
+		eth_config.spi = network_driver->spi;
 		
 		if(network_driver->rmii){
-			if(!GPIO_IS_VALID_GPIO(eth_pin.mdio) || !GPIO_IS_VALID_GPIO(eth_pin.mdc)){
-				messaging_post_message(MESSAGING_ERROR,MESSAGING_CLASS_SYSTEM,"Ethernet config invalid: %s %s",!GPIO_IS_VALID_GPIO(eth_pin.mdc)?"Invalid MDC":"",!GPIO_IS_VALID_GPIO(eth_pin.mdio)?"Invalid mdio":""); 
-				eth_pin.valid = false;
+			if(!GPIO_IS_VALID_GPIO(eth_config.mdio) || !GPIO_IS_VALID_GPIO(eth_config.mdc)){
+				messaging_post_message(MESSAGING_ERROR,MESSAGING_CLASS_SYSTEM,"Ethernet config invalid: %s %s",!GPIO_IS_VALID_GPIO(eth_config.mdc)?"Invalid MDC":"",!GPIO_IS_VALID_GPIO(eth_config.mdio)?"Invalid mdio":""); 
+				eth_config.valid = false;
 			}
 		}
 		else if(network_driver->spi){
-			if(!GPIO_IS_VALID_GPIO(eth_pin.cs)){
+			if(!GPIO_IS_VALID_GPIO(eth_config.cs)){
 				messaging_post_message(MESSAGING_ERROR,MESSAGING_CLASS_SYSTEM,"Ethernet config invalid: invalid CS pin"); 
 				return false;
 			}
 		}
 	}
-	return &eth_pin;
+	return &eth_config;
 }
 
 /****************************************************************************************
@@ -468,16 +471,18 @@ const display_config_t * config_display_get(){
 		sscanf(p, "%*[^:]:%u", &dstruct.depth);
 		dstruct.drivername = display_conf_get_driver_name(strchr(p, '=') + 1);
 	}
-	
-	if ((p = strcasestr(config, "width")) != NULL) dstruct.width = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(config, "height")) != NULL) dstruct.height = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(config, "reset")) != NULL) dstruct.RST_pin = atoi(strchr(p, '=') + 1);
+
+	PARSE_PARAM(config, "width", '=', dstruct.width);
+	PARSE_PARAM(config, "height", '=', dstruct.height);
+	PARSE_PARAM(config, "reset", '=', dstruct.RST_pin);
+	PARSE_PARAM(config, "address", '=', dstruct.address);
+	PARSE_PARAM(config, "cs", '=', dstruct.CS_pin);
+	PARSE_PARAM(config, "speed", '=', dstruct.speed);
+	PARSE_PARAM(config, "back", '=', dstruct.back);
+
 	if (strstr(config, "I2C") ) dstruct.type=i2c_name_type;
-	if ((p = strcasestr(config, "address")) != NULL) dstruct.address = atoi(strchr(p, '=') + 1);
 	if (strstr(config, "SPI") ) dstruct.type=spi_name_type;
-	if ((p = strcasestr(config, "cs")) != NULL) dstruct.CS_pin = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(config, "speed")) != NULL) dstruct.speed = atoi(strchr(p, '=') + 1);
-	if ((p = strcasestr(config, "back")) != NULL) dstruct.back = atoi(strchr(p, '=') + 1);
+
 	dstruct.hflip= strcasestr(config, "HFlip") ? true : false;
 	dstruct.vflip= strcasestr(config, "VFlip") ? true : false;
 	dstruct.rotate= strcasestr(config, "rotate") ? true : false;
@@ -488,7 +493,7 @@ const display_config_t * config_display_get(){
  * 
  */
 const i2c_config_t * config_i2c_get(int * i2c_port) {
-	char *nvs_item, *p;
+	char *nvs_item;
 	static i2c_config_t i2c = {
 		.mode = I2C_MODE_MASTER,
 		.sda_io_num = -1,
@@ -502,10 +507,10 @@ const i2c_config_t * config_i2c_get(int * i2c_port) {
 	
 	nvs_item = config_alloc_get(NVS_TYPE_STR, "i2c_config");
 	if (nvs_item) {
-		if ((p = strcasestr(nvs_item, "scl")) != NULL) i2c.scl_io_num = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "sda")) != NULL) i2c.sda_io_num = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "speed")) != NULL) i2c.master.clk_speed = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "port")) != NULL) i2c_system_port = atoi(strchr(p, '=') + 1);
+		PARSE_PARAM(nvs_item, "scl", '=', i2c.scl_io_num);
+		PARSE_PARAM(nvs_item, "sda", '=', i2c.sda_io_num);
+		PARSE_PARAM(nvs_item, "speed", '=', i2c.master.clk_speed);
+		PARSE_PARAM(nvs_item, "port", '=', i2c_system_port);
 		free(nvs_item);
 	}
 	if(i2c_port) {
@@ -518,6 +523,46 @@ const i2c_config_t * config_i2c_get(int * i2c_port) {
 	return &i2c;
 }
 
+/****************************************************************************************
+ * Get IO expander config structure from config string
+ */
+const gpio_exp_config_t* config_gpio_exp_get(int index) {
+	char *nvs_item, *item, *p;
+	static gpio_exp_config_t config;
+
+	// re-initialize config every time
+	memset(&config, 0, sizeof(config));
+	config.intr = -1; config.count = 16; config.base = GPIO_NUM_MAX; config.phy.port = i2c_system_port; config.phy.host = spi_system_host;
+
+	nvs_item = config_alloc_get(NVS_TYPE_STR, "gpio_exp_config");
+	if (!nvs_item || !*nvs_item) return NULL;
+
+	// search index items
+	for (item = strtok(nvs_item, ";"); index && item; index--) {
+		if ((item = strtok(NULL, ";")) == NULL) {
+			free(nvs_item);
+			return NULL;
+		}
+	}
+
+	PARSE_PARAM(item, "addr", '=', config.phy.addr);
+	PARSE_PARAM(item, "cs", '=', config.phy.cs_pin);
+	PARSE_PARAM(item, "speed", '=', config.phy.speed);
+	PARSE_PARAM(item, "intr", '=', config.intr);
+	PARSE_PARAM(item, "base", '=', config.base);
+	PARSE_PARAM(item, "count", '=', config.count);
+	PARSE_PARAM_STR(item, "model", '=', config.model, 31);
+
+	if ((p = strcasestr(item, "port")) != NULL) {
+		char port[8] = "";
+		sscanf(p, "%*[^=]=%7[^,]", port);
+		if (strcasestr(port, "dac")) config.phy.port = 0;
+	}	
+
+	free(nvs_item);
+	return &config;
+}	
+
 /****************************************************************************************
  * 
  */
@@ -596,18 +641,19 @@ const set_GPIO_struct_t * get_gpio_struct(){
  * 
  */
 const spi_bus_config_t * config_spi_get(spi_host_device_t * spi_host) {
-	char *nvs_item, *p;
+	char *nvs_item;
 	static EXT_RAM_ATTR spi_bus_config_t spi;
-	memset(&spi, 0xFF, sizeof(spi));
+	memset(&spi, 0xff, sizeof(spi));
 	
 	nvs_item = config_alloc_get_str("spi_config", CONFIG_SPI_CONFIG, NULL);
 	if (nvs_item) {
-		if ((p = strcasestr(nvs_item, "data")) != NULL) spi.mosi_io_num = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "mosi")) != NULL) spi.mosi_io_num = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "miso")) != NULL) spi.miso_io_num = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "clk")) != NULL) spi.sclk_io_num = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "dc")) != NULL) spi_system_dc_gpio = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "host")) != NULL) spi_system_host = atoi(strchr(p, '=') + 1);
+		PARSE_PARAM(nvs_item, "data", '=', spi.mosi_io_num);
+		PARSE_PARAM(nvs_item, "mosi", '=', spi.mosi_io_num);
+		PARSE_PARAM(nvs_item, "miso", '=', spi.miso_io_num);
+		PARSE_PARAM(nvs_item, "clk", '=', spi.sclk_io_num);
+		PARSE_PARAM(nvs_item, "dc", '=', spi_system_dc_gpio);
+		// only VSPI (1) can be used as Flash and PSRAM run at 80MHz
+		// if ((p = strcasestr(nvs_item, "host")) != NULL) spi_system_host = atoi(strchr(p, '=') + 1);
 		free(nvs_item);
 	}
 	if(spi_host) *spi_host = spi_system_host;
@@ -642,11 +688,11 @@ const rotary_struct_t * config_rotary_get() {
 	char *config = config_alloc_get_default(NVS_TYPE_STR, "rotary_config", NULL, 0);
 	if (config && *config) {
 		char *p;
-		
+
 		// parse config
-		if ((p = strcasestr(config, "A")) != NULL) rotary.A = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(config, "B")) != NULL) rotary.B = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(config, "SW")) != NULL) rotary.SW = atoi(strchr(p, '=') + 1);
+		PARSE_PARAM(config, "A", '=', rotary.A);
+		PARSE_PARAM(config, "B", '=', rotary.B);
+		PARSE_PARAM(config, "SW", '=', rotary.SW);
 		if ((p = strcasestr(config, "knobonly")) != NULL) {
 			p = strchr(p, '=');
 			rotary.knobonly = true;
@@ -691,13 +737,10 @@ cJSON * add_gpio_for_value(cJSON * list,const char * name,int gpio, const char *
  */
 cJSON * add_gpio_for_name(cJSON * list,const char * nvs_entry,const char * name, const char * prefix, bool fixed){
 	cJSON * llist = list?list:cJSON_CreateArray();
-	char *p;
 	int gpioNum=0;
-	if ((p = strcasestr(nvs_entry, name)) != NULL) {
-		gpioNum = atoi(strchr(p, '=') + 1);
-		if(gpioNum>=0){
-			cJSON_AddItemToArray(llist,get_gpio_entry(name,prefix,gpioNum,fixed));
-		}
+	PARSE_PARAM(nvs_entry, name, '=', gpioNum);
+	if(gpioNum>=0){
+		cJSON_AddItemToArray(llist,get_gpio_entry(name,prefix,gpioNum,fixed));
 	}
 	return llist;
 }
@@ -1059,14 +1102,11 @@ cJSON * get_gpio_list(bool refresh) {
 #ifndef CONFIG_BAT_LOCKED
 	char *bat_config = config_alloc_get_default(NVS_TYPE_STR, "bat_config", NULL, 0);
 	if (bat_config) {
-		char *p;
-		int channel;
-		if ((p = strcasestr(bat_config, "channel") ) != NULL) {
-			channel = atoi(strchr(p, '=') + 1);
-			if(channel != -1){
-				if(adc1_pad_get_io_num(channel,&gpio_num )==ESP_OK){
-					cJSON_AddItemToArray(gpio_list,get_gpio_entry("bat","other",gpio_num,false));
-				}
+		int channel = -1;
+		PARSE_PARAM(bat_config, "channel", '=', channel);
+		if(channel != -1){
+			if(adc1_pad_get_io_num(channel,&gpio_num )==ESP_OK){
+				cJSON_AddItemToArray(gpio_list,get_gpio_entry("bat","other",gpio_num,false));
 			}
 		}
 		free(bat_config);
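
Throughout this file (and in battery.c, led.c and audio_controls.c below) the repeated strcasestr/atoi pattern is replaced by PARSE_PARAM and PARSE_PARAM_STR, whose definitions are not part of this excerpt. A hypothetical reconstruction, only to show the calling convention (config string, parameter name, delimiter, destination); the real macros in tools.h may differ:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	// hypothetical sketch, NOT the actual definition: find "<name>...<delim><value>" case-insensitively
	#define PARSE_PARAM(S, NAME, DELIM, VAR) do {                               \
			char *_p = strcasestr(S, NAME);                                     \
			if (_p && (_p = strchr(_p, DELIM)) != NULL) VAR = atoi(_p + 1);     \
		} while (0)

	// string variant: LEN is the destination capacity minus one (e.g. 31 for char[32])
	#define PARSE_PARAM_STR(S, NAME, DELIM, VAR, LEN) do {                      \
			char *_p = strcasestr(S, NAME);                                     \
			if (_p && (_p = strchr(_p, DELIM)) != NULL)                         \
				sscanf(_p + 1, "%" #LEN "[^,]", VAR);                           \
		} while (0)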

+ 3 - 1
components/services/accessors.h

@@ -12,10 +12,11 @@
 #include "driver/i2c.h"
 #include "driver/i2s.h"
 #include "driver/spi_master.h"
-#include "freertos/queue.h"
+#include "gpio_exp.h"
 
 extern const char *i2c_name_type;
 extern const char *spi_name_type;
+
 typedef struct {
 	int width;
 	int height;
@@ -97,6 +98,7 @@ esp_err_t 					config_i2s_set(const i2s_platform_config_t * config, const char *
 esp_err_t 					config_spi_set(const spi_bus_config_t * config, int host, int dc);
 const i2c_config_t * 		config_i2c_get(int * i2c_port);
 const spi_bus_config_t * 	config_spi_get(spi_host_device_t * spi_host);
+const gpio_exp_config_t *   config_gpio_exp_get(int index);
 void 						parse_set_GPIO(void (*cb)(int gpio, char *value));
 const i2s_platform_config_t * 	config_dac_get();
 const i2s_platform_config_t * 	config_spdif_get( );

+ 6 - 6
components/services/audio_controls.c

@@ -67,12 +67,12 @@ static const char * actrls_action_s[ ] = { EP(ACTRLS_POWER),EP(ACTRLS_VOLUP),EP(
 static const char * TAG = "audio controls";
 static actrls_config_t *json_config;
 cJSON * control_profiles = NULL;
-static actrls_t default_controls, current_controls;
+static EXT_RAM_ATTR actrls_t default_controls, current_controls;
 static actrls_hook_t *default_hook, *current_hook;
 static bool default_raw_controls, current_raw_controls;
 static actrls_ir_handler_t *default_ir_handler, *current_ir_handler;
 
-static struct {
+static EXT_RAM_ATTR struct {
 	bool long_state;
 	bool volume_lock;
 	TimerHandle_t timer;
@@ -137,10 +137,10 @@ esp_err_t actrls_init(const char *profile_name) {
 		int A = -1, B = -1, SW = -1, longpress = 0;
 		
 		// parse config
-		if ((p = strcasestr(config, "A")) != NULL) A = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(config, "B")) != NULL) B = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(config, "SW")) != NULL) SW = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(config, "knobonly")) != NULL) {
+		PARSE_PARAM(config, "A", '=', A);
+		PARSE_PARAM(config, "B", '=', B);
+		PARSE_PARAM(config, "SW", '=', SW);
+		if ((p = strcasestr(config, "knobonly"))) {
 			p = strchr(p, '=');
 			int double_press = p ? atoi(p + 1) : 350;
 			rotary.timer = xTimerCreate("knobTimer", double_press / portTICK_RATE_MS, pdFALSE, NULL, rotary_timer);

+ 4 - 5
components/services/battery.c

@@ -79,13 +79,12 @@ void battery_svc_init(void) {
 
 	char *nvs_item = config_alloc_get_default(NVS_TYPE_STR, "bat_config", "n", 0);
 	if (nvs_item) {
-		char *p;		
 #ifndef CONFIG_BAT_LOCKED		
-		if ((p = strcasestr(nvs_item, "channel")) != NULL) battery.channel = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "scale")) != NULL) battery.scale = atof(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "atten")) != NULL) battery.attenuation = atoi(strchr(p, '=') + 1);
+		PARSE_PARAM(nvs_item, "channel", '=', battery.channel);
+		PARSE_PARAM(nvs_item, "scale", '=', battery.scale);
+		PARSE_PARAM(nvs_item, "atten", '=', battery.attenuation);
 #endif		
-		if ((p = strcasestr(nvs_item, "cells")) != NULL) battery.cells = atof(strchr(p, '=') + 1);		
+		PARSE_PARAM(nvs_item, "cells", '=', battery.cells);
 		free(nvs_item);
 	}	
 

+ 69 - 52
components/services/buttons.c

@@ -21,16 +21,17 @@
 #include "esp_task.h"
 #include "driver/gpio.h"
 #include "driver/rmt.h"
+#include "gpio_exp.h"
 #include "buttons.h"
 #include "rotary_encoder.h"
 #include "globdefs.h"
 
 static const char * TAG = "buttons";
 
-static int n_buttons = 0;
+static EXT_RAM_ATTR int n_buttons;
 
 #define BUTTON_STACK_SIZE	4096
-#define MAX_BUTTONS			16
+#define MAX_BUTTONS			32
 #define DEBOUNCE			50
 #define BUTTON_QUEUE_LEN	10
 
@@ -47,6 +48,7 @@ static EXT_RAM_ATTR struct button_s {
 	TimerHandle_t timer;
 } buttons[MAX_BUTTONS];
 
+// can't use EXT_RAM_ATTR for initialized structure
 static struct {
 	int gpio, level;
 	struct button_s *button;
@@ -67,10 +69,11 @@ static EXT_RAM_ATTR struct {
 	infrared_handler handler;
 } infrared;
 
-static xQueueHandle button_evt_queue;
-static QueueSetHandle_t common_queue_set;
+static EXT_RAM_ATTR QueueHandle_t button_queue;
+static EXT_RAM_ATTR QueueSetHandle_t common_queue_set;
 
 static void buttons_task(void* arg);
+static void buttons_handler(struct button_s *button, int level);
 
 /****************************************************************************************
 * Start task needed by buttons, rotary and infrared
@@ -86,40 +89,33 @@ static void common_task_init(void) {
  }	
 
 /****************************************************************************************
- * GPIO low-level handler
+ * GPIO low-level ISR handler
  */
 static void IRAM_ATTR gpio_isr_handler(void* arg)
 {
 	struct button_s *button = (struct button_s*) arg;
 	BaseType_t woken = pdFALSE;
 
-	if (xTimerGetPeriod(button->timer) > button->debounce / portTICK_RATE_MS) xTimerChangePeriodFromISR(button->timer, button->debounce / portTICK_RATE_MS, &woken); // does that restart the timer? 
-	else xTimerResetFromISR(button->timer, &woken);
+	if (xTimerGetPeriod(button->timer) > pdMS_TO_TICKS(button->debounce)) {
+		if (button->gpio < GPIO_NUM_MAX) xTimerChangePeriodFromISR(button->timer, pdMS_TO_TICKS(button->debounce), &woken); 
+		else xTimerChangePeriod(button->timer, pdMS_TO_TICKS(button->debounce), pdMS_TO_TICKS(10)); 
+	} else {
+		if (button->gpio < GPIO_NUM_MAX) xTimerResetFromISR(button->timer, &woken);
+		else xTimerReset(button->timer, portMAX_DELAY);
+	}
+
 	if (woken) portYIELD_FROM_ISR();
+
 	ESP_EARLY_LOGD(TAG, "INT gpio %u level %u", button->gpio, button->level);
 }
 
 /****************************************************************************************
  * Buttons debounce/longpress timer
  */
-static void buttons_timer( TimerHandle_t xTimer ) {
+static void buttons_timer_handler( TimerHandle_t xTimer ) {
 	struct button_s *button = (struct button_s*) pvTimerGetTimerID (xTimer);
-
-	button->level = gpio_get_level(button->gpio);
-	if (button->shifter && button->shifter->type == button->shifter->level) button->shifter->shifting = true;
-
-	if (button->long_press && !button->long_timer && button->level == button->type) {
-		// detect a long press, so hold event generation
-		ESP_LOGD(TAG, "setting long timer gpio:%u level:%u", button->gpio, button->level);
-		xTimerChangePeriod(xTimer, button->long_press / portTICK_RATE_MS, 0);
-		button->long_timer = true;
-	} else {
-		// send a button pressed/released event (content is copied in queue)
-		ESP_LOGD(TAG, "sending event for gpio:%u level:%u", button->gpio, button->level);
-		// queue will have a copy of button's context
-		xQueueSend(button_evt_queue, button, 0);
-		button->long_timer = false;
-	}
+	// if this is an expanded GPIO, must give cache a chance
+	buttons_handler(button, gpio_exp_get_level(button->gpio, (button->debounce * 3) / 2, NULL));
 }
 
 /****************************************************************************************
@@ -133,11 +129,33 @@ static void buttons_polling( TimerHandle_t xTimer ) {
 	
 		if (level != polled_gpio[i].level) {
 			polled_gpio[i].level = level;
-			buttons_timer(polled_gpio[i].button->timer);
+			buttons_handler(polled_gpio[i].button, level);
 		}	
 	}	
 }
 
+/****************************************************************************************
+ * Buttons timer handler for press/longpress
+ */
+static void buttons_handler(struct button_s *button, int level) {
+	button->level = level;
+
+	if (button->shifter && button->shifter->type == button->shifter->level) button->shifter->shifting = true;
+
+	if (button->long_press && !button->long_timer && button->level == button->type) {
+		// detect a long press, so hold event generation
+		ESP_LOGD(TAG, "setting long timer gpio:%u level:%u", button->gpio, button->level);
+		xTimerChangePeriod(button->timer, button->long_press / portTICK_RATE_MS, 0);
+		button->long_timer = true;
+	} else {
+		// send a button pressed/released event (content is copied in queue)
+		ESP_LOGD(TAG, "sending event for gpio:%u level:%u", button->gpio, button->level);
+		// queue will have a copy of button's context
+		xQueueSend(button_queue, button, 0);
+		button->long_timer = false;
+	}
+}
+
 /****************************************************************************************
  * Tasks that calls the appropriate functions when buttons are pressed
  */
@@ -150,13 +168,13 @@ static void buttons_task(void* arg) {
 		// wait on button, rotary and infrared queues 
 		if ((xActivatedMember = xQueueSelectFromSet( common_queue_set, portMAX_DELAY )) == NULL) continue;
 		
-		if (xActivatedMember == button_evt_queue) {
+		if (xActivatedMember == button_queue) {
 			struct button_s button;
 			button_event_e event;
 			button_press_e press;
 			
 			// received a button event
-			xQueueReceive(button_evt_queue, &button, 0);
+			xQueueReceive(button_queue, &button, 0);
 
 			event = (button.level == button.type) ? BUTTON_PRESSED : BUTTON_RELEASED;		
 
@@ -175,18 +193,18 @@ static void buttons_task(void* arg) {
 				if (event == BUTTON_RELEASED) {
 					// early release of a long-press button, send press/release
 					if (!button.shifting) {
-						(*button.handler)(button.client, BUTTON_PRESSED, press, false);		
-						(*button.handler)(button.client, BUTTON_RELEASED, press, false);		
+						button.handler(button.client, BUTTON_PRESSED, press, false);		
+						button.handler(button.client, BUTTON_RELEASED, press, false);		
 					}
 					// button is a copy, so need to go to real context
 					button.self->shifting = false;
 				} else if (!button.shifting) {
 					// normal long press and not shifting so don't discard
-					(*button.handler)(button.client, BUTTON_PRESSED, press, true);
+					button.handler(button.client, BUTTON_PRESSED, press, true);
 				}  
 			} else {
 				// normal press/release of a button or release of a long-press button
-				if (!button.shifting) (*button.handler)(button.client, event, press, button.long_press);
+				if (!button.shifting) button.handler(button.client, event, press, button.long_press);
 				// button is a copy, so need to go to real context
 				button.self->shifting = false;
 			}
@@ -195,12 +213,12 @@ static void buttons_task(void* arg) {
 			
 			// received a rotary event
 		    xQueueReceive(rotary.queue, &event, 0);
-			
+
 			ESP_LOGD(TAG, "Event: position %d, direction %s", event.state.position,
-                     event.state.direction ? (event.state.direction == ROTARY_ENCODER_DIRECTION_CLOCKWISE ? "CW" : "CCW") : "NOT_SET");
+					event.state.direction ? (event.state.direction == ROTARY_ENCODER_DIRECTION_CLOCKWISE ? "CW" : "CCW") : "NOT_SET");
 			
 			rotary.handler(rotary.client, event.state.direction == ROTARY_ENCODER_DIRECTION_CLOCKWISE ? 
-										  ROTARY_RIGHT : ROTARY_LEFT, false);   
+											ROTARY_RIGHT : ROTARY_LEFT, false);   
 		} else {
 			// this is IR
 			infrared_receive(infrared.rb, infrared.handler);
@@ -224,9 +242,9 @@ void button_create(void *client, int gpio, int type, bool pull, int debounce, bu
 	ESP_LOGI(TAG, "Creating button using GPIO %u, type %u, pull-up/down %u, long press %u shifter %d", gpio, type, pull, long_press, shifter_gpio);
 
 	if (!n_buttons) {
-		button_evt_queue = xQueueCreate(BUTTON_QUEUE_LEN, sizeof(struct button_s));
+		button_queue = xQueueCreate(BUTTON_QUEUE_LEN, sizeof(struct button_s));
 		common_task_init();
-		xQueueAddToSet( button_evt_queue, common_queue_set );
+		xQueueAddToSet( button_queue, common_queue_set );
 	}
 	
 	// just in case this structure is allocated in a future release
@@ -240,7 +258,7 @@ void button_create(void *client, int gpio, int type, bool pull, int debounce, bu
 	buttons[n_buttons].long_press = long_press;
 	buttons[n_buttons].shifter_gpio = shifter_gpio;
 	buttons[n_buttons].type = type;
-	buttons[n_buttons].timer = xTimerCreate("buttonTimer", buttons[n_buttons].debounce / portTICK_RATE_MS, pdFALSE, (void *) &buttons[n_buttons], buttons_timer);
+	buttons[n_buttons].timer = xTimerCreate("buttonTimer", buttons[n_buttons].debounce / portTICK_RATE_MS, pdFALSE, (void *) &buttons[n_buttons], buttons_timer_handler);
 	buttons[n_buttons].self = buttons + n_buttons;
 
 	for (int i = 0; i < n_buttons; i++) {
@@ -257,24 +275,21 @@ void button_create(void *client, int gpio, int type, bool pull, int debounce, bu
 		}	
 	}
 
-	gpio_pad_select_gpio(gpio);
-	gpio_set_direction(gpio, GPIO_MODE_INPUT);
-
-	// we need any edge detection
-	gpio_set_intr_type(gpio, GPIO_INTR_ANYEDGE);
+	gpio_pad_select_gpio_x(gpio);
+	gpio_set_direction_x(gpio, GPIO_MODE_INPUT);
 
 	// do we need pullup or pulldown
 	if (pull) {
-		if (GPIO_IS_VALID_OUTPUT_GPIO(gpio)) {
-			if (type == BUTTON_LOW) gpio_set_pull_mode(gpio, GPIO_PULLUP_ONLY);
-			else gpio_set_pull_mode(gpio, GPIO_PULLDOWN_ONLY);
+		if (GPIO_IS_VALID_OUTPUT_GPIO(gpio) || gpio >= GPIO_NUM_MAX) {
+			if (type == BUTTON_LOW) gpio_set_pull_mode_x(gpio, GPIO_PULLUP_ONLY);
+			else gpio_set_pull_mode_x(gpio, GPIO_PULLDOWN_ONLY);
 		} else {	
 			ESP_LOGW(TAG, "cannot set pull up/down for gpio %u", gpio);
 		}
 	}
 	
 	// and initialize level ...
-	buttons[n_buttons].level = gpio_get_level(gpio);
+	buttons[n_buttons].level = gpio_get_level_x(gpio);
 	
 	// nasty ESP32 bug: INT fires constantly on GPIO 36/39 when ADC1, AMP or Hall sensor is used, which WiFi does when power save (PS) is activated
 	for (int i = 0; polled_gpio[i].gpio != -1; i++) if (polled_gpio[i].gpio == gpio) {
@@ -282,19 +297,21 @@ void button_create(void *client, int gpio, int type, bool pull, int debounce, bu
 			polled_timer = xTimerCreate("buttonsPolling", 100 / portTICK_RATE_MS, pdTRUE, polled_gpio, buttons_polling);		
 			xTimerStart(polled_timer, portMAX_DELAY);
 		}	
-		
+	
 		polled_gpio[i].button = buttons + n_buttons;					
 		polled_gpio[i].level = gpio_get_level(gpio);
 		ESP_LOGW(TAG, "creating polled gpio %u, level %u", gpio, polled_gpio[i].level);		
-		
+	
 		gpio = -1;
 		break;
 	}
 	
-	// only create timers and ISR is this is not a polled gpio
+	// only create ISR if this is not a polled gpio
 	if (gpio != -1) {
-		gpio_isr_handler_add(gpio, gpio_isr_handler, (void*) &buttons[n_buttons]);
-		gpio_intr_enable(gpio);
+		// we need any edge detection
+		gpio_set_intr_type_x(gpio, GPIO_INTR_ANYEDGE);
+		gpio_isr_handler_add_x(gpio, gpio_isr_handler, buttons + n_buttons);
+		gpio_intr_enable_x(gpio);
 	}	
 
 	n_buttons++;
@@ -362,7 +379,7 @@ void *button_remap(void *client, int gpio, button_handler handler, int long_pres
 }
 
 /****************************************************************************************
- * Create rotary encoder
+ * Rotary encoder handler
  */
 static void rotary_button_handler(void *id, button_event_e event, button_press_e mode, bool long_press) {
 	ESP_LOGI(TAG, "Rotary push-button %d", event);
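
With the gpio_*_x wrappers and MAX_BUTTONS raised to 32, a button can now live on an expander pin as well as on a native GPIO. A sketch of such a registration, assuming the existing button_create(client, gpio, type, pull, debounce, handler, long_press, shifter_gpio) prototype shown truncated above, and an expander created at base 100 (all numbers and the handler name are illustrative):

	#include "esp_log.h"
	#include "buttons.h"

	static void play_handler(void *client, button_event_e event, button_press_e press, bool long_press) {
		ESP_LOGI("app", "play button event %d (long %d)", event, long_press);
	}

	void app_buttons_init(void) {
		// GPIO 103 = expander base 100 + pin 3; being >= GPIO_NUM_MAX it is routed
		// through gpio_set_direction_x() / gpio_get_level_x() inside button_create()
		button_create(NULL, 103, BUTTON_LOW, true, 50, play_handler, 1000, -1);
	}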

+ 0 - 10
components/services/globdefs.h

@@ -21,13 +21,3 @@ typedef struct {
 	int timer, base_channel, max;
 } pwm_system_t;
 extern pwm_system_t pwm_system;
-#ifdef CONFIG_SQUEEZEAMP
-#define ADAC dac_tas57xx
-#elif defined(CONFIG_A1S)
-#define ADAC dac_a1s
-#else
-#define ADAC dac_external
-#endif
-void * malloc_init_external(size_t sz);
-void * clone_obj_psram(void * source, size_t source_sz);
-char * strdup_psram(const char * source);

+ 738 - 0
components/services/gpio_exp.c

@@ -0,0 +1,738 @@
+/* GPIO expander
+
+   This example code is in the Public Domain (or CC0 licensed, at your option.)
+
+   Unless required by applicable law or agreed to in writing, this
+   software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+   CONDITIONS OF ANY KIND, either express or implied.
+*/
+#include <string.h>
+#include <stdlib.h>
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+#include "freertos/timers.h"
+#include "freertos/queue.h"
+#include "esp_task.h"
+#include "esp_log.h"
+#include "driver/gpio.h"
+#include "driver/i2c.h"
+#include "driver/spi_master.h"
+#include "gpio_exp.h"
+
+#define GPIO_EXP_INTR	0x100
+#define	GPIO_EXP_WRITE	0x200
+
+/* 
+ shadow register is both output and input, so we assume that reading the
+ ports also reads the value set on output
+*/
+ 
+typedef struct gpio_exp_s {
+	uint32_t first, last;
+	int intr;
+	bool intr_pending;
+	struct  {
+		struct gpio_exp_phy_s phy;
+		spi_device_handle_t spi_handle;
+	};
+	uint32_t shadow, pending;
+	TickType_t age;
+	SemaphoreHandle_t mutex;
+	uint32_t r_mask, w_mask;
+	uint32_t pullup, pulldown;
+	struct gpio_exp_isr_s {
+		gpio_isr_t handler;
+		void *arg;
+		TimerHandle_t timer;
+	} isr[32];
+	struct gpio_exp_model_s const *model;
+} gpio_exp_t;
+
+typedef struct {
+	enum { ASYNC_WRITE } type;
+	int gpio;
+	int level;
+	gpio_exp_t *expander;
+} queue_request_t;
+
+static const char TAG[] = "gpio expander";
+
+static void   IRAM_ATTR intr_isr_handler(void* arg);
+static gpio_exp_t* find_expander(gpio_exp_t *expander, int *gpio);
+
+static void 	pca9535_set_direction(gpio_exp_t* self);
+static uint32_t pca9535_read(gpio_exp_t* self);
+static void 	pca9535_write(gpio_exp_t* self);
+
+static uint32_t	pca85xx_read(gpio_exp_t* self);
+static void 	pca85xx_write(gpio_exp_t* self);
+
+static esp_err_t mcp23017_init(gpio_exp_t* self);
+static void      mcp23017_set_pull_mode(gpio_exp_t* self);
+static void      mcp23017_set_direction(gpio_exp_t* self);
+static uint32_t  mcp23017_read(gpio_exp_t* self);
+static void      mcp23017_write(gpio_exp_t* self);
+
+static esp_err_t mcp23s17_init(gpio_exp_t* self);
+static void      mcp23s17_set_pull_mode(gpio_exp_t* self);
+static void      mcp23s17_set_direction(gpio_exp_t* self);
+static uint32_t  mcp23s17_read(gpio_exp_t* self);
+static void      mcp23s17_write(gpio_exp_t* self);
+
+static void   service_handler(void *arg);
+static void   debounce_handler( TimerHandle_t xTimer );
+
+static esp_err_t i2c_write(uint8_t port, uint8_t addr, uint8_t reg, uint32_t data, int len);
+static uint32_t  i2c_read(uint8_t port, uint8_t addr, uint8_t reg, int len);
+
+static spi_device_handle_t spi_config(struct gpio_exp_phy_s *phy);
+static esp_err_t           spi_write(spi_device_handle_t handle, uint8_t addr, uint8_t reg, uint32_t data, int len);
+static uint32_t            spi_read(spi_device_handle_t handle, uint8_t addr, uint8_t reg, int len);
+
+static const struct gpio_exp_model_s {
+	char *model;
+	gpio_int_type_t trigger;
+	esp_err_t (*init)(gpio_exp_t* self);
+	uint32_t  (*read)(gpio_exp_t* self);
+	void      (*write)(gpio_exp_t* self);
+	void      (*set_direction)(gpio_exp_t* self);
+	void      (*set_pull_mode)(gpio_exp_t* self);
+} registered[] = {
+	{ .model = "pca9535",
+	  .trigger = GPIO_INTR_NEGEDGE, 
+	  .set_direction = pca9535_set_direction,
+	  .read = pca9535_read,
+	  .write = pca9535_write, },
+	{ .model = "pca85xx",
+	  .trigger = GPIO_INTR_NEGEDGE, 
+	  .read = pca85xx_read,
+	  .write = pca85xx_write, },
+	{ .model = "mcp23017",
+	  .trigger = GPIO_INTR_NEGEDGE, 
+	  .init = mcp23017_init,
+	  .set_direction = mcp23017_set_direction,
+	  .set_pull_mode = mcp23017_set_pull_mode,
+	  .read = mcp23017_read,
+	  .write = mcp23017_write, },
+	{ .model = "mcp23s17",
+	  .trigger = GPIO_INTR_NEGEDGE, 
+	  .init = mcp23s17_init,
+	  .set_direction = mcp23s17_set_direction,
+	  .set_pull_mode = mcp23s17_set_pull_mode,
+	  .read = mcp23s17_read,
+	  .write = mcp23s17_write, },
+};
+
+static EXT_RAM_ATTR uint8_t n_expanders;
+static EXT_RAM_ATTR QueueHandle_t message_queue;
+static EXT_RAM_ATTR gpio_exp_t expanders[4];
+static EXT_RAM_ATTR TaskHandle_t service_task;
+
+/******************************************************************************
+ * Retrieve base from an expander reference
+ */
+uint32_t gpio_exp_get_base(gpio_exp_t *expander) { 
+	return expander->first; 
+}
+
+/******************************************************************************
+ * Retrieve reference from a GPIO
+ */
+gpio_exp_t *gpio_exp_get_expander(int gpio) { 
+	int _gpio = gpio;
+	return find_expander(NULL, &_gpio);
+}
+
+/******************************************************************************
+ * Create an I2C expander
+ */
+gpio_exp_t* gpio_exp_create(const gpio_exp_config_t *config) {
+	gpio_exp_t *expander = expanders + n_expanders;
+	
+	if (config->base < GPIO_NUM_MAX || n_expanders == sizeof(expanders)/sizeof(gpio_exp_t)) {
+		ESP_LOGE(TAG, "Base %d GPIO must be at least %d for %s or too many expanders %d", config->base, GPIO_NUM_MAX, config->model, n_expanders);
+		return NULL;
+	}
+
+	// See if we know that model (expanders is zero-initialized)
+	for (int i = 0; !expander->model && i < sizeof(registered)/sizeof(struct gpio_exp_model_s); i++) {
+		if (strcasestr(config->model, registered[i].model)) expander->model = registered + i;
+    }
+
+	// well... try again
+	if (!expander->model) {
+		ESP_LOGE(TAG, "Unknown GPIO expansion chip %s", config->model);
+		return NULL;
+	}
+		
+	memcpy(&expander->phy, &config->phy, sizeof(struct gpio_exp_phy_s));
+
+	// try to initialize the expander if required
+	if (expander->model->init && expander->model->init(expander) != ESP_OK) {
+		ESP_LOGE(TAG, "Cannot create GPIO expander %s, check i2c/spi configuration", config->model);
+		return NULL;
+	}
+
+	n_expanders++;
+	expander->first = config->base;
+	expander->last = config->base + config->count - 1;
+	expander->intr = config->intr;
+	expander->mutex = xSemaphoreCreateMutex();
+
+	// create a task to handle asynchronous requests (only write at this time)
+	if (!message_queue) {
+		// we allocate TCB but stack is static to avoid SPIRAM fragmentation
+		StaticTask_t* xTaskBuffer = (StaticTask_t*) heap_caps_malloc(sizeof(StaticTask_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
+		static EXT_RAM_ATTR StackType_t xStack[4*1024] __attribute__ ((aligned (4)));
+
+		message_queue = xQueueCreate(4, sizeof(queue_request_t));
+		service_task = xTaskCreateStatic(service_handler, "gpio_expander", sizeof(xStack), NULL, ESP_TASK_PRIO_MIN + 1, xStack, xTaskBuffer);
+	}
+
+	// set interrupt if possible
+	if (config->intr >= 0) {
+		gpio_pad_select_gpio(config->intr);
+		gpio_set_direction(config->intr, GPIO_MODE_INPUT);
+
+		switch (expander->model->trigger) {
+		case GPIO_INTR_NEGEDGE:
+		case GPIO_INTR_LOW_LEVEL:
+			gpio_set_pull_mode(config->intr, GPIO_PULLUP_ONLY);
+			break;
+		case GPIO_INTR_POSEDGE:
+		case GPIO_INTR_HIGH_LEVEL:
+			gpio_set_pull_mode(config->intr, GPIO_PULLDOWN_ONLY);
+			break;
+		default:	
+			gpio_set_pull_mode(config->intr, GPIO_PULLUP_PULLDOWN);
+			break;
+		}	
+		
+		gpio_set_intr_type(config->intr, expander->model->trigger);		
+		gpio_isr_handler_add(config->intr, intr_isr_handler, expander);
+		gpio_intr_enable(config->intr);						
+	}
+	
+	ESP_LOGI(TAG, "Create GPIO expander %s at base %u with INT %d at @%x on port/host %d/%d", config->model, config->base, config->intr, config->phy.addr, config->phy.port, config->phy.host);
+	return expander;
+}
+
+/******************************************************************************
+ * Add ISR handler for a GPIO
+ */
+esp_err_t gpio_exp_isr_handler_add(int gpio, gpio_isr_t isr_handler, uint32_t debounce, void *arg, struct gpio_exp_s *expander) {
+	if (gpio < GPIO_NUM_MAX && !expander) return gpio_isr_handler_add(gpio, isr_handler, arg);
+	if ((expander = find_expander(expander, &gpio)) == NULL) return ESP_ERR_INVALID_ARG;
+
+	expander->isr[gpio].handler = isr_handler;
+	expander->isr[gpio].arg = arg;
+	if (debounce) expander->isr[gpio].timer = xTimerCreate("gpioExpDebounce", pdMS_TO_TICKS(debounce), 
+	                                                       pdFALSE, expander->isr + gpio, debounce_handler );
+
+	return ESP_OK;
+}
+
+/******************************************************************************
+ * Remove ISR handler for a GPIO
+ */
+esp_err_t gpio_exp_isr_handler_remove(int gpio, struct gpio_exp_s *expander) {
+	if (gpio < GPIO_NUM_MAX && !expander) return gpio_isr_handler_remove(gpio);
+	if ((expander = find_expander(expander, &gpio)) == NULL) return ESP_ERR_INVALID_ARG;
+
+	if (expander->isr[gpio].timer) xTimerDelete(expander->isr[gpio].timer, portMAX_DELAY);
+	memset(expander->isr + gpio, 0, sizeof(struct gpio_exp_isr_s));
+
+	return ESP_OK;
+}
+
+/******************************************************************************
+ * Set GPIO direction
+ */
+esp_err_t gpio_exp_set_direction(int gpio, gpio_mode_t mode, gpio_exp_t *expander) {
+	if (gpio < GPIO_NUM_MAX && !expander) return gpio_set_direction(gpio, mode);
+	if ((expander = find_expander(expander, &gpio)) == NULL) return ESP_ERR_INVALID_ARG;
+
+	xSemaphoreTake(expander->mutex, pdMS_TO_TICKS(portMAX_DELAY));
+
+	if (mode == GPIO_MODE_INPUT) {
+		expander->r_mask |= 1 << gpio;
+		expander->shadow = expander->model->read(expander);
+		expander->age = ~xTaskGetTickCount();
+	} else {
+		expander->w_mask |= 1 << gpio;
+	}
+	
+	if (expander->r_mask & expander->w_mask) {
+		xSemaphoreGive(expander->mutex);
+		ESP_LOGE(TAG, "GPIO %d on expander base %u can't be r/w", gpio, expander->first);
+		return ESP_ERR_INVALID_ARG;
+	}
+
+	// most expanders want unconfigured GPIO to be set to output
+	if (expander->model->set_direction) expander->model->set_direction(expander);
+
+	xSemaphoreGive(expander->mutex);
+
+	return ESP_OK;
+}	
+
+/******************************************************************************
+ * Get GPIO level with cache
+ */
+int gpio_exp_get_level(int gpio, int age, gpio_exp_t *expander) {
+	if (gpio < GPIO_NUM_MAX && !expander) return gpio_get_level(gpio);
+	if ((expander = find_expander(expander, &gpio)) == NULL) return -1;
+	uint32_t now = xTaskGetTickCount();
+
+	// return last thing we had if we can't get the mutex
+	if (xSemaphoreTake(expander->mutex, pdMS_TO_TICKS(50)) == pdFALSE) {
+		ESP_LOGW(TAG, "Can't get mutex for GPIO %d", expander->first + gpio);
+		return (expander->shadow >> gpio) & 0x01;
+	}
+
+	// re-read the expander if data is too old
+	if (age >= 0 && now - expander->age >= pdMS_TO_TICKS(age)) {
+		uint32_t value = expander->model->read(expander);
+		expander->pending |= (expander->shadow ^ value) & expander->r_mask;
+		expander->shadow = value;
+		expander->age = now;
+	}
+
+	// clear pending bit
+	expander->pending &= ~(1 << gpio);
+
+	xSemaphoreGive(expander->mutex);
+	
+	ESP_LOGD(TAG, "Get level for GPIO %u => read %x", expander->first + gpio, expander->shadow);
+	return (expander->shadow >> gpio) & 0x01;
+}
+
+/******************************************************************************
+ * Set GPIO level with cache
+ */
+esp_err_t gpio_exp_set_level(int gpio, int level, bool direct, gpio_exp_t *expander) {
+	if (gpio < GPIO_NUM_MAX && !expander) return gpio_set_level(gpio, level);
+	if ((expander = find_expander(expander, &gpio)) == NULL) return ESP_ERR_INVALID_ARG;
+	uint32_t mask = 1 << gpio;
+
+	// very limited risk with lack of semaphore here
+	if ((expander->w_mask & mask) == 0) {
+		ESP_LOGW(TAG, "GPIO %d is not set for output", expander->first + gpio);
+		return ESP_ERR_INVALID_ARG;
+	}
+
+	if (direct) {
+		xSemaphoreTake(expander->mutex, pdMS_TO_TICKS(portMAX_DELAY));
+
+		level = level ? mask : 0;
+		mask &= expander->shadow;
+
+		// only write if shadow not up to date
+		if ((mask ^ level) && expander->model->write) {
+			expander->shadow = (expander->shadow & ~(mask | level)) | level;
+			expander->model->write(expander);
+		}
+
+		xSemaphoreGive(expander->mutex);
+		ESP_LOGD(TAG, "Set level %x for GPIO %u => wrote %x", level, expander->first + gpio, expander->shadow);
+	} else {
+		queue_request_t request = { .gpio = gpio, .level = level, .type = ASYNC_WRITE, .expander = expander };
+		if (xQueueSend(message_queue, &request, 0) == pdFALSE) return ESP_ERR_INVALID_RESPONSE;
+
+		// notify service task that will write it when it can
+		xTaskNotify(service_task, GPIO_EXP_WRITE, eSetValueWithoutOverwrite);
+	} 
+
+	return ESP_OK;
+}
+
+/******************************************************************************
+ * Set GPIO pullmode
+ */
+esp_err_t gpio_exp_set_pull_mode(int gpio, gpio_pull_mode_t mode, gpio_exp_t *expander) {
+	if (gpio < GPIO_NUM_MAX && !expander) return gpio_set_pull_mode(gpio, mode);
+	if ((expander = find_expander(expander, &gpio)) != NULL && expander->model->set_pull_mode) {
+
+		expander->pullup &= ~(1 << gpio);
+		expander->pulldown &= ~(1 << gpio);
+
+		if (mode == GPIO_PULLUP_ONLY  || mode == GPIO_PULLUP_PULLDOWN) expander->pullup |= 1 << gpio;
+		if (mode == GPIO_PULLDOWN_ONLY || mode == GPIO_PULLUP_PULLDOWN) expander->pulldown |= 1 << gpio;
+
+		expander->model->set_pull_mode(expander);
+		return ESP_OK;
+	}
+	return ESP_ERR_INVALID_ARG;
+}
+
+/******************************************************************************
+ * Wrapper function
+ */
+esp_err_t gpio_set_pull_mode_x(int gpio, gpio_pull_mode_t mode) {
+	if (gpio < GPIO_NUM_MAX) return gpio_set_pull_mode(gpio, mode);
+	return gpio_exp_set_pull_mode(gpio, mode, NULL);
+}
+
+esp_err_t gpio_set_direction_x(int gpio, gpio_mode_t mode) {
+	if (gpio < GPIO_NUM_MAX) return gpio_set_direction(gpio, mode);
+	return gpio_exp_set_direction(gpio, mode, NULL);
+}
+
+int gpio_get_level_x(int gpio) {
+	if (gpio < GPIO_NUM_MAX) return gpio_get_level(gpio);
+	return gpio_exp_get_level(gpio, 10, NULL);
+}
+
+esp_err_t gpio_set_level_x(int gpio, int level) {
+	if (gpio < GPIO_NUM_MAX) return gpio_set_level(gpio, level);
+	return gpio_exp_set_level(gpio, level, false, NULL);
+}
+
+esp_err_t gpio_isr_handler_add_x(int gpio, gpio_isr_t isr_handler, void* args) {
+	if (gpio < GPIO_NUM_MAX) return gpio_isr_handler_add(gpio, isr_handler, args);
+	return gpio_exp_isr_handler_add(gpio, isr_handler, 0, args, NULL);
+}
+
+esp_err_t gpio_isr_handler_remove_x(int gpio) {
+	if (gpio < GPIO_NUM_MAX) return gpio_isr_handler_remove(gpio);
+	return gpio_exp_isr_handler_remove(gpio, NULL);
+}
+
+
+/****************************************************************************************
+ * INTR low-level handler
+ */
+static void IRAM_ATTR intr_isr_handler(void* arg) {
+	gpio_exp_t *self = (gpio_exp_t*) arg;
+	BaseType_t woken = pdFALSE;
+	
+	// activate all, including ourselves
+	for (int i = 0; i < n_expanders; i++) if (expanders[i].intr == self->intr) expanders[i].intr_pending = true; 
+	
+	xTaskNotifyFromISR(service_task, GPIO_EXP_INTR, eSetValueWithOverwrite, &woken);
+	if (woken) portYIELD_FROM_ISR();
+
+	ESP_EARLY_LOGD(TAG, "INTR for expander base %d", gpio_exp_get_base(self));
+}
+
+/****************************************************************************************
+ * INTR debounce handler
+ */
+static void debounce_handler( TimerHandle_t xTimer ) {
+	struct gpio_exp_isr_s *isr = (struct gpio_exp_isr_s*) pvTimerGetTimerID (xTimer);
+	isr->handler(isr->arg);
+}
+
+/****************************************************************************************
+ * Service task
+ */
+void service_handler(void *arg) {
+	while (1) {
+		queue_request_t request;
+		uint32_t notif = ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
+
+		// we have been notified of an interrupt
+		if (notif == GPIO_EXP_INTR) {
+			/* If we want a smarter bitmap of expanders with a pending interrupt
+			   we'll have to disable interrupts while clearing that bitmap. For 
+			   now, a loop will do */
+			for (int i = 0; i < n_expanders; i++) {
+				gpio_exp_t *expander = expanders + i;
+
+				// no interrupt for that gpio
+				// no interrupt line for this expander
+
+				// only check expander with pending interrupts
+				gpio_intr_disable(expander->intr);
+				if (!expander->intr_pending) {
+					gpio_intr_enable(expander->intr);
+					continue;
+				}
+				expander->intr_pending = false;
+				gpio_intr_enable(expander->intr);
+				
+				xSemaphoreTake(expander->mutex, pdMS_TO_TICKS(50));
+
+				// read GPIOs and clear all pending status
+				uint32_t value = expander->model->read(expander);
+				uint32_t pending = expander->pending | ((expander->shadow ^ value) & expander->r_mask);
+				expander->shadow = value;
+				expander->pending = 0;
+				expander->age = xTaskGetTickCount();
+
+				xSemaphoreGive(expander->mutex);
+				ESP_LOGD(TAG, "Handling GPIO %d reads 0x%04x and has 0x%04x pending", expander->first, expander->shadow, pending);
+				
+				// walk pending bits from MSB to LSB; each pass shifts out the bit just handled
+				for (int gpio = 32, clz; pending; pending <<= (clz + 1)) {
+					clz = __builtin_clz(pending);
+					gpio -= clz + 1;
+					if (expander->isr[gpio].timer) xTimerReset(expander->isr[gpio].timer, 1);	// todo 0
+					else if (expander->isr[gpio].handler) expander->isr[gpio].handler(expander->isr[gpio].arg);
+				}
+			}
+		}
+
+		// check if we have some other pending requests
+		while (xQueueReceive(message_queue, &request, 0) == pdTRUE) {
+			esp_err_t err = gpio_exp_set_level(request.gpio, request.level, true, request.expander);
+			if (err != ESP_OK) ESP_LOGW(TAG, "Can't execute async GPIO %d write request (%d)", request.gpio, err);  
+		}
+	}
+}
+
+/****************************************************************************************
+ * Find the expander related to base
+ */
+static gpio_exp_t* find_expander(gpio_exp_t *expander, int *gpio) {
+	// a mutex would be better, but risk is so small...
+	for (int i = 0; !expander && i < n_expanders; i++) {
+		if (*gpio >= expanders[i].first && *gpio <= expanders[i].last) expander = expanders + i;
+	}
+	
+	// normalize GPIO number
+	if (expander && *gpio >= expander->first) *gpio -= expander->first;
+	
+	return expander;
+}
+
+/****************************************************************************************
+                                        DRIVERS                                       
+****************************************************************************************/
+
+/****************************************************************************************
+ * PCA9535 family : direction, read and write
+ */
+static void pca9535_set_direction(gpio_exp_t* self) {
+	i2c_write(self->phy.port, self->phy.addr, 0x06, self->r_mask, 2);
+}
+
+static uint32_t pca9535_read(gpio_exp_t* self) {
+	return i2c_read(self->phy.port, self->phy.addr, 0x00, 2);
+}
+
+static void pca9535_write(gpio_exp_t* self) {
+	i2c_write(self->phy.port, self->phy.addr, 0x02, self->shadow, 2);
+}
+
+/****************************************************************************************
+ * PCA85xx family : read and write
+ */
+static uint32_t pca85xx_read(gpio_exp_t* self) {
+	// must return the full set of pins, not just inputs
+	uint32_t data = i2c_read(self->phy.port, self->phy.addr, 0xff, 2);
+	return (data & self->r_mask) | (self->shadow & ~self->r_mask);
+}
+
+static void pca85xx_write(gpio_exp_t* self) {
+	/* 
+	 There is no good option with this chip: normally, unused pins should be set to input
+	 to avoid any conflict, but then they float and generate tons of spurious interrupts. So
+	 option 1 is to let them float and option 2 is to set them as output to 0.
+	 In addition, setting an output pin to 1 is equivalent to making it an input, and if it is
+	 used to short a LED (e.g.) instead of being the sink, then it generates spurious interrupts.
+	*/
+	// option 1 
+	// i2c_write(self->phy.port, self->phy.addr, 0xff, (self->shadow & self->w_mask) | ~self->w_mask, 2);
+	// option 2
+	i2c_write(self->phy.port, self->phy.addr, 0xff, (self->shadow & self->w_mask) | self->r_mask, 2);
+}
+
+/****************************************************************************************
+ * MCP23017 family : init, direction, read and write
+ */
+static esp_err_t mcp23017_init(gpio_exp_t* self) {
+	/*
+	0111 x10x = same bank, mirrored single INT, no sequential, open drain, active low
+	not sure about this odd remapping of the control register itself, really?
+	*/
+	esp_err_t err = i2c_write(self->phy.port, self->phy.addr, 0x05, 0x74, 1);
+	err |= i2c_write(self->phy.port, self->phy.addr, 0x0a, 0x74, 1);
+
+	// no interrupt on comparison or on change
+	err |= i2c_write(self->phy.port, self->phy.addr, 0x04, 0x00, 2);
+	err |= i2c_write(self->phy.port, self->phy.addr, 0x08, 0x00, 2);
+
+	return err;
+}
+
+static void mcp23017_set_direction(gpio_exp_t* self) {
+	// default to input and set real input to generate interrupt
+	i2c_write(self->phy.port, self->phy.addr, 0x00, ~self->w_mask, 2);
+	i2c_write(self->phy.port, self->phy.addr, 0x04, self->r_mask, 2);
+}
+
+static void mcp23017_set_pull_mode(gpio_exp_t* self) {
+	i2c_write(self->phy.port, self->phy.addr, 0x0c, self->pullup, 2);
+}
+
+static uint32_t mcp23017_read(gpio_exp_t* self) {
+	// read the pins value, not the stored one @interrupt
+	return i2c_read(self->phy.port, self->phy.addr, 0x12, 2);
+}
+
+static void mcp23017_write(gpio_exp_t* self) {
+	i2c_write(self->phy.port, self->phy.addr, 0x12, self->shadow, 2);
+}
+
+/****************************************************************************************
+ * MCP23s17 family : init, direction, read and write
+ */
+static esp_err_t mcp23s17_init(gpio_exp_t* self) {
+	if ((self->spi_handle = spi_config(&self->phy)) == NULL) return ESP_ERR_INVALID_ARG;
+	
+	/*
+	0111 x10x = same bank, mirrored single INT, no sequential, open drain, active low
+	not sure about this odd remapping of the control register itself, really?
+	*/
+	esp_err_t err = spi_write(self->spi_handle, self->phy.addr, 0x05, 0x74, 1);
+	err |= spi_write(self->spi_handle, self->phy.addr, 0x0a, 0x74, 1);
+
+	// no interrupt on comparison or on change
+	err |= spi_write(self->spi_handle, self->phy.addr, 0x04, 0x00, 2);
+	err |= spi_write(self->spi_handle, self->phy.addr, 0x08, 0x00, 2);
+
+	return err;
+}
+
+static void mcp23s17_set_direction(gpio_exp_t* self) {
+	// default to input and set real input to generate interrupt
+	spi_write(self->spi_handle, self->phy.addr, 0x00, ~self->w_mask, 2);
+	spi_write(self->spi_handle, self->phy.addr, 0x04, self->r_mask, 2);
+}
+
+static void mcp23s17_set_pull_mode(gpio_exp_t* self) {
+	spi_write(self->spi_handle, self->phy.addr, 0x0c, self->pullup, 2);
+}
+
+static uint32_t mcp23s17_read(gpio_exp_t* self) {
+	// read the pins value, not the stored one @interrupt
+	return spi_read(self->spi_handle, self->phy.addr, 0x12, 2);
+}
+
+static void mcp23s17_write(gpio_exp_t* self) {
+	spi_write(self->spi_handle, self->phy.addr, 0x12, self->shadow, 2);
+}
+
+/***************************************************************************************
+                                     I2C low level                                   
+***************************************************************************************/
+
+/****************************************************************************************
+ * I2C write up to 32 bits
+ */
+static esp_err_t i2c_write(uint8_t port, uint8_t addr, uint8_t reg, uint32_t data, int len) {
+	i2c_cmd_handle_t cmd = i2c_cmd_link_create();
+    i2c_master_start(cmd);
+	
+	i2c_master_write_byte(cmd, (addr << 1) | I2C_MASTER_WRITE, I2C_MASTER_NACK);
+	if (reg != 0xff) i2c_master_write_byte(cmd, reg, I2C_MASTER_NACK);
+
+	// works with our endianness
+	if (len > 1) i2c_master_write(cmd, (uint8_t*) &data, len, I2C_MASTER_NACK);
+	else i2c_master_write_byte(cmd, data, I2C_MASTER_NACK);
+    
+	i2c_master_stop(cmd);
+	esp_err_t ret = i2c_master_cmd_begin(port, cmd, 100 / portTICK_RATE_MS);
+    i2c_cmd_link_delete(cmd);
+	
+	if (ret != ESP_OK) {		
+		ESP_LOGW(TAG, "I2C write failed");
+	}
+	
+    return ret;
+}
+
+/****************************************************************************************
+ * I2C read up to 32 bits
+ */
+static uint32_t i2c_read(uint8_t port, uint8_t addr, uint8_t reg, int len) {
+	uint32_t data = 0;
+	
+	i2c_cmd_handle_t cmd = i2c_cmd_link_create();
+
+    i2c_master_start(cmd);
+
+	// when using a register, write its value then the device address again
+	if (reg != 0xff) {
+		i2c_master_write_byte(cmd, (addr << 1) | I2C_MASTER_WRITE, I2C_MASTER_NACK);
+		i2c_master_write_byte(cmd, reg, I2C_MASTER_NACK);
+		i2c_master_start(cmd);
+		i2c_master_write_byte(cmd, (addr << 1) | I2C_MASTER_READ, I2C_MASTER_NACK);
+	} else {
+		i2c_master_write_byte(cmd, (addr << 1) | I2C_MASTER_READ, I2C_MASTER_NACK);
+	}
+	
+	// works with our endianness
+	if (len > 1) i2c_master_read(cmd, (uint8_t*) &data, len, I2C_MASTER_LAST_NACK);
+	else i2c_master_read_byte(cmd, (uint8_t*) &data, I2C_MASTER_NACK);
+		
+    i2c_master_stop(cmd);
+    esp_err_t ret = i2c_master_cmd_begin(port, cmd, 100 / portTICK_RATE_MS);
+    i2c_cmd_link_delete(cmd);
+	
+	if (ret != ESP_OK) {
+		ESP_LOGW(TAG, "I2C read failed");
+	}
+
+	return data;
+}
+
+/***************************************************************************************
+                                     SPI low level                                   
+***************************************************************************************/
+
+/****************************************************************************************
+ * SPI device addition
+ */
+static spi_device_handle_t spi_config(struct gpio_exp_phy_s *phy) {
+    spi_device_interface_config_t config = { };
+    spi_device_handle_t handle = NULL;
+
+	config.command_bits = config.address_bits = 8;
+    config.clock_speed_hz = phy->speed ? phy->speed : SPI_MASTER_FREQ_8M;
+    config.spics_io_num = phy->cs_pin;
+    config.queue_size = 1;
+	config.flags = SPI_DEVICE_NO_DUMMY;
+
+    spi_bus_add_device( phy->host, &config, &handle );
+	ESP_LOGI(TAG, "SPI expander initialized on host:%d with cs:%d and speed:%dHz", phy->host, phy->cs_pin, config.clock_speed_hz);
+
+	return handle;
+}
+
+/****************************************************************************************
+ * SPI write up to 32 bits
+ */
+static esp_err_t spi_write(spi_device_handle_t handle, uint8_t addr, uint8_t reg, uint32_t data, int len) {
+    spi_transaction_t transaction = { };
+
+	// rx_buffer is NULL, nothing to receive
+	transaction.flags = SPI_TRANS_USE_TXDATA;
+	transaction.cmd = addr << 1;
+	transaction.addr = reg;
+	transaction.tx_data[0] = data; transaction.tx_data[1] = data >> 8;
+	transaction.length = len * 8;
+
+	// only do polling as we don't have contention on SPI (otherwise DMA for transfers > 16 bytes)		
+	return spi_device_polling_transmit(handle, &transaction);
+}
+
+/****************************************************************************************
+ * SPI read up to 32 bits
+ */
+static uint32_t spi_read(spi_device_handle_t handle, uint8_t addr, uint8_t reg, int len) {
+	spi_transaction_t *transaction = heap_caps_calloc(1, sizeof(spi_transaction_t), MALLOC_CAP_DMA);
+
+	// tx_buffer is NULL, nothing to transmit except cmd/addr
+	transaction->flags = SPI_TRANS_USE_RXDATA;
+	transaction->cmd = (addr << 1) | 0x01;
+	transaction->addr = reg;
+	transaction->length = len * 8;
+
+	// only do polling as we don't have contention on SPI (otherwise DMA for transfers > 16 bytes)		
+	spi_device_polling_transmit(handle, transaction);
+	uint32_t data = *(uint32_t*) transaction->rx_data;
+	free(transaction);
+
+	return data;
+}
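
The dispatch loop in service_handler() walks the pending bitmap from the most-significant set bit downwards, shifting the handled bit (and everything above it) out on every pass. A standalone trace of that walk, with an arbitrary sample value, just to make the arithmetic visible (printf stands in for calling the registered handler):

	#include <stdio.h>
	#include <stdint.h>

	int main(void) {
		uint32_t pending = 0x0000000A;               // GPIOs 3 and 1 have changed
		for (int gpio = 32, clz; pending; pending <<= (clz + 1)) {
			clz = __builtin_clz(pending);            // leading zeros locate the highest set bit
			gpio -= clz + 1;                         // 32-29=3 on the first pass, 3-2=1 on the second
			printf("dispatch GPIO %d\n", gpio);
		}
		return 0;
	}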

+ 61 - 0
components/services/gpio_exp.h

@@ -0,0 +1,61 @@
+/* GPIO expander
+
+   This example code is in the Public Domain (or CC0 licensed, at your option.)
+
+   Unless required by applicable law or agreed to in writing, this
+   software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+   CONDITIONS OF ANY KIND, either express or implied.
+*/
+#pragma once
+
+#include <stdint.h>
+#include "freertos/FreeRTOS.h"
+#include "driver/gpio.h"
+
+struct gpio_exp_s;
+
+typedef struct {
+	char model[32];
+	int intr;
+	uint8_t count;
+	uint32_t base;
+	struct gpio_exp_phy_s {
+		uint8_t addr;
+		struct {				// for I2C
+			uint8_t port;
+		};
+		struct {				// for SPI
+			uint32_t speed;	
+			uint8_t host;	
+			uint8_t cs_pin; 		
+		};
+	} phy;	
+} gpio_exp_config_t;
+
+// set <intr> to -1 if there is no interrupt line
+struct gpio_exp_s*  gpio_exp_create(const gpio_exp_config_t *config);
+uint32_t            gpio_exp_get_base(struct gpio_exp_s *expander);
+struct gpio_exp_s*  gpio_exp_get_expander(int gpio);
+#define				gpio_is_expanded(gpio) (gpio >= GPIO_NUM_MAX)
+
+/* 
+ For all functions below when <expander> is provided, GPIO's can be numbered from 0. If <expander>
+ is NULL, then GPIO must start from base OR be on-chip
+*/
+esp_err_t	gpio_exp_set_direction(int gpio, gpio_mode_t mode, struct gpio_exp_s *expander);
+esp_err_t   gpio_exp_set_pull_mode(int gpio, gpio_pull_mode_t mode, struct gpio_exp_s *expander);
+int         gpio_exp_get_level(int gpio, int age, struct gpio_exp_s *expander);
+esp_err_t   gpio_exp_set_level(int gpio, int level, bool direct, struct gpio_exp_s *expander);
+esp_err_t   gpio_exp_isr_handler_add(int gpio, gpio_isr_t isr, uint32_t debounce, void *arg, struct gpio_exp_s *expander);
+esp_err_t   gpio_exp_isr_handler_remove(int gpio, struct gpio_exp_s *expander);
+
+// unified function to use either built-in or expanded GPIO
+esp_err_t	gpio_set_direction_x(int gpio, gpio_mode_t mode);
+esp_err_t   gpio_set_pull_mode_x(int gpio, gpio_pull_mode_t mode);
+int         gpio_get_level_x(int gpio);
+esp_err_t   gpio_set_level_x(int gpio, int level);
+esp_err_t   gpio_isr_handler_add_x(int gpio, gpio_isr_t isr_handler, void* args);
+esp_err_t   gpio_isr_handler_remove_x(int gpio);
+#define     gpio_set_intr_type_x(gpio, type) do { if (gpio < GPIO_NUM_MAX) gpio_set_intr_type(gpio, type); } while (0)
+#define     gpio_intr_enable_x(gpio) do { if (gpio < GPIO_NUM_MAX) gpio_intr_enable(gpio); } while (0)
+#define     gpio_pad_select_gpio_x(gpio) do { if (gpio < GPIO_NUM_MAX) gpio_pad_select_gpio(gpio); } while (0)
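
To make the addressing convention above concrete, here is a small hypothetical sketch (only the API calls come from this header; the model string, I2C address and pin numbers are invented, and <base> is assumed to start right after the on-chip range):

    gpio_exp_config_t cfg = {
        .model = "pca9535",                 // illustrative expander model
        .intr  = -1,                        // no interrupt line
        .count = 16,
        .base  = GPIO_NUM_MAX,              // expanded numbering assumed to start after on-chip GPIOs
        .phy   = { .addr = 0x20, .port = 0 },
    };
    struct gpio_exp_s *expander = gpio_exp_create(&cfg);

    // pins can be addressed relative to the expander...
    gpio_exp_set_direction(0, GPIO_MODE_OUTPUT, expander);
    gpio_exp_set_level(0, 1, true, expander);

    // ...or through the unified "_x" calls using the absolute number (base + pin)
    gpio_set_direction_x(GPIO_NUM_MAX + 0, GPIO_MODE_OUTPUT);
    gpio_set_level_x(GPIO_NUM_MAX + 0, 1);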

+ 10 - 8
components/services/led.c

@@ -19,6 +19,7 @@
 #include "driver/gpio.h"
 #include "driver/ledc.h"
 #include "platform_config.h"
+#include "gpio_exp.h"
 #include "led.h"
 #include "globdefs.h"
 #include "accessors.h"
@@ -40,7 +41,8 @@ static EXT_RAM_ATTR struct led_s {
 	TimerHandle_t timer;
 } leds[MAX_LED];
 
-static EXT_RAM_ATTR struct {
+// can't use EXT_RAM_ATTR for initialized structure
+static struct {
 	int gpio;
 	int active;
 	int pwm;
@@ -53,7 +55,7 @@ static int led_max = 2;
  * 
  */
 static void set_level(struct led_s *led, bool on) {
-	if (led->pwm < 0) gpio_set_level(led->gpio, on ? led->onstate : !led->onstate);
+	if (led->pwm < 0 || led->gpio >= GPIO_NUM_MAX) gpio_set_level_x(led->gpio, on ? led->onstate : !led->onstate);
 	else {
 		ledc_set_duty(LEDC_HIGH_SPEED_MODE, led->channel, on ? led->pwm : (led->onstate ? 0 : pwm_system.max));
 		ledc_update_duty(LEDC_HIGH_SPEED_MODE, led->channel);
@@ -179,9 +181,9 @@ bool led_config(int idx, gpio_num_t gpio, int onstate, int pwm) {
 	leds[idx].onstate = onstate;
 	leds[idx].pwm = -1;
 
-	if (pwm < 0) {	
-		gpio_pad_select_gpio(gpio);
-		gpio_set_direction(gpio, GPIO_MODE_OUTPUT);
+	if (pwm < 0 || gpio >= GPIO_NUM_MAX) {	
+		gpio_pad_select_gpio_x(gpio);
+		gpio_set_direction_x(gpio, GPIO_MODE_OUTPUT);
 	} else {	
 		leds[idx].channel = pwm_system.base_channel++;
 		leds[idx].pwm = pwm_system.max * powf(pwm / 100.0, 3);
@@ -232,10 +234,10 @@ void led_svc_init(void) {
 	parse_set_GPIO(set_led_gpio);
 #endif
 
-	char *nvs_item = config_alloc_get(NVS_TYPE_STR, "led_brightness"), *p; 
+	char *nvs_item = config_alloc_get(NVS_TYPE_STR, "led_brightness"); 
 	if (nvs_item) {
-		if ((p = strcasestr(nvs_item, "green")) != NULL) green.pwm = atoi(strchr(p, '=') + 1);
-		if ((p = strcasestr(nvs_item, "red")) != NULL) red.pwm = atoi(strchr(p, '=') + 1);
+		PARSE_PARAM(nvs_item, "green", '=', green.pwm);
+		PARSE_PARAM(nvs_item, "red", '=', red.pwm);
 		free(nvs_item);
 	}
 

+ 1 - 2
components/services/messaging.c

@@ -14,8 +14,7 @@
 #include "nvs_utilities.h"
 #include "platform_esp32.h"
 #include "messaging.h"
-#include "trace.h"
-#include "globdefs.h"
+#include "tools.h"
 /************************************
  * Globals
  */

+ 2 - 2
components/services/monitor.c

@@ -23,7 +23,7 @@
 #include "accessors.h"
 #include "messaging.h"
 #include "cJSON.h"
-#include "trace.h"
+#include "tools.h"
 
 #define MONITOR_TIMER	(10*1000)
 #define SCRATCH_SIZE	256
@@ -147,7 +147,7 @@ static void monitor_callback(TimerHandle_t xTimer) {
  * 
  */
 static void jack_handler_default(void *id, button_event_e event, button_press_e mode, bool long_press) {
-	ESP_LOGD(TAG, "Jack %s", event == BUTTON_PRESSED ? "inserted" : "removed");
+	ESP_LOGI(TAG, "Jack %s", event == BUTTON_PRESSED ? "inserted" : "removed");
 	if (jack_handler_svc) (*jack_handler_svc)(event == BUTTON_PRESSED);
 }
 

+ 26 - 19
components/services/rotary_encoder.c

@@ -91,6 +91,7 @@
 
 #include "esp_log.h"
 #include "driver/gpio.h"
+#include "gpio_exp.h"
 
 #define TAG "rotary_encoder"
 
@@ -148,7 +149,7 @@ static uint8_t _process(rotary_encoder_info_t * info)
     if (info != NULL)
     {
         // Get state of input pins.
-        uint8_t pin_state = (gpio_get_level(info->pin_b) << 1) | gpio_get_level(info->pin_a);
+        uint8_t pin_state = (gpio_get_level_x(info->pin_b) << 1) | gpio_get_level_x(info->pin_a);
 
         // Determine new state from the pins and state table.
 #ifdef ROTARY_ENCODER_DEBUG
@@ -198,12 +199,18 @@ static void _isr_rotenc(void * args)
                 .direction = info->state.direction,
             },
         };
-        BaseType_t task_woken = pdFALSE;
-        xQueueOverwriteFromISR(info->queue, &queue_event, &task_woken);
-        if (task_woken)
-        {
-            portYIELD_FROM_ISR();
-        }
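+		// on-chip pins reach this handler from a true ISR, so the FromISR variant is required;
+		// expanded pins are serviced outside ISR context and use the plain queue call instead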
+		if (info->pin_a < GPIO_NUM_MAX) {
+			BaseType_t task_woken = pdFALSE;
+			xQueueOverwriteFromISR(info->queue, &queue_event, &task_woken);
+			if (task_woken)
+			{
+				portYIELD_FROM_ISR();
+			}
+		}
+		else 
+		{
+			xQueueOverwrite(info->queue, &queue_event);
+		}
     }
 }
 
@@ -220,19 +227,19 @@ esp_err_t rotary_encoder_init(rotary_encoder_info_t * info, gpio_num_t pin_a, gp
         info->state.direction = ROTARY_ENCODER_DIRECTION_NOT_SET;
 
         // configure GPIOs
-        gpio_pad_select_gpio(info->pin_a);
-        gpio_set_pull_mode(info->pin_a, GPIO_PULLUP_ONLY);
-        gpio_set_direction(info->pin_a, GPIO_MODE_INPUT);
-        gpio_set_intr_type(info->pin_a, GPIO_INTR_ANYEDGE);
+        gpio_pad_select_gpio_x(info->pin_a);
+        gpio_set_pull_mode_x(info->pin_a, GPIO_PULLUP_ONLY);
+        gpio_set_direction_x(info->pin_a, GPIO_MODE_INPUT);
+        gpio_set_intr_type_x(info->pin_a, GPIO_INTR_ANYEDGE);
 
-        gpio_pad_select_gpio(info->pin_b);
-        gpio_set_pull_mode(info->pin_b, GPIO_PULLUP_ONLY);
-        gpio_set_direction(info->pin_b, GPIO_MODE_INPUT);
-        gpio_set_intr_type(info->pin_b, GPIO_INTR_ANYEDGE);
+        gpio_pad_select_gpio_x(info->pin_b);
+        gpio_set_pull_mode_x(info->pin_b, GPIO_PULLUP_ONLY);
+        gpio_set_direction_x(info->pin_b, GPIO_MODE_INPUT);
+        gpio_set_intr_type_x(info->pin_b, GPIO_INTR_ANYEDGE);
 
         // install interrupt handlers
-        gpio_isr_handler_add(info->pin_a, _isr_rotenc, info);
-        gpio_isr_handler_add(info->pin_b, _isr_rotenc, info);
+        gpio_isr_handler_add_x(info->pin_a, _isr_rotenc, info);
+        gpio_isr_handler_add_x(info->pin_b, _isr_rotenc, info);
     }
     else
     {
@@ -280,8 +287,8 @@ esp_err_t rotary_encoder_uninit(rotary_encoder_info_t * info)
     esp_err_t err = ESP_OK;
     if (info)
     {
-        gpio_isr_handler_remove(info->pin_a);
-        gpio_isr_handler_remove(info->pin_b);
+        gpio_isr_handler_remove_x(info->pin_a);
+        gpio_isr_handler_remove_x(info->pin_b);
     }
     else
     {

+ 32 - 44
components/services/services.c

@@ -12,15 +12,13 @@
 #include "driver/ledc.h"
 #include "driver/i2c.h"
 #include "platform_config.h"
+#include "gpio_exp.h"
 #include "battery.h"
 #include "led.h"
 #include "monitor.h"
 #include "globdefs.h"
 #include "accessors.h"
 #include "messaging.h"
-#include "esp_heap_caps.h"
-#include "esp_log.h"
-
 
 extern void battery_svc_init(void);
 extern void monitor_svc_init(void);
@@ -38,48 +36,14 @@ pwm_system_t pwm_system = {
 
 static const char *TAG = "services";
 
-
-void * malloc_init_external(size_t sz){
-	void * ptr=NULL;
-	ptr = heap_caps_malloc(sz, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);
-	if(ptr==NULL){
-		ESP_LOGE(TAG,"malloc_init_external:  unable to allocate %d bytes of PSRAM!",sz);
-	}
-	else {
-		memset(ptr,0x00,sz);
-	}
-	return ptr;
-}
-void * clone_obj_psram(void * source, size_t source_sz){
-	void * ptr=NULL;
-	ptr = heap_caps_malloc(source_sz, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);
-	if(ptr==NULL){
-		ESP_LOGE(TAG,"clone_obj_psram:  unable to allocate %d bytes of PSRAM!",source_sz);
-	}
-	else {
-		memcpy(ptr,source,source_sz);
-	}
-	return ptr;
-}
-char * strdup_psram(const char * source){
-	void * ptr=NULL;
-	size_t source_sz = strlen(source)+1;
-	ptr = heap_caps_malloc(source_sz, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);
-	if(ptr==NULL){
-		ESP_LOGE(TAG,"strdup_psram:  unable to allocate %d bytes of PSRAM! Cannot clone string %s",source_sz,source);
-	}
-	else {
-		memset(ptr,0x00,source_sz);
-		strcpy(ptr,source);
-	}
-	return ptr;
-}
-
 /****************************************************************************************
  * 
  */
-void set_power_gpio(int gpio, char *value) {
+void set_chip_power_gpio(int gpio, char *value) {
 	bool parsed = true;
+
+	// we only parse on-chip GPIOs
+	if (gpio >= GPIO_NUM_MAX) return;
 	
 	if (!strcasecmp(value, "vcc") ) {
 		gpio_pad_select_gpio(gpio);
@@ -89,9 +53,26 @@ void set_power_gpio(int gpio, char *value) {
 		gpio_pad_select_gpio(gpio);
 		gpio_set_direction(gpio, GPIO_MODE_OUTPUT);
 		gpio_set_level(gpio, 0);
-	} else parsed = false	;
+	} else parsed = false;
 	
 	if (parsed) ESP_LOGI(TAG, "set GPIO %u to %s", gpio, value);
+}	
+
+void set_exp_power_gpio(int gpio, char *value) {
+	bool parsed = true;
+
+	// we only parse expanded GPIOs here
+	if (gpio < GPIO_NUM_MAX) return;
+	
+	if (!strcasecmp(value, "vcc") ) {
+		gpio_exp_set_direction(gpio, GPIO_MODE_OUTPUT, NULL);
+		gpio_exp_set_level(gpio, 1, true, NULL);
+	} else if (!strcasecmp(value, "gnd")) {
+		gpio_exp_set_direction(gpio, GPIO_MODE_OUTPUT, NULL);
+		gpio_exp_set_level(gpio, 0, true, NULL);
+	} else parsed = false;
+	
+	if (parsed) ESP_LOGI(TAG, "set expanded GPIO %u to %s", gpio, value);
  }	
  
 
@@ -109,8 +90,8 @@ void services_init(void) {
 	}
 #endif
 
-	// set potential power GPIO
-	parse_set_GPIO(set_power_gpio);
+	// set potential power GPIOs on chip first, in case expanders are powered by one of them
+	parse_set_GPIO(set_chip_power_gpio);
 
 	// shared I2C bus 
 	const i2c_config_t * i2c_config = config_i2c_get(&i2c_system_port);
@@ -140,6 +121,13 @@ void services_init(void) {
 		ESP_LOGW(TAG, "no SPI configured");
 	}	
 
+	// create GPIO expanders
+	const gpio_exp_config_t* gpio_exp_config;
+	for (int count = 0; (gpio_exp_config = config_gpio_exp_get(count)); count++) gpio_exp_create(gpio_exp_config);
+
+	// now set potential power GPIOs on expanders
+	parse_set_GPIO(set_exp_power_gpio);
+
 	// system-wide PWM timer configuration
 	ledc_timer_config_t pwm_timer = {
 		.duty_resolution = LEDC_TIMER_13_BIT, 

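As with the sketch after gpio_exp.h, an expander sitting on the shared SPI bus initialized just above would be described through the SPI leg of the phy structure; everything below (model string, host id, CS pin, speed, base) is illustrative only:

    gpio_exp_config_t spi_cfg = {
        .model = "mcp23s17",                // illustrative SPI expander
        .intr  = -1,
        .count = 16,
        .base  = GPIO_NUM_MAX + 16,         // after a first expander's range (assumption)
        .phy   = { .addr = 0,               // hardware address on the SPI chip, if any
                   .speed = 8000000, .host = 1, .cs_pin = 5 },
    };
    gpio_exp_create(&spi_cfg);
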
+ 28 - 0
components/spotify/CMakeLists.txt

@@ -0,0 +1,28 @@
+# this must be set *before* idf_component_register
+set(CMAKE_CXX_STANDARD 17)
+
+idf_component_register(
+		SRC_DIRS . 
+		INCLUDE_DIRS . "cspot/include" "cspot/bell/include" "cspot/protos"
+		PRIV_REQUIRES mbedtls mdns nvs_flash platform_config services esp_http_server tools
+		LDFRAGMENTS "linker.lf"
+)
+
+include_directories("../codecs/inc")
+add_definitions(-DBELL_USE_MBEDTLS)
+add_definitions(-Wno-unused-variable -Wno-unused-const-variable -Wchar-subscripts -Wunused-label -Wmaybe-uninitialized -Wmisleading-indentation)
+
+set(BELL_DISABLE_CODECS 1)
+set(BELL_TREMOR_EXTERNAL "idf::codecs")
+set(BELL_CJSON_EXTERNAL "idf::json")
+
+add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/cspot ${CMAKE_CURRENT_BINARY_DIR}/cspot)
+target_link_libraries(${COMPONENT_LIB} PRIVATE cspot ${EXTRA_REQ_LIBS})
+
+#if (!WIN32)
+#	message(${CMAKE_CURRENT_SOURCE_DIR}/cmake/generate_protos.sh)
+#	execute_process(COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/cmake/generate_protos.sh" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
+#endif ()
+
+
+

+ 381 - 0
components/spotify/Shim.cpp

@@ -0,0 +1,381 @@
+/* 
+ *  This software is released under the MIT License.
+ *  https://opensource.org/licenses/MIT
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include "sdkconfig.h"
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+#include "esp_system.h"
+#include "esp_wifi.h"
+#include "esp_event.h"
+#include "esp_log.h"
+#include "esp_http_server.h"
+
+#include <ConstantParameters.h>
+#include <Session.h>
+#include <SpircController.h>
+#include <MercuryManager.h>
+#include <ZeroconfAuthenticator.h>
+#include <ApResolve.h>
+#include <HTTPServer.h>
+#include "ConfigJSON.h"
+#include "Logger.h"
+
+#include "platform_config.h"
+#include "tools.h"
+#include "cspot_private.h"
+#include "cspot_sink.h"
+#include "Shim.h"
+
+extern "C" {
+	httpd_handle_t get_http_server(int *port);
+	static esp_err_t handlerWrapper(httpd_req_t *req);
+};
+
+#define CSPOT_STACK_SIZE (8*1024)
+
+static const char *TAG = "cspot";
+
+// using a global is pretty ugly, but it's easier with all the lambdas below
+static EXT_RAM_ATTR struct cspot_s {
+	char name[32];
+	cspot_cmd_cb_t cHandler;
+	cspot_data_cb_t dHandler;
+	TaskHandle_t TaskHandle;
+    std::shared_ptr<LoginBlob> blob;
+} cspot;
+
+std::shared_ptr<ConfigJSON> configMan;
+std::shared_ptr<NVSFile> file;
+std::shared_ptr<MercuryManager> mercuryManager;
+std::shared_ptr<SpircController> spircController;
+
+/****************************************************************************************
+ * Main task (could it be deleted after spirc has started?)
+ */
+static void cspotTask(void *pvParameters) {
+	char configName[] = "cspot_config";
+	std::string jsonConfig;	
+	
+    // Config file
+    file = std::make_shared<NVSFile>();
+	configMan = std::make_shared<ConfigJSON>(configName, file);
+   
+	// We might have no config at all
+	if (!file->readFile(configName, jsonConfig) || !jsonConfig.length()) {
+		ESP_LOGW(TAG, "Cannot load config, using default");
+		
+		configMan->deviceName = cspot.name;
+		configMan->format = AudioFormat::OGG_VORBIS_160;
+		configMan->volume = 32767;
+
+		configMan->save();	
+	}
+	
+	// safely load config now
+	configMan->load();
+	if (!configMan->deviceName.length()) configMan->deviceName = cspot.name;
+	ESP_LOGI(TAG, "Started CSpot with %s (bitrate %d)", configMan->deviceName.c_str(), configMan->format == AudioFormat::OGG_VORBIS_320 ? 320 : (configMan->format == AudioFormat::OGG_VORBIS_160 ? 160 : 96));
+
+	// All we do here is notify the task to start the mercury loop
+    auto createPlayerCallback = [](std::shared_ptr<LoginBlob> blob) {
+		// TODO: handle/refuse the case where another user takes ownership
+		cspot.blob = blob;
+		xTaskNotifyGive(cspot.TaskHandle);
+    };
+
+	int port;
+	httpd_handle_t server = get_http_server(&port);
+	auto httpServer = std::make_shared<ShimHTTPServer>(server, port);
+
+    auto authenticator = std::make_shared<ZeroconfAuthenticator>(createPlayerCallback, httpServer);
+	authenticator->registerHandlers();
+
+	// wait to be notified, then run a mercury loop
+	while (1) {
+		ulTaskNotifyTake(pdFALSE, portMAX_DELAY);
+
+        auto session = std::make_unique<Session>();
+        session->connectWithRandomAp();
+        auto token = session->authenticate(cspot.blob);
+
+        ESP_LOGI(TAG, "Creating Spotify(CSpot) player");
+		
+        // Auth successful
+        if (token.size() > 0) {
+			// tell sink that we are taking over
+			cspot.cHandler(CSPOT_SETUP, 44100);
+
+			auto audioSink = std::make_shared<ShimAudioSink>();
+
+            // @TODO Actually store this token somewhere
+            mercuryManager = std::make_shared<MercuryManager>(std::move(session));
+            mercuryManager->startTask();
+
+            spircController = std::make_shared<SpircController>(mercuryManager, cspot.blob->username, audioSink);
+
+			spircController->setEventHandler([](CSpotEvent &event) {
+            switch (event.eventType) {
+            case CSpotEventType::TRACK_INFO: {
+                TrackInfo track = std::get<TrackInfo>(event.data);
+				cspot.cHandler(CSPOT_TRACK, 44100, track.artist.c_str(), track.album.c_str(), track.name.c_str());
+                break;
+            }
+            case CSpotEventType::PLAY_PAUSE: {
+                bool isPaused = std::get<bool>(event.data);
+				if (isPaused) cspot.cHandler(CSPOT_PAUSE);
+				else cspot.cHandler(CSPOT_PLAY);
+                break;
+            }
+			case CSpotEventType::SEEK:
+				cspot.cHandler(CSPOT_SEEK, std::get<int>(event.data));
+				break;
+			case CSpotEventType::DISC:
+				cspot.cHandler(CSPOT_DISC);				
+				spircController->stopPlayer();
+				mercuryManager->stop();
+				break;
+			case CSpotEventType::PREV:
+			case CSpotEventType::NEXT:
+				cspot.cHandler(CSPOT_FLUSH);
+                break;
+			/*
+			// we use volume from sink which is a 16 bits value
+			case CSpotEventType::VOLUME: {
+                int volume = std::get<int>(event.data);
+				cspot.cHandler(CSPOT_VOLUME, volume);
+				ESP_LOGW(TAG, "cspot volume : %d", volume);
+                break;
+            }
+			*/
+            default:
+                break;
+            }
+			});
+
+            mercuryManager->reconnectedCallback = []() {
+                return spircController->subscribe();
+            };
+
+            mercuryManager->handleQueue();
+        }
+
+		// release all ownership
+		mercuryManager.reset();
+		spircController.reset();
+		cspot.blob.reset();
+
+		// flush files
+		file->flush();
+		ESP_LOGW(TAG, "THIS SESSION IS FINISHED %ld %ld %ld", mercuryManager.use_count(), spircController.use_count(), cspot.blob.use_count());
+	}
+
+	// we should not be here
+	vTaskDelete(NULL);
+}
+
+/****************************************************************************************
+ * API to create and start a cspot instance
+ */
+struct cspot_s* cspot_create(const char *name, cspot_cmd_cb_t cmd_cb, cspot_data_cb_t data_cb) {
+	static DRAM_ATTR StaticTask_t xTaskBuffer __attribute__ ((aligned (4)));
+	static EXT_RAM_ATTR StackType_t xStack[CSPOT_STACK_SIZE] __attribute__ ((aligned (4)));
+
+	bell::setDefaultLogger();
+	
+	cspot.cHandler = cmd_cb;
+	cspot.dHandler = data_cb;
+	strncpy(cspot.name, name, sizeof(cspot.name) - 1);
+    cspot.TaskHandle = xTaskCreateStatic(&cspotTask, "cspot", CSPOT_STACK_SIZE, NULL, ESP_TASK_PRIO_MIN + 1, xStack, &xTaskBuffer);
+	
+	return &cspot;
+}
+
+/****************************************************************************************
+ * Commands sent by local buttons/actions
+ */
+bool cspot_cmd(struct cspot_s* ctx, cspot_event_t event, void *param) {
+	switch(event) {
+		case CSPOT_PREV:
+			spircController->prevSong();
+			break;
+		case CSPOT_NEXT:
+			spircController->nextSong();
+			break;
+		case CSPOT_TOGGLE:
+			spircController->playToggle();
+			break;
+		case CSPOT_PAUSE:
+			spircController->setPause(true);
+			break;
+		case CSPOT_PLAY:
+			spircController->setPause(false);
+			break;
+		case CSPOT_DISC:
+			spircController->stopPlayer();
+			mercuryManager->stop();
+			break;
+		case CSPOT_STOP:
+			spircController->stopPlayer();
+			break;
+		case CSPOT_VOLUME_UP:
+			spircController->adjustVolume(MAX_VOLUME / 100 + 1);
+			break;
+		case CSPOT_VOLUME_DOWN:
+			spircController->adjustVolume(-(MAX_VOLUME / 100 + 1));
+			break;
+		default:
+			break;
+	}
+
+	return true;
+}
+
+/****************************************************************************************
+ * AudioSink class to push data to squeezelite backend (decode_external)
+ */
+void ShimAudioSink::volumeChanged(uint16_t volume) {
+	cspot.cHandler(CSPOT_VOLUME, volume);
+}
+
+void ShimAudioSink::feedPCMFrames(std::vector<uint8_t> &data) {	
+	cspot.dHandler(&data[0], data.size());
+}
+
+/****************************************************************************************
+ * NVSFile class to store config
+ */
+ bool NVSFile::readFile(std::string filename, std::string &fileContent) {
+	auto search = files.find(filename);
+    
+	// read through the in-memory cache, falling back to NVS when the file is not cached yet
+	if (search == files.end()) {
+		char *content = (char*) config_alloc_get(NVS_TYPE_STR, filename.c_str());
+		if (!content) return false;
+		fileContent = content;
+		free(content);
+	} else {
+		fileContent = search->second;
+	}
+
+	return true;
+}
+
+bool NVSFile::writeFile(std::string filename, std::string fileContent) {
+    auto search = files.find(filename);
+
+	files[filename] = fileContent;    
+	if (search == files.end()) return (ESP_OK == config_set_value(NVS_TYPE_STR, filename.c_str(), fileContent.c_str()));
+	return true;
+}
+
+bool NVSFile::flush() {
+	esp_err_t err = ESP_OK;
+
+	for (auto it = files.begin(); it != files.end(); ++it) {
+		err |= config_set_value(NVS_TYPE_STR, it->first.c_str(), it->second.c_str());
+	}
+	return (err == ESP_OK);
+}
+
+/****************************************************************************************
+ * Shim HTTP server for spirc
+ */
+static esp_err_t handlerWrapper(httpd_req_t *req) {
+	bell::HTTPRequest request = { };
+	char *query = NULL, *body = NULL;
+	bell::httpHandler *handler = (bell::httpHandler*) req->user_ctx;
+	size_t query_len = httpd_req_get_url_query_len(req);
+
+	request.connection = httpd_req_to_sockfd(req);
+
+	// get body if any (add '\0' at the end if used as string)
+	if (req->content_len) {
+		body = (char*) calloc(1, req->content_len + 1);
+		int size = httpd_req_recv(req, body, req->content_len);
+		request.body = body;
+		ESP_LOGD(TAG,"wrapper received body %d/%d", size, req->content_len);
+	}
+
+	// parse query if any (can be in body as well for url-encoded)
+	if (query_len) {
+		query = (char*) malloc(query_len + 1);
+		httpd_req_get_url_query_str(req, query, query_len + 1);
+	} else if (body && strchr(body, '&')) {
+		query = body;
+		body = NULL;
+	}	
+		
+	// crude parsing: only attempt it when there is a query, and skip keys without a value
+	if (query) {
+		url_decode(query);
+		char *key = strtok(query, "&");
+
+		while (key) {
+			char *value = strchr(key, '=');
+			if (value) {
+				*value++ = '\0';
+				request.queryParams[key] = value;
+				ESP_LOGD(TAG, "wrapper received key:%s value:%s", key, value);
+			}
+			key = strtok(NULL, "&");
+		}
+	}
+
+	if (query) free(query);
+	if (body) free(body);
+	
+	/*
+	 This is a strange construct: the C++ handler will call ShimHTTPServer::respond and
+	 then we'll return. So we can't hand the response back to esp_http_server as it
+	 normally expects; instead, respond() writes to the raw socket and closes the connection
+	*/
+	(*handler)(request);
+
+	return ESP_OK;
+}
+
+void ShimHTTPServer::registerHandler(bell::RequestType requestType, const std::string &routeUrl, bell::httpHandler handler) {
+	httpd_uri_t request = { 
+		.uri = routeUrl.c_str(), 
+		.method = (requestType == bell::RequestType::GET ? HTTP_GET : HTTP_POST),
+		.handler = handlerWrapper,
+		.user_ctx = NULL,
+	};
+
+	// find the first free spot and register the handler
+	for (int i = 0; i < sizeof(uriHandlers)/sizeof(bell::httpHandler); i++) {
+		if (!uriHandlers[i]) {
+			uriHandlers[i] = handler;
+			request.user_ctx = uriHandlers + i;
+			httpd_register_uri_handler(serverHandle, &request);
+			break;	
+		}
+	}
+		
+	if (!request.user_ctx) ESP_LOGW(TAG, "Cannot add handler for %s", routeUrl.c_str());
+}
+
+void ShimHTTPServer::respond(const bell::HTTPResponse &response) {
+	char *buf;
+	size_t len = asprintf(&buf, "HTTP/1.1 %d OK\r\n"
+			"Server: SQUEEZEESP32\r\n"
+			"Connection: close\r\n"		
+			"Content-type: %s\r\n"		
+			"Content-length: %d\r\n"	
+			"Access-Control-Allow-Origin: *\r\n"
+			"Access-Control-Allow-Methods: GET, POST, PATCH, PUT, DELETE, OPTIONS\r\n"
+			"Access-Control-Allow-Headers: Origin, Content-Type, X-Auth-Token\r\n"
+			"\r\n%s", 
+			response.status, response.contentType.c_str(), 
+			response.body.size(), response.body.c_str()
+	);
+
+	// use raw socket send and close connection
+	httpd_socket_send(serverHandle, response.connectionFd, buf, len, 0);
+	free(buf);
+	
+	// we want to close the socket due to the strange construct
+	httpd_sess_trigger_close(serverHandle, response.connectionFd);
+}
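
For contrast with the raw-socket construct described in handlerWrapper(), a conventional esp_http_server handler would return its payload through the request object; this sketch is illustrative only and is not how the bell handlers can operate here:

    static esp_err_t canonical_handler(httpd_req_t *req) {
        httpd_resp_set_type(req, "application/json");                        // normal response path
        return httpd_resp_send(req, "{\"ok\":true}", HTTPD_RESP_USE_STRLEN);
    }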

+ 50 - 0
components/spotify/Shim.h

@@ -0,0 +1,50 @@
+/* 
+ *  This software is released under the MIT License.
+ *  https://opensource.org/licenses/MIT
+ *
+ */
+
+#pragma once
+
+#include <vector>
+#include <iostream>
+#include <map>
+#include "AudioSink.h"
+#include "FileHelper.h"
+#include "BaseHTTPServer.h"
+#include <stdio.h>
+#include <string.h>
+#include <sys/unistd.h>
+#include <sys/stat.h>
+#include "esp_err.h"
+#include "esp_http_server.h"
+#include "esp_log.h"
+
+class ShimAudioSink : public AudioSink {
+public:
+	ShimAudioSink(void) { softwareVolumeControl = false; }
+    void feedPCMFrames(std::vector<uint8_t> &data);
+	virtual void volumeChanged(uint16_t volume);
+};
+
+class NVSFile : public FileHelper {
+private:
+	std::map<std::string, std::string> files;
+
+public:
+    bool readFile(std::string filename, std::string &fileContent);
+    bool writeFile(std::string filename, std::string fileContent);
+	bool flush();
+};
+
+class ShimHTTPServer : public bell::BaseHTTPServer {    
+private:
+	httpd_handle_t serverHandle;
+	bell::httpHandler uriHandlers[4];
+	
+public:
+   ShimHTTPServer(httpd_handle_t server, int port) { serverHandle = server; serverPort = port; }
+
+   void registerHandler(bell::RequestType requestType, const std::string &, bell::httpHandler);
+   void respond(const bell::HTTPResponse &);
+};

+ 66 - 0
components/spotify/cspot/CMakeLists.txt

@@ -0,0 +1,66 @@
+project(cspot)
+
+cmake_minimum_required(VERSION 2.8.9)
+set (CMAKE_CXX_STANDARD 17)
+
+file(GLOB SOURCES "src/*.cpp" "src/*.c")
+
+if (NOT DEFINED USE_EXTERNAL_BELL)
+    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/bell ${CMAKE_CURRENT_BINARY_DIR}/bell)
+endif()
+# Add platform specific sources
+# if(${ESP_PLATFORM})
+#     file(GLOB ESP_PLATFORM_SOURCES "src/platform/esp/*.cpp" "src/platform/esp/*.c")
+#     set(SOURCES ${SOURCES} ${ESP_PLATFORM_SOURCES} )
+# endif()
+
+# if(UNIX)
+#     file(GLOB UNIX_PLATFORM_SOURCES "src/platform/unix/*.cpp" "src/platform/unix/*.c")
+#     set(SOURCES ${SOURCES} ${UNIX_PLATFORM_SOURCES} )
+# endif()
+
+# if(APPLE)
+#     file(GLOB APPLE_PLATFORM_SOURCES "src/platform/apple/*.cpp" "src/platform/apple/*.c")
+#     set(SOURCES ${SOURCES} ${APPLE_PLATFORM_SOURCES} )
+# endif()
+
+# if(UNIX AND NOT APPLE)
+# #     file(GLOB LINUX_PLATFORM_SOURCES "src/platform/linux/*.cpp" "src/platform/linux/*.c")
+# #     set(SOURCES ${SOURCES} ${LINUX_PLATFORM_SOURCES} )
+# # endif()
+
+
+# if(${ESP_PLATFORM})
+#     list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/CryptoOpenSSL.cpp) # use MBedTLS 
+#     idf_build_set_property(COMPILE_DEFINITIONS "-DCSPOT_USE_MBEDTLS" APPEND)
+#     set(EXTRA_REQ_LIBS idf::mbedtls idf::pthread idf::mdns)
+#     add_definitions(-Wunused-const-variable -Wchar-subscripts -Wunused-label -Wmaybe-uninitialized -Wmisleading-indentation)
+# else()
+#     list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/CryptoMbedTLS.cpp) # use OpenSSL
+#     find_package(OpenSSL REQUIRED)
+#     if(OPENSSL_FOUND)
+#         set(OPENSSL_USE_STATIC_LIBS TRUE)
+#     endif()
+#     set(EXTRA_REQ_LIBS OpenSSL::Crypto Threads::Threads)
+#     set(THREADS_PREFER_PTHREAD_FLAG ON)
+#     find_package(Threads REQUIRED)
+# endif()
+
+if(UNIX AND NOT APPLE)
+    set(EXTRA_REQ_LIBS ${EXTRA_REQ_LIBS} dns_sd) # add Apple Bonjour compatibility library for Linux
+    # TODO: migrate from this to native linux mDNS
+endif()
+
+include_directories("include")
+include_directories("${CMAKE_CURRENT_BINARY_DIR}")
+include_directories("protos")
+
+message(${CMAKE_CURRENT_SOURCE_DIR}/cmake/generate_protos.sh)
+execute_process(COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/cmake/generate_protos.sh" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
+
+set(SOURCES ${SOURCES} "protos/AnyRefImpl.cpp" "protos/ReflectTypeInfo.cpp")
+
+add_library(cspot STATIC ${SOURCES})
+target_link_libraries(cspot PRIVATE bell ${EXTRA_REQ_LIBS})
+
+target_include_directories(cspot PUBLIC "include" "protos" bell ${EXTRA_REQ_LIBS} ${CMAKE_CURRENT_BINARY_DIR})

BIN
components/spotify/cspot/bell/.DS_Store


+ 7 - 0
components/spotify/cspot/bell/.gitmodules

@@ -0,0 +1,7 @@
+[submodule "tremor"]
+	path = tremor
+	url = https://gitlab.xiph.org/xiph/tremor.git
+	branch = lowmem
+[submodule "cJSON"]
+	path = cJSON
+	url = https://github.com/DaveGamble/cJSON

+ 80 - 0
components/spotify/cspot/bell/CMakeLists.txt

@@ -0,0 +1,80 @@
+project(bell)
+
+cmake_minimum_required(VERSION 2.8.9)
+set (CMAKE_CXX_STANDARD 17)
+
+file(GLOB SOURCES "src/*.cpp" "src/*.c")
+
+# Add platform specific sources
+
+if(${ESP_PLATFORM})
+    file(GLOB ESP_PLATFORM_SOURCES "src/platform/esp/*.cpp" "src/platform/esp/*.c" "src/asm/biquad_f32_ae32.S")
+    set(SOURCES ${SOURCES} ${ESP_PLATFORM_SOURCES} )
+endif()
+
+if(UNIX)
+    file(GLOB UNIX_PLATFORM_SOURCES "src/platform/unix/*.cpp" "src/platform/linux/TLSSocket.cpp" "src/platform/unix/*.c")
+    set(SOURCES ${SOURCES} ${UNIX_PLATFORM_SOURCES} )
+endif()
+
+if(APPLE)
+    file(GLOB APPLE_PLATFORM_SOURCES "src/platform/apple/*.cpp" "src/platform/linux/TLSSocket.cpp"  "src/platform/apple/*.c")
+    set(SOURCES ${SOURCES} ${APPLE_PLATFORM_SOURCES} )
+endif()
+
+if(UNIX AND NOT APPLE)
+    file(GLOB LINUX_PLATFORM_SOURCES "src/platform/linux/*.cpp" "src/platform/linux/*.c")
+    set(SOURCES ${SOURCES} ${LINUX_PLATFORM_SOURCES} )
+endif()
+
+if(${ESP_PLATFORM})
+    list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/CryptoOpenSSL.cpp) # use MBedTLS 
+    idf_build_set_property(COMPILE_DEFINITIONS "-DBELL_USE_MBEDTLS" APPEND)
+    set(EXTRA_REQ_LIBS idf::mbedtls idf::pthread idf::mdns)
+    add_definitions(-Wunused-const-variable -Wchar-subscripts -Wunused-label -Wmaybe-uninitialized -Wmisleading-indentation)
+else()
+    list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/CryptoMbedTLS.cpp) # use OpenSSL
+    find_package(OpenSSL REQUIRED)
+    if(OPENSSL_FOUND)
+        set(OPENSSL_USE_STATIC_LIBS TRUE)
+    endif()
+    set(EXTRA_REQ_LIBS OpenSSL::Crypto OpenSSL::SSL Threads::Threads)
+    set(THREADS_PREFER_PTHREAD_FLAG ON)
+    find_package(Threads REQUIRED)
+endif()
+
+if (BELL_DISABLE_CODECS) 
+    list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/DecoderGlobals.cpp) # codecs are disabled
+	add_definitions(-DBELL_DISABLE_CODECS)
+else()
+	set(EXTRA_INC ${EXTRA_INC} "libhelix-aac" "libhelix-mp3")
+	set(SOURCES ${SOURCES} "libhelix-aac/aacdec.c" "libhelix-aac/aactabs.c" "libhelix-aac/bitstream.c" "libhelix-aac/buffers.c" "libhelix-aac/dct4.c" "libhelix-aac/decelmnt.c" "libhelix-aac/dequant.c" "libhelix-aac/fft.c" "libhelix-aac/filefmt.c" "libhelix-aac/huffman.c" "libhelix-aac/hufftabs.c" "libhelix-aac/imdct.c" "libhelix-aac/noiseless.c" "libhelix-aac/pns.c" "libhelix-aac/sbr.c" "libhelix-aac/sbrfft.c" "libhelix-aac/sbrfreq.c" "libhelix-aac/sbrhfadj.c" "libhelix-aac/sbrhfgen.c" "libhelix-aac/sbrhuff.c" "libhelix-aac/sbrimdct.c" "libhelix-aac/sbrmath.c" "libhelix-aac/sbrqmf.c" "libhelix-aac/sbrside.c" "libhelix-aac/sbrtabs.c" "libhelix-aac/stproc.c" "libhelix-aac/tns.c" "libhelix-aac/trigtabs.c")
+	set(SOURCES ${SOURCES} "libhelix-mp3/bitstream.c" "libhelix-mp3/buffers.c" "libhelix-mp3/dct32.c" "libhelix-mp3/dequant.c" "libhelix-mp3/dqchan.c" "libhelix-mp3/huffman.c" "libhelix-mp3/hufftabs.c" "libhelix-mp3/imdct.c" "libhelix-mp3/mp3dec.c" "libhelix-mp3/mp3tabs.c" "libhelix-mp3/polyphase.c" "libhelix-mp3/scalfact.c" "libhelix-mp3/stproc.c" "libhelix-mp3/subband.c" "libhelix-mp3/trigtabs.c")
+endif()
+
+if(UNIX AND NOT APPLE)
+    set(EXTRA_REQ_LIBS ${EXTRA_REQ_LIBS} dns_sd) # add Apple Bonjour compatibility library for Linux
+    # TODO: migrate from this to native linux mDNS
+endif()
+add_definitions( -DUSE_DEFAULT_STDLIB=1)
+
+if (BELL_CJSON_EXTERNAL) 
+	message("Using external cJSON")
+	set(EXTRA_REQ_LIBS ${EXTRA_REQ_LIBS} ${BELL_CJSON_EXTERNAL})
+else() 
+	set(EXTRA_INC ${EXTRA_INC} "cJSON")
+	set(SOURCES ${SOURCES} "cJSON/cJSON.c")
+endif()
+
+if (BELL_TREMOR_EXTERNAL) 
+	message("Using external TREMOR")
+	set(EXTRA_REQ_LIBS ${EXTRA_REQ_LIBS} ${BELL_TREMOR_EXTERNAL})
+else()
+	set(EXTRA_INC ${EXTRA_INC} "tremor")
+	set(SOURCES ${SOURCES} "tremor/mdct.c" "tremor/dsp.c" "tremor/info.c" "tremor/misc.c" "tremor/floor1.c" "tremor/floor0.c" "tremor/vorbisfile.c" "tremor/res012.c" "tremor/mapping0.c" "tremor/codebook.c" "tremor/framing.c" "tremor/bitwise.c" "tremor/floor_lookup.c")
+endif()
+
+add_library(bell STATIC ${SOURCES})
+target_link_libraries(bell PRIVATE ${EXTRA_REQ_LIBS})
+target_include_directories(bell PUBLIC "include" "protos" ${EXTRA_INC} ${EXTRA_REQ_LIBS} ${CMAKE_CURRENT_BINARY_DIR})
+

+ 9 - 0
components/spotify/cspot/bell/README.md

@@ -0,0 +1,9 @@
+# bell
+
+Core audio utils library used in cspot and euphonium projects.
+
+Implemented utilities:
+
+- HTTPServer
+- Crypto (openssl and mbedtls backed)
+- Semaphore implementations

+ 23 - 0
components/spotify/cspot/bell/cJSON/.editorconfig

@@ -0,0 +1,23 @@
+root = true
+
+
+[*]
+indent_style = space
+indent_size = 4
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[Makefile]
+indent_style = tab
+indent_size = unset
+
+# ignore external repositories and test inputs
+[tests/{unity,json-patch-tests,inputs}/*]
+indent_style = unset
+indent_size = unset
+end_of_line = unset
+charset = unset
+trim_trailing_whitespace = unset
+insert_final_newline = unset

+ 11 - 0
components/spotify/cspot/bell/cJSON/.gitattributes

@@ -0,0 +1,11 @@
+*  					text=auto
+/tests/inputs/*	text eol=lf
+
+.gitattributes          export-ignore
+.gitignore              export-ignore
+.github                 export-ignore
+.editorconfig           export-ignore
+.travis.yml             export-ignore
+
+# Linguist incorrectly identified the headers as C++, manually override this.
+*.h linguist-language=C

+ 54 - 0
components/spotify/cspot/bell/cJSON/.github/CONTRIBUTING.md

@@ -0,0 +1,54 @@
+Contribution Guidelines
+=======================
+
+Contributions to cJSON are welcome. If you find a bug or want to improve cJSON in another way, pull requests are appreciated.
+
+For bigger changes, in order to avoid wasted effort, please open an issue to discuss the technical details before creating a pull request.
+
+The following sections explain the process in more detail and provide some guidelines on what contributions should look like.
+
+Branches
+--------
+There are two branches to be aware of, the `master` and the `develop` branch. The `master` branch is reserved for the latest release, so only make pull requests to the `master` branch for small bug- or security fixes (these are usually just a few lines). In all other cases, please make a pull request to the `develop` branch.
+
+Coding Style
+------------
+The coding style has been discussed in [#24](https://github.com/DaveGamble/cJSON/issues/24). The basics are:
+
+* Use 4 spaces for indentation
+* No oneliners (conditions, loops, variable declarations ...)
+* Always use parenthesis for control structures
+* Don't implicitly rely on operator precedence, use round brackets in expressions. e.g. `(a > b) && (c < d)` instead of `a>b && c<d`
+* opening curly braces start in the next line
+* use spaces around operators
+* lines should not have trailing whitespace
+* use spaces between function parameters
+* use pronounceable variable names, not just a combination of letters
+
+Example:
+
+```c
+/* calculate the new length of the string in a printbuffer and update the offset */
+static void update_offset(printbuffer * const buffer)
+{
+    const unsigned char *buffer_pointer = NULL;
+    if ((buffer == NULL) || (buffer->buffer == NULL))
+    {
+        return;
+    }
+    buffer_pointer = buffer->buffer + buffer->offset;
+
+    buffer->offset += strlen((const char*)buffer_pointer);
+}
+```
+
+Unit Tests
+----------
+cJSON uses the [Unity](https://github.com/ThrowTheSwitch/Unity) library for unit tests. The tests are located in the `tests` directory. In order to add a new test, either add it to one of the existing files (if it fits) or add a new C file for the test. That new file has to be added to the list of tests in `tests/CMakeLists.txt`.
+
+All new features have to be covered by unit tests.
+
+Other Notes
+-----------
+* Internal functions are to be declared static.
+* Wrap the return type of external function in the `CJSON_PUBLIC` macro.

+ 102 - 0
components/spotify/cspot/bell/cJSON/.github/workflows/CI.yml

@@ -0,0 +1,102 @@
+name: CI
+
+on:
+  push:
+    branches: [ master ]
+    paths-ignore:
+      - '**.md'
+      - 'LICENSE'
+  pull_request:
+    types: [opened, synchronize]
+    paths-ignore:
+      - '**.md'
+      - 'LICENSE'
+
+jobs:
+  linux:
+    runs-on: ubuntu-latest
+    if: "!contains(github.event.head_commit.message, 'ci skip')"
+    strategy:
+      fail-fast: false
+      matrix:
+        mem_check:
+          - ENABLE_VALGRIND
+          - ENABLE_SANITIZERS
+          - NONE_MEM_CHECK
+        compiler:
+          - GCC
+          - CLANG
+    steps:
+    - uses: actions/checkout@v2
+    - name: install build dependencies
+      run: |
+        sudo apt-get update
+        sudo apt-get install clang-8 valgrind
+    - name: build and test
+      shell: bash
+      run: |
+          if [ "${{ matrix.mem_check }}" == "ENABLE_VALGRIND" ]; then
+            EVENT_CMAKE_OPTIONS="-DENABLE_CJSON_UTILS=ON -DENABLE_VALGRIND=ON -DENABLE_SAFE_STACK=ON -DENABLE_SANITIZERS=OFF"
+          elif [ "${{ matrix.mem_check }}" == "ENABLE_SANITIZERS" ]; then
+            EVENT_CMAKE_OPTIONS="-DENABLE_CJSON_UTILS=ON -DENABLE_VALGRIND=OFF -DENABLE_SAFE_STACK=OFF -DENABLE_SANITIZERS=ON"
+          else
+            EVENT_CMAKE_OPTIONS="-DENABLE_CJSON_UTILS=ON -DENABLE_VALGRIND=OFF -DENABLE_SAFE_STACK=OFF -DENABLE_SANITIZERS=OFF"
+          fi
+          if [ "${{ matrix.compiler }}" == "GCC" ]; then
+            export CC=gcc
+          else
+            export CC=clang
+          fi
+          #run build and test
+          JOBS=20
+          export CTEST_PARALLEL_LEVEL=$JOBS
+          export CTEST_OUTPUT_ON_FAILURE=1
+          mkdir -p build
+          cd build
+          echo [cmake]: cmake .. $EVENT_CMAKE_OPTIONS
+          cmake .. $EVENT_CMAKE_OPTIONS || (rm -rf * && cmake .. $EVENT_CMAKE_OPTIONS)
+          cmake --build .
+          make
+          make test
+          
+  macos:
+    runs-on: macos-latest
+    if: "!contains(github.event.head_commit.message, 'ci skip')"
+    strategy:
+      fail-fast: false
+      matrix:
+        mem_check:
+          - ENABLE_VALGRIND
+          - ENABLE_SANITIZERS
+          - NONE_MEM_CHECK
+        compiler:
+          - GCC
+          - CLANG
+    steps:
+    - uses: actions/checkout@v2
+    - name: build and test
+      shell: bash
+      run: |
+          if [ "${{ matrix.mem_check }}" == "ENABLE_VALGRIND" ]; then
+            EVENT_CMAKE_OPTIONS="-DENABLE_CJSON_UTILS=ON -DENABLE_VALGRIND=ON -DENABLE_SAFE_STACK=ON -DENABLE_SANITIZERS=OFF"
+          elif [ "${{ matrix.mem_check }}" == "ENABLE_SANITIZERS" ]; then
+            EVENT_CMAKE_OPTIONS="-DENABLE_CJSON_UTILS=ON -DENABLE_VALGRIND=OFF -DENABLE_SAFE_STACK=OFF -DENABLE_SANITIZERS=ON"
+          else
+            EVENT_CMAKE_OPTIONS="-DENABLE_CJSON_UTILS=ON -DENABLE_VALGRIND=OFF -DENABLE_SAFE_STACK=OFF -DENABLE_SANITIZERS=OFF"
+          fi
+          if [ "${{ matrix.compiler }}" == "GCC" ]; then
+            export CC=gcc
+          else
+            export CC=clang
+          fi
+          #run build and test
+          JOBS=20
+          export CTEST_PARALLEL_LEVEL=$JOBS
+          export CTEST_OUTPUT_ON_FAILURE=1
+          mkdir -p build
+          cd build
+          echo [cmake]: cmake .. $EVENT_CMAKE_OPTIONS
+          cmake .. $EVENT_CMAKE_OPTIONS || (rm -rf * && cmake .. $EVENT_CMAKE_OPTIONS)
+          cmake --build .
+          make
+          make test

+ 20 - 0
components/spotify/cspot/bell/cJSON/.gitignore

@@ -0,0 +1,20 @@
+.svn
+test
+*.o
+*.a
+*.so
+*.swp
+*.patch
+tags
+*.dylib
+build/
+cJSON_test
+cJSON_test_utils
+libcjson.so.*
+libcjson_utils.so.*
+*.orig
+.vscode
+.idea
+cmake-build-debug
+*.lst
+*.lss

+ 28 - 0
components/spotify/cspot/bell/cJSON/.travis.yml

@@ -0,0 +1,28 @@
+dist: trusty
+sudo: false
+language: c
+env:
+  matrix:
+    - VALGRIND=On SANITIZERS=Off
+    - VALGRIND=Off SANITIZERS=Off
+    - VALGRIND=Off SANITIZERS=On
+compiler:
+  - gcc
+  - clang
+addons:
+  apt:
+    packages:
+      - valgrind
+      - libasan0
+      - lib32asan0
+      # currently not supported on travis:
+      # - libasan1
+      # - libasan2
+      # - libubsan0
+      - llvm
+script:
+  - mkdir build
+  - cd build
+  - cmake .. -DENABLE_CJSON_UTILS=On -DENABLE_VALGRIND="${VALGRIND}" -DENABLE_SAFE_STACK="${VALGRIND}" -DENABLE_SANITIZERS="${SANITIZERS}"
+  - make
+  - make test CTEST_OUTPUT_ON_FAILURE=On

Some files were not shown because too many files changed in this diff