diskcache.c
/*
 * Simple disk cache, dynamically sharing space with the sbrk heap.
 */
#include "common.h"
#include "list.h"
#include "sdcard.h"
#include "sys.h"
#include "systime.h"
#include "console.h"
/*
 * Maximum possible arena size; metadata structures and sbrk
 * thresholds are sized based on this value. A reasonable overestimate
 * is fine.
 */
#define MAX_ARENA_SIZE SDRAM_SIZE

#define CACHE_BLOCK_BITS 5
#define CACHE_BLOCK_SHIFT (CACHE_BLOCK_BITS+SECTOR_SHIFT)
#define CACHE_BLOCK_SECTORS (1 << CACHE_BLOCK_BITS)
#define CACHE_BLOCK_SIZE (SECTOR_SIZE << CACHE_BLOCK_BITS)
#define MAX_CACHE_BLOCKS (MAX_ARENA_SIZE/CACHE_BLOCK_SIZE)
#define CACHE_HASH_SIZE (MAX_CACHE_BLOCKS*2)

/*
 * Minimum cache size; fail sbrk allocation beyond this point.
 */
#define MIN_CACHE_SIZE (MAX_ARENA_SIZE >> 4)
#define MIN_CACHE_BLOCKS (MIN_CACHE_SIZE/CACHE_BLOCK_SIZE)

/*
 * When reclaiming storage from the brk heap, free up this much space
 * in addition to the requested allocation (if possible.)
 */
#define BRK_SLACK (MAX_ARENA_SIZE >> 6)

/* Minimum alignment for brk */
#define HEAP_ALIGN 32

typedef sector_t block_t;
#define NO_BLOCK ((block_t)(-1))
enum block_status {
    FL_NOTCACHE = 0,            /* heap or not present */
    FL_CACHE_BIT = 1,
    FL_FREE = FL_CACHE_BIT,
    FL_VALID_BIT = 2,
    FL_VALID = FL_CACHE_BIT | FL_VALID_BIT,
    FL_CLEAN = FL_VALID,
    FL_DIRTY_BIT = 4,
    FL_DIRTY = FL_VALID | FL_DIRTY_BIT
};
struct cache_block {
    struct dll hash;            /* Link in hash chain or free list */
    struct dll lru;             /* Link in LRU chain */
    block_t block;              /* Physical block index */
    enum block_status flags;    /* Status flags */
};
static struct dll __dram_bss cache_hash[CACHE_HASH_SIZE];
static struct cache_block __dram_bss cache_blocks[MAX_CACHE_BLOCKS];

extern char __heap_start[], __heap_end[];
#define CACHE_START align_up(&__heap_start[0], CACHE_BLOCK_SIZE)
#define CACHE_END __heap_end
/* Convert between data and metadata pointers */
static inline __constfunc char *bp_to_data(const struct cache_block *block)
{
    return CACHE_START + ((block - cache_blocks) << CACHE_BLOCK_SHIFT);
}

static inline __constfunc struct cache_block *data_to_bp(void *data)
{
    return &cache_blocks[((char *)data - CACHE_START) >> CACHE_BLOCK_SHIFT];
}
static struct dll lru_list;
static struct dll free_list;

static size_t cur_brk;
static size_t cache_base;

static void invalidate_all(void);
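/*
 * Multiplicative hash from block number to hash chain: the constant
 * spreads the block number across 64 bits, and folding the high half
 * into the low half mixes the upper bits back in before reducing
 * modulo the table size.
 */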
static inline __constfunc struct dll *hash_slot(block_t block)
{
    uint64_t m;
    uint32_t hash;

    m = UINT64_C(0x34f1f85d) * block;
    hash = (m >> 32) + m;

    return &cache_hash[hash % CACHE_HASH_SIZE];
}
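/* Look up a block in the cache; returns NULL if it is not present. */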
static struct cache_block *disk_cache_find(block_t block)
{
    struct dll *hp, *bhp;
    struct cache_block *bp;

    hp = hash_slot(block);
    for (bhp = hp->next; bhp != hp; bhp = bhp->next) {
        bp = container_of(bhp, struct cache_block, hash);
        if (bp->block == block)
            return bp;
    }

    return NULL;
}
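/*
 * Drop a block's cached contents without writing them back: unhash it,
 * put it back on the free list, and demote it toward the cold end of
 * the LRU list so it gets reused first.
 */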
static void invalidate_block(struct cache_block *bp)
{
    if (bp->flags & FL_VALID_BIT) {
        dll_remove(&bp->hash);
        dll_insert_head(&free_list, &bp->hash);
        bp->block = NO_BLOCK;
        bp->flags = FL_FREE;
        dll_demote(&lru_list, &bp->lru);
    }
}
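/*
 * Write a dirty block back to the SD card. On a write error the block
 * is invalidated; if the card itself is gone, the entire cache is
 * invalidated.
 */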
static DRESULT sync_block(struct cache_block *bp)
{
    if (sdc.status & (STA_NOINIT | STA_NODISK))
        goto err;

    if (bp->flags == FL_DIRTY) {
        sector_t sector = bp->block << CACHE_BLOCK_BITS;
        sector_t size = sdc.lbasize;
        sector_t sectors = min(CACHE_BLOCK_SECTORS, size - sector);

        if (sdcard_write_sectors(bp_to_data(bp), sector, sectors)
            != (int)sectors) {
            invalidate_block(bp); /* Or...? */
            goto err;
        }
        bp->flags = FL_CLEAN;
    }
    return RES_OK;

err:
    if (sdc.status & (STA_NOINIT | STA_NODISK))
        invalidate_all();
    return RES_ERROR;
}
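/* Write back a block if dirty, then invalidate it. */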
static DRESULT clear_block(struct cache_block *bp)
{
    DRESULT rv;

    rv = sync_block(bp);
    if (rv != RES_OK)
        return rv;

    invalidate_block(bp);
    return RES_OK;
}
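/*
 * Write back every dirty block. The DRESULT codes are or-ed together,
 * so the result is RES_OK only if all write-backs succeeded.
 */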
static DRESULT sync_all(void)
{
    DRESULT rv = RES_OK;
    struct dll *bhp;
    struct cache_block *bp;

    for (bhp = lru_list.next; bhp != &lru_list; bhp = bhp->next) {
        bp = container_of(bhp, struct cache_block, lru);
        if (bp->flags == FL_DIRTY)
            rv |= sync_block(bp);
    }

    return rv;
}
static void invalidate_all(void)
{
    struct cache_block *bp = data_to_bp((void *)cache_base);
    const struct cache_block *ebp = data_to_bp(CACHE_END);

    while (bp < ebp)
        invalidate_block(bp++);
}
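/*
 * Write back a block if necessary, then take it out of the cache
 * entirely (FL_NOTCACHE) so its storage can be returned to the brk
 * heap when the cache shrinks.
 */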
static DRESULT sync_kill_block(struct cache_block *bp)
{
    DRESULT rv = RES_OK;

    if (!(bp->flags & FL_CACHE_BIT))
        return rv;

    if (bp->flags == FL_DIRTY)
        rv = sync_block(bp);

    if (!rv) {
        dll_remove(&bp->hash);
        dll_remove(&bp->lru);
        bp->flags = FL_NOTCACHE;
        bp->block = NO_BLOCK;
    }

    return rv;
}
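/*
 * Get the cache block holding a given physical block, evicting the
 * least recently used block if it is not already cached. If do_read is
 * false the caller intends to overwrite the whole block, so the read
 * from the card is skipped.
 */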
static struct cache_block *disk_cache_get(block_t block, bool do_read)
{
    const sector_t size = sdc.lbasize;
    struct cache_block *bp;

    if (sdc.status & (STA_NOINIT | STA_NODISK))
        goto err;

    bp = disk_cache_find(block);
    if (!bp) {
        /* Block not in cache, need to get it */
        sector_t sector = block << CACHE_BLOCK_BITS;
        int sectors = CACHE_BLOCK_SECTORS;

        if (sector >= size)
            return NULL;
        if (sector + sectors > size)
            sectors = size - sector; /* Truncated final block */
        /* Get the oldest block */
        bp = container_of(lru_list.prev, struct cache_block, lru);
        clear_block(bp);

        if (do_read) {
            if (sdcard_read_sectors(bp_to_data(bp), sector, sectors) != sectors)
                goto err;
            bp->flags = FL_CLEAN;
        }
        bp->block = block;
        dll_insert_head(hash_slot(block), &bp->hash);
    }

    dll_promote(&lru_list, &bp->lru);
    return bp;

err:
    if (sdc.status & (STA_NOINIT | STA_NODISK))
        invalidate_all();
    return NULL;
}
/* --------------------------------------------------------------------------
 * Heap management
 * ------------------------------------------------------------------------- */

#define ARENA_START ((size_t)&__heap_start)
#define ARENA_END ((size_t)&__heap_end)
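/*
 * Move the boundary between the brk heap and the disk cache so the
 * heap can reach newbrk, keeping BRK_SLACK of headroom where possible
 * but never letting the cache drop below MIN_CACHE_SIZE. Blocks taken
 * from the cache are written back and killed; blocks gained are put on
 * the free and LRU lists. Returns the new cache base, which may be
 * lower than requested if a dirty block could not be written back.
 */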
static size_t disk_cache_adjust(size_t newbrk)
{
    size_t new_base = align_up(newbrk + BRK_SLACK, CACHE_BLOCK_SIZE);
    if (new_base > ARENA_END - MIN_CACHE_SIZE)
        new_base = ARENA_END - MIN_CACHE_SIZE;

    /*
     * Add new entries to the tail of the free list in reverse order,
     * so higher addresses get preferred.
     */
    struct cache_block * const obp = data_to_bp((void *)cache_base);
    struct cache_block * const nbp = data_to_bp((void *)new_base);
    struct cache_block *bp;

    if (obp < nbp) {
        /* Shrink cache */
        for (bp = obp; bp < nbp; bp++) {
            if (sync_kill_block(bp))
                break;
        }
    } else {
        /* Grow cache */
        for (bp = nbp; bp < obp; bp++) {
            bp->block = NO_BLOCK;
            bp->flags = FL_FREE;
            dll_insert_tail(&free_list, &bp->hash);
            dll_insert_tail(&lru_list, &bp->lru);
        }
        bp = nbp;
    }

    new_base = (size_t)bp_to_data(bp);

    if (cache_base != new_base) {
        con_printf("heap: start %p, brk %p, cache %p, %u blocks\n",
                   __heap_start, (void *)newbrk, (void *)new_base,
                   (ARENA_END - new_base) >> CACHE_BLOCK_SHIFT);
    }

    return cache_base = new_base;
}
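/*
 * _sbrk(): adjust the program break, squeezing the disk cache when the
 * heap needs more room; fails with ENOMEM once the heap would push the
 * cache below its minimum size.
 */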
void * __hot _sbrk(ptrdiff_t increment)
{
    const size_t heap_start = align_up(ARENA_START, HEAP_ALIGN);
    size_t new_brk = align_up(cur_brk + increment, HEAP_ALIGN);

    if (unlikely(new_brk > ARENA_END - MIN_CACHE_SIZE))
        goto err;
    if (unlikely(new_brk < heap_start))
        new_brk = heap_start;

    if (new_brk > cache_base || new_brk <= cache_base - BRK_SLACK) {
        if (disk_cache_adjust(new_brk) < new_brk)
            goto err;
    }

    size_t old_brk = cur_brk;
    cur_brk = new_brk;
    return (void *)old_brk;

err:
    errno = ENOMEM;
    return (void *)(-1);
}
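/*
 * Set up the arena: the brk heap starts empty at the bottom, and
 * disk_cache_adjust() then hands the rest to the disk cache.
 */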
void heap_init(void)
{
    const size_t heap_start = align_up(ARENA_START, HEAP_ALIGN);

    cur_brk = heap_start;
    cache_base = ARENA_END;
    con_printf("heap_init: start %p, end %p\n", cur_brk, (void *)cache_base);

    dll_init(&free_list);
    dll_init(&lru_list);

    struct dll * const hep = cache_hash + CACHE_HASH_SIZE;
    for (struct dll *hp = cache_hash; hp < hep; hp++)
        dll_init(hp);

    disk_cache_adjust(cur_brk);
}
/* --------------------------------------------------------------------------
 * Interface to fatfs
 * ------------------------------------------------------------------------- */

DSTATUS disk_initialize(BYTE drive)
{
    DSTATUS status;

    if (drive != 0)
        return STA_NOINIT | STA_NODISK;

    /* If we get here, STA_NOINIT was set at some point, so cache is bad */
    invalidate_all();

    return sdc.fsstatus = sdcard_init();
}
DRESULT disk_ioctl(BYTE drive, BYTE command, void *buffer)
{
    if (drive != 0)
        return STA_NOINIT | STA_NODISK;

    if (sdc.status & STA_NOINIT)
        return RES_NOTRDY;

    switch (command) {
    case CTRL_SYNC:
        return sync_all();

    case GET_SECTOR_SIZE:
        *(WORD *)buffer = SECTOR_SIZE;
        return RES_OK;

    case GET_SECTOR_COUNT:
        *(DWORD *)buffer = sdc.lbasize;
        return RES_OK;

    case GET_BLOCK_SIZE:
        *(DWORD *)buffer = CACHE_BLOCK_SECTORS;
        return RES_OK;

    default:
        return RES_PARERR;
    }
}
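/*
 * fatfs read entry point: serve the request out of the cache one cache
 * block at a time; only the first block may start at a nonzero offset.
 */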
DRESULT disk_read(BYTE drive, BYTE *buffer,
                  LBA_t sectornumber, UINT sectorcount)
{
    (void)drive;

    if (sdc.status & STA_NOINIT)
        return RES_NOTRDY;
    if (!sectorcount)
        return RES_OK;

    block_t block = sectornumber >> CACHE_BLOCK_BITS;
    block_t last = (sectornumber + sectorcount - 1) >> CACHE_BLOCK_BITS;
    size_t offset = (sectornumber & (CACHE_BLOCK_SECTORS-1)) << SECTOR_SHIFT;
    size_t len = sectorcount << SECTOR_SHIFT;

    while (block <= last) {
        struct cache_block *bp = disk_cache_get(block, true);
        if (!bp)
            return RES_ERROR;

        size_t bytes = min(CACHE_BLOCK_SIZE - offset, len);
        memcpy(buffer, bp_to_data(bp) + offset, bytes);
        buffer += bytes;        /* advance output pointer to the next block */

        len -= bytes;
        block++;
        offset = 0;
    }

    return RES_OK;
}
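/*
 * fatfs write entry point: copy the data into the cache and mark the
 * blocks dirty. A block is only read from the card first when the
 * write covers just part of it, so full-block writes skip the
 * read-modify-write.
 */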
DRESULT disk_write(BYTE drive, const BYTE *buffer, LBA_t sectornumber,
                   UINT sectorcount)
{
    (void)drive;

    if (sdc.status & STA_NOINIT)
        return RES_NOTRDY;
    if (!sectorcount)
        return RES_OK;

    block_t block = sectornumber >> CACHE_BLOCK_BITS;
    block_t last = (sectornumber + sectorcount - 1) >> CACHE_BLOCK_BITS;
    size_t offset = (sectornumber & (CACHE_BLOCK_SECTORS-1)) << SECTOR_SHIFT;
    size_t len = sectorcount << SECTOR_SHIFT;
    size_t size = sdc.lbasize;

    while (block <= last) {
        sector_t sector = block << CACHE_BLOCK_BITS;
        sector_t sectors = min(CACHE_BLOCK_SECTORS, size - sector);
        size_t block_bytes = sectors << SECTOR_SHIFT;
        size_t bytes = min(block_bytes - offset, len);
        struct cache_block *bp;

        bp = disk_cache_get(block, bytes < block_bytes);
        if (!bp)
            return RES_ERROR;
        memcpy(bp_to_data(bp) + offset, buffer, bytes);
        buffer += bytes;        /* advance source pointer to the next block */
        bp->flags = FL_DIRTY;

        len -= bytes;
        block++;
        offset = 0;
    }

    return RES_OK;
}