/*
 * Simple disk cache, dynamically sharing space with the sbrk heap.
 */

#include "common.h"
#include "list.h"
#include "sdcard.h"
#include "sys.h"
#include "systime.h"
#include "console.h"

/*
 * Maximum possible arena size; metadata structures and sbrk
 * thresholds are sized based on this value. A reasonable overestimate
 * is fine.
 */
#define MAX_ARENA_SIZE		SDRAM_SIZE

#define CACHE_BLOCK_BITS	5
#define CACHE_BLOCK_SHIFT	(CACHE_BLOCK_BITS+SECTOR_SHIFT)
#define CACHE_BLOCK_SECTORS	(1 << CACHE_BLOCK_BITS)
#define CACHE_BLOCK_SIZE	(SECTOR_SIZE << CACHE_BLOCK_BITS)
#define MAX_CACHE_BLOCKS	(MAX_ARENA_SIZE/CACHE_BLOCK_SIZE)
#define CACHE_HASH_SIZE		(MAX_CACHE_BLOCKS*2)

/*
 * Minimum cache size; fail sbrk allocation beyond this point.
 */
#define MIN_CACHE_SIZE		(MAX_ARENA_SIZE >> 4)
#define MIN_CACHE_BLOCKS	(MIN_CACHE_SIZE/CACHE_BLOCK_SIZE)
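/*
 * For a concrete sense of scale (example values only, not taken from
 * this configuration): if SDRAM_SIZE were 32 MiB and SECTOR_SIZE were
 * 512 bytes, a cache block would be 32 sectors = 16 KiB, giving
 * MAX_CACHE_BLOCKS = 2048, CACHE_HASH_SIZE = 4096, and a minimum
 * cache of 2 MiB (128 blocks).
 */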
/*
 * When reclaiming storage from the brk heap, free up this much space
 * in addition to the requested allocation (if possible.)
 */
#define BRK_SLACK		(MAX_ARENA_SIZE >> 6)

/* Minimum alignment for brk */
#define HEAP_ALIGN		32

typedef sector_t block_t;
#define NO_BLOCK ((block_t)(-1))

enum block_status {
    FL_NOTCACHE  = 0,		/* heap or not present */
    FL_CACHE_BIT = 1,
    FL_FREE      = FL_CACHE_BIT,
    FL_VALID_BIT = 2,
    FL_VALID     = FL_CACHE_BIT | FL_VALID_BIT,
    FL_CLEAN     = FL_VALID,
    FL_DIRTY_BIT = 4,
    FL_DIRTY     = FL_VALID | FL_DIRTY_BIT
};

struct cache_block {
    struct dll hash;		/* Link in hash chain or free list */
    struct dll lru;		/* Link in LRU chain */
    block_t block;		/* Physical block index */
    enum block_status flags;	/* Status flags */
};

static struct dll __dram_bss cache_hash[CACHE_HASH_SIZE];
static struct cache_block __dram_bss cache_blocks[MAX_CACHE_BLOCKS];

extern char __heap_start[], __heap_end[];
#define CACHE_START	align_up(&__heap_start[0], CACHE_BLOCK_SIZE)
#define CACHE_END	__heap_end

/* Convert between data and metadata pointers */
static inline __constfunc char *bp_to_data(const struct cache_block *block)
{
    return CACHE_START + ((block - cache_blocks) << CACHE_BLOCK_SHIFT);
}

static inline __constfunc struct cache_block *data_to_bp(void *data)
{
    return &cache_blocks[((char *)data - CACHE_START) >> CACHE_BLOCK_SHIFT];
}

static struct dll lru_list;
static struct dll free_list;
static size_t cur_brk;
static size_t cache_base;

static void invalidate_all(void);

static inline __constfunc struct dll *hash_slot(block_t block)
{
    uint64_t m;
    uint32_t hash;

    m = UINT64_C(0x34f1f85d) * block;
    hash = (m >> 32) + m;

    return &cache_hash[hash % CACHE_HASH_SIZE];
}

static struct cache_block *disk_cache_find(block_t block)
{
    struct dll *hp, *bhp;
    struct cache_block *bp;

    hp = hash_slot(block);
    for (bhp = hp->next; bhp != hp; bhp = bhp->next) {
	bp = container_of(bhp, struct cache_block, hash);
	if (bp->block == block)
	    return bp;
    }

    return NULL;
}

static void invalidate_block(struct cache_block *bp)
{
    if (bp->flags & FL_VALID_BIT) {
	dll_remove(&bp->hash);
	dll_insert_head(&free_list, &bp->hash);
	bp->block = NO_BLOCK;
	bp->flags = FL_FREE;
	dll_demote(&lru_list, &bp->lru);
    }
}
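/*
 * Write a dirty block back to the card. On a write failure the block
 * is invalidated, since its contents can no longer be trusted to
 * match the medium; if the card itself has gone away, the entire
 * cache is dropped.
 */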
static DRESULT sync_block(struct cache_block *bp)
{
    if (sdc.status & (STA_NOINIT | STA_NODISK))
	goto err;

    if (bp->flags == FL_DIRTY) {
	sector_t sector = bp->block << CACHE_BLOCK_BITS;
	sector_t size = sdc.lbasize;
	sector_t sectors = min(CACHE_BLOCK_SECTORS, size - sector);

	if (sdcard_write_sectors(bp_to_data(bp), sector, sectors)
	    != (int)sectors) {
	    invalidate_block(bp); /* Or...? */
	    goto err;
	}
	bp->flags = FL_CLEAN;
    }

    return RES_OK;

err:
    if (sdc.status & (STA_NOINIT | STA_NODISK))
	invalidate_all();
    return RES_ERROR;
}

static DRESULT clear_block(struct cache_block *bp)
{
    DRESULT rv;

    rv = sync_block(bp);
    if (rv != RES_OK)
	return rv;

    invalidate_block(bp);
    return RES_OK;
}

static DRESULT sync_all(void)
{
    DRESULT rv = RES_OK;
    struct dll *bhp;
    struct cache_block *bp;

    for (bhp = lru_list.next; bhp != &lru_list; bhp = bhp->next) {
	bp = container_of(bhp, struct cache_block, lru);
	if (bp->flags == FL_DIRTY)
	    rv |= sync_block(bp);
    }

    return rv;
}

static void invalidate_all(void)
{
    struct cache_block *bp = data_to_bp((void *)cache_base);
    const struct cache_block *ebp = data_to_bp(CACHE_END);

    while (bp < ebp)
	invalidate_block(bp++);
}

static DRESULT sync_kill_block(struct cache_block *bp)
{
    DRESULT rv = RES_OK;

    if (!(bp->flags & FL_CACHE_BIT))
	return rv;

    if (bp->flags == FL_DIRTY)
	rv = sync_block(bp);

    if (!rv) {
	dll_remove(&bp->hash);
	dll_remove(&bp->lru);
	bp->flags = FL_NOTCACHE;
	bp->block = NO_BLOCK;
    }

    return rv;
}

static struct cache_block *disk_cache_get(block_t block, bool do_read)
{
    const sector_t size = sdc.lbasize;
    struct cache_block *bp;

    if (sdc.status & (STA_NOINIT | STA_NODISK))
	goto err;

    bp = disk_cache_find(block);

    if (!bp) {
	/* Block not in cache, need to get it */
	sector_t sector = block << CACHE_BLOCK_BITS;
	int sectors = CACHE_BLOCK_SECTORS;

	if (sector >= size)
	    return NULL;
	if (sector + sectors > size)
	    sectors = size - sector; /* Truncated final block */

	/* Get the oldest block */
	bp = container_of(lru_list.prev, struct cache_block, lru);
	clear_block(bp);

	if (do_read) {
	    if (sdcard_read_sectors(bp_to_data(bp), sector, sectors) != sectors)
		goto err;
	    bp->flags = FL_CLEAN;
	}

	bp->block = block;
	dll_insert_head(hash_slot(block), &bp->hash);
    }

    dll_promote(&lru_list, &bp->lru);
    return bp;

err:
    if (sdc.status & (STA_NOINIT | STA_NODISK))
	invalidate_all();
    return NULL;
}

/* --------------------------------------------------------------------------
 *  Heap management
 * ------------------------------------------------------------------------- */

#define ARENA_START		((size_t)&__heap_start)
#define ARENA_END		((size_t)&__heap_end)
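/*
 * Move the boundary between the sbrk heap (growing up from the start
 * of the arena) and the disk cache (occupying the remainder, up to the
 * end of the arena). Shrinking the cache writes back and discards any
 * blocks in the reclaimed region; growing it just adds empty blocks.
 */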
static size_t disk_cache_adjust(size_t newbrk)
{
    size_t new_base = align_up(newbrk + BRK_SLACK, CACHE_BLOCK_SIZE);
    if (new_base > ARENA_END - MIN_CACHE_SIZE)
	new_base = ARENA_END - MIN_CACHE_SIZE;

    /*
     * Add new entries to the tail of the free list in reverse order,
     * so higher addresses get preferred.
     */
    struct cache_block * const obp = data_to_bp((void *)cache_base);
    struct cache_block * const nbp = data_to_bp((void *)new_base);
    struct cache_block *bp;

    if (obp < nbp) {
	/* Shrink cache */
	for (bp = obp; bp < nbp; bp++) {
	    if (sync_kill_block(bp))
		break;
	}
    } else {
	/* Grow cache */
	for (bp = nbp; bp < obp; bp++) {
	    bp->block = NO_BLOCK;
	    bp->flags = FL_FREE;
	    dll_insert_tail(&free_list, &bp->hash);
	    dll_insert_tail(&lru_list, &bp->lru);
	}
	bp = nbp;
    }

    new_base = (size_t)bp_to_data(bp);

    if (cache_base != new_base) {
	con_printf("heap: start %p, brk %p, cache %p, %u blocks\n",
		   __heap_start, (void *)newbrk, (void *)new_base,
		   (ARENA_END - new_base) >> CACHE_BLOCK_SHIFT);
    }

    return cache_base = new_base;
}

void * __hot _sbrk(ptrdiff_t increment)
{
    const size_t heap_start = align_up(ARENA_START, HEAP_ALIGN);
    size_t new_brk = align_up(cur_brk + increment, HEAP_ALIGN);

    if (unlikely(new_brk > ARENA_END - MIN_CACHE_SIZE))
	goto err;
    if (unlikely(new_brk < heap_start))
	new_brk = heap_start;

    if (new_brk > cache_base || new_brk <= cache_base - BRK_SLACK) {
	if (disk_cache_adjust(new_brk) < new_brk)
	    goto err;
    }

    size_t old_brk = cur_brk;
    cur_brk = new_brk;
    return (void *)old_brk;

err:
    errno = ENOMEM;
    return (void *)(-1);
}

void heap_init(void)
{
    const size_t heap_start = align_up(ARENA_START, HEAP_ALIGN);

    cur_brk = heap_start;
    cache_base = ARENA_END;

    con_printf("heap_init: start %p, end %p\n",
	       (void *)cur_brk, (void *)cache_base);

    dll_init(&free_list);
    dll_init(&lru_list);

    struct dll * const hep = cache_hash + CACHE_HASH_SIZE;
    for (struct dll *hp = cache_hash; hp < hep; hp++)
	dll_init(hp);

    disk_cache_adjust(cur_brk);
}

/* --------------------------------------------------------------------------
 *  Interface to fatfs
 * ------------------------------------------------------------------------- */

DSTATUS disk_initialize(BYTE drive)
{
    if (drive != 0)
	return STA_NOINIT | STA_NODISK;

    /* If we get here, STA_NOINIT was set at some point, so cache is bad */
    invalidate_all();

    return sdc.fsstatus = sdcard_init();
}

DRESULT disk_ioctl(BYTE drive, BYTE command, void *buffer)
{
    if (drive != 0)
	return RES_PARERR;

    if (sdc.status & STA_NOINIT)
	return RES_NOTRDY;

    switch (command) {
    case CTRL_SYNC:
	return sync_all();

    case GET_SECTOR_SIZE:
	*(WORD *)buffer = SECTOR_SIZE;
	return RES_OK;

    case GET_SECTOR_COUNT:
	*(DWORD *)buffer = sdc.lbasize;
	return RES_OK;

    case GET_BLOCK_SIZE:
	*(DWORD *)buffer = CACHE_BLOCK_SECTORS;
	return RES_OK;

    default:
	return RES_PARERR;
    }
}

DRESULT disk_read(BYTE drive, BYTE *buffer,
		  LBA_t sectornumber, UINT sectorcount)
{
    (void)drive;

    if (sdc.status & STA_NOINIT)
	return RES_NOTRDY;

    if (!sectorcount)
	return RES_OK;

    block_t block = sectornumber >> CACHE_BLOCK_BITS;
    block_t last = (sectornumber + sectorcount - 1) >> CACHE_BLOCK_BITS;
    size_t offset = (sectornumber & (CACHE_BLOCK_SECTORS-1)) << SECTOR_SHIFT;
    size_t len = sectorcount << SECTOR_SHIFT;

    while (block <= last) {
	struct cache_block *bp = disk_cache_get(block, true);
	if (!bp)
	    return RES_ERROR;

	size_t bytes = min(CACHE_BLOCK_SIZE - offset, len);
	memcpy(buffer, bp_to_data(bp) + offset, bytes);

	buffer += bytes;
	len -= bytes;
	block++;
	offset = 0;
    }

    return RES_OK;
}
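/*
 * A write that covers a whole (possibly truncated) cache block can
 * skip reading the old contents from the card; only partial-block
 * writes take the read-modify-write path via disk_cache_get(..., true).
 */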
DRESULT disk_write(BYTE drive, const BYTE *buffer, LBA_t sectornumber,
		   UINT sectorcount)
{
    (void)drive;

    if (sdc.status & STA_NOINIT)
	return RES_NOTRDY;

    if (!sectorcount)
	return RES_OK;

    block_t block = sectornumber >> CACHE_BLOCK_BITS;
    block_t last = (sectornumber + sectorcount - 1) >> CACHE_BLOCK_BITS;
    size_t offset = (sectornumber & (CACHE_BLOCK_SECTORS-1)) << SECTOR_SHIFT;
    size_t len = sectorcount << SECTOR_SHIFT;
    size_t size = sdc.lbasize;

    while (block <= last) {
	sector_t sector = block << CACHE_BLOCK_BITS;
	sector_t sectors = min(CACHE_BLOCK_SECTORS, size - sector);
	size_t block_bytes = sectors << SECTOR_SHIFT;
	size_t bytes = min(block_bytes - offset, len);
	struct cache_block *bp;

	bp = disk_cache_get(block, bytes < block_bytes);
	if (!bp)
	    return RES_ERROR;

	memcpy(bp_to_data(bp) + offset, buffer, bytes);
	bp->flags = FL_DIRTY;

	buffer += bytes;
	len -= bytes;
	block++;
	offset = 0;
    }

    return RES_OK;
}
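/*
 * Usage sketch (not part of this file): with the functions above
 * providing the FatFs diskio layer, a caller mounts and accesses the
 * volume through the standard ff.h API, e.g.:
 *
 *	FATFS fs;
 *	FIL f;
 *	UINT br;
 *	char buf[128];
 *
 *	if (f_mount(&fs, "", 1) == FR_OK &&
 *	    f_open(&f, "config.txt", FA_READ) == FR_OK) {
 *		f_read(&f, buf, sizeof buf, &br);
 *		f_close(&f);
 *	}
 *
 * "config.txt" is a hypothetical file name; error handling is elided.
 */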