From 7b6844188837519b23f67342f92a0ef58613a13a Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Tue, 19 Dec 2023 15:02:40 -0600 Subject: Renamed a number of internal block-allocator fields - Renamed lfs.free -> lfs.lookahead - Renamed lfs.free.off -> lfs.lookahead.start - Renamed lfs.free.i -> lfs.lookahead.next - Renamed lfs.free.ack -> lfs.lookahead.ckpoint - Renamed lfs_alloc_ack -> lfs_alloc_ckpoint These have been named a bit confusingly, and I think the new names make their relevant purposes a bit clearer. At the very least it's clear lfs.lookahead is related to the lookahead buffer. (and doesn't imply a closed free-bitmap). --- lfs.c | 93 +++++++++++++++++++++++++------------------------ lfs.h | 10 +++--- tests/test_orphans.toml | 4 +-- 3 files changed, 55 insertions(+), 52 deletions(-) diff --git a/lfs.c b/lfs.c index aed1b07..27a66e9 100644 --- a/lfs.c +++ b/lfs.c @@ -596,11 +596,11 @@ static int lfs_rawunmount(lfs_t *lfs); #ifndef LFS_READONLY static int lfs_alloc_lookahead(void *p, lfs_block_t block) { lfs_t *lfs = (lfs_t*)p; - lfs_block_t off = ((block - lfs->free.off) + lfs_block_t off = ((block - lfs->lookahead.start) + lfs->block_count) % lfs->block_count; - if (off < lfs->free.size) { - lfs->free.buffer[off / 32] |= 1U << (off % 32); + if (off < lfs->lookahead.size) { + lfs->lookahead.buffer[off / 32] |= 1U << (off % 32); } return 0; @@ -610,28 +610,31 @@ static int lfs_alloc_lookahead(void *p, lfs_block_t block) { // indicate allocated blocks have been committed into the filesystem, this // is to prevent blocks from being garbage collected in the middle of a // commit operation -static void lfs_alloc_ack(lfs_t *lfs) { - lfs->free.ack = lfs->block_count; +static void lfs_alloc_ckpoint(lfs_t *lfs) { + lfs->lookahead.ckpoint = lfs->block_count; } // drop the lookahead buffer, this is done during mounting and failed // traversals in order to avoid invalid lookahead state static void lfs_alloc_drop(lfs_t *lfs) { - lfs->free.size = 0; - lfs->free.i = 0; 
- lfs_alloc_ack(lfs); + lfs->lookahead.size = 0; + lfs->lookahead.next = 0; + lfs_alloc_ckpoint(lfs); } #ifndef LFS_READONLY static int lfs_fs_rawgc(lfs_t *lfs) { - // Move free offset at the first unused block (lfs->free.i) - // lfs->free.i is equal lfs->free.size when all blocks are used - lfs->free.off = (lfs->free.off + lfs->free.i) % lfs->block_count; - lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack); - lfs->free.i = 0; + // Move free offset at the first unused block (lfs->lookahead.next) + // lfs->lookahead.next is equal lfs->lookahead.size when all blocks are used + lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next) + % lfs->block_count; + lfs->lookahead.size = lfs_min( + 8*lfs->cfg->lookahead_size, + lfs->lookahead.ckpoint); + lfs->lookahead.next = 0; // find mask of free blocks from tree - memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size); + memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size); int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true); if (err) { lfs_alloc_drop(lfs); @@ -645,22 +648,22 @@ static int lfs_fs_rawgc(lfs_t *lfs) { #ifndef LFS_READONLY static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) { while (true) { - while (lfs->free.i != lfs->free.size) { - lfs_block_t off = lfs->free.i; - lfs->free.i += 1; - lfs->free.ack -= 1; + while (lfs->lookahead.next != lfs->lookahead.size) { + lfs_block_t off = lfs->lookahead.next; + lfs->lookahead.next += 1; + lfs->lookahead.ckpoint -= 1; - if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) { + if (!(lfs->lookahead.buffer[off / 32] & (1U << (off % 32)))) { // found a free block - *block = (lfs->free.off + off) % lfs->block_count; + *block = (lfs->lookahead.start + off) % lfs->block_count; // eagerly find next off so an alloc ack can // discredit old lookahead blocks - while (lfs->free.i != lfs->free.size && - (lfs->free.buffer[lfs->free.i / 32] - & (1U << (lfs->free.i % 32)))) { - lfs->free.i += 1; - lfs->free.ack -= 1; + while 
(lfs->lookahead.next != lfs->lookahead.size && + (lfs->lookahead.buffer[lfs->lookahead.next / 32] + & (1U << (lfs->lookahead.next % 32)))) { + lfs->lookahead.next += 1; + lfs->lookahead.ckpoint -= 1; } return 0; @@ -668,9 +671,9 @@ static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) { } // check if we have looked at all blocks since last ack - if (lfs->free.ack == 0) { + if (lfs->lookahead.ckpoint == 0) { LFS_ERROR("No more free space %"PRIu32, - lfs->free.i + lfs->free.off); + lfs->lookahead.next + lfs->lookahead.start); return LFS_ERR_NOSPC; } @@ -2586,7 +2589,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) { } // build up new directory - lfs_alloc_ack(lfs); + lfs_alloc_ckpoint(lfs); lfs_mdir_t dir; err = lfs_dir_alloc(lfs, &dir); if (err) { @@ -3272,7 +3275,7 @@ relocate: #ifndef LFS_READONLY static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) { file->off = file->pos; - lfs_alloc_ack(lfs); + lfs_alloc_ckpoint(lfs); int err = lfs_file_relocate(lfs, file); if (err) { return err; @@ -3535,7 +3538,7 @@ static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file, } // extend file with new blocks - lfs_alloc_ack(lfs); + lfs_alloc_ckpoint(lfs); int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache, file->block, file->pos, &file->block, &file->off); @@ -3578,7 +3581,7 @@ relocate: data += diff; nsize -= diff; - lfs_alloc_ack(lfs); + lfs_alloc_ckpoint(lfs); } return size; @@ -4185,10 +4188,10 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) { LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 && (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0); if (lfs->cfg->lookahead_buffer) { - lfs->free.buffer = lfs->cfg->lookahead_buffer; + lfs->lookahead.buffer = lfs->cfg->lookahead_buffer; } else { - lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size); - if (!lfs->free.buffer) { + lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size); + if (!lfs->lookahead.buffer) { err = LFS_ERR_NOMEM; goto cleanup; } @@ -4245,7 +4248,7 @@ static 
int lfs_deinit(lfs_t *lfs) { } if (!lfs->cfg->lookahead_buffer) { - lfs_free(lfs->free.buffer); + lfs_free(lfs->lookahead.buffer); } return 0; @@ -4265,12 +4268,12 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) { LFS_ASSERT(cfg->block_count != 0); // create free lookahead - memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size); - lfs->free.off = 0; - lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, + memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size); + lfs->lookahead.start = 0; + lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->block_count); - lfs->free.i = 0; - lfs_alloc_ack(lfs); + lfs->lookahead.next = 0; + lfs_alloc_ckpoint(lfs); // create root dir lfs_mdir_t root; @@ -4478,7 +4481,7 @@ static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) { // setup free lookahead, to distribute allocations uniformly across // boots, we start the allocator at a random location - lfs->free.off = lfs->seed % lfs->block_count; + lfs->lookahead.start = lfs->seed % lfs->block_count; lfs_alloc_drop(lfs); return 0; @@ -5451,10 +5454,10 @@ static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1, lfs->lfs1->root[1] = LFS_BLOCK_NULL; // setup free lookahead - lfs->free.off = 0; - lfs->free.size = 0; - lfs->free.i = 0; - lfs_alloc_ack(lfs); + lfs->lookahead.start = 0; + lfs->lookahead.size = 0; + lfs->lookahead.next = 0; + lfs_alloc_ckpoint(lfs); // load superblock lfs1_dir_t dir; diff --git a/lfs.h b/lfs.h index 9eeab23..524d087 100644 --- a/lfs.h +++ b/lfs.h @@ -430,13 +430,13 @@ typedef struct lfs { lfs_gstate_t gdisk; lfs_gstate_t gdelta; - struct lfs_free { - lfs_block_t off; + struct lfs_lookahead { + lfs_block_t start; lfs_block_t size; - lfs_block_t i; - lfs_block_t ack; + lfs_block_t next; + lfs_block_t ckpoint; uint32_t *buffer; - } free; + } lookahead; const struct lfs_config *cfg; lfs_size_t block_count; diff --git a/tests/test_orphans.toml b/tests/test_orphans.toml index 2c8405a..d7040ed 100644 --- 
a/tests/test_orphans.toml +++ b/tests/test_orphans.toml @@ -98,7 +98,7 @@ code = ''' lfs_mount(&lfs, cfg) => 0; // create an orphan lfs_mdir_t orphan; - lfs_alloc_ack(&lfs); + lfs_alloc_ckpoint(&lfs); lfs_dir_alloc(&lfs, &orphan) => 0; lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0; @@ -170,7 +170,7 @@ code = ''' lfs_mount(&lfs, cfg) => 0; // create an orphan lfs_mdir_t orphan; - lfs_alloc_ack(&lfs); + lfs_alloc_ckpoint(&lfs); lfs_dir_alloc(&lfs, &orphan) => 0; lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0; -- cgit v1.2.3 From 1f9c3c04b1f936d925f9101ca2ea84d71ce2c3e5 Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Wed, 20 Dec 2023 00:23:04 -0600 Subject: Reworked the block allocator so the logic is hopefully simpler Some of this is just better documentation, some of this is reworking the logic to be more intention driven... if that makes sense... --- lfs.c | 64 ++++++++++++++++++++++++++++++++++++++++------------------------ 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/lfs.c b/lfs.c index 27a66e9..f6001a5 100644 --- a/lfs.c +++ b/lfs.c @@ -607,9 +607,10 @@ static int lfs_alloc_lookahead(void *p, lfs_block_t block) { } #endif -// indicate allocated blocks have been committed into the filesystem, this -// is to prevent blocks from being garbage collected in the middle of a -// commit operation +// allocations should call this when all allocated blocks are committed to +// the filesystem +// +// after a checkpoint, the block allocator may realloc any untracked blocks static void lfs_alloc_ckpoint(lfs_t *lfs) { lfs->lookahead.ckpoint = lfs->block_count; } @@ -624,14 +625,16 @@ static void lfs_alloc_drop(lfs_t *lfs) { #ifndef LFS_READONLY static int lfs_fs_rawgc(lfs_t *lfs) { - // Move free offset at the first unused block (lfs->lookahead.next) - // lfs->lookahead.next is equal lfs->lookahead.size when all blocks are used + // move lookahead buffer to the first unused block + // + // note we limit the lookahead buffer to at most the amount of blocks 
+ // checkpointed, this prevents the math in lfs_alloc from underflowing lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next) % lfs->block_count; + lfs->lookahead.next = 0; lfs->lookahead.size = lfs_min( 8*lfs->cfg->lookahead_size, lfs->lookahead.ckpoint); - lfs->lookahead.next = 0; // find mask of free blocks from tree memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size); @@ -648,35 +651,48 @@ static int lfs_fs_rawgc(lfs_t *lfs) { #ifndef LFS_READONLY static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) { while (true) { - while (lfs->lookahead.next != lfs->lookahead.size) { - lfs_block_t off = lfs->lookahead.next; - lfs->lookahead.next += 1; - lfs->lookahead.ckpoint -= 1; - - if (!(lfs->lookahead.buffer[off / 32] & (1U << (off % 32)))) { + // scan our lookahead buffer for free blocks + while (lfs->lookahead.next < lfs->lookahead.size) { + if (!(lfs->lookahead.buffer[lfs->lookahead.next / 32] + & (1U << (lfs->lookahead.next % 32)))) { // found a free block - *block = (lfs->lookahead.start + off) % lfs->block_count; + *block = (lfs->lookahead.start + lfs->lookahead.next) + % lfs->block_count; - // eagerly find next off so an alloc ack can - // discredit old lookahead blocks - while (lfs->lookahead.next != lfs->lookahead.size && - (lfs->lookahead.buffer[lfs->lookahead.next / 32] - & (1U << (lfs->lookahead.next % 32)))) { + // eagerly find next free block to maximize how many blocks + // lfs_alloc_ckpoint makes available for scanning + while (true) { lfs->lookahead.next += 1; lfs->lookahead.ckpoint -= 1; - } - return 0; + if (lfs->lookahead.next >= lfs->lookahead.size + || !(lfs->lookahead.buffer[lfs->lookahead.next / 32] + & (1U << (lfs->lookahead.next % 32)))) { + return 0; + } + } } + + lfs->lookahead.next += 1; + lfs->lookahead.ckpoint -= 1; } - // check if we have looked at all blocks since last ack - if (lfs->lookahead.ckpoint == 0) { - LFS_ERROR("No more free space %"PRIu32, - lfs->lookahead.next + lfs->lookahead.start); + // In order to 
keep our block allocator from spinning forever when our + // filesystem is full, we mark points where there are no in-flight + // allocations with a checkpoint before starting a set of allocations. + // + // If we've looked at all blocks since the last checkpoint, we report + // the filesystem as out of storage. + // + if (lfs->lookahead.ckpoint <= 0) { + LFS_ERROR("No more free space 0x%"PRIx32, + (lfs->lookahead.start + lfs->lookahead.next) + % lfs->cfg->block_count); return LFS_ERR_NOSPC; } + // No blocks in our lookahead buffer, we need to scan the filesystem for + // unused blocks in the next lookahead window. int err = lfs_fs_rawgc(lfs); if(err) { return err; -- cgit v1.2.3 From b1b10c0e7559adcb7f2600e7bb7b5cf481d02d0d Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Wed, 20 Dec 2023 00:32:17 -0600 Subject: Relaxed lookahead buffer alignment This drops the lookahead buffer from operating on 32-bit words to operating on 8-bit bytes, and removes any alignment requirement. This may have some minor performance impact, but it is unlikely to be significant when you consider IO overhead. The original motivation for 32-bit alignment was an attempt at future-proofing in case we wanted some more complex on-disk data structure. This never happened, and even if it did, it could have been added via additional config options. This has been a significant pain point for users, since providing word-aligned byte-sized buffers in C can be a bit annoying. 
--- lfs.c | 15 +++++++-------- lfs.h | 9 ++++----- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/lfs.c b/lfs.c index f6001a5..dbcf0f0 100644 --- a/lfs.c +++ b/lfs.c @@ -600,7 +600,7 @@ static int lfs_alloc_lookahead(void *p, lfs_block_t block) { + lfs->block_count) % lfs->block_count; if (off < lfs->lookahead.size) { - lfs->lookahead.buffer[off / 32] |= 1U << (off % 32); + lfs->lookahead.buffer[off / 8] |= 1U << (off % 8); } return 0; @@ -653,8 +653,8 @@ static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) { while (true) { // scan our lookahead buffer for free blocks while (lfs->lookahead.next < lfs->lookahead.size) { - if (!(lfs->lookahead.buffer[lfs->lookahead.next / 32] - & (1U << (lfs->lookahead.next % 32)))) { + if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8] + & (1U << (lfs->lookahead.next % 8)))) { // found a free block *block = (lfs->lookahead.start + lfs->lookahead.next) % lfs->block_count; @@ -666,8 +666,8 @@ static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) { lfs->lookahead.ckpoint -= 1; if (lfs->lookahead.next >= lfs->lookahead.size - || !(lfs->lookahead.buffer[lfs->lookahead.next / 32] - & (1U << (lfs->lookahead.next % 32)))) { + || !(lfs->lookahead.buffer[lfs->lookahead.next / 8] + & (1U << (lfs->lookahead.next % 8)))) { return 0; } } @@ -4199,10 +4199,9 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) { lfs_cache_zero(lfs, &lfs->rcache); lfs_cache_zero(lfs, &lfs->pcache); - // setup lookahead, must be multiple of 64-bits, 32-bit aligned + // setup lookahead buffer, note mount finishes initializing this after + // we establish a decent pseudo-random seed LFS_ASSERT(lfs->cfg->lookahead_size > 0); - LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 && - (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0); if (lfs->cfg->lookahead_buffer) { lfs->lookahead.buffer = lfs->cfg->lookahead_buffer; } else { diff --git a/lfs.h b/lfs.h index 524d087..ad1da35 100644 --- a/lfs.h +++ b/lfs.h @@ -226,7 +226,7 @@ struct lfs_config { // 
Size of the lookahead buffer in bytes. A larger lookahead buffer // increases the number of blocks found during an allocation pass. The // lookahead buffer is stored as a compact bitmap, so each byte of RAM - // can track 8 blocks. Must be a multiple of 8. + // can track 8 blocks. lfs_size_t lookahead_size; // Optional statically allocated read buffer. Must be cache_size. @@ -237,9 +237,8 @@ struct lfs_config { // By default lfs_malloc is used to allocate this buffer. void *prog_buffer; - // Optional statically allocated lookahead buffer. Must be lookahead_size - // and aligned to a 32-bit boundary. By default lfs_malloc is used to - // allocate this buffer. + // Optional statically allocated lookahead buffer. Must be lookahead_size. + // By default lfs_malloc is used to allocate this buffer. void *lookahead_buffer; // Optional upper limit on length of file names in bytes. No downside for @@ -435,7 +434,7 @@ typedef struct lfs { lfs_block_t size; lfs_block_t next; lfs_block_t ckpoint; - uint32_t *buffer; + uint8_t *buffer; } lookahead; const struct lfs_config *cfg; -- cgit v1.2.3 From 60567677b95205d50d98815d073a5f466d052e68 Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Tue, 16 Jan 2024 00:27:07 -0600 Subject: Relaxed alignment requirements for lfs_malloc The only reason we needed this alignment was for the lookahead buffer. Now that the lookahead buffer is relaxed to operate on bytes, we can relax our malloc alignment requirement all the way down to the byte level, since we mainly use lfs_malloc to allocate byte-level buffers. This does introduce a risk that we might need word-level mallocs in the future. If that happens we will need to decide if changing the malloc alignment is a breaking change, or gate alignment requirements behind user provided defines. Found by HiFiPhile. 
--- lfs_util.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lfs_util.h b/lfs_util.h index 13e9396..8248d1d 100644 --- a/lfs_util.h +++ b/lfs_util.h @@ -215,7 +215,9 @@ static inline uint32_t lfs_tobe32(uint32_t a) { uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size); // Allocate memory, only used if buffers are not provided to littlefs -// Note, memory must be 64-bit aligned +// +// littlefs currently has no alignment requirements, as it only allocates +// byte-level buffers. static inline void *lfs_malloc(size_t size) { #ifndef LFS_NO_MALLOC return malloc(size); -- cgit v1.2.3