github.com/littlefs-project/littlefs.git
author     Christopher Haster <geky@geky.net>   2024-01-19 21:27:14 +0300
committer  GitHub <noreply@github.com>          2024-01-19 21:27:14 +0300
commit     ed7bd054357094688462aca9e302ce98ab24f35c (patch)
tree       14a74c70fbf127d4063dc0dfd294e8b7a90aae3e
parent     1195d606ae95be8544b23617cd8f4ee0a4f89738 (diff)
parent     60567677b95205d50d98815d073a5f466d052e68 (diff)

Merge pull request #912 from littlefs-project/relaxed-lookahead

Relaxed lookahead alignment, other internal block alloc readability improvements
-rw-r--r--  lfs.c                    | 136
-rw-r--r--  lfs.h                    |  19
-rw-r--r--  lfs_util.h               |   4
-rw-r--r--  tests/test_orphans.toml  |   4
4 files changed, 91 insertions(+), 72 deletions(-)
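
The user-visible effect of this merge is relaxed lookahead constraints: lookahead_size no longer has to be a multiple of 8 and lookahead_buffer no longer needs 32-bit alignment, because the allocator now tracks the bitmap byte-wise. A minimal configuration sketch under the relaxed rules (the buffer name, its size, and the omitted callbacks are illustrative, not taken from this commit):

    #include <stdint.h>
    #include "lfs.h"

    // illustrative only: any nonzero lookahead_size is accepted after this
    // change, and the buffer needs no particular alignment
    static uint8_t lookahead_buf[13];                // 13 bytes tracks 104 blocks

    static const struct lfs_config cfg = {
        // ... read/prog/erase/sync callbacks and block geometry omitted ...
        .lookahead_size   = sizeof(lookahead_buf),   // no longer a multiple of 8
        .lookahead_buffer = lookahead_buf,           // no longer 32-bit aligned
    };
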
diff --git a/lfs.c b/lfs.c
index 2f10d57..df6a230 100644
--- a/lfs.c
+++ b/lfs.c
@@ -596,42 +596,48 @@ static int lfs_rawunmount(lfs_t *lfs);
#ifndef LFS_READONLY
static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
lfs_t *lfs = (lfs_t*)p;
- lfs_block_t off = ((block - lfs->free.off)
+ lfs_block_t off = ((block - lfs->lookahead.start)
+ lfs->block_count) % lfs->block_count;
- if (off < lfs->free.size) {
- lfs->free.buffer[off / 32] |= 1U << (off % 32);
+ if (off < lfs->lookahead.size) {
+ lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
}
return 0;
}
#endif
-// indicate allocated blocks have been committed into the filesystem, this
-// is to prevent blocks from being garbage collected in the middle of a
-// commit operation
-static void lfs_alloc_ack(lfs_t *lfs) {
- lfs->free.ack = lfs->block_count;
+// allocations should call this when all allocated blocks are committed to
+// the filesystem
+//
+// after a checkpoint, the block allocator may realloc any untracked blocks
+static void lfs_alloc_ckpoint(lfs_t *lfs) {
+ lfs->lookahead.ckpoint = lfs->block_count;
}
// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
- lfs->free.size = 0;
- lfs->free.i = 0;
- lfs_alloc_ack(lfs);
+ lfs->lookahead.size = 0;
+ lfs->lookahead.next = 0;
+ lfs_alloc_ckpoint(lfs);
}
#ifndef LFS_READONLY
static int lfs_fs_rawgc(lfs_t *lfs) {
- // Move free offset at the first unused block (lfs->free.i)
- // lfs->free.i is equal lfs->free.size when all blocks are used
- lfs->free.off = (lfs->free.off + lfs->free.i) % lfs->block_count;
- lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
- lfs->free.i = 0;
+ // move lookahead buffer to the first unused block
+ //
+ // note we limit the lookahead buffer to at most the amount of blocks
+ // checkpointed, this prevents the math in lfs_alloc from underflowing
+ lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next)
+ % lfs->block_count;
+ lfs->lookahead.next = 0;
+ lfs->lookahead.size = lfs_min(
+ 8*lfs->cfg->lookahead_size,
+ lfs->lookahead.ckpoint);
// find mask of free blocks from tree
- memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
+ memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
if (err) {
lfs_alloc_drop(lfs);
@@ -645,35 +651,48 @@ static int lfs_fs_rawgc(lfs_t *lfs) {
#ifndef LFS_READONLY
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
while (true) {
- while (lfs->free.i != lfs->free.size) {
- lfs_block_t off = lfs->free.i;
- lfs->free.i += 1;
- lfs->free.ack -= 1;
-
- if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
+ // scan our lookahead buffer for free blocks
+ while (lfs->lookahead.next < lfs->lookahead.size) {
+ if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8]
+ & (1U << (lfs->lookahead.next % 8)))) {
// found a free block
- *block = (lfs->free.off + off) % lfs->block_count;
-
- // eagerly find next off so an alloc ack can
- // discredit old lookahead blocks
- while (lfs->free.i != lfs->free.size &&
- (lfs->free.buffer[lfs->free.i / 32]
- & (1U << (lfs->free.i % 32)))) {
- lfs->free.i += 1;
- lfs->free.ack -= 1;
+ *block = (lfs->lookahead.start + lfs->lookahead.next)
+ % lfs->block_count;
+
+ // eagerly find next free block to maximize how many blocks
+ // lfs_alloc_ckpoint makes available for scanning
+ while (true) {
+ lfs->lookahead.next += 1;
+ lfs->lookahead.ckpoint -= 1;
+
+ if (lfs->lookahead.next >= lfs->lookahead.size
+ || !(lfs->lookahead.buffer[lfs->lookahead.next / 8]
+ & (1U << (lfs->lookahead.next % 8)))) {
+ return 0;
+ }
}
-
- return 0;
}
+
+ lfs->lookahead.next += 1;
+ lfs->lookahead.ckpoint -= 1;
}
- // check if we have looked at all blocks since last ack
- if (lfs->free.ack == 0) {
- LFS_ERROR("No more free space %"PRIu32,
- lfs->free.i + lfs->free.off);
+ // In order to keep our block allocator from spinning forever when our
+ // filesystem is full, we mark points where there are no in-flight
+ // allocations with a checkpoint before starting a set of allocations.
+ //
+ // If we've looked at all blocks since the last checkpoint, we report
+ // the filesystem as out of storage.
+ //
+ if (lfs->lookahead.ckpoint <= 0) {
+ LFS_ERROR("No more free space 0x%"PRIx32,
+ (lfs->lookahead.start + lfs->lookahead.next)
+ % lfs->cfg->block_count);
return LFS_ERR_NOSPC;
}
+ // No blocks in our lookahead buffer, we need to scan the filesystem for
+ // unused blocks in the next lookahead window.
int err = lfs_fs_rawgc(lfs);
if(err) {
return err;
@@ -2588,7 +2607,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
}
// build up new directory
- lfs_alloc_ack(lfs);
+ lfs_alloc_ckpoint(lfs);
lfs_mdir_t dir;
err = lfs_dir_alloc(lfs, &dir);
if (err) {
@@ -3274,7 +3293,7 @@ relocate:
#ifndef LFS_READONLY
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
file->off = file->pos;
- lfs_alloc_ack(lfs);
+ lfs_alloc_ckpoint(lfs);
int err = lfs_file_relocate(lfs, file);
if (err) {
return err;
@@ -3537,7 +3556,7 @@ static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
}
// extend file with new blocks
- lfs_alloc_ack(lfs);
+ lfs_alloc_ckpoint(lfs);
int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
file->block, file->pos,
&file->block, &file->off);
@@ -3580,7 +3599,7 @@ relocate:
data += diff;
nsize -= diff;
- lfs_alloc_ack(lfs);
+ lfs_alloc_ckpoint(lfs);
}
return size;
@@ -4197,15 +4216,14 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
lfs_cache_zero(lfs, &lfs->rcache);
lfs_cache_zero(lfs, &lfs->pcache);
- // setup lookahead, must be multiple of 64-bits, 32-bit aligned
+ // setup lookahead buffer, note mount finishes initializing this after
+ // we establish a decent pseudo-random seed
LFS_ASSERT(lfs->cfg->lookahead_size > 0);
- LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
- (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
if (lfs->cfg->lookahead_buffer) {
- lfs->free.buffer = lfs->cfg->lookahead_buffer;
+ lfs->lookahead.buffer = lfs->cfg->lookahead_buffer;
} else {
- lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
- if (!lfs->free.buffer) {
+ lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size);
+ if (!lfs->lookahead.buffer) {
err = LFS_ERR_NOMEM;
goto cleanup;
}
@@ -4262,7 +4280,7 @@ static int lfs_deinit(lfs_t *lfs) {
}
if (!lfs->cfg->lookahead_buffer) {
- lfs_free(lfs->free.buffer);
+ lfs_free(lfs->lookahead.buffer);
}
return 0;
@@ -4282,12 +4300,12 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
LFS_ASSERT(cfg->block_count != 0);
// create free lookahead
- memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
- lfs->free.off = 0;
- lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
+ memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
+ lfs->lookahead.start = 0;
+ lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size,
lfs->block_count);
- lfs->free.i = 0;
- lfs_alloc_ack(lfs);
+ lfs->lookahead.next = 0;
+ lfs_alloc_ckpoint(lfs);
// create root dir
lfs_mdir_t root;
@@ -4495,7 +4513,7 @@ static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
// setup free lookahead, to distribute allocations uniformly across
// boots, we start the allocator at a random location
- lfs->free.off = lfs->seed % lfs->block_count;
+ lfs->lookahead.start = lfs->seed % lfs->block_count;
lfs_alloc_drop(lfs);
return 0;
@@ -5468,10 +5486,10 @@ static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
lfs->lfs1->root[1] = LFS_BLOCK_NULL;
// setup free lookahead
- lfs->free.off = 0;
- lfs->free.size = 0;
- lfs->free.i = 0;
- lfs_alloc_ack(lfs);
+ lfs->lookahead.start = 0;
+ lfs->lookahead.size = 0;
+ lfs->lookahead.next = 0;
+ lfs_alloc_ckpoint(lfs);
// load superblock
lfs1_dir_t dir;
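
The recurring change in lfs.c is the bitmap indexing: the lookahead bitmap is now addressed as bytes instead of 32-bit words. A standalone sketch comparing the two schemes (these helpers are mine for illustration, not littlefs API):

    #include <stdint.h>
    #include <stdbool.h>

    // old scheme: bit (off % 32) of word (off / 32), buffer is uint32_t*
    static bool word_bitmap_get(const uint32_t *buf, uint32_t off) {
        return buf[off / 32] & (1U << (off % 32));
    }

    // new scheme: bit (off % 8) of byte (off / 8), buffer is uint8_t*
    static bool byte_bitmap_get(const uint8_t *buf, uint32_t off) {
        return buf[off / 8] & (1U << (off % 8));
    }

On a little-endian target both forms address the same physical bit; the byte-wise form just drops the word-size and alignment assumptions, which is what lets lookahead_size and lookahead_buffer be arbitrary.
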
diff --git a/lfs.h b/lfs.h
index 452dd0e..6742ffe 100644
--- a/lfs.h
+++ b/lfs.h
@@ -224,7 +224,7 @@ struct lfs_config {
// Size of the lookahead buffer in bytes. A larger lookahead buffer
// increases the number of blocks found during an allocation pass. The
// lookahead buffer is stored as a compact bitmap, so each byte of RAM
- // can track 8 blocks. Must be a multiple of 8.
+ // can track 8 blocks.
lfs_size_t lookahead_size;
// Optional statically allocated read buffer. Must be cache_size.
@@ -235,9 +235,8 @@ struct lfs_config {
// By default lfs_malloc is used to allocate this buffer.
void *prog_buffer;
- // Optional statically allocated lookahead buffer. Must be lookahead_size
- // and aligned to a 32-bit boundary. By default lfs_malloc is used to
- // allocate this buffer.
+ // Optional statically allocated lookahead buffer. Must be lookahead_size.
+ // By default lfs_malloc is used to allocate this buffer.
void *lookahead_buffer;
// Optional upper limit on length of file names in bytes. No downside for
@@ -428,13 +427,13 @@ typedef struct lfs {
lfs_gstate_t gdisk;
lfs_gstate_t gdelta;
- struct lfs_free {
- lfs_block_t off;
+ struct lfs_lookahead {
+ lfs_block_t start;
lfs_block_t size;
- lfs_block_t i;
- lfs_block_t ack;
- uint32_t *buffer;
- } free;
+ lfs_block_t next;
+ lfs_block_t ckpoint;
+ uint8_t *buffer;
+ } lookahead;
const struct lfs_config *cfg;
lfs_size_t block_count;
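
For reference, a sketch of the renamed allocator state with the old field names noted (the annotations are mine, not from the commit):

    // struct lfs_free became struct lfs_lookahead
    struct lfs_lookahead {
        lfs_block_t start;    // was free.off  -- origin of the lookahead window
        lfs_block_t size;     // unchanged     -- blocks covered, at most 8*lookahead_size
        lfs_block_t next;     // was free.i    -- scan position within the window
        lfs_block_t ckpoint;  // was free.ack  -- blocks scannable before LFS_ERR_NOSPC
        uint8_t *buffer;      // was uint32_t* -- now a byte-wise bitmap
    } lookahead;

lfs_alloc_ack() is likewise renamed to lfs_alloc_ckpoint().
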
diff --git a/lfs_util.h b/lfs_util.h
index 45cefc9..4e57700 100644
--- a/lfs_util.h
+++ b/lfs_util.h
@@ -221,7 +221,9 @@ uint32_t lfs_crc(uint32_t crc, const void *buffer, size_t size);
#endif
// Allocate memory, only used if buffers are not provided to littlefs
-// Note, memory must be 64-bit aligned
+//
+// littlefs currently has no alignment requirements, as it only allocates
+// byte-level buffers.
static inline void *lfs_malloc(size_t size) {
#if defined(LFS_MALLOC)
return LFS_MALLOC(size);
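
With the 64-bit alignment note dropped from lfs_malloc's contract, a custom allocator plugged in through the LFS_MALLOC hook shown above no longer has to return aligned memory for these buffers. A hedged sketch, assuming LFS_MALLOC is defined before lfs_util.h is included (my_byte_pool_alloc is hypothetical):

    #include <stddef.h>

    // hypothetical byte-granular pool allocator, not part of littlefs
    void *my_byte_pool_alloc(size_t size);

    // per the updated comment, no particular alignment is required of the
    // returned pointer
    #define LFS_MALLOC(size) my_byte_pool_alloc(size)
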
diff --git a/tests/test_orphans.toml b/tests/test_orphans.toml
index 2c8405a..d7040ed 100644
--- a/tests/test_orphans.toml
+++ b/tests/test_orphans.toml
@@ -98,7 +98,7 @@ code = '''
lfs_mount(&lfs, cfg) => 0;
// create an orphan
lfs_mdir_t orphan;
- lfs_alloc_ack(&lfs);
+ lfs_alloc_ckpoint(&lfs);
lfs_dir_alloc(&lfs, &orphan) => 0;
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
@@ -170,7 +170,7 @@ code = '''
lfs_mount(&lfs, cfg) => 0;
// create an orphan
lfs_mdir_t orphan;
- lfs_alloc_ack(&lfs);
+ lfs_alloc_ckpoint(&lfs);
lfs_dir_alloc(&lfs, &orphan) => 0;
lfs_dir_commit(&lfs, &orphan, NULL, 0) => 0;
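
The tests follow the same checkpoint-then-allocate pattern as the internal call sites: once every previously allocated block is committed to the filesystem, mark a checkpoint, then begin the next batch of allocations. Annotated in C for clarity (internal API; the comments are mine):

    lfs_alloc_ckpoint(&lfs);        // all blocks allocated so far are committed,
                                    // so untracked blocks may be reallocated
    lfs_dir_alloc(&lfs, &orphan);   // safe to start a new set of allocations
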