Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/littlefs-project/littlefs.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/lfs.c
diff options
context:
space:
mode:
authorWill <will@mon.im>2020-12-15 05:59:32 +0300
committerWill <will@mon.im>2020-12-15 05:59:32 +0300
commit6b16dafb4db903fc1f8509d4170af56ae70ee56e (patch)
treefc05969c787306c350a241cc8821d741278365f5 /lfs.c
parent1a59954ec64ca168828a15242cc6de94ac75f9d1 (diff)
Add metadata_max and inline_file_max to config
We have seen poor read performance on NAND flashes with 128kB blocks. The root cause is inline files having to traverse many sets of metadata pairs inside the current block before being fully reconstructed. Simply disabling inline files is not enough, as the metadata will still fill up the block and eventually need to be compacted. By allowing configuration of how much size metadata takes up, along with limiting (or disabling) inline file size, we achieve read performance improvements of an order of magnitude.
Diffstat (limited to 'lfs.c')
-rw-r--r--lfs.c24
1 file changed, 19 insertions, 5 deletions
diff --git a/lfs.c b/lfs.c
index d7439fe..5585b40 100644
--- a/lfs.c
+++ b/lfs.c
@@ -1589,7 +1589,7 @@ static int lfs_dir_compact(lfs_t *lfs,
// for metadata updates.
if (end - begin < 0xff &&
size <= lfs_min(lfs->cfg->block_size - 36,
- lfs_alignup(lfs->cfg->block_size/2,
+ lfs_alignup(lfs->metadata_max/2,
lfs->cfg->prog_size))) {
break;
}
@@ -1674,7 +1674,7 @@ static int lfs_dir_compact(lfs_t *lfs,
.crc = 0xffffffff,
.begin = 0,
- .end = lfs->cfg->block_size - 8,
+ .end = lfs->metadata_max - 8,
};
// erase block to write to
@@ -1884,7 +1884,7 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
.crc = 0xffffffff,
.begin = dir->off,
- .end = lfs->cfg->block_size - 8,
+ .end = lfs->metadata_max - 8,
};
// traverse attrs that need to be written out
@@ -2966,7 +2966,7 @@ static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
if ((file->flags & LFS_F_INLINE) &&
lfs_max(file->pos+nsize, file->ctz.size) >
lfs_min(0x3fe, lfs_min(
- lfs->cfg->cache_size, lfs->cfg->block_size/8))) {
+ lfs->cfg->cache_size, lfs->inline_file_max))) {
// inline file doesn't fit anymore
int err = lfs_file_outline(lfs, file);
if (err) {
@@ -3536,6 +3536,20 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
lfs->attr_max = LFS_ATTR_MAX;
}
+ LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);
+ lfs->metadata_max = lfs->cfg->metadata_max;
+ if (!lfs->metadata_max) {
+ lfs->metadata_max = lfs->cfg->block_size;
+ }
+
+ LFS_ASSERT(lfs->cfg->inline_file_max <= LFS_FILE_MAX);
+ lfs->inline_file_max = lfs->cfg->inline_file_max;
+ if (!lfs->inline_file_max) {
+ lfs->inline_file_max = lfs->cfg->block_size / 8;
+ } else if(lfs->inline_file_max == -1) {
+ lfs->inline_file_max = 0;
+ }
+
// setup default state
lfs->root[0] = LFS_BLOCK_NULL;
lfs->root[1] = LFS_BLOCK_NULL;
@@ -3829,7 +3843,7 @@ int lfs_fs_rawtraverse(lfs_t *lfs,
if (err) {
return err;
}
- } else if (includeorphans &&
+ } else if (includeorphans &&
lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
for (int i = 0; i < 2; i++) {
err = cb(data, (&ctz.head)[i]);