github.com/littlefs-project/littlefs.git
author    Christopher Haster <chaster@utexas.edu>    2019-07-29 05:35:48 +0300
committer GitHub <noreply@github.com>                2019-07-29 05:35:48 +0300
commit    74fe46de3de98cecdff8681ccd53c481c31352e6 (patch)
tree      6836f06dda74127ba6a544058b2c5fefd3865c15 /lfs.h
parent    582b596ed1818aab60a65dd931815fa8e688796c (diff)
parent    e8c023aab055a8892b8abc262d82f78a11108dc5 (diff)
Merge pull request #233 from ARMmbed/discourage-no-wear-leveling
Change block_cycles disable from 0 to -1
Diffstat (limited to 'lfs.h')
-rw-r--r--  lfs.h  10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/lfs.h b/lfs.h
index 3f80016..64d4f0a 100644
--- a/lfs.h
+++ b/lfs.h
@@ -191,9 +191,13 @@ struct lfs_config {
     // Number of erasable blocks on the device.
     lfs_size_t block_count;

-    // Number of erase cycles before we should move data to another block.
-    // May be zero, in which case no block-level wear-leveling is performed.
-    uint32_t block_cycles;
+    // Number of erase cycles before littlefs evicts metadata logs and moves
+    // the metadata to another block. Suggested values are in the
+    // range 100-1000, with large values having better performance at the cost
+    // of less consistent wear distribution.
+    //
+    // Set to -1 to disable block-level wear-leveling.
+    int32_t block_cycles;

     // Size of block caches. Each cache buffers a portion of a block in RAM.
     // The littlefs needs a read cache, a program cache, and one additional
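
For context, a minimal configuration sketch under the new convention might look like the following. The geometry values and the 500-cycle choice are placeholder assumptions, not part of this commit, and the block device callbacks are omitted:

#include "lfs.h"

// Example configuration sketch (hypothetical values, device callbacks omitted).
const struct lfs_config cfg = {
    // hypothetical geometry for a small NOR-flash-style device
    .read_size      = 16,
    .prog_size      = 16,
    .block_size     = 4096,
    .block_count    = 128,
    .cache_size     = 64,
    .lookahead_size = 16,

    // evict metadata logs roughly every 500 erase cycles (suggested
    // range 100-1000); with this change, -1 rather than 0 disables
    // block-level wear-leveling
    .block_cycles   = 500,
};

Code that previously passed block_cycles = 0 to disable wear-leveling would now pass -1; the field's type widens from uint32_t to int32_t to make room for the sentinel.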