Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/littlefs-project/littlefs.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChristopher Haster <chaster@utexas.edu>2018-10-02 23:42:07 +0300
committerChristopher Haster <chaster@utexas.edu>2018-10-18 18:00:49 +0300
commitf010d2add1e7ad1907579f3a0358da2662b288b2 (patch)
tree21396cf6b51d6c5531dcee2ea5380f5c4a5c02c4
parentd7e4abad0bbbff05489a85a66302d5adb12cb789 (diff)
Fixed issue with reads ignoring the pcache
The downside of smarter caching is that there are now more complicated corner cases to consider. Here we weren't considering our pcache when aligning reads to the rcache. This meant that if things were unaligned, we would read a cache-line that overlaps the pcache and then proceed to ignore whatever data we overlapped. The fix is to determine the limit of an rcache read not from cache alignment but from the available caches, which we check anyway to find cached data.
-rw-r--r--lfs.c53
1 files changed, 30 insertions, 23 deletions
diff --git a/lfs.c b/lfs.c
index 4f50fce..feb936d 100644
--- a/lfs.c
+++ b/lfs.c
@@ -44,39 +44,46 @@ static int lfs_bd_read(lfs_t *lfs,
}
while (size > 0) {
+ lfs_size_t diff = size;
+
if (pcache && block == pcache->block &&
- off >= pcache->off &&
off < pcache->off + pcache->size) {
- // is already in pcache?
- lfs_size_t diff = lfs_min(size, pcache->size - (off-pcache->off));
- memcpy(data, &pcache->buffer[off-pcache->off], diff);
+ if (off >= pcache->off) {
+ // is already in pcache?
+ diff = lfs_min(diff, pcache->size - (off-pcache->off));
+ memcpy(data, &pcache->buffer[off-pcache->off], diff);
- data += diff;
- off += diff;
- size -= diff;
- continue;
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
+ }
+
+ // pcache takes priority
+ diff = lfs_min(diff, pcache->off-off);
}
if (block == rcache->block &&
- off >= rcache->off &&
off < rcache->off + rcache->size) {
- // is already in rcache?
- lfs_size_t diff = lfs_min(size, rcache->size - (off-rcache->off));
- if (pcache && block == pcache->block) {
- diff = lfs_min(diff, pcache->off - off);
+ if (off >= rcache->off) {
+ // is already in rcache?
+ diff = lfs_min(diff, rcache->size - (off-rcache->off));
+ memcpy(data, &rcache->buffer[off-rcache->off], diff);
+
+ data += diff;
+ off += diff;
+ size -= diff;
+ continue;
}
- memcpy(data, &rcache->buffer[off-rcache->off], diff);
- data += diff;
- off += diff;
- size -= diff;
- continue;
+ // rcache takes priority
+ diff = lfs_min(diff, rcache->off-off);
}
if (size >= hint && off % lfs->cfg->read_size == 0 &&
size >= lfs->cfg->read_size) {
// bypass cache?
- lfs_size_t diff = size - (size % lfs->cfg->read_size);
+ diff = lfs_aligndown(diff, lfs->cfg->read_size);
int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
if (err) {
return err;
@@ -91,10 +98,10 @@ static int lfs_bd_read(lfs_t *lfs,
// load to cache, first condition can no longer fail
LFS_ASSERT(block < lfs->cfg->block_count);
rcache->block = block;
- rcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
- rcache->size = lfs_min(lfs_min(
- lfs_alignup(off+hint, lfs->cfg->prog_size),
- lfs->cfg->block_size) - rcache->off, lfs->cfg->cache_size);
+ rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
+ rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
+ lfs_min(lfs->cfg->block_size - rcache->off,
+ lfs->cfg->cache_size));
int err = lfs->cfg->read(lfs->cfg, rcache->block,
rcache->off, rcache->buffer, rcache->size);
if (err) {