From: Chi Zhiling <chizhiling@kylinos.cn>
The current cache mechanism does not support looking up or caching
clusters located at a file offset of zero. This patch enables that, in
preparation for subsequent reads of contiguous clusters starting from
offset zero:
1. Modify exfat_cache_lookup() to find clusters at offset zero.
2. Allow clusters at offset zero to be cached.
Signed-off-by: Chi Zhiling <chizhiling@kylinos.cn>
---
fs/exfat/cache.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/exfat/cache.c b/fs/exfat/cache.c
index 025b39b7a9ac..73147e153c2c 100644
--- a/fs/exfat/cache.c
+++ b/fs/exfat/cache.c
@@ -92,7 +92,7 @@ static unsigned int exfat_cache_lookup(struct inode *inode,
spin_lock(&ei->cache_lru_lock);
list_for_each_entry(p, &ei->cache_lru, cache_list) {
/* Find the cache of "fclus" or nearest cache. */
- if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
+ if (p->fcluster <= fclus && hit->fcluster <= p->fcluster) {
hit = p;
if (hit->fcluster + hit->nr_contig < fclus) {
offset = hit->nr_contig;
@@ -259,7 +259,7 @@ int exfat_get_cluster(struct inode *inode, unsigned int cluster,
if (cluster == 0 || *dclus == EXFAT_EOF_CLUSTER)
return 0;
- cache_init(&cid, EXFAT_EOF_CLUSTER, EXFAT_EOF_CLUSTER);
+ cache_init(&cid, fclus, *dclus);
exfat_cache_lookup(inode, cluster, &cid, &fclus, dclus);
if (fclus == cluster)
--
2.43.0
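To see why the comparison change matters: exfat_cache_lookup() seeds
'hit' with a 'nohit' sentinel whose fcluster is 0, so under the old '<'
test a cached extent that itself starts at file cluster 0 could never
replace the sentinel and was always reported as a miss. The following
standalone sketch is a simplified userspace model of the lookup loop
(an array stands in for the LRU list; this is not the kernel code):

	#include <stdio.h>

	struct cache { unsigned int fcluster, dcluster, nr_contig; };

	/* Mirrors the sentinel in exfat_cache_lookup(). */
	static const struct cache nohit = { .fcluster = 0 };

	/*
	 * Return the best entry for 'fclus', or NULL on miss.
	 * 'inclusive' selects the patched '<=' comparison.
	 */
	static const struct cache *lookup(const struct cache *entries, int n,
					  unsigned int fclus, int inclusive)
	{
		const struct cache *hit = &nohit;

		for (int i = 0; i < n; i++) {
			const struct cache *p = &entries[i];

			if (p->fcluster <= fclus &&
			    (inclusive ? hit->fcluster <= p->fcluster
				       : hit->fcluster <  p->fcluster))
				hit = p;
		}
		return hit == &nohit ? NULL : hit;
	}

	int main(void)
	{
		/* One cached extent starting at file cluster 0. */
		struct cache c[] = {
			{ .fcluster = 0, .dcluster = 100, .nr_contig = 7 }
		};

		printf("old '<' : %s\n", lookup(c, 1, 3, 0) ? "hit" : "miss");
		printf("new '<=': %s\n", lookup(c, 1, 3, 1) ? "hit" : "miss");
		return 0;
	}

With '<' the entry at fcluster 0 never beats the sentinel, so the
lookup misses even though the extent covers the requested cluster;
with '<=' the extent is found.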
From: Chi Zhiling <chizhiling@kylinos.cn>
Change exfat_cache_lookup() to take an 'end' parameter, the upper
bound of the search range, and to return the last cluster before the
next cache range, or the given 'end' if there is no next cache. This
lets the caller know whether the clusters after the current cache are
already covered by another cache.
The search now also stops early when the hit cache covers the entire
range, or when the next cache starts at the hit cache's tail, meaning
the caches are contiguous.
The new behavior is illustrated as follows:
cache:  [ccccccc-------ccccccccc]
search:    [..................]
return:               ^
Signed-off-by: Chi Zhiling <chizhiling@kylinos.cn>
---
fs/exfat/cache.c | 49 ++++++++++++++++++++++++++++++++++++------------
1 file changed, 37 insertions(+), 12 deletions(-)
diff --git a/fs/exfat/cache.c b/fs/exfat/cache.c
index 73147e153c2c..5cdeac014a3d 100644
--- a/fs/exfat/cache.c
+++ b/fs/exfat/cache.c
@@ -80,41 +80,66 @@ static inline void exfat_cache_update_lru(struct inode *inode,
list_move(&cache->cache_list, &ei->cache_lru);
}
-static unsigned int exfat_cache_lookup(struct inode *inode,
- unsigned int fclus, struct exfat_cache_id *cid,
+/*
+ * Find the cache that covers or precedes 'fclus' and return the last
+ * cluster before the next cache range.
+ */
+static inline unsigned int
+exfat_cache_lookup(struct inode *inode, struct exfat_cache_id *cid,
+ unsigned int fclus, unsigned int end,
unsigned int *cached_fclus, unsigned int *cached_dclus)
{
struct exfat_inode_info *ei = EXFAT_I(inode);
static struct exfat_cache nohit = { .fcluster = 0, };
struct exfat_cache *hit = &nohit, *p;
- unsigned int offset = EXFAT_EOF_CLUSTER;
+ unsigned int tail = 0; /* End boundary of hit cache */
+ /*
+ * Search range [fclus, end]. Stop early if:
+ * 1. Cache covers entire range, or
+ * 2. Next cache starts at current cache tail
+ */
spin_lock(&ei->cache_lru_lock);
list_for_each_entry(p, &ei->cache_lru, cache_list) {
/* Find the cache of "fclus" or nearest cache. */
- if (p->fcluster <= fclus && hit->fcluster <= p->fcluster) {
+ if (p->fcluster <= fclus) {
+ if (p->fcluster < hit->fcluster)
+ continue;
+
hit = p;
- if (hit->fcluster + hit->nr_contig < fclus) {
- offset = hit->nr_contig;
- } else {
- offset = fclus - hit->fcluster;
+ tail = hit->fcluster + hit->nr_contig;
+
+ /* Current cache covers [fclus, end] completely */
+ if (tail >= end)
+ break;
+ } else if (p->fcluster <= end) {
+ end = p->fcluster - 1;
+
+ /*
+ * If we have a hit and next cache starts within/at
+ * its tail, caches are contiguous, stop searching.
+ */
+ if (tail && tail >= end)
break;
- }
}
}
if (hit != &nohit) {
- exfat_cache_update_lru(inode, hit);
+ unsigned int offset;
+ exfat_cache_update_lru(inode, hit);
cid->id = ei->cache_valid_id;
cid->nr_contig = hit->nr_contig;
cid->fcluster = hit->fcluster;
cid->dcluster = hit->dcluster;
+
+ offset = min(cid->nr_contig, fclus - cid->fcluster);
*cached_fclus = cid->fcluster + offset;
*cached_dclus = cid->dcluster + offset;
}
spin_unlock(&ei->cache_lru_lock);
- return offset;
+ /* Return next cache start or 'end' if no more caches */
+ return end;
}
static struct exfat_cache *exfat_cache_merge(struct inode *inode,
@@ -260,7 +285,7 @@ int exfat_get_cluster(struct inode *inode, unsigned int cluster,
return 0;
cache_init(&cid, fclus, *dclus);
- exfat_cache_lookup(inode, cluster, &cid, &fclus, dclus);
+ exfat_cache_lookup(inode, &cid, cluster, cluster, &fclus, dclus);
if (fclus == cluster)
return 0;
--
2.43.0
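The following standalone sketch is a simplified userspace model of the
new search (an array stands in for the LRU list; this is not the kernel
code). It echoes the diagram above: with cached extents covering file
clusters 0-6 and 14-22, a lookup over [3, 20] is clamped to 13, the
last cluster before the next cache:

	#include <stdio.h>

	struct cache { unsigned int fcluster, nr_contig; };

	/* Clamp 'end' to the last cluster before the next cache. */
	static unsigned int lookup_end(const struct cache *entries, int n,
				       unsigned int fclus, unsigned int end)
	{
		const struct cache *hit = NULL;
		unsigned int tail = 0;	/* end boundary of hit cache */

		for (int i = 0; i < n; i++) {
			const struct cache *p = &entries[i];

			if (p->fcluster <= fclus) {
				if (hit && p->fcluster < hit->fcluster)
					continue;
				hit = p;
				tail = hit->fcluster + hit->nr_contig;
				if (tail >= end)
					break;	/* cache covers [fclus, end] */
			} else if (p->fcluster <= end) {
				end = p->fcluster - 1;	/* stop before next cache */
				if (tail && tail >= end)
					break;	/* caches are contiguous */
			}
		}
		return end;
	}

	int main(void)
	{
		/* cached extents [0..6] and [14..22], as in the diagram */
		struct cache c[] = {
			{ .fcluster = 0,  .nr_contig = 6 },
			{ .fcluster = 14, .nr_contig = 8 },
		};

		/* search [3, 20] is clamped to 13 */
		printf("end = %u\n", lookup_end(c, 2, 3, 20));
		return 0;
	}

Note that, as in the kernel code, nr_contig counts the clusters that
follow fcluster, so an extent spans [fcluster, fcluster + nr_contig].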
From: Chi Zhiling <chizhiling@kylinos.cn>
This patch introduces a count parameter to exfat_get_cluster(): on
input, the caller specifies the desired number of clusters; on output,
it holds the length of the consecutive cluster run that was found.
This can improve read performance by reducing the number of get_block
calls in sequential read scenarios, especially with small cluster
sizes.
According to my test data, the performance improvement is
approximately 10% when reading a FAT_CHAIN file with a 512-byte
cluster size.
454 MB/s -> 511 MB/s
Suggested-by: Yuezhang Mo <Yuezhang.Mo@sony.com>
Signed-off-by: Chi Zhiling <chizhiling@kylinos.cn>
---
fs/exfat/cache.c | 56 +++++++++++++++++++++++++++++++++++++++++----
fs/exfat/exfat_fs.h | 2 +-
fs/exfat/inode.c | 3 +--
3 files changed, 53 insertions(+), 8 deletions(-)
diff --git a/fs/exfat/cache.c b/fs/exfat/cache.c
index 5cdeac014a3d..18d304d1d4cc 100644
--- a/fs/exfat/cache.c
+++ b/fs/exfat/cache.c
@@ -259,13 +259,15 @@ static inline void cache_init(struct exfat_cache_id *cid,
}
int exfat_get_cluster(struct inode *inode, unsigned int cluster,
- unsigned int *dclus, unsigned int *last_dclus)
+ unsigned int *dclus, unsigned int *count,
+ unsigned int *last_dclus)
{
struct super_block *sb = inode->i_sb;
struct exfat_inode_info *ei = EXFAT_I(inode);
struct buffer_head *bh = NULL;
struct exfat_cache_id cid;
unsigned int content, fclus;
+ unsigned int end = cluster + *count - 1;
if (ei->start_clu == EXFAT_FREE_CLUSTER) {
exfat_fs_error(sb,
@@ -279,17 +281,33 @@ int exfat_get_cluster(struct inode *inode, unsigned int cluster,
*last_dclus = *dclus;
/*
- * Don`t use exfat_cache if zero offset or non-cluster allocation
+ * This case should not exist, as exfat_map_cluster() doesn't
+ * call this routine when start_clu == EXFAT_EOF_CLUSTER.
+ * The check is retained here for completeness.
*/
- if (cluster == 0 || *dclus == EXFAT_EOF_CLUSTER)
+ if (*dclus == EXFAT_EOF_CLUSTER) {
+ *count = 0;
+ return 0;
+ }
+
+ /* If only the first cluster is needed, return now. */
+ if (fclus == cluster && *count == 1)
return 0;
cache_init(&cid, fclus, *dclus);
- exfat_cache_lookup(inode, &cid, cluster, cluster, &fclus, dclus);
+ /*
+ * Update the 'end' to exclude the next cache range, as clusters in
+ * different caches are typically not contiguous.
+ */
+ end = exfat_cache_lookup(inode, &cid, cluster, end, &fclus, dclus);
- if (fclus == cluster)
+ /* Return if the cache covers the entire range. */
+ if (cid.fcluster + cid.nr_contig >= end) {
+ *count = cid.fcluster + cid.nr_contig - cluster + 1;
return 0;
+ }
+ /* Find the first cluster we need. */
while (fclus < cluster) {
if (exfat_ent_get(sb, *dclus, &content, &bh))
return -EIO;
@@ -305,6 +323,34 @@ int exfat_get_cluster(struct inode *inode, unsigned int cluster,
cache_init(&cid, fclus, *dclus);
}
+ /*
+ * Now the cid cache contains the first cluster requested, collect
+ * the remaining clusters of this contiguous extent.
+ */
+ if (*dclus != EXFAT_EOF_CLUSTER) {
+ unsigned int clu = *dclus;
+
+ while (fclus < end) {
+ if (exfat_ent_get(sb, clu, &content, &bh))
+ return -EIO;
+ if (++clu != content)
+ break;
+ fclus++;
+ }
+ cid.nr_contig = fclus - cid.fcluster;
+ *count = fclus - cluster + 1;
+
+ /*
+ * Cache this discontiguous cluster; we'll definitely need
+ * it later.
+ */
+ if (fclus < end && content != EXFAT_EOF_CLUSTER) {
+ exfat_cache_add(inode, &cid);
+ cache_init(&cid, fclus + 1, content);
+ }
+ } else {
+ *count = 0;
+ }
brelse(bh);
exfat_cache_add(inode, &cid);
return 0;
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index e58d8eed5495..2dbed5f8ec26 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -486,7 +486,7 @@ int exfat_cache_init(void);
void exfat_cache_shutdown(void);
void exfat_cache_inval_inode(struct inode *inode);
int exfat_get_cluster(struct inode *inode, unsigned int cluster,
- unsigned int *dclus, unsigned int *last_dclus);
+ unsigned int *dclus, unsigned int *count, unsigned int *last_dclus);
/* dir.c */
extern const struct inode_operations exfat_dir_inode_operations;
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 00ff6c7ed935..e8b74185b0ad 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -160,10 +160,9 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
}
} else {
int err = exfat_get_cluster(inode, clu_offset,
- clu, &last_clu);
+ clu, count, &last_clu);
if (err)
return -EIO;
- *count = (*clu == EXFAT_EOF_CLUSTER) ? 0 : 1;
}
if (*clu == EXFAT_EOF_CLUSTER) {
--
2.43.0
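The inner loop added here, while (fclus < end) with the
'++clu != content' test, walks the FAT from the mapped cluster and
counts how many of the following clusters are physically consecutive,
which is what the returned *count reports. A simplified userspace
model (a fake in-memory FAT and hypothetical helper names, not the
kernel code):

	#include <stdio.h>

	/* fat[i] holds the cluster that follows i in the chain. */
	static unsigned int fat_get(const unsigned int *fat, unsigned int clu)
	{
		return fat[clu];
	}

	/*
	 * Count the contiguous clusters in [fclus, end] starting at
	 * data cluster 'dclus'; models the loop added by this patch.
	 */
	static unsigned int collect_run(const unsigned int *fat,
					unsigned int fclus, unsigned int end,
					unsigned int dclus)
	{
		unsigned int clu = dclus, start = fclus;

		while (fclus < end) {
			unsigned int content = fat_get(fat, clu);

			if (++clu != content)
				break;	/* chain jumps: the extent ends here */
			fclus++;
		}
		return fclus - start + 1;
	}

	int main(void)
	{
		/* Data clusters 5, 6, 7 are consecutive, then the
		 * chain jumps to 42. */
		unsigned int fat[64] = { 0 };

		fat[5] = 6;
		fat[6] = 7;
		fat[7] = 42;

		/* Request file clusters [0, 7], mapped from dclus 5:
		 * only 3 are contiguous, so count is 3. */
		printf("count = %u\n", collect_run(fat, 0, 7, 5));
		return 0;
	}

In the real code the loop also handles EXFAT_EOF_CLUSTER and caches
the discontiguous successor; the model only shows how the contiguous
run length, and hence *count, is derived.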