fs/fat/fatent.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-)
The writer and reader access the FAT32 entry without any lock, so the data
obtained by the reader may be incomplete.
Add a spin lock to solve the race condition that occurs when accessing
the FAT32 entry.
The FAT16 entry has the same issue and is handled together.
Reported-by: syzbot+d3c29ed63db6ddf8406e@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=d3c29ed63db6ddf8406e
Signed-off-by: Edward Adam Davis <eadavis@qq.com>
---
fs/fat/fatent.c | 22 ++++++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index a7061c2ad8e4..0e64875e932c 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -19,6 +19,8 @@ struct fatent_operations {
};
static DEFINE_SPINLOCK(fat12_entry_lock);
+static DEFINE_SPINLOCK(fat16_entry_lock);
+static DEFINE_SPINLOCK(fat32_entry_lock);
static void fat12_ent_blocknr(struct super_block *sb, int entry,
int *offset, sector_t *blocknr)
@@ -137,8 +139,13 @@ static int fat12_ent_get(struct fat_entry *fatent)
static int fat16_ent_get(struct fat_entry *fatent)
{
- int next = le16_to_cpu(*fatent->u.ent16_p);
+ int next;
+
+ spin_lock(&fat16_entry_lock);
+ next = le16_to_cpu(*fatent->u.ent16_p);
WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
+ spin_unlock(&fat16_entry_lock);
+
if (next >= BAD_FAT16)
next = FAT_ENT_EOF;
return next;
@@ -146,8 +153,13 @@ static int fat16_ent_get(struct fat_entry *fatent)
static int fat32_ent_get(struct fat_entry *fatent)
{
- int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
+ int next;
+
+ spin_lock(&fat32_entry_lock);
+ next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
+ spin_unlock(&fat32_entry_lock);
+
if (next >= BAD_FAT32)
next = FAT_ENT_EOF;
return next;
@@ -180,15 +192,21 @@ static void fat16_ent_put(struct fat_entry *fatent, int new)
if (new == FAT_ENT_EOF)
new = EOF_FAT16;
+ spin_lock(&fat16_entry_lock);
*fatent->u.ent16_p = cpu_to_le16(new);
+ spin_unlock(&fat16_entry_lock);
+
mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
static void fat32_ent_put(struct fat_entry *fatent, int new)
{
WARN_ON(new & 0xf0000000);
+ spin_lock(&fat32_entry_lock);
new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
*fatent->u.ent32_p = cpu_to_le32(new);
+ spin_unlock(&fat32_entry_lock);
+
mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
--
2.43.0
On Mon, Jul 28, 2025 at 07:37:02PM +0800, Edward Adam Davis wrote: > The writer and reader access FAT32 entry without any lock, so the data > obtained by the reader is incomplete. Could you be more specific? "Incomplete" in which sense? > Add spin lock to solve the race condition that occurs when accessing > FAT32 entry. Which race condition would that be? > FAT16 entry has the same issue and is handled together. FWIW, I strongly suspect that * "issue" with FAT32 is a red herring coming from mindless parroting of dumb tool output * issue with FAT16 just might be real, if architecture-specific. If 16bit stores are done as 32bit read-modify-write, we might need some serialization. Assuming we still have such architectures, that is - alpha used to be one, but support for pre-BWX models got dropped. Sufficiently ancient ARM?
On Mon, Jul 28, 2025 at 05:04:45PM +0100, Al Viro wrote: > On Mon, Jul 28, 2025 at 07:37:02PM +0800, Edward Adam Davis wrote: > > The writer and reader access FAT32 entry without any lock, so the data > > obtained by the reader is incomplete. > > Could you be more specific? "Incomplete" in which sense? > > > Add spin lock to solve the race condition that occurs when accessing > > FAT32 entry. > > Which race condition would that be? > > > FAT16 entry has the same issue and is handled together. > > FWIW, I strongly suspect that > * "issue" with FAT32 is a red herring coming from mindless parroting > of dumb tool output > * issue with FAT16 just might be real, if architecture-specific. > If 16bit stores are done as 32bit read-modify-write, we might need some > serialization. Assuming we still have such architectures, that is - > alpha used to be one, but support for pre-BWX models got dropped. > Sufficiently ancient ARM? Note that FAT12 situation is really different - we not just have an inevitable read-modify-write for stores (half-byte access), we are not even guaranteed that byte and half-byte will be within the same cacheline, so cmpxchg is not an option; we have to use a spinlock there.
On Mon, 28 Jul 2025 17:35:26 +0100, Al Viro wrote: > On Mon, Jul 28, 2025 at 05:04:45PM +0100, Al Viro wrote: > > On Mon, Jul 28, 2025 at 07:37:02PM +0800, Edward Adam Davis wrote: > > > The writer and reader access FAT32 entry without any lock, so the data > > > obtained by the reader is incomplete. > > > > Could you be more specific? "Incomplete" in which sense? Because ent32_p and ent12_p are in the same union [1], their addresses are the same, and they both have the "read/write race condition" problem, so I used the same method as [2] to add a spinlock to solve it. > > > > > Add spin lock to solve the race condition that occurs when accessing > > > FAT32 entry. > > > > Which race condition would that be? data-race in fat32_ent_get / fat32_ent_put, detail: see [3] > > > > > FAT16 entry has the same issue and is handled together. > > > > FWIW, I strongly suspect that > > * "issue" with FAT32 is a red herring coming from mindless parroting > > of dumb tool output > > * issue with FAT16 just might be real, if architecture-specific. > > If 16bit stores are done as 32bit read-modify-write, we might need some > > serialization. Assuming we still have such architectures, that is - > > alpha used to be one, but support for pre-BWX models got dropped. > > Sufficiently ancient ARM? > > Note that FAT12 situation is really different - we not just have an inevitable > read-modify-write for stores (half-byte access), we are not even guaranteed that > byte and half-byte will be within the same cacheline, so cmpxchg is not an > option; we have to use a spinlock there. I think for FAT12 they are always in the same cacheline, the offset of the member ent12_p in struct fat_entry is 4 bytes, and no matter x86-64 or arm64, the regular 64-byte cacheline is enough to ensure that they are in the same cacheline. 
[1] 345 struct fat_entry { 1 int entry; 2 union { 3 u8 *ent12_p[2]; 4 __le16 *ent16_p; 5 __le32 *ent32_p; 6 } u; 7 int nr_bhs; 8 struct buffer_head *bhs[2]; 9 struct inode *fat_inode; 10 }; [2] 98283bb49c6c ("fat: Fix the race of read/write the FAT12 entry") [3] https://syzkaller.appspot.com/bug?extid=d3c29ed63db6ddf8406e BR, Edward
On Tue, Jul 29, 2025 at 12:17:21PM +0800, Edward Adam Davis wrote: > > > > > > Could you be more specific? "Incomplete" in which sense? > Because ent32_p and ent12_p are in the same union [1], their addresses are > the same, and they both have the "read/write race condition" problem, so I > used the same method as [2] to add a spinlock to solve it. What the hell? ent32_p and ent12_p are _pointers_; whatever races there might be are about the objects they are pointing to. > > > Which race condition would that be? > data-race in fat32_ent_get / fat32_ent_put, detail: see [3] References to KCSAN output are worthless, unless you can explain what the actual problem is (no, "tool is not quiet" is *NOT* a problem; it might or might not be a symptom of one). > > Note that FAT12 situation is really different - we not just have an inevitable > > read-modify-write for stores (half-byte access), we are not even guaranteed that > > byte and half-byte will be within the same cacheline, so cmpxchg is not an > > option; we have to use a spinlock there. > I think for FAT12 they are always in the same cacheline, the offset of the > member ent12_p in struct fat_entry is 4 bytes, and no matter x86-64 or arm64, > the regular 64-byte cacheline is enough to ensure that they are in the same > cacheline. Have you actually read what these functions are doing? _Pointers_ are not problematic at all; neither ..._ent_get nor ..._ent_put are changing those, for crying out loud! If KCSAN is warning about those (which I sincerely doubt), you need to report a bug in KCSAN. I would *really* recommend verifying that first. FAT12 problem is that FAT entries being accessed there are 12-bit, packed in pairs into an array of 3-byte values. Have you actually read what the functions are doing? There we *must* serialize the access to bytes that have 4 bits from one entry and 4 from another - there's no such thing as atomically update half a byte; it has to be read, modified and stored back. 
If two threads try to do that to upper and lower halves of the same byte at the same time, the value will be corrupted. The same *might* happen to FAT16 on (sub)architectures we no longer support; there is no way in hell we run into anything of that sort for (aligned) 32bit stores. Never had been. And neither aligned 16bit nor aligned 32bit ever had a problem with read seeing a state with only a part of value having been written.
On Tue, Jul 29, 2025 at 05:53:23AM +0100, Al Viro wrote: > FAT12 problem is that FAT entries being accessed there are 12-bit, packed in > pairs into an array of 3-byte values. PS: they most definitely can cross the cacheline boundaries - cacheline size is not going to be a multiple of 3 on anything realistic. Hell, they can cross *block* boundaries (which is why for FAT12 that code is using two separate pointers - most of the time they point to adjacent bytes, but if one byte is in one block and the next one is in another...; that's what that if in fat12_ent_set_ptr() is doing)... We could map the entire array contiguously (and it might simplify some of the logics there), but it's not going to avoid the problem with a single entry occupying a byte in one cacheline and half of a byte in another. So nothing like cmpxchg would suffice - we need a spinlock for FAT12 case.
On Tue, 29 Jul 2025 05:53:23 +0100, Al Viro wrote: >FAT12 problem is that FAT entries being accessed there are 12-bit, packed in >pairs into an array of 3-byte values. Have you actually read what the functions I learned it. I didn't really understand this code before, but after your hint, I understand that two 12-bit FAT12 entries are packed into 3 bytes, and the cacheline size will not be an integer multiple of 3, so a FAT12 entry may cross a cacheline boundary. >are doing? There we *must* serialize the access to bytes that have 4 bits >from one entry and 4 from another - there's no such thing as atomically >update half a byte; it has to be read, modified and stored back. If two >threads try to do that to upper and lower halves of the same byte at the >same time, the value will be corrupted. I will change the fix to READ_ONCE/WRITE_ONCE later. BR, Edward
syzbot reports data-race in fat32_ent_get/fat32_ent_put.
CPU0(Task A) CPU1(Task B)
==== ====
vfs_write
new_sync_write
generic_file_write_iter
fat_write_begin
block_write_begin vfs_statfs
fat_get_block statfs_by_dentry
fat_add_cluster fat_statfs
fat_ent_write fat_count_free_clusters
fat32_ent_put fat32_ent_get
Task A's write operation on CPU0 and Task B's read operation on CPU1 occur
simultaneously, generating a race condition.
Add READ_ONCE/WRITE_ONCE to solve the race condition that occurs when
accessing the FAT32 entry.
Reported-by: syzbot+d3c29ed63db6ddf8406e@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=d3c29ed63db6ddf8406e
Signed-off-by: Edward Adam Davis <eadavis@qq.com>
---
V1 -> V2: using READ/WRITE_ONCE to fix
fs/fat/fatent.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 1db348f8f887..a9eecd090d91 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -146,8 +146,8 @@ static int fat16_ent_get(struct fat_entry *fatent)
static int fat32_ent_get(struct fat_entry *fatent)
{
- int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
- WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
+ int next = le32_to_cpu(READ_ONCE(*fatent->u.ent32_p)) & 0x0fffffff;
+ WARN_ON((unsigned long)READ_ONCE(fatent->u.ent32_p) & (4 - 1));
if (next >= BAD_FAT32)
next = FAT_ENT_EOF;
return next;
@@ -187,8 +187,8 @@ static void fat16_ent_put(struct fat_entry *fatent, int new)
static void fat32_ent_put(struct fat_entry *fatent, int new)
{
WARN_ON(new & 0xf0000000);
- new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
- *fatent->u.ent32_p = cpu_to_le32(new);
+ new |= le32_to_cpu(READ_ONCE(*fatent->u.ent32_p)) & ~0x0fffffff;
+ WRITE_ONCE(*fatent->u.ent32_p, cpu_to_le32(new));
mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
--
2.43.0
On Tue, Jul 29, 2025 at 02:17:10PM +0800, Edward Adam Davis wrote: > syzbot reports data-race in fat32_ent_get/fat32_ent_put. > > CPU0(Task A) CPU1(Task B) > ==== ==== > vfs_write > new_sync_write > generic_file_write_iter > fat_write_begin > block_write_begin vfs_statfs > fat_get_block statfs_by_dentry > fat_add_cluster fat_statfs > fat_ent_write fat_count_free_clusters > fat32_ent_put fat32_ent_get > > Task A's write operation on CPU0 and Task B's read operation on CPU1 occur > simultaneously, generating an race condition. > > Add READ/WRITE_ONCE to solve the race condition that occurs when accessing > FAT32 entry. Solve it in which sense? fat32_ent_get() and fat32_ent_put() are already atomic wrt each other; neither this nor your previous variant change anything whatsoever. And if you are talking about the results of *multiple* fat32_ent_get(), with some assumptions made by fat_count_free_clusters() that somehow get screwed by the modifications from fat_add_cluster(), your patch does not prevent any of that (not that you explained what kind of assumptions would those be). Long story short - accesses to individual entries are already atomic wrt each other; the fact that they happen simultaneously _might_ be a symptom of insufficient serialization, but neither version of your patch resolves that in any way - it just prevents the tool from reporting its suspicions. It does not give fat_count_free_clusters() a stable state of the entire table, assuming it needs one. It might, at that - I hadn't looked into that code since way back. But unless I'm missing something, the only thing your patch does is making your (rather blunt) tool STFU. If there is a race, explain what sequence of events leads to incorrect behaviour and explain why your proposed change prevents that incorrect behaviour. 
Note that if that behaviour is "amount of free space reported by statfs(2) depends upon how far did the ongoing write(2) get", it is *not* incorrect - that's exactly what the userland has asked for. If it's "statfs(2) gets confused into reporting an amount of free space that wouldn't have been accurate for any moment of time (or, worse yet, crashes, etc.)" - yes, that would be a problem, but it could not be solved by preventing simultaneous access to *single* entries, if it happens at all.
On Tue, Jul 29, 2025 at 10:35:58AM +0100, Al Viro wrote: > On Tue, Jul 29, 2025 at 02:17:10PM +0800, Edward Adam Davis wrote: > > syzbot reports data-race in fat32_ent_get/fat32_ent_put. > > > > CPU0(Task A) CPU1(Task B) > > ==== ==== > > vfs_write > > new_sync_write > > generic_file_write_iter > > fat_write_begin > > block_write_begin vfs_statfs > > fat_get_block statfs_by_dentry > > fat_add_cluster fat_statfs Sorry, no - you've missed an intermediate fat_chain_add() in the call chain here. And that makes your race a non-issue. > > fat_ent_write fat_count_free_clusters > > fat32_ent_put fat32_ent_get fat_count_free_clusters() doesn't care about exact value of entry; the only thing that matters is whether it's equal to FAT_ENT_FREE. Actualy changes of that predicate (i.e. allocating and freeing of clusters) still happens under fat_lock() - nothing has changed in that area. *That* is not happening simultaneously with reads in fat_count_free_clusters(). It's attaching the new element to the end of chain that is outside of fat_lock(). And that operation does not affect that predicate at all; it changes the value of the entry for last cluster of file (FAT_ENT_EOF) to the number of cluster being added to the file. Neither is equal to FAT_ENT_FREE, so there's no problem - value you get ->ent_get() is affected by that store, the value of ops->ent_get(&fatent) == FAT_ENT_FREE isn't. Probably worth a comment in fat_chain_add() re "this store is safe outside of fat_lock() because of <list of reasons>". Changes to this chain (both extending and truncating it) are excluded by <lock> and as far as allocator is concerned, we are not changing the state of any cluster. Basically, FAT encodes both the free block map (entry[block] == FAT_ENT_FREE <=> block is not in use) and linked lists of blocks for individual files - they store the number of file's first block within directory entry and use FAT for forwards pointers in the list. 
FAT_ENT_EOF is used for "no more blocks, it's the end of file". Allocator and fat_count_free_clusters care only about the free block map part of that thing; access to file contents - the linked list for that file.
On Tue, 29 Jul 2025 11:06:35 +0100, Al Viro wrote: syzbot reports data-race in fat32_ent_get/fat32_ent_put. CPU0(Task A) CPU1(Task B) ==== ==== vfs_write new_sync_write generic_file_write_iter fat_write_begin block_write_begin fat_get_block vfs_statfs fat_add_cluster statfs_by_dentry fat_chain_add fat_statfs fat_ent_write fat_count_free_clusters fat32_ent_put fat32_ent_get In fat_add_cluster(), fat_alloc_clusters() retrieves a free cluster and marks the entry with a value of FAT_ENT_EOF, protected by lock_fat(). There is no lock protection in fat_chain_add(). When fat_ent_write() writes the last entry to new_dclus, this has no effect on fat_count_free_clusters() in the statfs operation. The last entry is not FAT_ENT_FREE before or after the write. Therefore, the race condition reported here is invalid. BR, Edward
Edward Adam Davis <eadavis@qq.com> writes: > syzbot reports data-race in fat32_ent_get/fat32_ent_put. > > CPU0(Task A) CPU1(Task B) > ==== ==== > vfs_write > new_sync_write > generic_file_write_iter > fat_write_begin > block_write_begin vfs_statfs > fat_get_block statfs_by_dentry > fat_add_cluster fat_statfs > fat_ent_write fat_count_free_clusters > fat32_ent_put fat32_ent_get > > Task A's write operation on CPU0 and Task B's read operation on CPU1 occur > simultaneously, generating an race condition. > > Add READ/WRITE_ONCE to solve the race condition that occurs when accessing > FAT32 entry. I mentioned about READ/WRITE_ONCE though, it was about possibility. Do we really need to add those? Can you clarify more? > diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c > index 1db348f8f887..a9eecd090d91 100644 > --- a/fs/fat/fatent.c > +++ b/fs/fat/fatent.c > @@ -146,8 +146,8 @@ static int fat16_ent_get(struct fat_entry *fatent) > > static int fat32_ent_get(struct fat_entry *fatent) > { > - int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff; > - WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1)); > + int next = le32_to_cpu(READ_ONCE(*fatent->u.ent32_p)) & 0x0fffffff; > + WARN_ON((unsigned long)READ_ONCE(fatent->u.ent32_p) & (4 - 1)); Adding READ_ONCE() for pointer is broken. > if (next >= BAD_FAT32) > next = FAT_ENT_EOF; > return next; > @@ -187,8 +187,8 @@ static void fat16_ent_put(struct fat_entry *fatent, int new) > static void fat32_ent_put(struct fat_entry *fatent, int new) > { > WARN_ON(new & 0xf0000000); > - new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff; > - *fatent->u.ent32_p = cpu_to_le32(new); > + new |= le32_to_cpu(READ_ONCE(*fatent->u.ent32_p)) & ~0x0fffffff; > + WRITE_ONCE(*fatent->u.ent32_p, cpu_to_le32(new)); > mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode); > } Also READ_ONCE() is really required here? -- OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Edward Adam Davis <eadavis@qq.com> writes: > The writer and reader access FAT32 entry without any lock, so the data > obtained by the reader is incomplete. > > Add spin lock to solve the race condition that occurs when accessing > FAT32 entry. > > FAT16 entry has the same issue and is handled together. What is the real issue? Counting free entries doesn't care whether EOF (0xffffff) or allocate (0x000068), so it should be same result on both case. We may want to use READ_ONCE/WRITE_ONCE though, I can't see the reason to add spin_lock. Thanks. > Reported-by: syzbot+d3c29ed63db6ddf8406e@syzkaller.appspotmail.com > Closes: https://syzkaller.appspot.com/bug?extid=d3c29ed63db6ddf8406e > Signed-off-by: Edward Adam Davis <eadavis@qq.com> > --- > fs/fat/fatent.c | 22 ++++++++++++++++++++-- > 1 file changed, 20 insertions(+), 2 deletions(-) > > diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c > index a7061c2ad8e4..0e64875e932c 100644 > --- a/fs/fat/fatent.c > +++ b/fs/fat/fatent.c > @@ -19,6 +19,8 @@ struct fatent_operations { > }; > > static DEFINE_SPINLOCK(fat12_entry_lock); > +static DEFINE_SPINLOCK(fat16_entry_lock); > +static DEFINE_SPINLOCK(fat32_entry_lock); > > static void fat12_ent_blocknr(struct super_block *sb, int entry, > int *offset, sector_t *blocknr) > @@ -137,8 +139,13 @@ static int fat12_ent_get(struct fat_entry *fatent) > > static int fat16_ent_get(struct fat_entry *fatent) > { > - int next = le16_to_cpu(*fatent->u.ent16_p); > + int next; > + > + spin_lock(&fat16_entry_lock); > + next = le16_to_cpu(*fatent->u.ent16_p); > WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1)); > + spin_unlock(&fat16_entry_lock); > + > if (next >= BAD_FAT16) > next = FAT_ENT_EOF; > return next; > @@ -146,8 +153,13 @@ static int fat16_ent_get(struct fat_entry *fatent) > > static int fat32_ent_get(struct fat_entry *fatent) > { > - int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff; > + int next; > + > + spin_lock(&fat32_entry_lock); > + next = le32_to_cpu(*fatent->u.ent32_p) 
& 0x0fffffff; > WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1)); > + spin_unlock(&fat32_entry_lock); > + > if (next >= BAD_FAT32) > next = FAT_ENT_EOF; > return next; > @@ -180,15 +192,21 @@ static void fat16_ent_put(struct fat_entry *fatent, int new) > if (new == FAT_ENT_EOF) > new = EOF_FAT16; > > + spin_lock(&fat16_entry_lock); > *fatent->u.ent16_p = cpu_to_le16(new); > + spin_unlock(&fat16_entry_lock); > + > mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode); > } > > static void fat32_ent_put(struct fat_entry *fatent, int new) > { > WARN_ON(new & 0xf0000000); > + spin_lock(&fat32_entry_lock); > new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff; > *fatent->u.ent32_p = cpu_to_le32(new); > + spin_unlock(&fat32_entry_lock); > + > mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode); > } -- OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
On Tue, 29 Jul 2025 00:10:31 +0900, OGAWA Hirofumi wrote: >> The writer and reader access FAT32 entry without any lock, so the data >> obtained by the reader is incomplete. >> >> Add spin lock to solve the race condition that occurs when accessing >> FAT32 entry. >> >> FAT16 entry has the same issue and is handled together. > >What is the real issue? Counting free entries doesn't care whether EOF >(0xffffff) or allocate (0x000068), so it should be same result on both >case. > >We may want to use READ_ONCE/WRITE_ONCE though, I can't see the reason >to add spin_lock. Because ent32_p and ent12_p are in the same union [1], their addresses are the same, and they both have the "read/write race condition" problem, so I used the same method as [2] to add a spinlock to solve it. [1] 345 struct fat_entry { 1 int entry; 2 union { 3 u8 *ent12_p[2]; 4 __le16 *ent16_p; 5 __le32 *ent32_p; 6 } u; 7 int nr_bhs; 8 struct buffer_head *bhs[2]; 9 struct inode *fat_inode; 10 }; [2] 98283bb49c6c ("fat: Fix the race of read/write the FAT12 entry") BR, Edward
© 2016 - 2025 Red Hat, Inc.