 fs/hfsplus/btree.c      | 69 +++++++++++++++++++++++++++++++++++++++++
 fs/hfsplus/hfsplus_fs.h |  1 +
 fs/hfsplus/super.c      |  7 +++++
 3 files changed, 77 insertions(+)
Add bitmap validation during HFS+ btree open to detect corruption where
node 0 (header node) is not marked allocated. When corruption is detected,
mount the filesystem read-only instead of failing the mount, allowing data
recovery from corrupted images.
The bitmap validation checks the node allocation bitmap in the btree header
node (record #2) and verifies that bit 7 (MSB) of the first byte is set,
indicating node 0 is allocated. This is a fundamental invariant that must
always hold.
Implementation details:
- Add 'btree_bitmap_corrupted' flag to 'struct hfsplus_sb_info' to track
corruption at superblock level
- Create and use 'hfsplus_validate_btree_bitmap()' to return bool
indicating corruption
- Check corruption flag in 'hfsplus_fill_super()' after all btree opens
- Mount read-only with consolidated warning message when corruption
detected
- Preserve existing btree validation logic and error handling patterns
This prevents kernel panics from corrupted syzkaller-generated HFS+ images
while enabling data recovery by mounting read-only instead of failing.
Reported-by: syzbot+1c8ff72d0cd8a50dfeaa@syzkaller.appspotmail.com
Link: https://syzkaller.appspot.com/bug?extid=1c8ff72d0cd8a50dfeaa
Link: https://lore.kernel.org/all/784415834694f39902088fa8946850fc1779a318.camel@ibm.com/
Signed-off-by: Shardul Bankar <shardul.b@mpiricsoftware.com>
---
v2 changes:
- Fix compiler warning about comparing u16 bitmap_off with PAGE_SIZE which
can exceed u16 maximum on some architectures
- Cast bitmap_off to unsigned int for the PAGE_SIZE comparison to avoid
tautological constant-out-of-range comparison warning.
- Link: https://lore.kernel.org/oe-kbuild-all/202601251011.kJUhBF3P-lkp@intel.com/
fs/hfsplus/btree.c | 69 +++++++++++++++++++++++++++++++++++++++++
fs/hfsplus/hfsplus_fs.h | 1 +
fs/hfsplus/super.c | 7 +++++
3 files changed, 77 insertions(+)
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 229f25dc7c49..0fb8c7c06afe 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -129,6 +129,68 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
return clump_size;
}
+/*
+ * hfsplus_validate_btree_bitmap - check that node 0 is marked allocated
+ * @tree: the B-tree being opened
+ * @head: pointer to the header record inside the mapped first page of
+ *        the header node (it sits immediately after the node descriptor)
+ *
+ * Record #2 of the header node is the node allocation bitmap.  Bit 7
+ * (MSB) of its first byte corresponds to node 0 -- the header node
+ * itself -- which must always be allocated.
+ *
+ * Returns true if corruption is detected (node 0's bit is clear).
+ * Validation is skipped (returns false) when the offsets table or the
+ * bitmap does not lie within the mapped first page of the node.
+ */
+static bool hfsplus_validate_btree_bitmap(struct hfs_btree *tree,
+					  struct hfs_btree_header_rec *head)
+{
+	unsigned int node_size = tree->node_size;
+	unsigned int rec_off_tbl_off;
+	unsigned int bitmap_off, next_rec_off;
+	__be16 rec_data[2];
+	u8 *node_base;
+
+	/* Start of the header node: head sits right after the descriptor */
+	node_base = (u8 *)head - sizeof(struct hfs_bnode_desc);
+
+	/*
+	 * The record offsets table grows backwards from the end of the
+	 * node; the entry for record i is at node_size - 2 * (i + 1).
+	 * We read the offsets of record 2 (the bitmap) and record 3
+	 * (which delimits the bitmap's length) in one go.
+	 */
+	if (node_size < 4 * sizeof(__be16))
+		return false;	/* node too small to hold the table */
+	rec_off_tbl_off = node_size - 4 * sizeof(__be16);
+
+	/* Only the first page of the node is mapped; skip validation
+	 * if the offsets table lies beyond it.
+	 */
+	if (rec_off_tbl_off + sizeof(rec_data) > PAGE_SIZE)
+		return false;
+
+	memcpy(rec_data, node_base + rec_off_tbl_off, sizeof(rec_data));
+	next_rec_off = be16_to_cpu(rec_data[0]);	/* record 3 offset */
+	bitmap_off = be16_to_cpu(rec_data[1]);		/* record 2 offset */
+
+	/*
+	 * The bitmap must be non-empty (record 3 starts strictly after
+	 * record 2), begin after the node descriptor, and lie inside
+	 * both the node and the mapped first page.
+	 */
+	if (next_rec_off <= bitmap_off ||
+	    bitmap_off < sizeof(struct hfs_bnode_desc) ||
+	    bitmap_off >= node_size ||
+	    bitmap_off >= PAGE_SIZE)
+		return false;	/* skip validation if bitmap not accessible */
+
+	/* Node 0 is the MSB of the first bitmap byte */
+	return !(node_base[bitmap_off] & 0x80);
+}
+
/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
@@ -176,6 +238,13 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
tree->max_key_len = be16_to_cpu(head->max_key_len);
tree->depth = be16_to_cpu(head->depth);
+ /* Validate bitmap: node 0 must be marked allocated */
+ if (hfsplus_validate_btree_bitmap(tree, head)) {
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+
+ sbi->btree_bitmap_corrupted = true;
+ }
+
/* Verify the tree and set the correct compare function */
switch (id) {
case HFSPLUS_EXT_CNID:
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 45fe3a12ecba..b925878333d4 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -154,6 +154,7 @@ struct hfsplus_sb_info {
int part, session;
unsigned long flags;
+ bool btree_bitmap_corrupted; /* Bitmap corruption detected during btree open */
int work_queued; /* non-zero delayed work is queued */
struct delayed_work sync_work; /* FS sync delayed work */
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index aaffa9e060a0..b3facd23d758 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -534,6 +534,13 @@ static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
}
atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
}
+
+ /* Check for bitmap corruption and mount read-only if detected */
+ if (sbi->btree_bitmap_corrupted) {
+ pr_warn("HFS+ (device %s): btree bitmap corruption detected, mounting read-only; run fsck.hfsplus to repair\n",
+ sb->s_id);
+ sb->s_flags |= SB_RDONLY;
+ }
sb->s_xattr = hfsplus_xattr_handlers;
inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
--
2.34.1
On Sun, 2026-01-25 at 08:37 +0530, Shardul Bankar wrote:
> Add bitmap validation during HFS+ btree open to detect corruption where
> node 0 (header node) is not marked allocated. When corruption is detected,
> mount the filesystem read-only instead of failing the mount, allowing data
> recovery from corrupted images.
>
> The bitmap validation checks the node allocation bitmap in the btree header
> node (record #2) and verifies that bit 7 (MSB) of the first byte is set,
> indicating node 0 is allocated. This is a fundamental invariant that must
> always hold.
>
> Implementation details:
> - Add 'btree_bitmap_corrupted' flag to 'struct hfsplus_sb_info' to track
> corruption at superblock level
> - Create and use 'hfsplus_validate_btree_bitmap()' to return bool
> indicating corruption
> - Check corruption flag in 'hfsplus_fill_super()' after all btree opens
> - Mount read-only with consolidated warning message when corruption
> detected
> - Preserve existing btree validation logic and error handling patterns
>
> This prevents kernel panics from corrupted syzkaller-generated HFS+ images
> while enabling data recovery by mounting read-only instead of failing.
>
> Reported-by: syzbot+1c8ff72d0cd8a50dfeaa@syzkaller.appspotmail.com
> Link: https://syzkaller.appspot.com/bug?extid=1c8ff72d0cd8a50dfeaa
> Link: https://lore.kernel.org/all/784415834694f39902088fa8946850fc1779a318.camel@ibm.com/
> Signed-off-by: Shardul Bankar <shardul.b@mpiricsoftware.com>
> ---
> v2 changes:
> - Fix compiler warning about comparing u16 bitmap_off with PAGE_SIZE which
> can exceed u16 maximum on some architectures
> - Cast bitmap_off to unsigned int for the PAGE_SIZE comparison to avoid
> tautological constant-out-of-range comparison warning.
> - Link: https://lore.kernel.org/oe-kbuild-all/202601251011.kJUhBF3P-lkp@intel.com/
>
> fs/hfsplus/btree.c | 69 +++++++++++++++++++++++++++++++++++++++++
> fs/hfsplus/hfsplus_fs.h | 1 +
> fs/hfsplus/super.c | 7 +++++
> 3 files changed, 77 insertions(+)
>
> diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
> index 229f25dc7c49..0fb8c7c06afe 100644
> --- a/fs/hfsplus/btree.c
> +++ b/fs/hfsplus/btree.c
> @@ -129,6 +129,68 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
> return clump_size;
> }
>
> +/*
> + * Validate that node 0 (header node) is marked allocated in the bitmap.
> + * This is a fundamental invariant - node 0 must always be allocated.
> + * Returns true if corruption is detected (node 0 bit is unset).
> + * Note: head must be from kmap_local_page(page) that is still mapped.
This function works with a pointer to a memory region. It doesn't need to explain
the details of how this pointer has been prepared.
> + * This function accesses the page through head pointer, so it must be
> + * called before kunmap_local(head).
> + */
> +static bool hfsplus_validate_btree_bitmap(struct hfs_btree *tree,
> + struct hfs_btree_header_rec *head)
I don't think that we need struct hfs_btree_header_rec here. We don't use any
information from the header. We simply need to provide a pointer to the node itself,
and it can be a void * or u8 * pointer.
> +{
> + u8 *page_base;
> + u16 rec_off_tbl_off;
> + __be16 rec_data[2];
> + u16 bitmap_off, bitmap_len;
> + u8 *bitmap_ptr;
> + u8 first_byte;
> + unsigned int node_size = tree->node_size;
> +
> + /*
> + * Get base page pointer. head points to:
> + * kmap_local_page(page) + sizeof(struct hfs_bnode_desc)
Forget about kmap_local_page(page).
> + */
> + page_base = (u8 *)head - sizeof(struct hfs_bnode_desc);
What's the point of providing a pointer to the header record if you need the start of
the node? This computation could be a source of bugs. Simply pass a pointer to the
node's start to this function.
> +
> + /*
> + * Calculate offset to record 2 entry in record offset table.
> + * Record offsets are at end of node: node_size - (rec_num + 2) * 2
> + * Record 2: (2+2)*2 = 8 bytes from end
> + */
> + rec_off_tbl_off = node_size - (2 + 2) * 2;
Don't reinvent the wheel. The HFS+ code already has logic for reading the offsets table.
What if the node size is bigger than 4K (for example, 8K)?
I never accept a patch with hardcoded constants. Please, use named constants.
But you need to reuse the existing logic for working with b-tree nodes.
> +
> + /* Only validate if record offset table is on the first page */
> + if (rec_off_tbl_off + 4 > node_size || rec_off_tbl_off + 4 > PAGE_SIZE)
> + return false; /* Skip validation if offset table not on first page */
So, we skip bitmap validation if a node is bigger than 4K? I cannot accept such
code.
> +
> + /* Read record 2 offset table entry (length and offset, both u16) */
> + memcpy(rec_data, page_base + rec_off_tbl_off, 4);
Ditto.
> + bitmap_off = be16_to_cpu(rec_data[1]);
> + bitmap_len = be16_to_cpu(rec_data[0]) - bitmap_off;
I assume we have metadata structure for offsets table record. No?
> +
> + /*
> + * Validate bitmap offset is within node and after bnode_desc.
> + * Also ensure bitmap is on the first page.
> + */
> + if (bitmap_len == 0 ||
> + bitmap_off < sizeof(struct hfs_bnode_desc) ||
> + bitmap_off >= node_size ||
> + (unsigned int) bitmap_off >= PAGE_SIZE)
> + return false; /* Skip validation if bitmap not accessible */
First of all, it makes sense to introduce the static inline function for the
check.
Secondly, we need to be ready to validate the bitmap for any reasonable node
size.
> +
> + /* Read first byte of bitmap */
> + bitmap_ptr = page_base + bitmap_off;
> + first_byte = bitmap_ptr[0];
The bitmap_ptr is completely enough; *bitmap_ptr gives you access to the
first byte.
> +
> + /* Check if node 0's bit (bit 7, MSB) is set */
> + if (!(first_byte & 0x80))
What's about test_bit(nr, addr) here?
> + return true; /* Corruption detected */
> +
> + return false;
> +}
> +
> /* Get a reference to a B*Tree and do some initial checks */
> struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
> {
> @@ -176,6 +238,13 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
> tree->max_key_len = be16_to_cpu(head->max_key_len);
> tree->depth = be16_to_cpu(head->depth);
>
> + /* Validate bitmap: node 0 must be marked allocated */
> + if (hfsplus_validate_btree_bitmap(tree, head)) {
> + struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
> +
> + sbi->btree_bitmap_corrupted = true;
Please, see my comment about this field.
> + }
> +
> /* Verify the tree and set the correct compare function */
> switch (id) {
> case HFSPLUS_EXT_CNID:
> diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
> index 45fe3a12ecba..b925878333d4 100644
> --- a/fs/hfsplus/hfsplus_fs.h
> +++ b/fs/hfsplus/hfsplus_fs.h
> @@ -154,6 +154,7 @@ struct hfsplus_sb_info {
>
> int part, session;
> unsigned long flags;
> + bool btree_bitmap_corrupted; /* Bitmap corruption detected during btree open */
This field is completely unnecessary. The hfs_btree_open() can return -EROFS
error code and hfsplus_fill_super() can process it.
>
> int work_queued; /* non-zero delayed work is queued */
> struct delayed_work sync_work; /* FS sync delayed work */
> diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
> index aaffa9e060a0..b3facd23d758 100644
> --- a/fs/hfsplus/super.c
> +++ b/fs/hfsplus/super.c
> @@ -534,6 +534,13 @@ static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
> }
> atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
> }
> +
> + /* Check for bitmap corruption and mount read-only if detected */
> + if (sbi->btree_bitmap_corrupted) {
> + pr_warn("HFS+ (device %s): btree bitmap corruption detected, mounting read-only; run fsck.hfsplus to repair\n",
We don't need to mention "HFS+" here. I prefer to have two messages: (1)
information about the bitmap corruption (it would be good to know which tree is
corrupted), and (2) a recommendation to run the FSCK tool.
Thanks,
Slava.
> + sb->s_id);
> + sb->s_flags |= SB_RDONLY;
> + }
> sb->s_xattr = hfsplus_xattr_handlers;
>
> inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
On Mon, 2026-01-26 at 22:42 +0000, Viacheslav Dubeyko wrote:
> On Sun, 2026-01-25 at 08:37 +0530, Shardul Bankar wrote:
> >
> > @@ -176,6 +238,13 @@ struct hfs_btree *hfs_btree_open(struct
> > super_block *sb, u32 id)
> > tree->max_key_len = be16_to_cpu(head->max_key_len);
> > tree->depth = be16_to_cpu(head->depth);
> >
> > + /* Validate bitmap: node 0 must be marked allocated */
> > + if (hfsplus_validate_btree_bitmap(tree, head)) {
> > + struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
> > +
> > + sbi->btree_bitmap_corrupted = true;
>
> Please, see my comment about this field.
>
> > + }
> > +
> > /* Verify the tree and set the correct compare function */
> > switch (id) {
> > case HFSPLUS_EXT_CNID:
> > diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
> > index 45fe3a12ecba..b925878333d4 100644
> > --- a/fs/hfsplus/hfsplus_fs.h
> > +++ b/fs/hfsplus/hfsplus_fs.h
> > @@ -154,6 +154,7 @@ struct hfsplus_sb_info {
> >
> > int part, session;
> > unsigned long flags;
> > + bool btree_bitmap_corrupted; /* Bitmap corruption
> > detected during btree open */
>
> This field is completely unnecessary. The hfs_btree_open() can return
> -EROFS
> error code and hfsplus_fill_super() can process it.
>
Hi Slava,
Thanks for the review.
Regarding the suggestion to convert hfs_btree_open() to return
ERR_PTR(-EROFS):
I reviewed this, but I cannot use ERR_PTR for the corruption case
because it would defeat the purpose of the patch (data recovery).
If hfs_btree_open() returns -EROFS, the caller hfsplus_fill_super()
would receive the error code but would have no tree object to work
with. Without the B-tree structure, we cannot mount the filesystem —
even read-only — making data recovery impossible.
To support recovery, hfs_btree_open() must return a valid tree pointer
even when corruption is detected.
Therefore, for v3, I plan to:
-Keep the return type as-is to avoid scope creep and ensure easy
backporting.
-Use sb->s_flags |= SB_RDONLY inside hfs_btree_open() to flag the
safety issue.
-Drop the bool flag I added in v2 (as you requested) and simply
check sb_rdonly(sb) in fill_super to print the warning.
I will, of course, address your other comments regarding named
constants and pointer arithmetic in v3.
Does this sound acceptable?
Thanks,
Shardul
© 2016 - 2026 Red Hat, Inc.