fs/ocfs2/cluster/heartbeat.c | 64 +++++++++++++++++++++++++----------- 1 file changed, 45 insertions(+), 19 deletions(-)
From: Yufan Chen <ericterminal@gmail.com>
o2hb_map_slot_data() allocated hr_tmp_block, hr_slots, hr_slot_data, and pages in stages but returned directly on allocation failures without unwinding previously allocated resources. Under repeated allocation failures this could leak memory and increase pressure toward OOM.
o2hb_region_dev_store() also failed to roll back slot mapping resources when setup aborted, leaving stale allocations around retry attempts.
Introduce o2hb_unmap_slot_data() as a single reverse-order cleanup helper, switch o2hb_map_slot_data() to a centralized goto-based error exit, and call the same rollback path from o2hb_region_dev_store() after stopping a possibly started heartbeat thread. This ensures failed setup fully releases resources and remains safely retryable.
Signed-off-by: Yufan Chen <ericterminal@gmail.com>
---
fs/ocfs2/cluster/heartbeat.c | 64 +++++++++++++++++++++++++-----------
1 file changed, 45 insertions(+), 19 deletions(-)
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index fe1949578..2f82040f4 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1488,18 +1488,10 @@ static struct o2hb_region *to_o2hb_region(struct config_item *item)
return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
}
-/* drop_item only drops its ref after killing the thread, nothing should
- * be using the region anymore. this has to clean up any state that
- * attributes might have built up. */
-static void o2hb_region_release(struct config_item *item)
+static void o2hb_unmap_slot_data(struct o2hb_region *reg)
{
int i;
struct page *page;
- struct o2hb_region *reg = to_o2hb_region(item);
-
- mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg_bdev(reg));
-
- kfree(reg->hr_tmp_block);
if (reg->hr_slot_data) {
for (i = 0; i < reg->hr_num_pages; i++) {
@@ -1508,13 +1500,32 @@ static void o2hb_region_release(struct config_item *item)
__free_page(page);
}
kfree(reg->hr_slot_data);
+ reg->hr_slot_data = NULL;
}
+ kfree(reg->hr_slots);
+ reg->hr_slots = NULL;
+
+ kfree(reg->hr_tmp_block);
+ reg->hr_tmp_block = NULL;
+ reg->hr_num_pages = 0;
+}
+
+/* drop_item only drops its ref after killing the thread, nothing should
+ * be using the region anymore. this has to clean up any state that
+ * attributes might have built up.
+ */
+static void o2hb_region_release(struct config_item *item)
+{
+ struct o2hb_region *reg = to_o2hb_region(item);
+
+ mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg_bdev(reg));
+
+ o2hb_unmap_slot_data(reg);
+
if (reg->hr_bdev_file)
fput(reg->hr_bdev_file);
- kfree(reg->hr_slots);
-
debugfs_remove_recursive(reg->hr_debug_dir);
kfree(reg->hr_db_livenodes);
kfree(reg->hr_db_regnum);
@@ -1667,6 +1678,7 @@ static void o2hb_init_region_params(struct o2hb_region *reg)
static int o2hb_map_slot_data(struct o2hb_region *reg)
{
int i, j;
+ int ret = -ENOMEM;
unsigned int last_slot;
unsigned int spp = reg->hr_slots_per_page;
struct page *page;
@@ -1674,14 +1686,14 @@ static int o2hb_map_slot_data(struct o2hb_region *reg)
struct o2hb_disk_slot *slot;
reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
- if (reg->hr_tmp_block == NULL)
- return -ENOMEM;
+ if (!reg->hr_tmp_block)
+ goto out;
reg->hr_slots = kzalloc_objs(struct o2hb_disk_slot, reg->hr_blocks);
- if (reg->hr_slots == NULL)
- return -ENOMEM;
+ if (!reg->hr_slots)
+ goto out;
- for(i = 0; i < reg->hr_blocks; i++) {
+ for (i = 0; i < reg->hr_blocks; i++) {
slot = &reg->hr_slots[i];
slot->ds_node_num = i;
INIT_LIST_HEAD(&slot->ds_live_item);
@@ -1695,12 +1707,12 @@ static int o2hb_map_slot_data(struct o2hb_region *reg)
reg->hr_slot_data = kzalloc_objs(struct page *, reg->hr_num_pages);
if (!reg->hr_slot_data)
- return -ENOMEM;
+ goto out;
- for(i = 0; i < reg->hr_num_pages; i++) {
+ for (i = 0; i < reg->hr_num_pages; i++) {
page = alloc_page(GFP_KERNEL);
if (!page)
- return -ENOMEM;
+ goto out;
reg->hr_slot_data[i] = page;
@@ -1720,6 +1732,10 @@ static int o2hb_map_slot_data(struct o2hb_region *reg)
}
return 0;
+
+out:
+ o2hb_unmap_slot_data(reg);
+ return ret;
}
/* Read in all the slots available and populate the tracking
@@ -1903,6 +1919,16 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
out3:
if (ret < 0) {
+ spin_lock(&o2hb_live_lock);
+ hb_task = reg->hr_task;
+ reg->hr_task = NULL;
+ spin_unlock(&o2hb_live_lock);
+
+ if (hb_task)
+ kthread_stop(hb_task);
+
+ o2hb_unmap_slot_data(reg);
+
fput(reg->hr_bdev_file);
reg->hr_bdev_file = NULL;
}
--
2.47.3
On 3/28/26 5:23 PM, Yufan Chen wrote:
> From: Yufan Chen <ericterminal@gmail.com>
>
> o2hb_map_slot_data() allocated hr_tmp_block, hr_slots, hr_slot_data, and pages in stages but returned directly on allocation failures without unwinding previously allocated resources. Under repeated allocation failures this could leak memory and increase pressure toward OOM.
>
> o2hb_region_dev_store() also failed to roll back slot mapping resources when setup aborted, leaving stale allocations around retry attempts.
>
> Introduce o2hb_unmap_slot_data() as a single reverse-order cleanup helper, switch o2hb_map_slot_data() to a centralized goto-based error exit, and call the same rollback path from o2hb_region_dev_store() after stopping a possibly started heartbeat thread. This ensures failed setup fully releases resources and remains safely retryable.
>
The lines of the commit log are too long; please wrap them at the conventional width.
Please use scripts/checkpatch.pl to check the patch.
> Signed-off-by: Yufan Chen <ericterminal@gmail.com>
> ---
> fs/ocfs2/cluster/heartbeat.c | 64 +++++++++++++++++++++++++-----------
> 1 file changed, 45 insertions(+), 19 deletions(-)
>
> diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
> index fe1949578..2f82040f4 100644
> --- a/fs/ocfs2/cluster/heartbeat.c
> +++ b/fs/ocfs2/cluster/heartbeat.c
> @@ -1488,18 +1488,10 @@ static struct o2hb_region *to_o2hb_region(struct config_item *item)
> return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
> }
>
> -/* drop_item only drops its ref after killing the thread, nothing should
> - * be using the region anymore. this has to clean up any state that
> - * attributes might have built up. */
> -static void o2hb_region_release(struct config_item *item)
> +static void o2hb_unmap_slot_data(struct o2hb_region *reg)
> {
> int i;
> struct page *page;
> - struct o2hb_region *reg = to_o2hb_region(item);
> -
> - mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg_bdev(reg));
> -
> - kfree(reg->hr_tmp_block);
>
> if (reg->hr_slot_data) {
> for (i = 0; i < reg->hr_num_pages; i++) {
> @@ -1508,13 +1500,32 @@ static void o2hb_region_release(struct config_item *item)
> __free_page(page);
> }
> kfree(reg->hr_slot_data);
> + reg->hr_slot_data = NULL;
> }
>
> + kfree(reg->hr_slots);
> + reg->hr_slots = NULL;
> +
> + kfree(reg->hr_tmp_block);
> + reg->hr_tmp_block = NULL;
> + reg->hr_num_pages = 0;
Instead of setting reg->hr_num_pages to 0, I'd like to set each
reg->hr_slot_data[i] to NULL once it is freed.
> +}
> +
> +/* drop_item only drops its ref after killing the thread, nothing should
> + * be using the region anymore. this has to clean up any state that
> + * attributes might have built up.
> + */
> +static void o2hb_region_release(struct config_item *item)
> +{
> + struct o2hb_region *reg = to_o2hb_region(item);
> +
> + mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg_bdev(reg));
> +
> + o2hb_unmap_slot_data(reg);
> +
> if (reg->hr_bdev_file)
> fput(reg->hr_bdev_file);
>
> - kfree(reg->hr_slots);
> -
> debugfs_remove_recursive(reg->hr_debug_dir);
> kfree(reg->hr_db_livenodes);
> kfree(reg->hr_db_regnum);
> @@ -1667,6 +1678,7 @@ static void o2hb_init_region_params(struct o2hb_region *reg)
> static int o2hb_map_slot_data(struct o2hb_region *reg)
> {
> int i, j;
> + int ret = -ENOMEM;
> unsigned int last_slot;
> unsigned int spp = reg->hr_slots_per_page;
> struct page *page;
> @@ -1674,14 +1686,14 @@ static int o2hb_map_slot_data(struct o2hb_region *reg)
> struct o2hb_disk_slot *slot;
>
> reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
> - if (reg->hr_tmp_block == NULL)
> - return -ENOMEM;
> + if (!reg->hr_tmp_block)
> + goto out;
>
> reg->hr_slots = kzalloc_objs(struct o2hb_disk_slot, reg->hr_blocks);
> - if (reg->hr_slots == NULL)
> - return -ENOMEM;
> + if (!reg->hr_slots)
> + goto out;
>
> - for(i = 0; i < reg->hr_blocks; i++) {
> + for (i = 0; i < reg->hr_blocks; i++) {
> slot = &reg->hr_slots[i];
> slot->ds_node_num = i;
> INIT_LIST_HEAD(&slot->ds_live_item);
> @@ -1695,12 +1707,12 @@ static int o2hb_map_slot_data(struct o2hb_region *reg)
>
> reg->hr_slot_data = kzalloc_objs(struct page *, reg->hr_num_pages);
> if (!reg->hr_slot_data)
> - return -ENOMEM;
> + goto out;
>
> - for(i = 0; i < reg->hr_num_pages; i++) {
> + for (i = 0; i < reg->hr_num_pages; i++) {
> page = alloc_page(GFP_KERNEL);
> if (!page)
> - return -ENOMEM;
> + goto out;
>
> reg->hr_slot_data[i] = page;
>
> @@ -1720,6 +1732,10 @@ static int o2hb_map_slot_data(struct o2hb_region *reg)
> }
>
> return 0;
> +
> +out:
> + o2hb_unmap_slot_data(reg);
> + return ret;
> }
>
> /* Read in all the slots available and populate the tracking
> @@ -1903,6 +1919,16 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
>
> out3:
Let's rename the 'out3' label to 'out' as well.
Thanks,
Joseph
> if (ret < 0) {
> + spin_lock(&o2hb_live_lock);
> + hb_task = reg->hr_task;
> + reg->hr_task = NULL;
> + spin_unlock(&o2hb_live_lock);
> +
> + if (hb_task)
> + kthread_stop(hb_task);
> +
> + o2hb_unmap_slot_data(reg);
> +
> fput(reg->hr_bdev_file);
> reg->hr_bdev_file = NULL;
> }
© 2016 - 2026 Red Hat, Inc.