[PATCH v3] erofs: lazily initialize per-CPU workers and CPU hotplug hooks

Posted by Sandeep Dhavale 9 months, 3 weeks ago
Currently, when EROFS is built with per-CPU workers, the workers are
started and CPU hotplug hooks are registered during module initialization.
This leads to unnecessary worker start/stop cycles during CPU hotplug
events, particularly on Android devices that frequently suspend and resume.

This change defers the initialization of per-CPU workers and the
registration of CPU hotplug hooks until the first EROFS mount. This
ensures that these resources are only allocated and managed when EROFS is
actually in use.

The teardown of per-CPU workers and unregistration of CPU hotplug hooks
still occur during z_erofs_exit_subsystem(), but only if they were
initialized.

Signed-off-by: Sandeep Dhavale <dhavale@google.com>
---
v2: https://lore.kernel.org/linux-erofs/20250402202728.2157627-1-dhavale@google.com/
Changes since v2:
- Renamed functions to use pcpu so the naming is clear.
- Removed z_erofs_init_workers_once() declaration from internal.h as
  there is no need.
- Removed empty stubs for helpers erofs_init_percpu_workers() and
  erofs_destroy_percpu_workers().
- Moved erofs_percpu_workers_initialized under
  CONFIG_EROFS_FS_PCPU_KTHREAD as further cleanup.

 fs/erofs/zdata.c | 65 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 49 insertions(+), 16 deletions(-)

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 0671184d9cf1..e12df8b914b6 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -291,6 +291,9 @@ static struct workqueue_struct *z_erofs_workqueue __read_mostly;
 
 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
 static struct kthread_worker __rcu **z_erofs_pcpu_workers;
+static atomic_t erofs_percpu_workers_initialized = ATOMIC_INIT(0);
+static int erofs_cpu_hotplug_init(void);
+static void erofs_cpu_hotplug_destroy(void);
 
 static void erofs_destroy_percpu_workers(void)
 {
@@ -336,9 +339,44 @@ static int erofs_init_percpu_workers(void)
 	}
 	return 0;
 }
+
+static int z_erofs_init_pcpu_workers(void)
+{
+	int err;
+
+	err = erofs_init_percpu_workers();
+	if (err)
+		goto err_init_percpu_workers;
+
+	err = erofs_cpu_hotplug_init();
+	if (err < 0)
+		goto err_cpuhp_init;
+	return err;
+
+err_cpuhp_init:
+	erofs_destroy_percpu_workers();
+err_init_percpu_workers:
+	atomic_set(&erofs_percpu_workers_initialized, 0);
+	return err;
+}
+
+static int z_erofs_init_workers_once(void)
+{
+	if (atomic_xchg(&erofs_percpu_workers_initialized, 1))
+		return 0;
+	return z_erofs_init_pcpu_workers();
+}
+
+static void z_erofs_destroy_pcpu_workers(void)
+{
+	if (!atomic_xchg(&erofs_percpu_workers_initialized, 0))
+		return;
+	erofs_cpu_hotplug_destroy();
+	erofs_destroy_percpu_workers();
+}
 #else
-static inline void erofs_destroy_percpu_workers(void) {}
-static inline int erofs_init_percpu_workers(void) { return 0; }
+static inline int z_erofs_init_workers_once(void) { return 0; }
+static inline void z_erofs_destroy_pcpu_workers(void) {}
 #endif
 
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
@@ -405,8 +443,7 @@ static inline void erofs_cpu_hotplug_destroy(void) {}
 
 void z_erofs_exit_subsystem(void)
 {
-	erofs_cpu_hotplug_destroy();
-	erofs_destroy_percpu_workers();
+	z_erofs_destroy_pcpu_workers();
 	destroy_workqueue(z_erofs_workqueue);
 	z_erofs_destroy_pcluster_pool();
 	z_erofs_exit_decompressor();
@@ -430,19 +467,8 @@ int __init z_erofs_init_subsystem(void)
 		goto err_workqueue_init;
 	}
 
-	err = erofs_init_percpu_workers();
-	if (err)
-		goto err_pcpu_worker;
-
-	err = erofs_cpu_hotplug_init();
-	if (err < 0)
-		goto err_cpuhp_init;
 	return err;
 
-err_cpuhp_init:
-	erofs_destroy_percpu_workers();
-err_pcpu_worker:
-	destroy_workqueue(z_erofs_workqueue);
 err_workqueue_init:
 	z_erofs_destroy_pcluster_pool();
 err_pcluster_pool:
@@ -644,10 +670,17 @@ static const struct address_space_operations z_erofs_cache_aops = {
 
 int z_erofs_init_super(struct super_block *sb)
 {
-	struct inode *const inode = new_inode(sb);
+	struct inode *inode;
+	int err;
 
+	err = z_erofs_init_workers_once();
+	if (err)
+		return err;
+
+	inode = new_inode(sb);
 	if (!inode)
 		return -ENOMEM;
+
 	set_nlink(inode, 1);
 	inode->i_size = OFFSET_MAX;
 	inode->i_mapping->a_ops = &z_erofs_cache_aops;
-- 
2.49.0.805.g082f7c87e0-goog
Re: [PATCH v3] erofs: lazily initialize per-CPU workers and CPU hotplug hooks
Posted by Gao Xiang 9 months, 3 weeks ago
Hi Sandeep,

On 2025/4/23 07:45, Sandeep Dhavale wrote:
> Currently, when EROFS is built with per-CPU workers, the workers are
> started and CPU hotplug hooks are registered during module initialization.
> This leads to unnecessary worker start/stop cycles during CPU hotplug
> events, particularly on Android devices that frequently suspend and resume.
> 
> This change defers the initialization of per-CPU workers and the
> registration of CPU hotplug hooks until the first EROFS mount. This
> ensures that these resources are only allocated and managed when EROFS is
> actually in use.
> 
> The teardown of per-CPU workers and unregistration of CPU hotplug hooks
> still occur during z_erofs_exit_subsystem(), but only if they were
> initialized.
> 
> Signed-off-by: Sandeep Dhavale <dhavale@google.com>
> ---
> v2: https://lore.kernel.org/linux-erofs/20250402202728.2157627-1-dhavale@google.com/
> Changes since v2:
> - Renamed functions to use pcpu so the naming is clear.
> - Removed z_erofs_init_workers_once() declaration from internal.h as
>    there is no need.
> - Removed empty stubs for helpers erofs_init_percpu_workers() and
>    erofs_destroy_percpu_workers().
> - Moved erofs_percpu_workers_initialized under
>    CONFIG_EROFS_FS_PCPU_KTHREAD as further cleanup.
> 
>   fs/erofs/zdata.c | 65 ++++++++++++++++++++++++++++++++++++------------
>   1 file changed, 49 insertions(+), 16 deletions(-)
> 
> diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> index 0671184d9cf1..e12df8b914b6 100644
> --- a/fs/erofs/zdata.c
> +++ b/fs/erofs/zdata.c
> @@ -291,6 +291,9 @@ static struct workqueue_struct *z_erofs_workqueue __read_mostly;
>   
>   #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
>   static struct kthread_worker __rcu **z_erofs_pcpu_workers;
> +static atomic_t erofs_percpu_workers_initialized = ATOMIC_INIT(0);
> +static int erofs_cpu_hotplug_init(void);
> +static void erofs_cpu_hotplug_destroy(void);
>   
>   static void erofs_destroy_percpu_workers(void)
>   {
> @@ -336,9 +339,44 @@ static int erofs_init_percpu_workers(void)
>   	}
>   	return 0;
>   }
> +
> +static int z_erofs_init_pcpu_workers(void)

I think you could just fold it into the caller.

> +{
> +	int err;
> +
> +	err = erofs_init_percpu_workers();
> +	if (err)
> +		goto err_init_percpu_workers;
> +
> +	err = erofs_cpu_hotplug_init();
> +	if (err < 0)
> +		goto err_cpuhp_init;
> +	return err;
> +
> +err_cpuhp_init:
> +	erofs_destroy_percpu_workers();
> +err_init_percpu_workers:
> +	atomic_set(&erofs_percpu_workers_initialized, 0);
> +	return err;
> +}
> +
> +static int z_erofs_init_workers_once(void)

and rename it as `z_erofs_init_pcpu_workers()`, since
initializing only once is just an internal implementation detail.

> +{
> +	if (atomic_xchg(&erofs_percpu_workers_initialized, 1))
> +		return 0;
> +	return z_erofs_init_pcpu_workers();
> +}
> +

..

>   err_pcluster_pool:
> @@ -644,10 +670,17 @@ static const struct address_space_operations z_erofs_cache_aops = {
>   
>   int z_erofs_init_super(struct super_block *sb)
>   {
> -	struct inode *const inode = new_inode(sb);
> +	struct inode *inode;
> +	int err;
>   
> +	err = z_erofs_init_workers_once();
> +	if (err)
> +		return err;


Then just call
	err = z_erofs_init_pcpu_workers();
	if (err)
		return err;
here.
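
i.e., the folded helper might end up roughly like this (an untested
sketch, only recombining code already in this patch; no new names
are introduced):

static int z_erofs_init_pcpu_workers(void)
{
	int err;

	/* only the first caller performs the initialization */
	if (atomic_xchg(&erofs_percpu_workers_initialized, 1))
		return 0;

	err = erofs_init_percpu_workers();
	if (err)
		goto err_init_percpu_workers;

	err = erofs_cpu_hotplug_init();
	if (err < 0)
		goto err_cpuhp_init;
	return 0;

err_cpuhp_init:
	erofs_destroy_percpu_workers();
err_init_percpu_workers:
	/* let a later mount retry the initialization */
	atomic_set(&erofs_percpu_workers_initialized, 0);
	return err;
}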


Otherwise it looks good to me.

Thanks,
Gao Xiang
Re: [PATCH v3] erofs: lazily initialize per-CPU workers and CPU hotplug hooks
Posted by Sandeep Dhavale 9 months, 3 weeks ago
Hi Gao,
Thanks for the review; I will work on v4 to address the remaining items.

-Sandeep.