[PATCH] x86/split_lock: simplify reenabling

Maksim Davydov posted 1 patch 8 months, 4 weeks ago
arch/x86/kernel/cpu/bus_lock.c | 36 +++++++++++++++++++++++-----------
1 file changed, 25 insertions(+), 11 deletions(-)
[PATCH] x86/split_lock: simplify reenabling
Posted by Maksim Davydov 8 months, 4 weeks ago
When split_lock_mitigate is disabled, each CPU needs its own delayed_work
structure. They are used to reenable split lock detection after its
disabling. But a delayed_work structure must be correctly initialized after
its allocation. The current implementation uses deferred initialization that
makes the split lock handler code unclear. So the code can be simplified
if the initialization is moved to the appropriate initcall. sld_setup()
is called before setup_per_cpu_areas(), thus it can't be used for this
purpose. Another way is to implement an independent initcall for the
initialization, which is what has been done.

Signed-off-by: Maksim Davydov <davydov-max@yandex-team.ru>
---
 arch/x86/kernel/cpu/bus_lock.c | 36 +++++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c
index 97222efb4d2a..5149e3874c92 100644
--- a/arch/x86/kernel/cpu/bus_lock.c
+++ b/arch/x86/kernel/cpu/bus_lock.c
@@ -200,6 +200,26 @@ static void __split_lock_reenable(struct work_struct *work)
  */
 static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
 
+/*
+ * Per-CPU delayed_work can't be statically initialized properly because
+ * the struct address is unknown. Thus per-CPU delayed_work structures
+ * have to be initialized during kernel initialization and after calling
+ * setup_per_cpu_areas().
+ */
+static int __init setup_split_lock_delayed_work(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
+
+		INIT_DELAYED_WORK(work, __split_lock_reenable);
+	}
+
+	return 0;
+}
+pure_initcall(setup_split_lock_delayed_work);
+
 /*
  * If a CPU goes offline with pending delayed work to re-enable split lock
  * detection then the delayed work will be executed on some other CPU. That
@@ -219,15 +239,16 @@ static int splitlock_cpu_offline(unsigned int cpu)
 
 static void split_lock_warn(unsigned long ip)
 {
-	struct delayed_work *work = NULL;
+	struct delayed_work *work;
 	int cpu;
+	unsigned int saved_sld_mitigate = READ_ONCE(sysctl_sld_mitigate);
 
 	if (!current->reported_split_lock)
 		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
 				    current->comm, current->pid, ip);
 	current->reported_split_lock = 1;
 
-	if (sysctl_sld_mitigate) {
+	if (saved_sld_mitigate) {
 		/*
 		 * misery factor #1:
 		 * sleep 10ms before trying to execute split lock.
@@ -240,18 +261,11 @@ static void split_lock_warn(unsigned long ip)
 		 */
 		if (down_interruptible(&buslock_sem) == -EINTR)
 			return;
-		work = &sl_reenable_unlock;
 	}
 
 	cpu = get_cpu();
-
-	if (!work) {
-		work = this_cpu_ptr(&sl_reenable);
-		/* Deferred initialization of per-CPU struct */
-		if (!work->work.func)
-			INIT_DELAYED_WORK(work, __split_lock_reenable);
-	}
-
+	work = (saved_sld_mitigate ?
+		&sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu));
 	schedule_delayed_work_on(cpu, work, 2);
 
 	/* Disable split lock detection on this CPU to make progress */
-- 
2.34.1
Re: [PATCH] x86/split_lock: simplify reenabling
Posted by Ingo Molnar 8 months, 4 weeks ago
* Maksim Davydov <davydov-max@yandex-team.ru> wrote:

> sld_setup() is called before setup_per_cpu_areas(), thus it can't be 
> used for this purpose. Another way is to implement independent 
> initcall for the initialization, that's what has been done.

> + * Per-CPU delayed_work can't be statically initialized properly because
> + * the struct address is unknown. Thus per-CPU delayed_work structures
> + * have to be initialized during kernel initialization and after calling
> + * setup_per_cpu_areas().
> + */
> +static int __init setup_split_lock_delayed_work(void)
> +{
> +	unsigned int cpu;
> +
> +	for_each_possible_cpu(cpu) {
> +		struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
> +
> +		INIT_DELAYED_WORK(work, __split_lock_reenable);
> +	}
> +
> +	return 0;
> +}
> +pure_initcall(setup_split_lock_delayed_work);

Oh, I didn't realize sld_setup() couldn't be used for this - thx for 
the followup!

	Ingo
Re: [PATCH] x86/split_lock: simplify reenabling
Posted by Maksim Davydov 8 months, 4 weeks ago
On 3/25/25 12:25, Ingo Molnar wrote:
> 
> * Maksim Davydov <davydov-max@yandex-team.ru> wrote:
> 
>> sld_setup() is called before setup_per_cpu_areas(), thus it can't be
>> used for this purpose. Another way is to implement independent
>> initcall for the initialization, that's what has been done.
> 
>> + * Per-CPU delayed_work can't be statically initialized properly because
>> + * the struct address is unknown. Thus per-CPU delayed_work structures
>> + * have to be initialized during kernel initialization and after calling
>> + * setup_per_cpu_areas().
>> + */
>> +static int __init setup_split_lock_delayed_work(void)
>> +{
>> +	unsigned int cpu;
>> +
>> +	for_each_possible_cpu(cpu) {
>> +		struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
>> +
>> +		INIT_DELAYED_WORK(work, __split_lock_reenable);
>> +	}
>> +
>> +	return 0;
>> +}
>> +pure_initcall(setup_split_lock_delayed_work);
> 
> Oh, I didn't realize sld_setup() couldn't be used for this - thx for
> the followup!
> 
> 	Ingo

Thanks for reviewing :-)

-- 
Best regards,
Maksim Davydov
[tip: locking/urgent] x86/split_lock: Simplify reenabling
Posted by tip-bot2 for Maksim Davydov 8 months, 4 weeks ago
The following commit has been merged into the locking/urgent branch of tip:

Commit-ID:     0e1ff67d164be45e8ddfea5aaf5803224ede0805
Gitweb:        https://git.kernel.org/tip/0e1ff67d164be45e8ddfea5aaf5803224ede0805
Author:        Maksim Davydov <davydov-max@yandex-team.ru>
AuthorDate:    Tue, 25 Mar 2025 11:58:07 +03:00
Committer:     Ingo Molnar <mingo@kernel.org>
CommitterDate: Tue, 25 Mar 2025 10:16:50 +01:00

x86/split_lock: Simplify reenabling

When split_lock_mitigate is disabled, each CPU needs its own delayed_work
structure. They are used to reenable split lock detection after its
disabling. But delayed_work structure must be correctly initialized after
its allocation.

Current implementation uses deferred initialization that makes the
split lock handler code unclear. The code can be simplified a bit
if the initialization is moved to the appropriate initcall.

sld_setup() is called before setup_per_cpu_areas(), thus it can't be used
for this purpose, so introduce an independent initcall for
the initialization.

[ mingo: Simplified the 'work' assignment line a bit more. ]

Signed-off-by: Maksim Davydov <davydov-max@yandex-team.ru>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20250325085807.171885-1-davydov-max@yandex-team.ru
---
 arch/x86/kernel/cpu/bus_lock.c | 35 ++++++++++++++++++++++-----------
 1 file changed, 24 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c
index 97222ef..237faf7 100644
--- a/arch/x86/kernel/cpu/bus_lock.c
+++ b/arch/x86/kernel/cpu/bus_lock.c
@@ -201,6 +201,26 @@ static void __split_lock_reenable(struct work_struct *work)
 static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
 
 /*
+ * Per-CPU delayed_work can't be statically initialized properly because
+ * the struct address is unknown. Thus per-CPU delayed_work structures
+ * have to be initialized during kernel initialization and after calling
+ * setup_per_cpu_areas().
+ */
+static int __init setup_split_lock_delayed_work(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
+
+		INIT_DELAYED_WORK(work, __split_lock_reenable);
+	}
+
+	return 0;
+}
+pure_initcall(setup_split_lock_delayed_work);
+
+/*
  * If a CPU goes offline with pending delayed work to re-enable split lock
  * detection then the delayed work will be executed on some other CPU. That
  * handles releasing the buslock_sem, but because it executes on a
@@ -219,15 +239,16 @@ static int splitlock_cpu_offline(unsigned int cpu)
 
 static void split_lock_warn(unsigned long ip)
 {
-	struct delayed_work *work = NULL;
+	struct delayed_work *work;
 	int cpu;
+	unsigned int saved_sld_mitigate = READ_ONCE(sysctl_sld_mitigate);
 
 	if (!current->reported_split_lock)
 		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
 				    current->comm, current->pid, ip);
 	current->reported_split_lock = 1;
 
-	if (sysctl_sld_mitigate) {
+	if (saved_sld_mitigate) {
 		/*
 		 * misery factor #1:
 		 * sleep 10ms before trying to execute split lock.
@@ -240,18 +261,10 @@ static void split_lock_warn(unsigned long ip)
 		 */
 		if (down_interruptible(&buslock_sem) == -EINTR)
 			return;
-		work = &sl_reenable_unlock;
 	}
 
 	cpu = get_cpu();
-
-	if (!work) {
-		work = this_cpu_ptr(&sl_reenable);
-		/* Deferred initialization of per-CPU struct */
-		if (!work->work.func)
-			INIT_DELAYED_WORK(work, __split_lock_reenable);
-	}
-
+	work = saved_sld_mitigate ? &sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu);
 	schedule_delayed_work_on(cpu, work, 2);
 
 	/* Disable split lock detection on this CPU to make progress */