Disable reader optimistic spinning by default; it can be enabled with
the "rwsem.opt_rspin" kernel command-line parameter.

Also fix a compile error when CONFIG_RWSEM_SPIN_ON_OWNER is not set
(reported by the kernel test robot).
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202306010043.VJHcuCnb-lkp@intel.com/
Signed-off-by: Bongkyu Kim <bongkyu7.kim@samsung.com>
---
Documentation/admin-guide/kernel-parameters.txt | 9 +++++++++
kernel/locking/rwsem.c | 16 +++++++++++++++-
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 772b54df084b..adf16a07fe4d 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5605,6 +5605,15 @@
rw [KNL] Mount root device read-write on boot
+ rwsem.opt_rspin [KNL]
+ Use rwsem reader optimistic spinning. Reader optimistic
+ spinning is helpful when the reader critical section is
+ short and there aren't that many readers around.
+ For example, enabling this option may improve performance
+ in mobile workloads, where there are few readers, but
+ may reduce performance in server workloads, where there
+ are many readers.
+
S [KNL] Run init in single mode
s390_iommu= [HW,S390]
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 9c0462d515c1..47c467880af5 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -117,6 +117,17 @@
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif
+static bool __ro_after_init rwsem_opt_rspin;
+
+static int __init opt_rspin(char *str)
+{
+ rwsem_opt_rspin = true;
+
+ return 0;
+}
+
+early_param("rwsem.opt_rspin", opt_rspin);
+
/*
* On 64-bit architectures, the bit definitions of the count are:
*
@@ -1083,7 +1094,7 @@ static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
return false;
}
-static inline bool rwsem_no_spinners(sem)
+static inline bool rwsem_no_spinners(struct rw_semaphore *sem)
{
return false;
}
@@ -1157,6 +1168,9 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
return sem;
}
+ if (!IS_ENABLED(CONFIG_RWSEM_SPIN_ON_OWNER) || !rwsem_opt_rspin)
+ goto queue;
+
/*
* Save the current read-owner of rwsem, if available, and the
* reader nonspinnable bit.
--
2.36.1
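
(Usage note, not part of the patch: the parameter is a bare flag. A
minimal sketch of enabling it from a GRUB-style kernel command line,
with the image and root device names purely illustrative, would be:

    linux /boot/vmlinuz root=/dev/sda1 ro rwsem.opt_rspin

opt_rspin() ignores its argument and simply sets the flag, so no
"=value" is needed.)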
On Fri, Sep 01, 2023 at 10:07:04AM +0900, Bongkyu Kim wrote:
> [...]
> +static bool __ro_after_init rwsem_opt_rspin;
> +
> +static int __init opt_rspin(char *str)
> +{
> + rwsem_opt_rspin = true;
> +
> + return 0;
> +}
> +
> +early_param("rwsem.opt_rspin", opt_rspin);
> +
> [...]
>
> + if (!IS_ENABLED(CONFIG_RWSEM_SPIN_ON_OWNER) || !rwsem_opt_rspin)
> + goto queue;
> +
At the very least this should be a static_branch(), but I still very
much want an answer on how all this interacts with the handoff stuff.
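
For illustration, a minimal sketch of the static_branch() variant being
suggested, assuming the same rwsem.opt_rspin early_param and that
static_branch_enable() is usable at early_param time (this is one
reading of the suggestion, not code from this thread):

	/* Jump label, patched out by default (<linux/jump_label.h>). */
	DEFINE_STATIC_KEY_FALSE(rwsem_opt_rspin);

	static int __init opt_rspin(char *str)
	{
		/* Flip the branch once at boot instead of setting a bool. */
		static_branch_enable(&rwsem_opt_rspin);

		return 0;
	}
	early_param("rwsem.opt_rspin", opt_rspin);

and the slowpath check would become:

	if (!IS_ENABLED(CONFIG_RWSEM_SPIN_ON_OWNER) ||
	    !static_branch_unlikely(&rwsem_opt_rspin))
		goto queue;

With the key disabled, static_branch_unlikely() compiles down to a
straight-line no-op rather than a load-and-test of a bool, so the
default (spinning disabled) case avoids touching the flag in the read
slowpath.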