Hello!

This v4 series replaces open-coded uses of the ratelimit_state structure
with formal APIs, counts all rate-limit misses, replaces jiffies=0 special
case with a flag, provides a ___ratelimit() trylock-failure fastpath to
(almost) eliminate false-positive misses, simplifies the code, and adds
a simple test.

The key point of this series is the reduction of false-positive misses.
More could be done to avoid open-coded access to the ->interval and
->burst fields, and to tighten up checking of user input for these fields,
but those are jobs for later patches.

The individual patches are as follows:

1.  Add trivial kunit test for ratelimit.
2.  Create functions to handle ratelimit_state internals.
3.  Avoid open-coded use of ratelimit_state structure's ->missed field.
4.  Avoid open-coded use of ratelimit_state structure's ->missed field.
5.  Avoid open-coded use of ratelimit_state structure's internals.
6.  Convert the ->missed field to atomic_t.
7.  Count misses due to lock contention.
8.  Avoid jiffies=0 special case.
9.  Reduce ___ratelimit() false-positive rate limiting, courtesy of
    Petr Mladek.
10. Allow zero ->burst to disable ratelimiting.
11. Force re-initialization when rate-limiting re-enabled.
12. Don't flush misses counter if RATELIMIT_MSG_ON_RELEASE.
13. Avoid atomic decrement if already rate-limited.
14. Avoid atomic decrement under lock if already rate-limited.
15. Warn if ->interval or ->burst are negative, courtesy of Petr Mladek.
16. Simplify common-case exit path.
17. Use nolock_ret label to save a couple of lines of code.
18. Use nolock_ret label to collapse lock-failure code.
19. Use nolock_ret restructuring to collapse common case code.
20. Drop redundant accesses to burst.

Changes since v3:

o   Correctly handle zero-initialized ratelimit_state structures,
    being careful to avoid acquiring the uninitialized ->lock.
o   Remove redundant checks of the "burst" local variable.
o   Add Reviewed-by tags.

Changes since v2:

o   Apply feedback from Bert Karwatzki, Srikanth Aithal, and Mark Brown,
    fixing a hang that happened on some systems.
o   Applied Reviewed-by tags and added links.
o   Added a prototype patch from Petr Mladek that splats if either
    interval or burst are negative.
o   Added several commits that simplify the code.

Changes since v1 (RFC):

o   Switch from lockless fastpath to carrying out needed updates upon
    trylock failure, per Petr Mladek feedback.  This greatly simplifies
    the code and is a much smaller change from the current code.  There
    is a small performance penalty compared to the lockless fastpath,
    but not enough to matter.
o   Never unconditionally acquire the lock, again per Petr Mladek
    feedback.
o   Better define effects of non-positive burst values (always
    ratelimit) and non-positive interval values (never ratelimit when
    the burst value is positive).
o   The changes from Petr's original are supplied as five incremental
    patches, but could easily be folded into Petr's original if desired.
    (Left to my lazy self, they stay as-is.)
						Thanx, Paul

------------------------------------------------------------------------

 b/drivers/char/random.c              |    9 +
 b/drivers/gpu/drm/amd/pm/amdgpu_pm.c |   11 --
 b/drivers/gpu/drm/i915/i915_perf.c   |    8 -
 b/include/linux/ratelimit.h          |   40 +++++++-
 b/include/linux/ratelimit_types.h    |    2 
 b/lib/Kconfig.debug                  |   11 ++
 b/lib/ratelimit.c                    |    8 -
 b/lib/tests/Makefile                 |    1 
 b/lib/tests/test_ratelimit.c         |   79 ++++++++++++++++
 include/linux/ratelimit.h            |   13 +-
 include/linux/ratelimit_types.h      |    3 
 lib/ratelimit.c                      |  165 ++++++++++++++++++++---------------
 12 files changed, 246 insertions(+), 104 deletions(-)
On Tue 2025-04-29 18:05:00, Paul E. McKenney wrote:
> Hello!
>
> This v4 series replaces open-coded uses of the ratelimit_state structure
> with formal APIs, counts all rate-limit misses, replaces jiffies=0 special
> case with a flag, provides a ___ratelimit() trylock-failure fastpath to
> (almost) eliminate false-positive misses, simplifies the code, and adds
> a simple test.
>
> The key point of this series is the reduction of false-positive misses.
> More could be done to avoid open-coded access to the ->interval and
> ->burst fields, and to tighten up checking of user input for these fields,
> but those are jobs for later patches.

JFYI, the whole series looks good to me.

Best Regards,
Petr
On Mon, May 05, 2025 at 01:37:57PM +0200, Petr Mladek wrote:
> On Tue 2025-04-29 18:05:00, Paul E. McKenney wrote:
> > Hello!
> >
> > This v4 series replaces open-coded uses of the ratelimit_state structure
> > with formal APIs, counts all rate-limit misses, replaces jiffies=0 special
> > case with a flag, provides a ___ratelimit() trylock-failure fastpath to
> > (almost) eliminate false-positive misses, simplifies the code, and adds
> > a simple test.
> >
> > The key point of this series is the reduction of false-positive misses.
> > More could be done to avoid open-coded access to the ->interval and
> > ->burst fields, and to tighten up checking of user input for these fields,
> > but those are jobs for later patches.
>
> JFYI, the whole series looks good to me.

I double-checked, and after I apply these two Reviewed-by's, each patch
in the series will have either your Signed-off-by or your Reviewed-by,
so thank you for your reviews and feedback!

						Thanx, Paul
Hello!

This v5 series replaces open-coded uses of the ratelimit_state structure
with formal APIs, counts all rate-limit misses, replaces jiffies=0 special
case with a flag, provides a ___ratelimit() trylock-failure fastpath to
(almost) eliminate false-positive misses, simplifies the code, and adds
a simple "smoke" test along with a simple stress test.

The key point of this series is the reduction of false-positive misses.
More could be done to avoid open-coded access to the ->interval and
->burst fields, and to tighten up checking of user input for these fields,
but those are jobs for later patches.

The individual patches are as follows:

1.  Create functions to handle ratelimit_state internals.
2.  Avoid open-coded use of ratelimit_state structure's ->missed field.
3.  Avoid open-coded use of ratelimit_state structure's ->missed field.
4.  Avoid open-coded use of ratelimit_state structure's internals.
5.  Convert the ->missed field to atomic_t.
6.  Count misses due to lock contention.
7.  Avoid jiffies=0 special case.
8.  Reduce ___ratelimit() false-positive rate limiting, courtesy of
    Petr Mladek.
9.  Allow zero ->burst to disable ratelimiting.
10. Force re-initialization when rate-limiting re-enabled.
11. Don't flush misses counter if RATELIMIT_MSG_ON_RELEASE.
12. Avoid atomic decrement if already rate-limited.
13. Avoid atomic decrement under lock if already rate-limited.
14. Warn if ->interval or ->burst are negative, courtesy of Petr Mladek.
15. Simplify common-case exit path.
16. Use nolock_ret label to save a couple of lines of code.
17. Use nolock_ret label to collapse lock-failure code.
18. Use nolock_ret restructuring to collapse common case code.
19. Drop redundant accesses to burst.
20. Add trivial kunit test for ratelimit.
21. Add stress test for ratelimit.

Changes since v4:

o   Add a simple stress test.
o   Move the tests to the end of the series for bisectability.
o   Add Reviewed-by tags.

Changes since v3:

o   Correctly handle zero-initialized ratelimit_state structures,
    being careful to avoid acquiring the uninitialized ->lock.
o   Remove redundant checks of the "burst" local variable.
o   Add Reviewed-by tags.

Changes since v2:

o   Apply feedback from Bert Karwatzki, Srikanth Aithal, and Mark Brown,
    fixing a hang that happened on some systems.
o   Applied Reviewed-by tags and added links.
o   Added a prototype patch from Petr Mladek that splats if either
    interval or burst are negative.
o   Added several commits that simplify the code.

Changes since v1 (RFC):

o   Switch from lockless fastpath to carrying out needed updates upon
    trylock failure, per Petr Mladek feedback.  This greatly simplifies
    the code and is a much smaller change from the current code.  There
    is a small performance penalty compared to the lockless fastpath,
    but not enough to matter.
o   Never unconditionally acquire the lock, again per Petr Mladek
    feedback.
o   Better define effects of non-positive burst values (always
    ratelimit) and non-positive interval values (never ratelimit when
    the burst value is positive).
o   The changes from Petr's original are supplied as five incremental
    patches, but could easily be folded into Petr's original if desired.
    (Left to my lazy self, they stay as-is.)
						Thanx, Paul

------------------------------------------------------------------------

 b/drivers/char/random.c              |    9 +
 b/drivers/gpu/drm/amd/pm/amdgpu_pm.c |   11 --
 b/drivers/gpu/drm/i915/i915_perf.c   |    8 -
 b/include/linux/ratelimit.h          |   40 +++++++-
 b/include/linux/ratelimit_types.h    |    2 
 b/lib/Kconfig.debug                  |   11 ++
 b/lib/ratelimit.c                    |    8 -
 b/lib/tests/Makefile                 |    1 
 b/lib/tests/test_ratelimit.c         |   79 ++++++++++++++++
 include/linux/ratelimit.h            |   13 +-
 include/linux/ratelimit_types.h      |    3 
 lib/ratelimit.c                      |  165 ++++++++++++++++++++---------------
 lib/tests/test_ratelimit.c           |   69 ++++++++++++++
 13 files changed, 313 insertions(+), 106 deletions(-)
Hello!

This v6 series adds a simple "smoke" test along with a simple stress test:

1.  Add trivial kunit test for ratelimit.
2.  Make the ratelimit test more reliable, courtesy of Petr Mladek.
3.  Add stress test for ratelimit.

						Thanx, Paul

Changes since v5:

o   Drop patches that have since been accepted into mainline.
o   Add Petr Mladek's patch improving the reliability of the simple test.

Changes since v4:

o   Add a simple stress test.
o   Move the tests to the end of the series for bisectability.
o   Add Reviewed-by tags.

Changes since v3:

o   Correctly handle zero-initialized ratelimit_state structures,
    being careful to avoid acquiring the uninitialized ->lock.
o   Remove redundant checks of the "burst" local variable.
o   Add Reviewed-by tags.

Changes since v2:

o   Apply feedback from Bert Karwatzki, Srikanth Aithal, and Mark Brown,
    fixing a hang that happened on some systems.
o   Applied Reviewed-by tags and added links.
o   Added a prototype patch from Petr Mladek that splats if either
    interval or burst are negative.
o   Added several commits that simplify the code.

Changes since v1 (RFC):

o   Switch from lockless fastpath to carrying out needed updates upon
    trylock failure, per Petr Mladek feedback.  This greatly simplifies
    the code and is a much smaller change from the current code.  There
    is a small performance penalty compared to the lockless fastpath,
    but not enough to matter.
o   Never unconditionally acquire the lock, again per Petr Mladek
    feedback.
o   Better define effects of non-positive burst values (always
    ratelimit) and non-positive interval values (never ratelimit when
    the burst value is positive).
o   The changes from Petr's original are supplied as five incremental
    patches, but could easily be folded into Petr's original if desired.
    (Left to my lazy self, they stay as-is.)

------------------------------------------------------------------------

 b/lib/Kconfig.debug          |   11 +++++
 b/lib/tests/Makefile         |    1 
 b/lib/tests/test_ratelimit.c |   79 +++++++++++++++++++++++++++++++++++++++++++
 lib/tests/test_ratelimit.c   |   77 ++++++++++++++++++++++++++++++++++++++---
 4 files changed, 162 insertions(+), 6 deletions(-)
Add a simple single-threaded smoke test for lib/ratelimit.c
To run on x86:
make ARCH=x86_64 mrproper
./tools/testing/kunit/kunit.py run --arch x86_64 --kconfig_add CONFIG_RATELIMIT_KUNIT_TEST=y --kconfig_add CONFIG_SMP=y lib_ratelimit
This will fail on old ___ratelimit(), and subsequent patches provide
the fixes that are required.
[ paulmck: Apply timeout and kunit feedback from Petr Mladek. ]
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Jon Pan-Doh <pandoh@google.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Karolina Stolarek <karolina.stolarek@oracle.com>
---
lib/Kconfig.debug | 11 ++++++
lib/tests/Makefile | 1 +
lib/tests/test_ratelimit.c | 79 ++++++++++++++++++++++++++++++++++++++
3 files changed, 91 insertions(+)
create mode 100644 lib/tests/test_ratelimit.c
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ebe33181b6e6e..d69d27f808340 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -3225,6 +3225,17 @@ config TEST_OBJPOOL
If unsure, say N.
+config RATELIMIT_KUNIT_TEST
+ tristate "KUnit Test for correctness and stress of ratelimit" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the "test_ratelimit" module that should be used
+ for correctness verification and concurrent testing of rate
+ limiting.
+
+ If unsure, say N.
+
config INT_POW_KUNIT_TEST
tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 56d6450144828..3edc30a515840 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -46,5 +46,6 @@ obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
+obj-$(CONFIG_RATELIMIT_KUNIT_TEST) += test_ratelimit.o
obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/
diff --git a/lib/tests/test_ratelimit.c b/lib/tests/test_ratelimit.c
new file mode 100644
index 0000000000000..0374107f5ea89
--- /dev/null
+++ b/lib/tests/test_ratelimit.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+
+#include <linux/ratelimit.h>
+#include <linux/module.h>
+
+/* a simple boot-time regression test */
+
+#define TESTRL_INTERVAL (5 * HZ)
+static DEFINE_RATELIMIT_STATE(testrl, TESTRL_INTERVAL, 3);
+
+#define test_ratelimited(test, expected) \
+ KUNIT_ASSERT_EQ(test, ___ratelimit(&testrl, "test_ratelimit_smoke"), (expected))
+
+static void test_ratelimit_smoke(struct kunit *test)
+{
+ // Check settings.
+ KUNIT_ASSERT_GE(test, TESTRL_INTERVAL, 100);
+
+ // Test normal operation.
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, false);
+
+ schedule_timeout_idle(TESTRL_INTERVAL - 40);
+ test_ratelimited(test, false);
+
+ schedule_timeout_idle(50);
+ test_ratelimited(test, true);
+
+ schedule_timeout_idle(2 * TESTRL_INTERVAL);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+
+ schedule_timeout_idle(TESTRL_INTERVAL - 40);
+ test_ratelimited(test, true);
+ schedule_timeout_idle(50);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, false);
+
+ // Test disabling.
+ testrl.burst = 0;
+ test_ratelimited(test, false);
+ testrl.burst = 2;
+ testrl.interval = 0;
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+
+ // Testing re-enabling.
+ testrl.interval = TESTRL_INTERVAL;
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, false);
+ test_ratelimited(test, false);
+}
+
+static struct kunit_case sort_test_cases[] = {
+ KUNIT_CASE_SLOW(test_ratelimit_smoke),
+ {}
+};
+
+static struct kunit_suite ratelimit_test_suite = {
+ .name = "lib_ratelimit",
+ .test_cases = sort_test_cases,
+};
+
+kunit_test_suites(&ratelimit_test_suite);
+
+MODULE_DESCRIPTION("___ratelimit() KUnit test suite");
+MODULE_LICENSE("GPL");
--
2.40.1
On Wed, 9 Jul 2025 11:03:33 -0700 "Paul E. McKenney" <paulmck@kernel.org> wrote:

> Add a simple single-threaded smoke test for lib/ratelimit.c
>
> To run on x86:
>
> make ARCH=x86_64 mrproper
> ./tools/testing/kunit/kunit.py run --arch x86_64 --kconfig_add CONFIG_RATELIMIT_KUNIT_TEST=y --kconfig_add CONFIG_SMP=y lib_ratelimit
>
> This will fail on old ___ratelimit(), and subsequent patches provide
> the fixes that are required.
>
> [ paulmck: Apply timeout and kunit feedback from Petr Mladek. ]

The above line makes me suspect that this was paulmck tweaking someone
else's patch.  Is the authorship correct on this one?
On Wed, 9 Jul 2025 15:41:52 -0700
Andrew Morton <akpm@linux-foundation.org> wrote:

> On Wed, 9 Jul 2025 11:03:33 -0700 "Paul E. McKenney" <paulmck@kernel.org> wrote:
>
> > Add a simple single-threaded smoke test for lib/ratelimit.c
> >
> > To run on x86:
> >
> > make ARCH=x86_64 mrproper
> > ./tools/testing/kunit/kunit.py run --arch x86_64 --kconfig_add CONFIG_RATELIMIT_KUNIT_TEST=y --kconfig_add CONFIG_SMP=y lib_ratelimit
> >
> > This will fail on old ___ratelimit(), and subsequent patches provide
> > the fixes that are required.
> >
> > [ paulmck: Apply timeout and kunit feedback from Petr Mladek. ]
>
> The above line makes me suspect that this was paulmck tweaking someone
> else's patch.  Is the authorship correct on this one?

Looks to me that Paul just took some advice from Petr and was just giving
credit.  Perhaps he could lose the "paulmck:" part?

Perhaps:

  Suggested-by: Petr Mladek <pmladek@suse.com>  # for timeout and kunit feedback

?

-- Steve
On Wed, Jul 09, 2025 at 06:46:29PM -0400, Steven Rostedt wrote:
> On Wed, 9 Jul 2025 15:41:52 -0700
> Andrew Morton <akpm@linux-foundation.org> wrote:
>
> > On Wed, 9 Jul 2025 11:03:33 -0700 "Paul E. McKenney" <paulmck@kernel.org> wrote:
> >
> > > Add a simple single-threaded smoke test for lib/ratelimit.c
> > >
> > > To run on x86:
> > >
> > > make ARCH=x86_64 mrproper
> > > ./tools/testing/kunit/kunit.py run --arch x86_64 --kconfig_add CONFIG_RATELIMIT_KUNIT_TEST=y --kconfig_add CONFIG_SMP=y lib_ratelimit
> > >
> > > This will fail on old ___ratelimit(), and subsequent patches provide
> > > the fixes that are required.
> > >
> > > [ paulmck: Apply timeout and kunit feedback from Petr Mladek. ]
> >
> > The above line makes me suspect that this was paulmck tweaking someone
> > else's patch.  Is the authorship correct on this one?
>
> Looks to me that Paul just took some advice from Petr and was just giving
> credit.  Perhaps he could lose the "paulmck:" part?

You got it!

> Perhaps:
>
>   Suggested-by: Petr Mladek <pmladek@suse.com>  # for timeout and kunit feedback

That would work for me.

						Thanx, Paul
From: Petr Mladek <pmladek@suse.com>
The selftest fails most of the times when running in qemu with
a kernel configured with CONFIG_HZ = 250:
> test_ratelimit_smoke: 1 callbacks suppressed
> # test_ratelimit_smoke: ASSERTION FAILED at lib/tests/test_ratelimit.c:28
> Expected ___ratelimit(&testrl, "test_ratelimit_smoke") == (false), but
> ___ratelimit(&testrl, "test_ratelimit_smoke") == 1 (0x1)
> (false) == 0 (0x0)
Try to make the test slightly more reliable by calling the problematic
ratelimit in the middle of the interval.
Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
lib/tests/test_ratelimit.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/lib/tests/test_ratelimit.c b/lib/tests/test_ratelimit.c
index 0374107f5ea89..5d6ec88546005 100644
--- a/lib/tests/test_ratelimit.c
+++ b/lib/tests/test_ratelimit.c
@@ -24,19 +24,19 @@ static void test_ratelimit_smoke(struct kunit *test)
test_ratelimited(test, true);
test_ratelimited(test, false);
- schedule_timeout_idle(TESTRL_INTERVAL - 40);
+ schedule_timeout_idle(TESTRL_INTERVAL / 2);
test_ratelimited(test, false);
- schedule_timeout_idle(50);
+ schedule_timeout_idle(TESTRL_INTERVAL * 3 / 4);
test_ratelimited(test, true);
schedule_timeout_idle(2 * TESTRL_INTERVAL);
test_ratelimited(test, true);
test_ratelimited(test, true);
- schedule_timeout_idle(TESTRL_INTERVAL - 40);
+ schedule_timeout_idle(TESTRL_INTERVAL / 2 );
test_ratelimited(test, true);
- schedule_timeout_idle(50);
+ schedule_timeout_idle(TESTRL_INTERVAL * 3 / 4);
test_ratelimited(test, true);
test_ratelimited(test, true);
test_ratelimited(test, true);
--
2.40.1
On Wed, 9 Jul 2025 11:03:34 -0700 "Paul E. McKenney" <paulmck@kernel.org> wrote:

> The selftest fails most of the times when running in qemu with
> a kernel configured with CONFIG_HZ = 250:
>
> > test_ratelimit_smoke: 1 callbacks suppressed
> > # test_ratelimit_smoke: ASSERTION FAILED at lib/tests/test_ratelimit.c:28
> > Expected ___ratelimit(&testrl, "test_ratelimit_smoke") == (false), but
> > ___ratelimit(&testrl, "test_ratelimit_smoke") == 1 (0x1)
> > (false) == 0 (0x0)
>
> Try to make the test slightly more reliable by calling the problematic
> ratelimit in the middle of the interval.
>
> Signed-off-by: Petr Mladek <pmladek@suse.com>
> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> ---
>  lib/tests/test_ratelimit.c | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)

Patch 1 adds test_ratelimit.c and patch 2 fixes it.

Unconventional (and undesirable IMO).  Would the world end if I folded
2 into 1?
On Wed, Jul 09, 2025 at 03:44:54PM -0700, Andrew Morton wrote:
> On Wed, 9 Jul 2025 11:03:34 -0700 "Paul E. McKenney" <paulmck@kernel.org> wrote:
>
> > The selftest fails most of the times when running in qemu with
> > a kernel configured with CONFIG_HZ = 250:
> >
> > > test_ratelimit_smoke: 1 callbacks suppressed
> > > # test_ratelimit_smoke: ASSERTION FAILED at lib/tests/test_ratelimit.c:28
> > > Expected ___ratelimit(&testrl, "test_ratelimit_smoke") == (false), but
> > > ___ratelimit(&testrl, "test_ratelimit_smoke") == 1 (0x1)
> > > (false) == 0 (0x0)
> >
> > Try to make the test slightly more reliable by calling the problematic
> > ratelimit in the middle of the interval.
> >
> > Signed-off-by: Petr Mladek <pmladek@suse.com>
> > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > ---
> >  lib/tests/test_ratelimit.c | 8 ++++----
> >  1 file changed, 4 insertions(+), 4 deletions(-)
>
> Patch 1 adds test_ratelimit.c and patch 2 fixes it.
>
> Unconventional (and undesirable IMO).  Would the world end if I folded
> 2 into 1?

Folding them together works for me, as long as Petr is properly credited.

						Thanx, Paul
Add a simple stress test for lib/ratelimit.c
To run on x86:
./tools/testing/kunit/kunit.py run --arch x86_64 --kconfig_add CONFIG_RATELIMIT_KUNIT_TEST=y --kconfig_add CONFIG_SMP=y --qemu_args "-smp 4" lib_ratelimit
On a 16-CPU system, the "4" in "-smp 4" can be varied between 1 and 8.
Larger numbers have higher probabilities of introducing delays that
break the smoke test. In the extreme case, increasing the number to
larger than the number of CPUs in the underlying system is an excellent
way to get a test failure.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Jon Pan-Doh <pandoh@google.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Karolina Stolarek <karolina.stolarek@oracle.com>
---
lib/tests/test_ratelimit.c | 69 ++++++++++++++++++++++++++++++++++++--
1 file changed, 67 insertions(+), 2 deletions(-)
diff --git a/lib/tests/test_ratelimit.c b/lib/tests/test_ratelimit.c
index 5d6ec88546005..bfaeca49304a5 100644
--- a/lib/tests/test_ratelimit.c
+++ b/lib/tests/test_ratelimit.c
@@ -4,6 +4,8 @@
#include <linux/ratelimit.h>
#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/cpumask.h>
/* a simple boot-time regression test */
@@ -63,14 +65,77 @@ static void test_ratelimit_smoke(struct kunit *test)
test_ratelimited(test, false);
}
-static struct kunit_case sort_test_cases[] = {
+static struct ratelimit_state stressrl = RATELIMIT_STATE_INIT_FLAGS("stressrl", HZ / 10, 3,
+ RATELIMIT_MSG_ON_RELEASE);
+
+static int doneflag;
+static const int stress_duration = 2 * HZ;
+
+struct stress_kthread {
+ unsigned long nattempts;
+ unsigned long nunlimited;
+ unsigned long nlimited;
+ unsigned long nmissed;
+ struct task_struct *tp;
+};
+
+static int test_ratelimit_stress_child(void *arg)
+{
+ struct stress_kthread *sktp = arg;
+
+ set_user_nice(current, MAX_NICE);
+ WARN_ON_ONCE(!sktp->tp);
+
+ while (!READ_ONCE(doneflag)) {
+ sktp->nattempts++;
+ if (___ratelimit(&stressrl, __func__))
+ sktp->nunlimited++;
+ else
+ sktp->nlimited++;
+ cond_resched();
+ }
+
+ sktp->nmissed = ratelimit_state_reset_miss(&stressrl);
+ return 0;
+}
+
+static void test_ratelimit_stress(struct kunit *test)
+{
+ int i;
+ const int n_stress_kthread = cpumask_weight(cpu_online_mask);
+ struct stress_kthread skt = { 0 };
+ struct stress_kthread *sktp = kcalloc(n_stress_kthread, sizeof(*sktp), GFP_KERNEL);
+
+ KUNIT_EXPECT_NOT_NULL_MSG(test, sktp, "Memory allocation failure");
+ for (i = 0; i < n_stress_kthread; i++) {
+ sktp[i].tp = kthread_run(test_ratelimit_stress_child, &sktp[i], "%s/%i",
+ "test_ratelimit_stress_child", i);
+ KUNIT_EXPECT_NOT_NULL_MSG(test, sktp, "kthread creation failure");
+ pr_alert("Spawned test_ratelimit_stress_child %d\n", i);
+ }
+ schedule_timeout_idle(stress_duration);
+ WRITE_ONCE(doneflag, 1);
+ for (i = 0; i < n_stress_kthread; i++) {
+ kthread_stop(sktp[i].tp);
+ skt.nattempts += sktp[i].nattempts;
+ skt.nunlimited += sktp[i].nunlimited;
+ skt.nlimited += sktp[i].nlimited;
+ skt.nmissed += sktp[i].nmissed;
+ }
+ KUNIT_ASSERT_EQ_MSG(test, skt.nunlimited + skt.nlimited, skt.nattempts,
+ "Outcomes not equal to attempts");
+ KUNIT_ASSERT_EQ_MSG(test, skt.nlimited, skt.nmissed, "Misses not equal to limits");
+}
+
+static struct kunit_case ratelimit_test_cases[] = {
KUNIT_CASE_SLOW(test_ratelimit_smoke),
+ KUNIT_CASE_SLOW(test_ratelimit_stress),
{}
};
static struct kunit_suite ratelimit_test_suite = {
.name = "lib_ratelimit",
- .test_cases = sort_test_cases,
+ .test_cases = ratelimit_test_cases,
};
kunit_test_suites(&ratelimit_test_suite);
--
2.40.1
A number of ratelimit use cases do open-coded access to the
ratelimit_state structure's ->missed field. This works, but is a bit
messy and makes it more annoying to make changes to this field.
Therefore, provide a ratelimit_state_inc_miss() function that increments
the ->missed field, a ratelimit_state_get_miss() function that reads
out the ->missed field, and a ratelimit_state_reset_miss() function
that reads out that field, but that also resets its value to zero.
These functions will replace client-code open-coded uses of ->missed.
In addition, a new ratelimit_state_reset_interval() function encapsulates
what was previously open-coded lock acquisition and direct field updates.
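For illustration, here is a minimal before/after sketch of a client
conversion (the "after" form mirrors the ratelimit_state_exit() hunk
below; the pr_warn() text is just an example, not taken from any
particular caller):

	/* Before: open-coded access to ->missed. */
	if (rs->missed) {
		pr_warn("%d messages suppressed\n", rs->missed);
		rs->missed = 0;
	}

	/* After: read-and-clear via the new helper. */
	int m = ratelimit_state_reset_miss(rs);

	if (m)
		pr_warn("%d messages suppressed\n", m);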
[ paulmck: Apply kernel test robot feedback. ]
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
include/linux/ratelimit.h | 40 ++++++++++++++++++++++++++++++++++-----
lib/ratelimit.c | 8 ++++----
2 files changed, 39 insertions(+), 9 deletions(-)
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index b17e0cd0a30cf..8400c5356c187 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -22,16 +22,46 @@ static inline void ratelimit_default_init(struct ratelimit_state *rs)
DEFAULT_RATELIMIT_BURST);
}
+static inline void ratelimit_state_inc_miss(struct ratelimit_state *rs)
+{
+ rs->missed++;
+}
+
+static inline int ratelimit_state_get_miss(struct ratelimit_state *rs)
+{
+ return rs->missed;
+}
+
+static inline int ratelimit_state_reset_miss(struct ratelimit_state *rs)
+{
+ int ret = rs->missed;
+
+ rs->missed = 0;
+ return ret;
+}
+
+static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, int interval_init)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rs->lock, flags);
+ rs->interval = interval_init;
+ rs->begin = 0;
+ rs->printed = 0;
+ ratelimit_state_reset_miss(rs);
+ raw_spin_unlock_irqrestore(&rs->lock, flags);
+}
+
static inline void ratelimit_state_exit(struct ratelimit_state *rs)
{
+ int m;
+
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE))
return;
- if (rs->missed) {
- pr_warn("%s: %d output lines suppressed due to ratelimiting\n",
- current->comm, rs->missed);
- rs->missed = 0;
- }
+ m = ratelimit_state_reset_miss(rs);
+ if (m)
+ pr_warn("%s: %d output lines suppressed due to ratelimiting\n", current->comm, m);
}
static inline void
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index ce945c17980b9..85e22f00180c5 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -51,12 +51,12 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
rs->begin = jiffies;
if (time_is_before_jiffies(rs->begin + interval)) {
- if (rs->missed) {
+ int m = ratelimit_state_reset_miss(rs);
+
+ if (m) {
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
printk_deferred(KERN_WARNING
- "%s: %d callbacks suppressed\n",
- func, rs->missed);
- rs->missed = 0;
+ "%s: %d callbacks suppressed\n", func, m);
}
}
rs->begin = jiffies;
--
2.40.1
The _credit_init_bits() function directly accesses the ratelimit_state
structure's ->missed field, which works, but which also makes it
more difficult to change this field. Therefore, make use of the
ratelimit_state_get_miss() and ratelimit_state_inc_miss() functions
instead of directly accessing the ->missed field.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
"Jason A. Donenfeld" <Jason@zx2c4.com>
---
drivers/char/random.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 38f2fab29c569..416dac0ab565d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -726,6 +726,7 @@ static void __cold _credit_init_bits(size_t bits)
static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
+ int m;
if (!bits)
return;
@@ -748,9 +749,9 @@ static void __cold _credit_init_bits(size_t bits)
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
- if (urandom_warning.missed)
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
+ m = ratelimit_state_get_miss(&urandom_warning);
+ if (m)
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
spin_lock_irqsave(&base_crng.lock, flags);
/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
@@ -1466,7 +1467,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
if (!crng_ready()) {
if (!ratelimit_disable && maxwarn <= 0)
- ++urandom_warning.missed;
+ ratelimit_state_inc_miss(&urandom_warning);
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
--maxwarn;
pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
--
2.40.1
The i915_oa_stream_destroy() function directly accesses the
ratelimit_state structure's ->missed field, which works, but which also
makes it more difficult to change this field. Therefore, make use of
the ratelimit_state_get_miss() function instead of directly accessing
the ->missed field.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: <intel-gfx@lists.freedesktop.org>
Cc: <dri-devel@lists.freedesktop.org>
---
drivers/gpu/drm/i915/i915_perf.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index de0b413600a15..1658f1246c6fa 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1666,6 +1666,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
struct i915_perf *perf = stream->perf;
struct intel_gt *gt = stream->engine->gt;
struct i915_perf_group *g = stream->engine->oa_group;
+ int m;
if (WARN_ON(stream != g->exclusive_stream))
return;
@@ -1690,10 +1691,9 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_configs(stream);
free_noa_wait(stream);
- if (perf->spurious_report_rs.missed) {
- gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n",
- perf->spurious_report_rs.missed);
- }
+ m = ratelimit_state_get_miss(&perf->spurious_report_rs);
+ if (m)
+ gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n", m);
}
static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
--
2.40.1
The amdgpu_set_thermal_throttling_logging() function directly accesses
the ratelimit_state structure's ->missed field, which works, but which
also makes it more difficult to change this field. Therefore, make use
of the ratelimit_state_reset_interval() function instead of directly
accessing the ->missed field.
Nevertheless, open-coded use of ->burst and ->interval is still permitted,
for example, for runtime sysfs adjustment of these fields.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202503180826.EiekA1MB-lkp@intel.com/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Kenneth Feng <kenneth.feng@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Xinhui Pan <Xinhui.Pan@amd.com>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: <amd-gfx@lists.freedesktop.org>
Cc: <dri-devel@lists.freedesktop.org>
---
drivers/gpu/drm/amd/pm/amdgpu_pm.c | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 922def51685b0..d533c79f7e215 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1606,7 +1606,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
long throttling_logging_interval;
- unsigned long flags;
int ret = 0;
ret = kstrtol(buf, 0, &throttling_logging_interval);
@@ -1617,18 +1616,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
return -EINVAL;
if (throttling_logging_interval > 0) {
- raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
/*
* Reset the ratelimit timer internals.
* This can effectively restart the timer.
*/
- adev->throttling_logging_rs.interval =
- (throttling_logging_interval - 1) * HZ;
- adev->throttling_logging_rs.begin = 0;
- adev->throttling_logging_rs.printed = 0;
- adev->throttling_logging_rs.missed = 0;
- raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
+ ratelimit_state_reset_interval(&adev->throttling_logging_rs,
+ (throttling_logging_interval - 1) * HZ);
atomic_set(&adev->throttling_logging_enabled, 1);
} else {
atomic_set(&adev->throttling_logging_enabled, 0);
--
2.40.1
The ratelimit_state structure's ->missed field is sometimes incremented
locklessly, and it would be good to avoid lost counts. This is also
needed to count the number of misses due to trylock failure. Therefore,
convert the ratelimit_state structure's ->missed field to atomic_t.
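To illustrate the lost-count concern, consider this hypothetical
interleaving (not taken from any observed failure) of two lockless
increments of a plain-int ->missed field:

	CPU 0				CPU 1
	load  rs->missed  (== 5)
					load  rs->missed  (== 5)
	store rs->missed = 6
					store rs->missed = 6

One of the two misses is lost.  With atomic_inc(&rs->missed), both
increments are preserved.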
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
include/linux/ratelimit.h | 9 +++------
include/linux/ratelimit_types.h | 2 +-
lib/ratelimit.c | 2 +-
3 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 8400c5356c187..c78b92b3e5cd8 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -24,20 +24,17 @@ static inline void ratelimit_default_init(struct ratelimit_state *rs)
static inline void ratelimit_state_inc_miss(struct ratelimit_state *rs)
{
- rs->missed++;
+ atomic_inc(&rs->missed);
}
static inline int ratelimit_state_get_miss(struct ratelimit_state *rs)
{
- return rs->missed;
+ return atomic_read(&rs->missed);
}
static inline int ratelimit_state_reset_miss(struct ratelimit_state *rs)
{
- int ret = rs->missed;
-
- rs->missed = 0;
- return ret;
+ return atomic_xchg_relaxed(&rs->missed, 0);
}
static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, int interval_init)
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index 765232ce0b5e9..d21fe82b67f67 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -18,7 +18,7 @@ struct ratelimit_state {
int interval;
int burst;
int printed;
- int missed;
+ atomic_t missed;
unsigned int flags;
unsigned long begin;
};
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 85e22f00180c5..18703f92d73e7 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -66,7 +66,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
rs->printed++;
ret = 1;
} else {
- rs->missed++;
+ ratelimit_state_inc_miss(rs);
ret = 0;
}
raw_spin_unlock_irqrestore(&rs->lock, flags);
--
2.40.1
The ___ratelimit() function simply returns zero ("do ratelimiting")
if the trylock fails, but does not adjust the ->missed field. This
means that the resulting dropped printk()s are dropped silently, which
could seriously confuse people trying to do console-log-based debugging.
Therefore, increment the ->missed field upon trylock failure.
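As a sketch of the situation (the caller shown is hypothetical, but the
pattern is the common one):

	if (__ratelimit(&rs))
		pr_warn("something went wrong\n");

Before this change, a trylock failure inside __ratelimit() dropped the
message without bumping ->missed, so no later "callbacks suppressed"
line would ever account for it.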
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 18703f92d73e7..19ad3cdbd1711 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -44,8 +44,10 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
* in addition to the one that will be printed by
* the entity that is holding the lock already:
*/
- if (!raw_spin_trylock_irqsave(&rs->lock, flags))
+ if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
+ ratelimit_state_inc_miss(rs);
return 0;
+ }
if (!rs->begin)
rs->begin = jiffies;
--
2.40.1
The ___ratelimit() function special-cases the jiffies-counter value of zero
as "uninitialized". This works well on 64-bit systems, where the jiffies
counter is not going to return to zero for more than half a billion years
on systems with HZ=1000, but similar 32-bit systems take less than 50 days
to wrap the jiffies counter. And although the consequences of wrapping the
jiffies counter seem to be limited to minor confusion on the duration of
the rate-limiting interval that happens to end at time zero, it is almost
no work to avoid this confusion.
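For reference, a quick back-of-the-envelope check (assuming HZ=1000):
a 32-bit jiffies counter wraps after 2^32 / 1000 seconds, roughly 49.7
days, while a 64-bit counter wraps after 2^64 / 1000 seconds, roughly
584 million years.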
Therefore, introduce a RATELIMIT_INITIALIZED bit to the ratelimit_state
structure's ->flags field so that a ->begin value of zero is no longer
special.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
include/linux/ratelimit.h | 2 +-
include/linux/ratelimit_types.h | 1 +
lib/ratelimit.c | 4 +++-
3 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index c78b92b3e5cd8..adfec24061d16 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -43,7 +43,7 @@ static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, in
raw_spin_lock_irqsave(&rs->lock, flags);
rs->interval = interval_init;
- rs->begin = 0;
+ rs->flags &= ~RATELIMIT_INITIALIZED;
rs->printed = 0;
ratelimit_state_reset_miss(rs);
raw_spin_unlock_irqrestore(&rs->lock, flags);
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index d21fe82b67f67..ef6711b6b229f 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -11,6 +11,7 @@
/* issue num suppressed message on exit */
#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+#define RATELIMIT_INITIALIZED BIT(1)
struct ratelimit_state {
raw_spinlock_t lock; /* protect the state */
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 19ad3cdbd1711..bd6e3b429e333 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -49,8 +49,10 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
return 0;
}
- if (!rs->begin)
+ if (!(rs->flags & RATELIMIT_INITIALIZED)) {
rs->begin = jiffies;
+ rs->flags |= RATELIMIT_INITIALIZED;
+ }
if (time_is_before_jiffies(rs->begin + interval)) {
int m = ratelimit_state_reset_miss(rs);
--
2.40.1
From: Petr Mladek <pmladek@suse.com>
Retain the locked design, but check rate-limiting even when the lock
could not be acquired.
Link: https://lore.kernel.org/all/Z_VRo63o2UsVoxLG@pathway.suse.cz/
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
include/linux/ratelimit.h | 2 +-
include/linux/ratelimit_types.h | 2 +-
lib/ratelimit.c | 51 ++++++++++++++++++++++++---------
3 files changed, 40 insertions(+), 15 deletions(-)
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index adfec24061d16..7aaad158ee373 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -44,7 +44,7 @@ static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, in
raw_spin_lock_irqsave(&rs->lock, flags);
rs->interval = interval_init;
rs->flags &= ~RATELIMIT_INITIALIZED;
- rs->printed = 0;
+ atomic_set(&rs->rs_n_left, rs->burst);
ratelimit_state_reset_miss(rs);
raw_spin_unlock_irqrestore(&rs->lock, flags);
}
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index ef6711b6b229f..b19c4354540ab 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -18,7 +18,7 @@ struct ratelimit_state {
int interval;
int burst;
- int printed;
+ atomic_t rs_n_left;
atomic_t missed;
unsigned int flags;
unsigned long begin;
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index bd6e3b429e333..90c9fe57eb422 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -39,12 +39,22 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
return 1;
/*
- * If we contend on this state's lock then almost
- * by definition we are too busy to print a message,
- * in addition to the one that will be printed by
- * the entity that is holding the lock already:
+ * If we contend on this state's lock then just check if
+ * the current burst is used or not. It might cause
+ * false positive when we are past the interval and
+ * the current lock owner is just about to reset it.
*/
if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
+ unsigned int rs_flags = READ_ONCE(rs->flags);
+
+ if (rs_flags & RATELIMIT_INITIALIZED && burst) {
+ int n_left;
+
+ n_left = atomic_dec_return(&rs->rs_n_left);
+ if (n_left >= 0)
+ return 1;
+ }
+
ratelimit_state_inc_miss(rs);
return 0;
}
@@ -52,27 +62,42 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
if (!(rs->flags & RATELIMIT_INITIALIZED)) {
rs->begin = jiffies;
rs->flags |= RATELIMIT_INITIALIZED;
+ atomic_set(&rs->rs_n_left, rs->burst);
}
if (time_is_before_jiffies(rs->begin + interval)) {
- int m = ratelimit_state_reset_miss(rs);
+ int m;
+
+ /*
+ * Reset rs_n_left ASAP to reduce false positives
+ * in parallel calls, see above.
+ */
+ atomic_set(&rs->rs_n_left, rs->burst);
+ rs->begin = jiffies;
+ m = ratelimit_state_reset_miss(rs);
if (m) {
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
printk_deferred(KERN_WARNING
"%s: %d callbacks suppressed\n", func, m);
}
}
- rs->begin = jiffies;
- rs->printed = 0;
}
- if (burst && burst > rs->printed) {
- rs->printed++;
- ret = 1;
- } else {
- ratelimit_state_inc_miss(rs);
- ret = 0;
+ if (burst) {
+ int n_left;
+
+ /* The burst might have been taken by a parallel call. */
+ n_left = atomic_dec_return(&rs->rs_n_left);
+ if (n_left >= 0) {
+ ret = 1;
+ goto unlock_ret;
+ }
}
+
+ ratelimit_state_inc_miss(rs);
+ ret = 0;
+
+unlock_ret:
raw_spin_unlock_irqrestore(&rs->lock, flags);
return ret;
--
2.40.1
If ->interval is zero, then rate-limiting will be disabled.
Alternatively, if interval is greater than zero and ->burst is zero,
then rate-limiting will be applied unconditionally. The point of this
distinction is to handle current users that pass zero-initialized
ratelimit_state structures to ___ratelimit(), and in such cases the
->lock field will be uninitialized. Acquiring ->lock in this case is
clearly not a strategy to win.
Therefore, make this classification be lockless.
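In other words, the intended lockless classification is as follows (for
non-negative values; this summary is mine, not text from the patch):

	interval == 0                -> never ratelimit (return 1)
	interval  > 0 && burst == 0  -> always ratelimit (return 0, count a miss)
	interval  > 0 && burst  > 0  -> normal ratelimiting (the locked path)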
Note that although negative ->interval and ->burst happen to be treated
as if they were zero, this is an accident of the current implementation.
The semantics of negative values for these fields is subject to change
without notice, especially given that Bert Karwatzki determined that
no current calls to ___ratelimit() ever have negative values for these
fields.
This commit replaces earlier buggy versions.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Reported-by: Bert Karwatzki <spasswolf@web.de>
Reported-by: "Aithal, Srikanth" <sraithal@amd.com>
Closes: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Reported-by: Mark Brown <broonie@kernel.org>
Closes: https://lore.kernel.org/all/257c3b91-e30f-48be-9788-d27a4445a416@sirena.org.uk/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: "Aithal, Srikanth" <sraithal@amd.com>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 90c9fe57eb422..7a7ba4835639f 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -35,8 +35,12 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
unsigned long flags;
int ret;
- if (!interval)
- return 1;
+ if (interval <= 0 || burst <= 0) {
+ ret = interval == 0 || burst > 0;
+ if (!ret)
+ ratelimit_state_inc_miss(rs);
+ return ret;
+ }
/*
* If we contend on this state's lock then just check if
--
2.40.1
Currently, if rate limiting is disabled, ___ratelimit() does an immediate
early return with no state changes. This can result in false-positive
drops when re-enabling rate limiting. Therefore, mark the ratelimit_state
structure "uninitialized" when rate limiting is disabled.
[ paulmck: Apply Petr Mladek feedback. ]
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 7a7ba4835639f..7d4f4e241213e 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -35,11 +35,24 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
unsigned long flags;
int ret;
+ /*
+ * Zero interval says never limit, otherwise, non-positive burst
+ * says always limit.
+ */
if (interval <= 0 || burst <= 0) {
ret = interval == 0 || burst > 0;
+ if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) || (!interval && !burst) ||
+ !raw_spin_trylock_irqsave(&rs->lock, flags)) {
+ if (!ret)
+ ratelimit_state_inc_miss(rs);
+ return ret;
+ }
+
+ /* Force re-initialization once re-enabled. */
+ rs->flags &= ~RATELIMIT_INITIALIZED;
if (!ret)
ratelimit_state_inc_miss(rs);
- return ret;
+ goto unlock_ret;
}
/*
--
2.40.1
Restore the previous semantics where the misses counter is unchanged if
the RATELIMIT_MSG_ON_RELEASE flag is set.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 7d4f4e241213e..4e520d029d28f 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -92,9 +92,9 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
atomic_set(&rs->rs_n_left, rs->burst);
rs->begin = jiffies;
- m = ratelimit_state_reset_miss(rs);
- if (m) {
- if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+ if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+ m = ratelimit_state_reset_miss(rs);
+ if (m) {
printk_deferred(KERN_WARNING
"%s: %d callbacks suppressed\n", func, m);
}
--
2.40.1
Currently, if the lock could not be acquired, the code unconditionally
does an atomic decrement on ->rs_n_left, even if that atomic operation
is guaranteed to return a limit-rate verdict. This incurs needless
overhead and also raises the spectre of counter wrap.
Therefore, do the atomic decrement only if there is some chance that
rates won't be limited.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 4e520d029d28f..a7aaebb7a7189 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -65,8 +65,10 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
unsigned int rs_flags = READ_ONCE(rs->flags);
if (rs_flags & RATELIMIT_INITIALIZED && burst) {
- int n_left;
+ int n_left = atomic_read(&rs->rs_n_left);
+ if (n_left <= 0)
+ return 0;
n_left = atomic_dec_return(&rs->rs_n_left);
if (n_left >= 0)
return 1;
--
2.40.1
Currently, if the lock is acquired, the code unconditionally does
an atomic decrement on ->rs_n_left, even if that atomic operation is
guaranteed to return a limit-rate verdict. A limit-rate verdict will
in fact be the common case when something is spewing into a rate limit.
This unconditional atomic operation incurs needless overhead and also
raises the spectre of counter wrap.
Therefore, do the atomic decrement only if there is some chance that
rates won't be limited.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index a7aaebb7a7189..ab8472edeb1d2 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -103,13 +103,16 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
}
}
if (burst) {
- int n_left;
+ int n_left = atomic_read(&rs->rs_n_left);
/* The burst might have been taken by a parallel call. */
- n_left = atomic_dec_return(&rs->rs_n_left);
- if (n_left >= 0) {
- ret = 1;
- goto unlock_ret;
+
+ if (n_left > 0) {
+ n_left = atomic_dec_return(&rs->rs_n_left);
+ if (n_left >= 0) {
+ ret = 1;
+ goto unlock_ret;
+ }
}
}
--
2.40.1
From: Petr Mladek <pmladek@suse.com>
Currently, ___ratelimit() treats a negative ->interval or ->burst as
if it was zero, but this is an accident of the current implementation.
Therefore, splat in this case, which might have the benefit of detecting
use of uninitialized ratelimit_state structures on the one hand or easing
addition of new features on the other.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index ab8472edeb1d2..6a5cb05413013 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -40,6 +40,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
* says always limit.
*/
if (interval <= 0 || burst <= 0) {
+ WARN_ONCE(interval < 0 || burst < 0, "Negative interval (%d) or burst (%d): Uninitialized ratelimit_state structure?\n", interval, burst);
ret = interval == 0 || burst > 0;
if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) || (!interval && !burst) ||
!raw_spin_trylock_irqsave(&rs->lock, flags)) {
--
2.40.1
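As an aside, the warn-once idiom used above can be sketched in user space
as follows. This is not the kernel's WARN_ONCE() implementation, just an
illustration of how a per-expansion static flag limits the splat to a
single occurrence, while check_params() mirrors the shape of the updated
parameter check; all names here are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define warn_once(cond, ...)                            \
do {                                                    \
        static bool warned;                             \
        if ((cond) && !warned) {                        \
                warned = true;                          \
                fprintf(stderr, __VA_ARGS__);           \
        }                                               \
} while (0)

/* Warn on negative values, then compute the "never limit" verdict in the
 * same way as the fastpath shown in the diff above. */
static int check_params(int interval, int burst)
{
        warn_once(interval < 0 || burst < 0,
                  "Negative interval (%d) or burst (%d): uninitialized state?\n",
                  interval, burst);
        return interval == 0 || burst > 0;
}

int main(void)
{
        printf("%d\n", check_params(-1, 3));    /* warns once, returns 1 */
        printf("%d\n", check_params(-1, 3));    /* silent the second time */
        return 0;
}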
By making "ret" always be initialized, and moving the final call to
ratelimit_state_inc_miss() out from under the lock, we save a goto and
a couple lines of code. This also saves a couple of lines of code from
the unconditional enable/disable slowpath.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 6a5cb05413013..7c6e864306db2 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -33,7 +33,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
int interval = READ_ONCE(rs->interval);
int burst = READ_ONCE(rs->burst);
unsigned long flags;
- int ret;
+ int ret = 0;
/*
* Zero interval says never limit, otherwise, non-positive burst
@@ -51,8 +51,6 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
/* Force re-initialization once re-enabled. */
rs->flags &= ~RATELIMIT_INITIALIZED;
- if (!ret)
- ratelimit_state_inc_miss(rs);
goto unlock_ret;
}
@@ -110,19 +108,17 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
if (n_left > 0) {
n_left = atomic_dec_return(&rs->rs_n_left);
- if (n_left >= 0) {
+ if (n_left >= 0)
ret = 1;
- goto unlock_ret;
- }
}
}
- ratelimit_state_inc_miss(rs);
- ret = 0;
-
unlock_ret:
raw_spin_unlock_irqrestore(&rs->lock, flags);
+ if (!ret)
+ ratelimit_state_inc_miss(rs);
+
return ret;
}
EXPORT_SYMBOL(___ratelimit);
--
2.40.1
Create a nolock_ret label in order to start consolidating the unlocked
return paths that conditionally invoke ratelimit_state_inc_miss().
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 7c6e864306db2..e7101a79c6973 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -43,11 +43,8 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
WARN_ONCE(interval < 0 || burst < 0, "Negative interval (%d) or burst (%d): Uninitialized ratelimit_state structure?\n", interval, burst);
ret = interval == 0 || burst > 0;
if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) || (!interval && !burst) ||
- !raw_spin_trylock_irqsave(&rs->lock, flags)) {
- if (!ret)
- ratelimit_state_inc_miss(rs);
- return ret;
- }
+ !raw_spin_trylock_irqsave(&rs->lock, flags))
+ goto nolock_ret;
/* Force re-initialization once re-enabled. */
rs->flags &= ~RATELIMIT_INITIALIZED;
@@ -116,6 +113,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
unlock_ret:
raw_spin_unlock_irqrestore(&rs->lock, flags);
+nolock_ret:
if (!ret)
ratelimit_state_inc_miss(rs);
--
2.40.1
Now that the nolock_ret label handles ->missed correctly based on the
value of ret, we can eliminate a local variable and collapse several "if"
statements on the lock-acquisition-failure code path. (A user-space
sketch of the resulting exit-path shape follows the patch.)
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 18 ++++--------------
1 file changed, 4 insertions(+), 14 deletions(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index e7101a79c6973..bcda7c61fc6ff 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -58,20 +58,10 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
* the current lock owner is just about to reset it.
*/
if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
- unsigned int rs_flags = READ_ONCE(rs->flags);
-
- if (rs_flags & RATELIMIT_INITIALIZED && burst) {
- int n_left = atomic_read(&rs->rs_n_left);
-
- if (n_left <= 0)
- return 0;
- n_left = atomic_dec_return(&rs->rs_n_left);
- if (n_left >= 0)
- return 1;
- }
-
- ratelimit_state_inc_miss(rs);
- return 0;
+ if (READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED && burst &&
+ atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
+ ret = 1;
+ goto nolock_ret;
}
if (!(rs->flags & RATELIMIT_INITIALIZED)) {
--
2.40.1
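As an aside, the exit-path shape that the preceding three patches converge
on can be sketched in user space as follows (compile with -pthread). The
pthread mutex, the fixed token count, and the function names are
illustrative only, and the lockless budget check on trylock failure is
omitted for brevity; the point is that "ret" defaults to 0, the locked
path has a single unlock, and both it and the trylock-failure path fall
into a shared nolock_ret label that does the miss accounting without
holding the lock.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int tokens = 3;                  /* stands in for the burst budget */
static atomic_long misses;

static int take_token(void)
{
        int ret = 0;                    /* default verdict: rate-limited */

        if (pthread_mutex_trylock(&lock))
                goto nolock_ret;        /* contended: account for the miss below */

        if (tokens > 0) {
                tokens--;
                ret = 1;
        }

        pthread_mutex_unlock(&lock);    /* single locked exit... */
nolock_ret:
        if (!ret)                       /* ...and lockless miss accounting */
                atomic_fetch_add(&misses, 1);
        return ret;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("call %d -> %d\n", i, take_token());
        printf("misses = %ld\n", atomic_load(&misses));
        return 0;
}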
Now that unlock_ret releases the lock and then falls through into
nolock_ret, which handles ->missed based on the value of ret, the
common-case lock-held code can be collapsed into a single "if" statement
with a single-statement "then" clause.
Yes, we could go further and just assign the "if" condition to ret,
but in the immortal words of MSDOS, "Are you sure?".
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 13 +++----------
1 file changed, 3 insertions(+), 10 deletions(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index bcda7c61fc6ff..dcc063af195eb 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -88,17 +88,10 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
}
}
}
- if (burst) {
- int n_left = atomic_read(&rs->rs_n_left);
- /* The burst might have been taken by a parallel call. */
-
- if (n_left > 0) {
- n_left = atomic_dec_return(&rs->rs_n_left);
- if (n_left >= 0)
- ret = 1;
- }
- }
+ /* Note that the burst might be taken by a parallel call. */
+ if (burst && atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
+ ret = 1;
unlock_ret:
raw_spin_unlock_irqrestore(&rs->lock, flags);
--
2.40.1
Now that the "burst <= 0" fastpath exists, burst is guaranteed to be
strictly positive in all later code. Therefore, drop the now-redundant
checks of this local variable.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
---
lib/ratelimit.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index dcc063af195eb..859c251b23ce2 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -58,7 +58,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
* the current lock owner is just about to reset it.
*/
if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
- if (READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED && burst &&
+ if (READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED &&
atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
ret = 1;
goto nolock_ret;
@@ -90,7 +90,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
}
/* Note that the burst might be taken by a parallel call. */
- if (burst && atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
+ if (atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
ret = 1;
unlock_ret:
--
2.40.1
Add a simple single-threaded smoke test for lib/ratelimit.c
To run on x86:
make ARCH=x86_64 mrproper
./tools/testing/kunit/kunit.py run --arch x86_64 --kconfig_add CONFIG_RATELIMIT_KUNIT_TEST=y --kconfig_add CONFIG_SMP=y lib_ratelimit
This will fail on old ___ratelimit(), and subsequent patches provide
the fixes that are required.
[ paulmck: Apply timeout and kunit feedback from Petr Mladek. ]
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Jon Pan-Doh <pandoh@google.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Karolina Stolarek <karolina.stolarek@oracle.com>
---
lib/Kconfig.debug | 11 ++++++
lib/tests/Makefile | 1 +
lib/tests/test_ratelimit.c | 79 ++++++++++++++++++++++++++++++++++++++
3 files changed, 91 insertions(+)
create mode 100644 lib/tests/test_ratelimit.c
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9fe4d8dfe5782..c239099218c2b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -3232,6 +3232,17 @@ config TEST_OBJPOOL
If unsure, say N.
+config RATELIMIT_KUNIT_TEST
+ tristate "KUnit Test for correctness and stress of ratelimit" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the "test_ratelimit" module that should be used
+ for correctness verification and concurrent testings of rate
+ limiting.
+
+ If unsure, say N.
+
config INT_POW_KUNIT_TEST
tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 5a4794c1826e7..1c7c2d20fe501 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -45,5 +45,6 @@ obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
+obj-$(CONFIG_RATELIMIT_KUNIT_TEST) += test_ratelimit.o
obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/
diff --git a/lib/tests/test_ratelimit.c b/lib/tests/test_ratelimit.c
new file mode 100644
index 0000000000000..0374107f5ea89
--- /dev/null
+++ b/lib/tests/test_ratelimit.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+
+#include <linux/ratelimit.h>
+#include <linux/module.h>
+
+/* a simple boot-time regression test */
+
+#define TESTRL_INTERVAL (5 * HZ)
+static DEFINE_RATELIMIT_STATE(testrl, TESTRL_INTERVAL, 3);
+
+#define test_ratelimited(test, expected) \
+ KUNIT_ASSERT_EQ(test, ___ratelimit(&testrl, "test_ratelimit_smoke"), (expected))
+
+static void test_ratelimit_smoke(struct kunit *test)
+{
+ // Check settings.
+ KUNIT_ASSERT_GE(test, TESTRL_INTERVAL, 100);
+
+ // Test normal operation.
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, false);
+
+ schedule_timeout_idle(TESTRL_INTERVAL - 40);
+ test_ratelimited(test, false);
+
+ schedule_timeout_idle(50);
+ test_ratelimited(test, true);
+
+ schedule_timeout_idle(2 * TESTRL_INTERVAL);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+
+ schedule_timeout_idle(TESTRL_INTERVAL - 40);
+ test_ratelimited(test, true);
+ schedule_timeout_idle(50);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, false);
+
+ // Test disabling.
+ testrl.burst = 0;
+ test_ratelimited(test, false);
+ testrl.burst = 2;
+ testrl.interval = 0;
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+
+ // Testing re-enabling.
+ testrl.interval = TESTRL_INTERVAL;
+ test_ratelimited(test, true);
+ test_ratelimited(test, true);
+ test_ratelimited(test, false);
+ test_ratelimited(test, false);
+}
+
+static struct kunit_case sort_test_cases[] = {
+ KUNIT_CASE_SLOW(test_ratelimit_smoke),
+ {}
+};
+
+static struct kunit_suite ratelimit_test_suite = {
+ .name = "lib_ratelimit",
+ .test_cases = sort_test_cases,
+};
+
+kunit_test_suites(&ratelimit_test_suite);
+
+MODULE_DESCRIPTION("___ratelimit() KUnit test suite");
+MODULE_LICENSE("GPL");
--
2.40.1
On Thu 2025-05-08 16:33:34, Paul E. McKenney wrote:
> Add a simple single-threaded smoke test for lib/ratelimit.c
>
> To run on x86:
>
> make ARCH=x86_64 mrproper
> ./tools/testing/kunit/kunit.py run --arch x86_64 --kconfig_add CONFIG_RATELIMIT_KUNIT_TEST=y --kconfig_add CONFIG_SMP=y lib_ratelimit
>
> This will fail on old ___ratelimit(), and subsequent patches provide
> the fixes that are required.
>
> --- /dev/null
> +++ b/lib/tests/test_ratelimit.c
> @@ -0,0 +1,79 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +
> +#include <kunit/test.h>
> +
> +#include <linux/ratelimit.h>
> +#include <linux/module.h>
> +
> +/* a simple boot-time regression test */
> +
> +#define TESTRL_INTERVAL (5 * HZ)
> +static DEFINE_RATELIMIT_STATE(testrl, TESTRL_INTERVAL, 3);
> +
> +#define test_ratelimited(test, expected) \
> + KUNIT_ASSERT_EQ(test, ___ratelimit(&testrl, "test_ratelimit_smoke"), (expected))
> +
> +static void test_ratelimit_smoke(struct kunit *test)
> +{
> + // Check settings.
> + KUNIT_ASSERT_GE(test, TESTRL_INTERVAL, 100);
> +
> + // Test normal operation.
> + test_ratelimited(test, true);
> + test_ratelimited(test, true);
> + test_ratelimited(test, true);
> + test_ratelimited(test, false);
> +
> + schedule_timeout_idle(TESTRL_INTERVAL - 40);
Heh, I have got a new laptop. The battery in the previous one was
about to explode. And the test started failing on the next line most
of the time.
The following change helped me:
From 005e00ca09b4bd5b4a5f3026f1835e0435ecfbd9 Mon Sep 17 00:00:00 2001
From: Petr Mladek <pmladek@suse.com>
Date: Mon, 12 May 2025 16:38:02 +0200
Subject: [PATCH] lib: Make the ratelimit test more reliable
The selftest fails most of the time when run in qemu with
a kernel configured with CONFIG_HZ = 250:
> test_ratelimit_smoke: 1 callbacks suppressed
> # test_ratelimit_smoke: ASSERTION FAILED at lib/tests/test_ratelimit.c:28
> Expected ___ratelimit(&testrl, "test_ratelimit_smoke") == (false), but
> ___ratelimit(&testrl, "test_ratelimit_smoke") == 1 (0x1)
> (false) == 0 (0x0)
Try to make the test slightly more reliable by calling the problematic
ratelimit in the middle of the interval.
Signed-off-by: Petr Mladek <pmladek@suse.com>
---
lib/tests/test_ratelimit.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/lib/tests/test_ratelimit.c b/lib/tests/test_ratelimit.c
index 0374107f5ea8..5d6ec8854600 100644
--- a/lib/tests/test_ratelimit.c
+++ b/lib/tests/test_ratelimit.c
@@ -24,19 +24,19 @@ static void test_ratelimit_smoke(struct kunit *test)
test_ratelimited(test, true);
test_ratelimited(test, false);
- schedule_timeout_idle(TESTRL_INTERVAL - 40);
+ schedule_timeout_idle(TESTRL_INTERVAL / 2);
test_ratelimited(test, false);
- schedule_timeout_idle(50);
+ schedule_timeout_idle(TESTRL_INTERVAL * 3 / 4);
test_ratelimited(test, true);
schedule_timeout_idle(2 * TESTRL_INTERVAL);
test_ratelimited(test, true);
test_ratelimited(test, true);
- schedule_timeout_idle(TESTRL_INTERVAL - 40);
+ schedule_timeout_idle(TESTRL_INTERVAL / 2 );
test_ratelimited(test, true);
- schedule_timeout_idle(50);
+ schedule_timeout_idle(TESTRL_INTERVAL * 3 / 4);
test_ratelimited(test, true);
test_ratelimited(test, true);
test_ratelimited(test, true);
--
2.49.0
Feel free to squash it into the original patch which added the test.
> + test_ratelimited(test, false);
> +
> + schedule_timeout_idle(50);
> + test_ratelimited(test, true);
> +
> + schedule_timeout_idle(2 * TESTRL_INTERVAL);
> + test_ratelimited(test, true);
> + test_ratelimited(test, true);
> +
Best Regards,
Petr
On Mon, May 12, 2025 at 05:22:02PM +0200, Petr Mladek wrote:
> On Thu 2025-05-08 16:33:34, Paul E. McKenney wrote:
> > Add a simple single-threaded smoke test for lib/ratelimit.c
> >
> > To run on x86:
> >
> > make ARCH=x86_64 mrproper
> > ./tools/testing/kunit/kunit.py run --arch x86_64 --kconfig_add CONFIG_RATELIMIT_KUNIT_TEST=y --kconfig_add CONFIG_SMP=y lib_ratelimit
> >
> > This will fail on old ___ratelimit(), and subsequent patches provide
> > the fixes that are required.
> >
> > --- /dev/null
> > +++ b/lib/tests/test_ratelimit.c
> > @@ -0,0 +1,79 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +
> > +#include <kunit/test.h>
> > +
> > +#include <linux/ratelimit.h>
> > +#include <linux/module.h>
> > +
> > +/* a simple boot-time regression test */
> > +
> > +#define TESTRL_INTERVAL (5 * HZ)
> > +static DEFINE_RATELIMIT_STATE(testrl, TESTRL_INTERVAL, 3);
> > +
> > +#define test_ratelimited(test, expected) \
> > + KUNIT_ASSERT_EQ(test, ___ratelimit(&testrl, "test_ratelimit_smoke"), (expected))
> > +
> > +static void test_ratelimit_smoke(struct kunit *test)
> > +{
> > + // Check settings.
> > + KUNIT_ASSERT_GE(test, TESTRL_INTERVAL, 100);
> > +
> > + // Test normal operation.
> > + test_ratelimited(test, true);
> > + test_ratelimited(test, true);
> > + test_ratelimited(test, true);
> > + test_ratelimited(test, false);
> > +
> > + schedule_timeout_idle(TESTRL_INTERVAL - 40);
>
> Heh, I have got a new laptop. The battery in the previous one was
> about to explode. And the test started failing on the next line most
> of the time.
>
> The following change helped me:
Thank you very much! I have queued this, and intend to keep it as its
own commit, following my original.
Thanx, Paul
> >From 005e00ca09b4bd5b4a5f3026f1835e0435ecfbd9 Mon Sep 17 00:00:00 2001
> From: Petr Mladek <pmladek@suse.com>
> Date: Mon, 12 May 2025 16:38:02 +0200
> Subject: [PATCH] lib: Make the ratelimit test more reliable
>
> The selftest fails most of the times when running in qemu with
> a kernel configured with CONFIG_HZ = 250:
>
> > test_ratelimit_smoke: 1 callbacks suppressed
> > # test_ratelimit_smoke: ASSERTION FAILED at lib/tests/test_ratelimit.c:28
> > Expected ___ratelimit(&testrl, "test_ratelimit_smoke") == (false), but
> > ___ratelimit(&testrl, "test_ratelimit_smoke") == 1 (0x1)
> > (false) == 0 (0x0)
>
> Try to make the test slightly more reliable by calling the problematic
> ratelimit in the middle of the interval.
>
> Signed-off-by: Petr Mladek <pmladek@suse.com>
> ---
> lib/tests/test_ratelimit.c | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/lib/tests/test_ratelimit.c b/lib/tests/test_ratelimit.c
> index 0374107f5ea8..5d6ec8854600 100644
> --- a/lib/tests/test_ratelimit.c
> +++ b/lib/tests/test_ratelimit.c
> @@ -24,19 +24,19 @@ static void test_ratelimit_smoke(struct kunit *test)
> test_ratelimited(test, true);
> test_ratelimited(test, false);
>
> - schedule_timeout_idle(TESTRL_INTERVAL - 40);
> + schedule_timeout_idle(TESTRL_INTERVAL / 2);
> test_ratelimited(test, false);
>
> - schedule_timeout_idle(50);
> + schedule_timeout_idle(TESTRL_INTERVAL * 3 / 4);
> test_ratelimited(test, true);
>
> schedule_timeout_idle(2 * TESTRL_INTERVAL);
> test_ratelimited(test, true);
> test_ratelimited(test, true);
>
> - schedule_timeout_idle(TESTRL_INTERVAL - 40);
> + schedule_timeout_idle(TESTRL_INTERVAL / 2 );
> test_ratelimited(test, true);
> - schedule_timeout_idle(50);
> + schedule_timeout_idle(TESTRL_INTERVAL * 3 / 4);
> test_ratelimited(test, true);
> test_ratelimited(test, true);
> test_ratelimited(test, true);
> --
> 2.49.0
>
> Feel free to squash it into the original patch which added the test.
>
> > + test_ratelimited(test, false);
> > +
> > + schedule_timeout_idle(50);
> > + test_ratelimited(test, true);
> > +
> > + schedule_timeout_idle(2 * TESTRL_INTERVAL);
> > + test_ratelimited(test, true);
> > + test_ratelimited(test, true);
> > +
>
> Best Regards,
> Petr
Add a simple stress test for lib/ratelimit.c
To run on x86:
./tools/testing/kunit/kunit.py run --arch x86_64 --kconfig_add CONFIG_RATELIMIT_KUNIT_TEST=y --kconfig_add CONFIG_SMP=y --qemu_args "-smp 4" lib_ratelimit
On a 16-CPU system, the "4" in "-smp 4" can be varied between 1 and 8.
Larger values are more likely to introduce delays that break the smoke
test, and in the extreme case, setting this value larger than the number
of CPUs on the underlying system is an excellent way to get a test
failure.
Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/
Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kuniyuki Iwashima <kuniyu@amazon.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: John Ogness <john.ogness@linutronix.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Jon Pan-Doh <pandoh@google.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Karolina Stolarek <karolina.stolarek@oracle.com>
---
lib/tests/test_ratelimit.c | 69 ++++++++++++++++++++++++++++++++++++--
1 file changed, 67 insertions(+), 2 deletions(-)
diff --git a/lib/tests/test_ratelimit.c b/lib/tests/test_ratelimit.c
index 0374107f5ea89..bce80d9dd21bf 100644
--- a/lib/tests/test_ratelimit.c
+++ b/lib/tests/test_ratelimit.c
@@ -4,6 +4,8 @@
#include <linux/ratelimit.h>
#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/cpumask.h>
/* a simple boot-time regression test */
@@ -63,14 +65,77 @@ static void test_ratelimit_smoke(struct kunit *test)
test_ratelimited(test, false);
}
-static struct kunit_case sort_test_cases[] = {
+static struct ratelimit_state stressrl = RATELIMIT_STATE_INIT_FLAGS("stressrl", HZ / 10, 3,
+ RATELIMIT_MSG_ON_RELEASE);
+
+static int doneflag;
+static const int stress_duration = 2 * HZ;
+
+struct stress_kthread {
+ unsigned long nattempts;
+ unsigned long nunlimited;
+ unsigned long nlimited;
+ unsigned long nmissed;
+ struct task_struct *tp;
+};
+
+static int test_ratelimit_stress_child(void *arg)
+{
+ struct stress_kthread *sktp = arg;
+
+ set_user_nice(current, MAX_NICE);
+ WARN_ON_ONCE(!sktp->tp);
+
+ while (!READ_ONCE(doneflag)) {
+ sktp->nattempts++;
+ if (___ratelimit(&stressrl, __func__))
+ sktp->nunlimited++;
+ else
+ sktp->nlimited++;
+ cond_resched();
+ }
+
+ sktp->nmissed = ratelimit_state_reset_miss(&stressrl);
+ return 0;
+}
+
+static void test_ratelimit_stress(struct kunit *test)
+{
+ int i;
+ const int n_stress_kthread = cpumask_weight(cpu_online_mask);
+ struct stress_kthread skt = { 0 };
+ struct stress_kthread *sktp = kcalloc(n_stress_kthread, sizeof(*sktp), GFP_KERNEL);
+
+ KUNIT_EXPECT_NOT_NULL_MSG(test, sktp, "Memory allocation failure");
+ for (i = 0; i < n_stress_kthread; i++) {
+ sktp[i].tp = kthread_run(test_ratelimit_stress_child, &sktp[i], "%s/%i",
+ "test_ratelimit_stress_child", i);
+ KUNIT_EXPECT_NOT_NULL_MSG(test, sktp[i].tp, "kthread creation failure");
+ pr_alert("Spawned test_ratelimit_stress_child %d\n", i);
+ }
+ schedule_timeout_idle(stress_duration);
+ WRITE_ONCE(doneflag, 1);
+ for (i = 0; i < n_stress_kthread; i++) {
+ kthread_stop(sktp[i].tp);
+ skt.nattempts += sktp[i].nattempts;
+ skt.nunlimited += sktp[i].nunlimited;
+ skt.nlimited += sktp[i].nlimited;
+ skt.nmissed += sktp[i].nmissed;
+ }
+ KUNIT_ASSERT_EQ_MSG(test, skt.nunlimited + skt.nlimited, skt.nattempts,
+ "Outcomes not equal to attempts");
+ KUNIT_ASSERT_EQ_MSG(test, skt.nlimited, skt.nmissed, "Misses not equal to limits");
+}
+
+static struct kunit_case ratelimit_test_cases[] = {
KUNIT_CASE_SLOW(test_ratelimit_smoke),
+ KUNIT_CASE_SLOW(test_ratelimit_stress),
{}
};
static struct kunit_suite ratelimit_test_suite = {
.name = "lib_ratelimit",
- .test_cases = sort_test_cases,
+ .test_cases = ratelimit_test_cases,
};
kunit_test_suites(&ratelimit_test_suite);
--
2.40.1