When the ww_mutex test was occasionally tripping over hard-to-find
issues, leaving qemu in a reboot loop was my best way to reproduce
problems. Those reboots, however, wasted time when all I wanted to
re-run was the test-ww_mutex logic.

So tweak test-ww_mutex so the tests can be re-triggered via a sysfs
file, allowing them to be run repeatedly without reloading the module
or rebooting.
To use, run as root:
echo 1 > /sys/kernel/test_ww_mutex/run_tests
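
For example, a rough (untested) sketch of soaking the tests from a
shell, grepping the kernel log for the selftest messages between runs:

  while true; do
          echo 1 > /sys/kernel/test_ww_mutex/run_tests
          dmesg | grep "ww mutex selftests" | tail -n 2
  done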
Thoughts or feedback would be appreciated!
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: "Paul E . McKenney" <paulmck@kernel.org>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: kernel-team@android.com
Signed-off-by: John Stultz <jstultz@google.com>
---
kernel/locking/test-ww_mutex.c | 70 ++++++++++++++++++++++++++++++----
1 file changed, 63 insertions(+), 7 deletions(-)
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 10a5736a21c2..cc0d2e59049a 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -636,19 +636,15 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
return 0;
}
-static int __init test_ww_mutex_init(void)
+static DEFINE_MUTEX(run_lock);
+
+static int run_tests(void)
{
int ncpus = num_online_cpus();
int ret, i;
printk(KERN_INFO "Beginning ww mutex selftests\n");
- prandom_seed_state(&rng, get_random_u64());
-
- wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
- if (!wq)
- return -ENOMEM;
-
ret = test_mutex();
if (ret)
return ret;
@@ -687,8 +683,68 @@ static int __init test_ww_mutex_init(void)
return 0;
}
+static ssize_t run_tests_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (!mutex_trylock(&run_lock)) {
+ pr_err("Test already running\n");
+ return -EBUSY;
+ }
+
+ run_tests();
+ mutex_unlock(&run_lock);
+
+ return count;
+}
+
+static struct kobj_attribute run_tests_attribute =
+ __ATTR(run_tests, 0664, NULL, run_tests_store);
+
+static struct attribute *attrs[] = {
+ &run_tests_attribute.attr,
+ NULL, /* need to NULL terminate the list of attributes */
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *test_ww_mutex_kobj;
+
+static int __init test_ww_mutex_init(void)
+{
+ int ret;
+
+ prandom_seed_state(&rng, get_random_u64());
+
+ wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
+ if (!wq)
+ return -ENOMEM;
+
+ test_ww_mutex_kobj = kobject_create_and_add("test_ww_mutex", kernel_kobj);
+ if (!test_ww_mutex_kobj) {
+ destroy_workqueue(wq);
+ return -ENOMEM;
+ }
+
+ /* Create the files associated with this kobject */
+ ret = sysfs_create_group(test_ww_mutex_kobj, &attr_group);
+ if (ret) {
+ kobject_put(test_ww_mutex_kobj);
+ destroy_workqueue(wq);
+ return ret;
+ }
+
+ mutex_lock(&run_lock);
+ ret = run_tests();
+ mutex_unlock(&run_lock);
+
+ return ret;
+}
+
static void __exit test_ww_mutex_exit(void)
{
+ kobject_put(test_ww_mutex_kobj);
destroy_workqueue(wq);
}
--
2.47.0.338.g60cca15819-goog