[PATCHv6 1/2] watchdog/softlockup: low-overhead detection of interrupt storm

Posted by Bitao Hu 2 years ago
The following softlockup is caused by an interrupt storm, but it cannot
be identified from the call tree, because the call tree is merely a
snapshot and doesn't fully capture the behavior of the CPU during the
soft lockup.
  watchdog: BUG: soft lockup - CPU#28 stuck for 23s! [fio:83921]
  ...
  Call trace:
    __do_softirq+0xa0/0x37c
    __irq_exit_rcu+0x108/0x140
    irq_exit+0x14/0x20
    __handle_domain_irq+0x84/0xe0
    gic_handle_irq+0x80/0x108
    el0_irq_naked+0x50/0x58

Therefore, I think it is necessary to report CPU utilization during the
softlockup_thresh period (reported once every sample_period, for a total
of five reports), like this:
  watchdog: BUG: soft lockup - CPU#28 stuck for 23s! [fio:83921]
  CPU#28 Utilization every 4s during lockup:
    #1: 0% system, 0% softirq, 100% hardirq, 0% idle
    #2: 0% system, 0% softirq, 100% hardirq, 0% idle
    #3: 0% system, 0% softirq, 100% hardirq, 0% idle
    #4: 0% system, 0% softirq, 100% hardirq, 0% idle
    #5: 0% system, 0% softirq, 100% hardirq, 0% idle
  ...

This would be helpful in determining whether an interrupt storm has
occurred or in identifying the cause of the softlockup. The criteria for
determination are as follows:
  a. If the hardirq utilization is high, an interrupt storm should be
  considered, and the root cause cannot be determined from the call tree.
  b. If the softirq utilization is high, we can analyze the call tree,
  but it may not reflect the root cause.
  c. If the system utilization is high, we can analyze the root cause
  from the call tree.

The mechanism requires a considerable amount of global storage space
when configured for the maximum number of CPUs. Therefore, add a
SOFTLOCKUP_DETECTOR_INTR_STORM Kconfig knob that defaults to "y"
if the maximum number of CPUs is <= 128.
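
For scale, a rough count of just the per-CPU arrays added by this
patch (the interrupt-count tracking in the rest of the series adds
more on top):
  cpustat_old[4]:      4 * sizeof(u16) =  8 bytes
  cpustat_util[5][4]: 20 * sizeof(u8)  = 20 bytes
  cpustat_tail:             sizeof(u8) =  1 byte
i.e. about 29 bytes per CPU.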

Signed-off-by: Bitao Hu <yaoma@linux.alibaba.com>
---
 kernel/watchdog.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++
 lib/Kconfig.debug | 13 +++++++
 2 files changed, 104 insertions(+)

diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 81a8862295d6..380b60074f1d 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -16,6 +16,8 @@
 #include <linux/cpu.h>
 #include <linux/nmi.h>
 #include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
 #include <linux/tick.h>
@@ -333,6 +335,92 @@ __setup("watchdog_thresh=", watchdog_thresh_setup);
 
 static void __lockup_detector_cleanup(void);
 
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM
+#define NUM_STATS_GROUPS	5
+enum stats_per_group {
+	STATS_SYSTEM,
+	STATS_SOFTIRQ,
+	STATS_HARDIRQ,
+	STATS_IDLE,
+	NUM_STATS_PER_GROUP,
+};
+
+static const enum cpu_usage_stat tracked_stats[NUM_STATS_PER_GROUP] = {
+	CPUTIME_SYSTEM,
+	CPUTIME_SOFTIRQ,
+	CPUTIME_IRQ,
+	CPUTIME_IDLE,
+};
+
+static DEFINE_PER_CPU(u16, cpustat_old[NUM_STATS_PER_GROUP]);
+static DEFINE_PER_CPU(u8, cpustat_util[NUM_STATS_GROUPS][NUM_STATS_PER_GROUP]);
+static DEFINE_PER_CPU(u8, cpustat_tail);
+
+/*
+ * We don't need nanosecond resolution. A granularity of 16ms is
+ * sufficient for our precision, allowing us to use u16 to store
+ * cpustats, which will roll over roughly every 1000 seconds.
+ * 2^24 ~= 16 * 10^6
+ */
+static u16 get_16bit_precision(u64 data_ns)
+{
+	return data_ns >> 24LL; /* 2^24ns ~= 16.8ms */
+}
+
+static void update_cpustat(void)
+{
+	int i;
+	u8 util;
+	u16 old_stat, new_stat;
+	struct kernel_cpustat kcpustat;
+	u64 *cpustat = kcpustat.cpustat;
+	u8 tail = __this_cpu_read(cpustat_tail);
+	u16 sample_period_16 = get_16bit_precision(sample_period);
+
+	kcpustat_cpu_fetch(&kcpustat, smp_processor_id());
+	for (i = 0; i < NUM_STATS_PER_GROUP; i++) {
+		old_stat = __this_cpu_read(cpustat_old[i]);
+		new_stat = get_16bit_precision(cpustat[tracked_stats[i]]);
+		util = DIV_ROUND_UP(100 * (new_stat - old_stat), sample_period_16);
+		__this_cpu_write(cpustat_util[tail][i], util);
+		__this_cpu_write(cpustat_old[i], new_stat);
+	}
+	__this_cpu_write(cpustat_tail, (tail + 1) % NUM_STATS_GROUPS);
+}
+
+static void print_cpustat(void)
+{
+	int i, group;
+	u8 tail = __this_cpu_read(cpustat_tail);
+	u64 sample_period_second = sample_period;
+
+	do_div(sample_period_second, NSEC_PER_SEC);
+	/*
+	 * We do not want the "watchdog: " prefix on every line,
+	 * hence we use "printk" instead of "pr_crit".
+	 */
+	printk(KERN_CRIT "CPU#%d Utilization every %llus during lockup:\n",
+	       smp_processor_id(), sample_period_second);
+	for (i = 0; i < NUM_STATS_GROUPS; i++) {
+		group = (tail + i) % NUM_STATS_GROUPS;
+		printk(KERN_CRIT "\t#%d: %3u%% system,\t%3u%% softirq,\t"
+			"%3u%% hardirq,\t%3u%% idle\n", i + 1,
+			__this_cpu_read(cpustat_util[group][STATS_SYSTEM]),
+			__this_cpu_read(cpustat_util[group][STATS_SOFTIRQ]),
+			__this_cpu_read(cpustat_util[group][STATS_HARDIRQ]),
+			__this_cpu_read(cpustat_util[group][STATS_IDLE]));
+	}
+}
+
+static void report_cpu_status(void)
+{
+	print_cpustat();
+}
+#else
+static inline void update_cpustat(void) { }
+static inline void report_cpu_status(void) { }
+#endif
+
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
  * lockups can have false positives under extreme conditions. So we generally
@@ -504,6 +592,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	 */
 	period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));
 
+	update_cpustat();
+
 	/* Reset the interval when touched by known problematic code. */
 	if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
 		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
@@ -539,6 +629,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
 			smp_processor_id(), duration,
 			current->comm, task_pid_nr(current));
+		report_cpu_status();
 		print_modules();
 		print_irqtrace_events(current);
 		if (regs)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 975a07f9f1cc..49f652674bd8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1029,6 +1029,19 @@ config SOFTLOCKUP_DETECTOR
 	  chance to run.  The current stack trace is displayed upon
 	  detection and the system will stay locked up.
 
+config SOFTLOCKUP_DETECTOR_INTR_STORM
+	bool "Detect Interrupt Storm in Soft Lockups"
+	depends on SOFTLOCKUP_DETECTOR && IRQ_TIME_ACCOUNTING
+	default y if NR_CPUS <= 128
+	help
+	  Say Y here to enable the kernel to detect interrupt storm
+	  during "soft lockups".
+
+	  "soft lockups" can be caused by a variety of reasons. If one is
+	  caused by an interrupt storm, then the storming interrupts will not
+	  be on the callstack. To detect this case, it is necessary to report
+	  the CPU stats and the interrupt counts during the "soft lockups".
+
 config BOOTPARAM_SOFTLOCKUP_PANIC
 	bool "Panic (Reboot) On Soft Lockups"
 	depends on SOFTLOCKUP_DETECTOR
-- 
2.37.1 (Apple Git-137.1)

Re: [PATCHv6 1/2] watchdog/softlockup: low-overhead detection of interrupt storm
Posted by Liu Song 1 year, 12 months ago
Looks good!

Reviewed-by: Liu Song <liusong@linux.alibaba.com>

Re: [PATCHv6 1/2] watchdog/softlockup: low-overhead detection of interrupt storm
Posted by Petr Mladek 2 years ago
Hi,

I am sorry for joining this game so late. But honestly, it went
forward too quickly. A good practice is to wait a week before
sending a new version so that you give more people a chance
to provide feedback.

The only exception might be when you know exactly who could
review it, because the area is not interesting to anyone else.
But that is typically not the case for kernel core code.


On Thu 2024-02-08 20:54:25, Bitao Hu wrote:
> The following softlockup is caused by an interrupt storm, but it cannot
> be identified from the call tree, because the call tree is merely a
> snapshot and doesn't fully capture the behavior of the CPU during the
> soft lockup.
>   watchdog: BUG: soft lockup - CPU#28 stuck for 23s! [fio:83921]
>   ...
>   Call trace:
>     __do_softirq+0xa0/0x37c
>     __irq_exit_rcu+0x108/0x140
>     irq_exit+0x14/0x20
>     __handle_domain_irq+0x84/0xe0
>     gic_handle_irq+0x80/0x108
>     el0_irq_naked+0x50/0x58
> 
> Therefore, I think it is necessary to report CPU utilization during the
> softlockup_thresh period (reported once every sample_period, for a total
> of five reports), like this:
>   watchdog: BUG: soft lockup - CPU#28 stuck for 23s! [fio:83921]
>   CPU#28 Utilization every 4s during lockup:
>     #1: 0% system, 0% softirq, 100% hardirq, 0% idle
>     #2: 0% system, 0% softirq, 100% hardirq, 0% idle
>     #3: 0% system, 0% softirq, 100% hardirq, 0% idle
>     #4: 0% system, 0% softirq, 100% hardirq, 0% idle
>     #5: 0% system, 0% softirq, 100% hardirq, 0% idle

I like this. IMHO, it might be really useful.

> --- a/kernel/watchdog.c
> +++ b/kernel/watchdog.c
> @@ -333,6 +335,92 @@ __setup("watchdog_thresh=", watchdog_thresh_setup);
>  
>  static void __lockup_detector_cleanup(void);
>  
> +#ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM
> +#define NUM_STATS_GROUPS	5

It would be nice to synchronize this with the hardcoded 5 in:

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);

For example, define and use the following in both situations:

#define NUM_SAMPLE_PERIODS	5
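
A sketch of how the two users could then share it (untested, with
set_sample_period() abbreviated to the relevant line):

#define NUM_STATS_GROUPS	NUM_SAMPLE_PERIODS

static void set_sample_period(void)
{
	/* one sample per period, NUM_SAMPLE_PERIODS periods per threshold */
	sample_period = get_softlockup_thresh() *
			((u64)NSEC_PER_SEC / NUM_SAMPLE_PERIODS);
}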

> +enum stats_per_group {
> +	STATS_SYSTEM,
> +	STATS_SOFTIRQ,
> +	STATS_HARDIRQ,
> +	STATS_IDLE,
> +	NUM_STATS_PER_GROUP,
> +};
> +
> +static const enum cpu_usage_stat tracked_stats[NUM_STATS_PER_GROUP] = {
> +	CPUTIME_SYSTEM,
> +	CPUTIME_SOFTIRQ,
> +	CPUTIME_IRQ,
> +	CPUTIME_IDLE,
> +};
> +
> +static DEFINE_PER_CPU(u16, cpustat_old[NUM_STATS_PER_GROUP]);
> +static DEFINE_PER_CPU(u8, cpustat_util[NUM_STATS_GROUPS][NUM_STATS_PER_GROUP]);
> +static DEFINE_PER_CPU(u8, cpustat_tail);
> +
> +/*
> + * We don't need nanosecond resolution. A granularity of 16ms is
> + * sufficient for our precision, allowing us to use u16 to store
> + * cpustats, which will roll over roughly every 1000 seconds.
> + * 2^24 ~= 16 * 10^6
> + */
> +static u16 get_16bit_precision(u64 data_ns)
> +{
> +	return data_ns >> 24LL; /* 2^24ns ~= 16.8ms */

I would personally use

    delta_ns >> 20  /* 2^20ns ~= 1ms */

to make it easier for a human to debug. It would support
sample periods up to 65s, which might be enough.

But I do not insist on it. ">> 24" provides less granularity,
but it supports longer sample periods.
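
For reference, the numbers behind the trade-off: a u16 snapshot wraps
after 65536 units, so per-period deltas stay unambiguous only while
the sample period fits in that window:

    >> 20: unit = 2^20 ns ~= 1.05 ms, u16 wraps at 2^36 ns ~= 68.7 s
    >> 24: unit = 2^24 ns ~= 16.8 ms, u16 wraps at 2^40 ns ~= 1100 s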

> +static void print_cpustat(void)
> +{
> +	int i, group;
> +	u8 tail = __this_cpu_read(cpustat_tail);
> +	u64 sample_period_second = sample_period;
> +
> +	do_div(sample_period_second, NSEC_PER_SEC);
> +	/*
> +	 * We do not want the "watchdog: " prefix on every line,
> +	 * hence we use "printk" instead of "pr_crit".
> +	 */
> +	printk(KERN_CRIT "CPU#%d Utilization every %llus during lockup:\n",
> +	       smp_processor_id(), sample_period_second);
> +	for (i = 0; i < NUM_STATS_GROUPS; i++) {

This starts with the 1st group in the array. Is it the oldest one?
It should take into account cpustat_tail.


> +		group = (tail + i) % NUM_STATS_GROUPS;
> +		printk(KERN_CRIT "\t#%d: %3u%% system,\t%3u%% softirq,\t"
> +			"%3u%% hardirq,\t%3u%% idle\n", i + 1,
> +			__this_cpu_read(cpustat_util[group][STATS_SYSTEM]),
> +			__this_cpu_read(cpustat_util[group][STATS_SOFTIRQ]),
> +			__this_cpu_read(cpustat_util[group][STATS_HARDIRQ]),
> +			__this_cpu_read(cpustat_util[group][STATS_IDLE]));
> +	}
> +}
> +

Best Regards,
Petr
Re: [PATCHv6 1/2] watchdog/softlockup: low-overhead detection of interrupt storm
Posted by Doug Anderson 1 year, 12 months ago
Hi,

On Fri, Feb 9, 2024 at 5:35 AM Petr Mladek <pmladek@suse.com> wrote:
>
> Hi,
>
> I am sorry for joining this game so late. But honestly, it went
> forward too quickly. A good practice is to wait a week before
> sending a new version so that you give more people a chance
> to provide feedback.
>
> The only exception might be when you know exactly who could
> review it, because the area is not interesting to anyone else.
> But that is typically not the case for kernel core code.

Just for the record, I am not personally a fan of the advice that you
need to unconditionally wait a week between spins.

FWIW, I _am_ totally sold on the idea of waiting a while if there is
still ongoing discussion about how to move forward. You don't want to
fragment the conversation with some replies against the old version
and some against the new. However, in this case there was no ongoing
discussion and I don't see any particular harm that was done with
Bitao spinning as often as he did. I actually find it quite nice not
to need to wait a week (or more) between versions because it means
that patches are still fresh in my mind when I review the next
version.

Is your concern that some of my advice to Bitao took the series in the
wrong direction and you wished you could have put a stop to it sooner?
...or is your concern that Andrew has already landed the current
patches in his "unstable" tree? ...or is there some other problem that
was caused by Bitao's quick spins of this series?

In any case, I'm happy that you've found time to jump in and review
the code! My current understanding of Andrew's process is that, since
things are only in his "unstable" branch, Bitao can still send new
versions of the series and Andrew can update the patches.

-Doug
Re: [PATCHv6 1/2] watchdog/softlockup: low-overhead detection of interrupt storm
Posted by Bitao Hu 1 year, 12 months ago
Hi,

On 2024/2/9 21:35, Petr Mladek wrote:
> Hi,
> 
> I am sorry for joining this game so late. But honestly, it went
> forward too quickly. A good practice is to wait a week before
> sending a new version so that you give more people a chance
> to provide feedback.
> 
> The only exception might be when you know exactly who could
> review it, because the area is not interesting to anyone else.
> But that is typically not the case for kernel core code.
Thanks for the reminder; I will be mindful of the pace.
> 
> 
> On Thu 2024-02-08 20:54:25, Bitao Hu wrote:
>> The following softlockup is caused by an interrupt storm, but it cannot
>> be identified from the call tree, because the call tree is merely a
>> snapshot and doesn't fully capture the behavior of the CPU during the
>> soft lockup.
>>    watchdog: BUG: soft lockup - CPU#28 stuck for 23s! [fio:83921]
>>    ...
>>    Call trace:
>>      __do_softirq+0xa0/0x37c
>>      __irq_exit_rcu+0x108/0x140
>>      irq_exit+0x14/0x20
>>      __handle_domain_irq+0x84/0xe0
>>      gic_handle_irq+0x80/0x108
>>      el0_irq_naked+0x50/0x58
>>
>> Therefore, I think it is necessary to report CPU utilization during the
>> softlockup_thresh period (reported once every sample_period, for a total
>> of five reports), like this:
>>    watchdog: BUG: soft lockup - CPU#28 stuck for 23s! [fio:83921]
>>    CPU#28 Utilization every 4s during lockup:
>>      #1: 0% system, 0% softirq, 100% hardirq, 0% idle
>>      #2: 0% system, 0% softirq, 100% hardirq, 0% idle
>>      #3: 0% system, 0% softirq, 100% hardirq, 0% idle
>>      #4: 0% system, 0% softirq, 100% hardirq, 0% idle
>>      #5: 0% system, 0% softirq, 100% hardirq, 0% idle
> 
> I like this. IMHO, it might be really useful.
> 
>> --- a/kernel/watchdog.c
>> +++ b/kernel/watchdog.c
>> @@ -333,6 +335,92 @@ __setup("watchdog_thresh=", watchdog_thresh_setup);
>>   
>>   static void __lockup_detector_cleanup(void);
>>   
>> +#ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM
>> +#define NUM_STATS_GROUPS	5
> 
> It would be nice to synchronize this with the hardcoded 5 in:
> 
> static void set_sample_period(void)
> {
> 	/*
> 	 * convert watchdog_thresh from seconds to ns
> 	 * the divide by 5 is to give hrtimer several chances (two
> 	 * or three with the current relation between the soft
> 	 * and hard thresholds) to increment before the
> 	 * hardlockup detector generates a warning
> 	 */
> 	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
OK, I've had the same thought.
> 
> For example, define and use the following in both situations:
> 
> #define NUM_SAMPLE_PERIODS	5
>
>> +enum stats_per_group {
>> +	STATS_SYSTEM,
>> +	STATS_SOFTIRQ,
>> +	STATS_HARDIRQ,
>> +	STATS_IDLE,
>> +	NUM_STATS_PER_GROUP,
>> +};
>> +
>> +static const enum cpu_usage_stat tracked_stats[NUM_STATS_PER_GROUP] = {
>> +	CPUTIME_SYSTEM,
>> +	CPUTIME_SOFTIRQ,
>> +	CPUTIME_IRQ,
>> +	CPUTIME_IDLE,
>> +};
>> +
>> +static DEFINE_PER_CPU(u16, cpustat_old[NUM_STATS_PER_GROUP]);
>> +static DEFINE_PER_CPU(u8, cpustat_util[NUM_STATS_GROUPS][NUM_STATS_PER_GROUP]);
>> +static DEFINE_PER_CPU(u8, cpustat_tail);
>> +
>> +/*
>> + * We don't need nanosecond resolution. A granularity of 16ms is
>> + * sufficient for our precision, allowing us to use u16 to store
>> + * cpustats, which will roll over roughly every 1000 seconds.
>> + * 2^24 ~= 16 * 10^6
>> + */
>> +static u16 get_16bit_precision(u64 data_ns)
>> +{
>> +	return data_ns >> 24LL; /* 2^24ns ~= 16.8ms */
> 
> I would personally use
> 
>      delta_ns >> 20  /* 2^20ns ~= 1ms */
> 
> to make it easier for a human to debug. It would support
> sample periods up to 65s, which might be enough.
> 
> But I do not insist on it. ">> 24" provides less granularity,
> but it supports longer sample periods.
I considered using ">>20" as it provides more intuitive granularity,
but I wanted to support longer sample periods. After weighing the
options, I chose ">>24".
> 
>> +static void print_cpustat(void)
>> +{
>> +	int i, group;
>> +	u8 tail = __this_cpu_read(cpustat_tail);
>> +	u64 sample_period_second = sample_period;
>> +
>> +	do_div(sample_period_second, NSEC_PER_SEC);
>> +	/*
>> +	 * We do not want the "watchdog: " prefix on every line,
>> +	 * hence we use "printk" instead of "pr_crit".
>> +	 */
>> +	printk(KERN_CRIT "CPU#%d Utilization every %llus during lockup:\n",
>> +	       smp_processor_id(), sample_period_second);
>> +	for (i = 0; i < NUM_STATS_GROUPS; i++) {
> 
> This starts with the 1st group in the array. Is it the oldest one?
> It should take into account cpustat_tail.
Yes, it starts with the oldest one. After update_cpustat() completes,
cpustat_tail points to the oldest group, so I start reading from the
entry that cpustat_tail points to.
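
A concrete example with a hypothetical tail value: if cpustat_tail is
2 when the lockup is reported, the loop prints groups 2, 3, 4, 0, 1.
Group 2 holds the oldest sample and group 1 the newest, because slot 2
is the one that update_cpustat() would overwrite next.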
> 
> 
>> +		group = (tail + i) % NUM_STATS_GROUPS;
>> +		printk(KERN_CRIT "\t#%d: %3u%% system,\t%3u%% softirq,\t"
>> +			"%3u%% hardirq,\t%3u%% idle\n", i + 1,
>> +			__this_cpu_read(cpustat_util[group][STATS_SYSTEM]),
>> +			__this_cpu_read(cpustat_util[group][STATS_SOFTIRQ]),
>> +			__this_cpu_read(cpustat_util[group][STATS_HARDIRQ]),
>> +			__this_cpu_read(cpustat_util[group][STATS_IDLE]));
>> +	}
>> +}
>> +
> 

Best Regards,
Bitao
Re: [PATCHv6 1/2] watchdog/softlockup: low-overhead detection of interrupt storm
Posted by Doug Anderson 2 years ago
Hi,

On Thu, Feb 8, 2024 at 4:54 AM Bitao Hu <yaoma@linux.alibaba.com> wrote:

Thanks, this looks great now!

Reviewed-by: Douglas Anderson <dianders@chromium.org>