The dirtylimit_throttle_pct trace event currently fires only when the
throttle time is adjusted linearly. Modify the trace event so that it
also records non-linear adjustments. Additionally, since the throttle
time may be adjusted again at the end of dirtylimit_set_throttle(),
move the trace event after that final adjustment and report the final
adjustment time and sleep percent.
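
In effect, the end of dirtylimit_set_throttle() becomes the following
(condensed from the diff below, keeping the patch's variable names):

end:
    if (cpu->throttle_us_per_full - throtlle_us_old) {
        sleep_pct = current ? ring_full_time_us * 100 /
                              (ring_full_time_us + cpu->throttle_us_per_full)
                            : 0;
        trace_dirtylimit_throttle_pct(cpu->cpu_index, sleep_pct,
                                      cpu->throttle_us_per_full -
                                      throtlle_us_old);
    }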

This patch fixes the following issues:

1. With a current dirty rate of 1000MB/s and a dirty limit of
10000MB/s, before this patch the trace event prints:

CPU[2] throttle percent: 98, throttle adjust time 191590 us
CPU[2] throttle percent: 98, throttle adjust time 191002 us
CPU[2] throttle percent: 98, throttle adjust time 191002 us

After this patch, nothing is printed.

2. With a current dirty rate of 1000MB/s and a dirty limit of
333MB/s, before this patch the trace event prints:

CPU[3] throttle percent: 98, throttle adjust time 32666634 us

Only linear adjustments are printed, and the adjust time is
large and only ever positive.

After this patch, it prints the following:

CPU[2] throttle percent: 97, throttle adjust time 128766 us
CPU[2] throttle percent: 94, throttle adjust time -61131 us
CPU[2] throttle percent: 92, throttle adjust time -16634 us
...
CPU[2] throttle percent: 74, throttle adjust time -390 us
CPU[2] throttle percent: 73, throttle adjust time -390 us
Signed-off-by: wangfuqiang49 <wangfuqiang49@jd.com>
---
system/dirtylimit.c | 28 ++++++++++++++++++++--------
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/system/dirtylimit.c b/system/dirtylimit.c
index 7c071248bb..c7f663e5b9 100644
--- a/system/dirtylimit.c
+++ b/system/dirtylimit.c
@@ -281,31 +281,30 @@ static void dirtylimit_set_throttle(CPUState *cpu,
 {
     int64_t ring_full_time_us = 0;
     uint64_t sleep_pct = 0;
+    uint64_t throttle_pct = 0;
     uint64_t throttle_us = 0;
+    int64_t throtlle_us_old = cpu->throttle_us_per_full;
 
     if (current == 0) {
         cpu->throttle_us_per_full = 0;
-        return;
+        goto end;
     }
 
     ring_full_time_us = dirtylimit_dirty_ring_full_time(current);
 
     if (dirtylimit_need_linear_adjustment(quota, current)) {
         if (quota < current) {
-            sleep_pct = (current - quota) * 100 / current;
+            throttle_pct = (current - quota) * 100 / current;
             throttle_us =
-                ring_full_time_us * sleep_pct / (double)(100 - sleep_pct);
+                ring_full_time_us * throttle_pct / (double)(100 - throttle_pct);
             cpu->throttle_us_per_full += throttle_us;
         } else {
-            sleep_pct = (quota - current) * 100 / quota;
+            throttle_pct = (quota - current) * 100 / quota;
             throttle_us =
-                ring_full_time_us * sleep_pct / (double)(100 - sleep_pct);
+                ring_full_time_us * throttle_pct / (double)(100 - throttle_pct);
             cpu->throttle_us_per_full -= throttle_us;
         }
 
-        trace_dirtylimit_throttle_pct(cpu->cpu_index,
-                                      sleep_pct,
-                                      throttle_us);
     } else {
         if (quota < current) {
             cpu->throttle_us_per_full += ring_full_time_us / 10;
@@ -323,6 +322,19 @@ static void dirtylimit_set_throttle(CPUState *cpu,
         ring_full_time_us * DIRTYLIMIT_THROTTLE_PCT_MAX);
 
     cpu->throttle_us_per_full = MAX(cpu->throttle_us_per_full, 0);
+
+end:
+    if (cpu->throttle_us_per_full - throtlle_us_old) {
+        if (current) {
+            sleep_pct = ring_full_time_us * 100 / (ring_full_time_us +
+                                                   cpu->throttle_us_per_full);
+        } else {
+            sleep_pct = 0;
+        }
+        trace_dirtylimit_throttle_pct(cpu->cpu_index,
+                                      sleep_pct,
+                                      cpu->throttle_us_per_full -
+                                      throtlle_us_old); }
 }
 
 static void dirtylimit_adjust_throttle(CPUState *cpu)
--
2.47.0
On Tue, Dec 31, 2024 at 9:56 AM fuqiang wang <fuqiang.wng@gmail.com> wrote:
> The dirtylimit_throttle_pct trace event currently fires only when the
> throttle time is adjusted linearly. Modify the trace event so that it
> also records non-linear adjustments.
Please target just the optimization mentioned above in a single patch.
> Additionally, since the throttle time
> may be adjusted again at the end of dirtylimit_set_throttle(), move
> the trace event after that final adjustment and report the final
> adjustment time and sleep percent.
>
If needed, I'd advise splitting this part out into a separate patch.
>
> This patch fixes the following issues:
>
> 1. With a current dirty rate of 1000MB/s and a dirty limit of
> 10000MB/s, before this patch the trace event prints:
>
> CPU[2] throttle percent: 98, throttle adjust time 191590 us
> CPU[2] throttle percent: 98, throttle adjust time 191002 us
> CPU[2] throttle percent: 98, throttle adjust time 191002 us
>
> After this patch, nothing is printed.
>
> 2. With a current dirty rate of 1000MB/s and a dirty limit of
> 333MB/s, before this patch the trace event prints:
>
> CPU[3] throttle percent: 98, throttle adjust time 32666634 us
>
> Only linear adjustments are printed, and the adjust time is
> large and only ever positive.
>
> After this patch, it prints the following:
> CPU[2] throttle percent: 97, throttle adjust time 128766 us
> CPU[2] throttle percent: 94, throttle adjust time -61131 us
> CPU[2] throttle percent: 92, throttle adjust time -16634 us
> ...
> CPU[2] throttle percent: 74, throttle adjust time -390 us
> CPU[2] throttle percent: 73, throttle adjust time -390 us
>
> Signed-off-by: wangfuqiang49 <wangfuqiang49@jd.com>
> ---
> system/dirtylimit.c | 28 ++++++++++++++++++++--------
> 1 file changed, 20 insertions(+), 8 deletions(-)
>
> diff --git a/system/dirtylimit.c b/system/dirtylimit.c
> index 7c071248bb..c7f663e5b9 100644
> --- a/system/dirtylimit.c
> +++ b/system/dirtylimit.c
> @@ -281,31 +281,30 @@ static void dirtylimit_set_throttle(CPUState *cpu,
> {
> int64_t ring_full_time_us = 0;
> uint64_t sleep_pct = 0;
> + uint64_t throttle_pct = 0;
> uint64_t throttle_us = 0;
> + int64_t throtlle_us_old = cpu->throttle_us_per_full;
>
> if (current == 0) {
> cpu->throttle_us_per_full = 0;
> - return;
> + goto end;
>
If the current dirty rate is zero there is nothing to do, including
emitting the trace event, so returning directly seems justified.
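
Something like the following sketch (untested) would keep the plain
return and skip the trace entirely in that case:

    if (current == 0) {
        /* no dirty pages to limit: clear the throttle, nothing to trace */
        cpu->throttle_us_per_full = 0;
        return;
    }
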
> }
>
> ring_full_time_us = dirtylimit_dirty_ring_full_time(current);
>
> if (dirtylimit_need_linear_adjustment(quota, current)) {
> if (quota < current) {
> - sleep_pct = (current - quota) * 100 / current;
>
s/sleep_pct/throttle_pct/, why?
> + throttle_pct = (current - quota) * 100 / current;
> throttle_us =
> - ring_full_time_us * sleep_pct / (double)(100 - sleep_pct);
> + ring_full_time_us * throttle_pct / (double)(100 - throttle_pct);
> cpu->throttle_us_per_full += throttle_us;
> } else {
> - sleep_pct = (quota - current) * 100 / quota;
> + throttle_pct = (quota - current) * 100 / quota;
> throttle_us =
> - ring_full_time_us * sleep_pct / (double)(100 - sleep_pct);
> + ring_full_time_us * throttle_pct / (double)(100 - throttle_pct);
> cpu->throttle_us_per_full -= throttle_us;
> }
>
> - trace_dirtylimit_throttle_pct(cpu->cpu_index,
> - sleep_pct,
> - throttle_us);
> } else {
> if (quota < current) {
> cpu->throttle_us_per_full += ring_full_time_us / 10;
> @@ -323,6 +322,19 @@ static void dirtylimit_set_throttle(CPUState *cpu,
> ring_full_time_us * DIRTYLIMIT_THROTTLE_PCT_MAX);
>
> cpu->throttle_us_per_full = MAX(cpu->throttle_us_per_full, 0);
> +
> +end:
> + if (cpu->throttle_us_per_full - throtlle_us_old) {
> + if (current) {
> + sleep_pct = ring_full_time_us * 100 / (ring_full_time_us +
> + cpu->throttle_us_per_full);
> + } else {
> + sleep_pct = 0;
> + }
> + trace_dirtylimit_throttle_pct(cpu->cpu_index,
> + sleep_pct,
> + cpu->throttle_us_per_full -
> + throtlle_us_old); }
>
This changes the interface of the trace event. We could keep throttle_us
and add the delta as an extra argument:

        trace_dirtylimit_throttle_pct(cpu->cpu_index,
                                      sleep_pct,
                                      throttle_us,
                                      cpu->throttle_us_per_full -
                                      throtlle_us_old);
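
If the event grows a delta argument, its definition has to grow a
matching parameter as well. Assuming the current declaration lives in
system/trace-events, takes (int cpu_index, uint64_t pct, uint64_t
time_us), and uses a format string along the lines of the trace output
quoted above (all assumptions), the extended line could look roughly
like this (the delta_us name is only illustrative):

    dirtylimit_throttle_pct(int cpu_index, uint64_t pct, uint64_t time_us, int64_t delta_us) "CPU[%d] throttle percent: %" PRIu64 ", throttle adjust time %" PRIu64 " us, delta %" PRIi64 " us"
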
> }
>
> static void dirtylimit_adjust_throttle(CPUState *cpu)
> --
> 2.47.0
>
>
--
Best regards