For 32-bit systems, damos_stat now uses unsigned long long for byte
statistics data to avoid integer overflow risks inherent in the
previous design.
Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com>
---
include/linux/damon.h | 6 +++---
mm/damon/modules-common.h | 4 ++--
mm/damon/sysfs-schemes.c | 12 ++++++------
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/include/linux/damon.h b/include/linux/damon.h
index aa045dcb5b5d..d85850cf06c5 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -333,10 +333,10 @@ struct damos_watermarks {
*/
struct damos_stat {
unsigned long nr_tried;
- unsigned long sz_tried;
+ unsigned long long sz_tried;
unsigned long nr_applied;
- unsigned long sz_applied;
- unsigned long sz_ops_filter_passed;
+ unsigned long long sz_applied;
+ unsigned long long sz_ops_filter_passed;
unsigned long qt_exceeds;
};
diff --git a/mm/damon/modules-common.h b/mm/damon/modules-common.h
index c7048a449321..ae45d0eb960e 100644
--- a/mm/damon/modules-common.h
+++ b/mm/damon/modules-common.h
@@ -36,11 +36,11 @@
#define DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(stat, try_name, \
succ_name, qt_exceed_name) \
module_param_named(nr_##try_name, stat.nr_tried, ulong, 0400); \
- module_param_named(bytes_##try_name, stat.sz_tried, ulong, \
+ module_param_named(bytes_##try_name, stat.sz_tried, ullong, \
0400); \
module_param_named(nr_##succ_name, stat.nr_applied, ulong, \
0400); \
- module_param_named(bytes_##succ_name, stat.sz_applied, ulong, \
+ module_param_named(bytes_##succ_name, stat.sz_applied, ullong, \
0400); \
module_param_named(nr_##qt_exceed_name, stat.qt_exceeds, ulong, \
0400);
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 74056bcd6a2c..3c4882549a28 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -199,10 +199,10 @@ static const struct kobj_type damon_sysfs_scheme_regions_ktype = {
struct damon_sysfs_stats {
struct kobject kobj;
unsigned long nr_tried;
- unsigned long sz_tried;
+ unsigned long long sz_tried;
unsigned long nr_applied;
- unsigned long sz_applied;
- unsigned long sz_ops_filter_passed;
+ unsigned long long sz_applied;
+ unsigned long long sz_ops_filter_passed;
unsigned long qt_exceeds;
};
@@ -226,7 +226,7 @@ static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
struct damon_sysfs_stats *stats = container_of(kobj,
struct damon_sysfs_stats, kobj);
- return sysfs_emit(buf, "%lu\n", stats->sz_tried);
+ return sysfs_emit(buf, "%llu\n", stats->sz_tried);
}
static ssize_t nr_applied_show(struct kobject *kobj,
@@ -244,7 +244,7 @@ static ssize_t sz_applied_show(struct kobject *kobj,
struct damon_sysfs_stats *stats = container_of(kobj,
struct damon_sysfs_stats, kobj);
- return sysfs_emit(buf, "%lu\n", stats->sz_applied);
+ return sysfs_emit(buf, "%llu\n", stats->sz_applied);
}
static ssize_t sz_ops_filter_passed_show(struct kobject *kobj,
@@ -253,7 +253,7 @@ static ssize_t sz_ops_filter_passed_show(struct kobject *kobj,
struct damon_sysfs_stats *stats = container_of(kobj,
struct damon_sysfs_stats, kobj);
- return sysfs_emit(buf, "%lu\n", stats->sz_ops_filter_passed);
+ return sysfs_emit(buf, "%llu\n", stats->sz_ops_filter_passed);
}
static ssize_t qt_exceeds_show(struct kobject *kobj,
--
2.34.1
On Wed, 13 Aug 2025 13:07:05 +0800 Quanmin Yan <yanquanmin1@huawei.com> wrote: > For 32-bit systems, damos_stat now uses unsigned long long for byte > statistics data to avoid integer overflow risks inherent in the > previous design. I suggested using the core-layer address unit on stat, and ask users to multiply the addr_unit value to stat values if they want bytes value. If we agree on it, I think this patch wouldn't really be required. > > Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com> > --- > include/linux/damon.h | 6 +++--- > mm/damon/modules-common.h | 4 ++-- > mm/damon/sysfs-schemes.c | 12 ++++++------ > 3 files changed, 11 insertions(+), 11 deletions(-) > > diff --git a/include/linux/damon.h b/include/linux/damon.h > index aa045dcb5b5d..d85850cf06c5 100644 > --- a/include/linux/damon.h > +++ b/include/linux/damon.h > @@ -333,10 +333,10 @@ struct damos_watermarks { > */ > struct damos_stat { > unsigned long nr_tried; > - unsigned long sz_tried; > + unsigned long long sz_tried; > unsigned long nr_applied; > - unsigned long sz_applied; > - unsigned long sz_ops_filter_passed; > + unsigned long long sz_applied; > + unsigned long long sz_ops_filter_passed; > unsigned long qt_exceeds; > }; > > diff --git a/mm/damon/modules-common.h b/mm/damon/modules-common.h > index c7048a449321..ae45d0eb960e 100644 > --- a/mm/damon/modules-common.h > +++ b/mm/damon/modules-common.h > @@ -36,11 +36,11 @@ > #define DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(stat, try_name, \ > succ_name, qt_exceed_name) \ > module_param_named(nr_##try_name, stat.nr_tried, ulong, 0400); \ > - module_param_named(bytes_##try_name, stat.sz_tried, ulong, \ > + module_param_named(bytes_##try_name, stat.sz_tried, ullong, \ > 0400); \ > module_param_named(nr_##succ_name, stat.nr_applied, ulong, \ > 0400); \ > - module_param_named(bytes_##succ_name, stat.sz_applied, ulong, \ > + module_param_named(bytes_##succ_name, stat.sz_applied, ullong, \ > 0400); \ > module_param_named(nr_##qt_exceed_name, 
stat.qt_exceeds, ulong, \ > 0400); > diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c > index 74056bcd6a2c..3c4882549a28 100644 > --- a/mm/damon/sysfs-schemes.c > +++ b/mm/damon/sysfs-schemes.c > @@ -199,10 +199,10 @@ static const struct kobj_type damon_sysfs_scheme_regions_ktype = { > struct damon_sysfs_stats { > struct kobject kobj; > unsigned long nr_tried; > - unsigned long sz_tried; > + unsigned long long sz_tried; > unsigned long nr_applied; > - unsigned long sz_applied; > - unsigned long sz_ops_filter_passed; > + unsigned long long sz_applied; > + unsigned long long sz_ops_filter_passed; > unsigned long qt_exceeds; > }; > > @@ -226,7 +226,7 @@ static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr, > struct damon_sysfs_stats *stats = container_of(kobj, > struct damon_sysfs_stats, kobj); > > - return sysfs_emit(buf, "%lu\n", stats->sz_tried); > + return sysfs_emit(buf, "%llu\n", stats->sz_tried); > } > > static ssize_t nr_applied_show(struct kobject *kobj, > @@ -244,7 +244,7 @@ static ssize_t sz_applied_show(struct kobject *kobj, > struct damon_sysfs_stats *stats = container_of(kobj, > struct damon_sysfs_stats, kobj); > > - return sysfs_emit(buf, "%lu\n", stats->sz_applied); > + return sysfs_emit(buf, "%llu\n", stats->sz_applied); > } > > static ssize_t sz_ops_filter_passed_show(struct kobject *kobj, > @@ -253,7 +253,7 @@ static ssize_t sz_ops_filter_passed_show(struct kobject *kobj, > struct damon_sysfs_stats *stats = container_of(kobj, > struct damon_sysfs_stats, kobj); > > - return sysfs_emit(buf, "%lu\n", stats->sz_ops_filter_passed); > + return sysfs_emit(buf, "%llu\n", stats->sz_ops_filter_passed); > } > > static ssize_t qt_exceeds_show(struct kobject *kobj, > -- > 2.34.1
Hi SJ, 在 2025/8/14 1:10, SeongJae Park 写道: > On Wed, 13 Aug 2025 13:07:05 +0800 Quanmin Yan <yanquanmin1@huawei.com> wrote: > >> For 32-bit systems, damos_stat now uses unsigned long long for byte >> statistics data to avoid integer overflow risks inherent in the >> previous design. > I suggested using the core-layer address unit on stat, and ask users to > multiply the addr_unit value to stat values if they want bytes value. If we > agree on it, I think this patch wouldn't really be required. Thank you for the guidance, I agree with your perspective. However, this patch doesn't actually belong in the addr_unit series, my apologies for the confusion. It is actually intended to address potential overflow issues in statistical data on 32-bit systems, and it is not directly related to addr_unit. This patch has been dropped from the v2 series. After introducing addr_unit, if it is set to a larger value, it can help mitigate the overflow issue. However, under the default setting of addr_unit=1, statistical data may still overflow after a sufficiently long runtime, for example, when sz_tried exceeds 4GB. Besides, please allow me to mention one point in advance: if addr is extended for use in modules(e.g. DAMON_RECLAIM, LRU_SORT) in the future, the term "bytes" in module_param_named(bytes_##try_name...), although multiplied by addr would yield the actual byte count, might cause confusion due to its seemingly direct naming. Overall, this patch isn’t critically important at the moment, nor does it offer a sufficiently robust solution, but I’d still appreciate hearing your perspective on the matter — I’m all ears. 
Thanks, Quanmin Yan >> Signed-off-by: Quanmin Yan <yanquanmin1@huawei.com> >> --- >> include/linux/damon.h | 6 +++--- >> mm/damon/modules-common.h | 4 ++-- >> mm/damon/sysfs-schemes.c | 12 ++++++------ >> 3 files changed, 11 insertions(+), 11 deletions(-) >> >> diff --git a/include/linux/damon.h b/include/linux/damon.h >> index aa045dcb5b5d..d85850cf06c5 100644 >> --- a/include/linux/damon.h >> +++ b/include/linux/damon.h >> @@ -333,10 +333,10 @@ struct damos_watermarks { >> */ >> struct damos_stat { >> unsigned long nr_tried; >> - unsigned long sz_tried; >> + unsigned long long sz_tried; >> unsigned long nr_applied; >> - unsigned long sz_applied; >> - unsigned long sz_ops_filter_passed; >> + unsigned long long sz_applied; >> + unsigned long long sz_ops_filter_passed; >> unsigned long qt_exceeds; >> }; >> >> diff --git a/mm/damon/modules-common.h b/mm/damon/modules-common.h >> index c7048a449321..ae45d0eb960e 100644 >> --- a/mm/damon/modules-common.h >> +++ b/mm/damon/modules-common.h >> @@ -36,11 +36,11 @@ >> #define DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(stat, try_name, \ >> succ_name, qt_exceed_name) \ >> module_param_named(nr_##try_name, stat.nr_tried, ulong, 0400); \ >> - module_param_named(bytes_##try_name, stat.sz_tried, ulong, \ >> + module_param_named(bytes_##try_name, stat.sz_tried, ullong, \ >> 0400); \ >> module_param_named(nr_##succ_name, stat.nr_applied, ulong, \ >> 0400); \ >> - module_param_named(bytes_##succ_name, stat.sz_applied, ulong, \ >> + module_param_named(bytes_##succ_name, stat.sz_applied, ullong, \ >> 0400); \ >> module_param_named(nr_##qt_exceed_name, stat.qt_exceeds, ulong, \ >> 0400); >> diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c >> index 74056bcd6a2c..3c4882549a28 100644 >> --- a/mm/damon/sysfs-schemes.c >> +++ b/mm/damon/sysfs-schemes.c >> @@ -199,10 +199,10 @@ static const struct kobj_type damon_sysfs_scheme_regions_ktype = { >> struct damon_sysfs_stats { >> struct kobject kobj; >> unsigned long nr_tried; >> - 
unsigned long sz_tried; >> + unsigned long long sz_tried; >> unsigned long nr_applied; >> - unsigned long sz_applied; >> - unsigned long sz_ops_filter_passed; >> + unsigned long long sz_applied; >> + unsigned long long sz_ops_filter_passed; >> unsigned long qt_exceeds; >> }; >> >> @@ -226,7 +226,7 @@ static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr, >> struct damon_sysfs_stats *stats = container_of(kobj, >> struct damon_sysfs_stats, kobj); >> >> - return sysfs_emit(buf, "%lu\n", stats->sz_tried); >> + return sysfs_emit(buf, "%llu\n", stats->sz_tried); >> } >> >> static ssize_t nr_applied_show(struct kobject *kobj, >> @@ -244,7 +244,7 @@ static ssize_t sz_applied_show(struct kobject *kobj, >> struct damon_sysfs_stats *stats = container_of(kobj, >> struct damon_sysfs_stats, kobj); >> >> - return sysfs_emit(buf, "%lu\n", stats->sz_applied); >> + return sysfs_emit(buf, "%llu\n", stats->sz_applied); >> } >> >> static ssize_t sz_ops_filter_passed_show(struct kobject *kobj, >> @@ -253,7 +253,7 @@ static ssize_t sz_ops_filter_passed_show(struct kobject *kobj, >> struct damon_sysfs_stats *stats = container_of(kobj, >> struct damon_sysfs_stats, kobj); >> >> - return sysfs_emit(buf, "%lu\n", stats->sz_ops_filter_passed); >> + return sysfs_emit(buf, "%llu\n", stats->sz_ops_filter_passed); >> } >> >> static ssize_t qt_exceeds_show(struct kobject *kobj, >> -- >> 2.34.1
On Wed, 20 Aug 2025 17:54:32 +0800 Quanmin Yan <yanquanmin1@huawei.com> wrote: > Hi SJ, > > 在 2025/8/14 1:10, SeongJae Park 写道: > > On Wed, 13 Aug 2025 13:07:05 +0800 Quanmin Yan <yanquanmin1@huawei.com> wrote: > > > >> For 32-bit systems, damos_stat now uses unsigned long long for byte > >> statistics data to avoid integer overflow risks inherent in the > >> previous design. > > I suggested using the core-layer address unit on stat, and ask users to > > multiply the addr_unit value to stat values if they want bytes value. If we > > agree on it, I think this patch wouldn't really be required. > > Thank you for the guidance, I agree with your perspective. However, this patch doesn't actually belong in the addr_unit series, my apologies > for the confusion. It is actually intended to address potential overflow issues > in statistical data on 32-bit systems, and it is not directly related to addr_unit. > This patch has been dropped from the v2 series. > > After introducing addr_unit, if it is set to a larger value, it can help mitigate > the overflow issue. However, under the default setting of addr_unit=1, statistical > data may still overflow after a sufficiently long runtime, for example, when sz_tried > exceeds 4GB. Thank you for clarifying this! My opinion is that, since we use core-layer address unit for DAMOS stats, as long as users set appropriate addr_unit, I think the overflow wouldn't really happen in real problematic ways? For example, if addr_unit is 2**10 (=1024) and the scheme has tried to 4 * 2**30 bytes (4 GiB) of region, the sz_tried value will be 4 * 2**20, so far from overflowing. I think still the chance to overflow is higher than 64bit, but maybe the user space tools can monitor and handle the overflow...? Maybe we can discuss further, but let's focus on the essential part for now. > > Besides, please allow me to mention one point in advance: if addr is extended for > use in modules(e.g. 
DAMON_RECLAIM, LRU_SORT) in the future, the term "bytes" in > module_param_named(bytes_##try_name...), although multiplied by addr would yield > the actual byte count, might cause confusion due to its seemingly direct naming. Thank you for the heads-up! I agree it could be confusing, but I have no real good idea at the moment, sorry. Let's revisit after the essential part work is done. > > Overall, this patch isn’t critically important at the moment, nor does it offer a > sufficiently robust solution, but I’d still appreciate hearing your perspective on > the matter — I’m all ears. Thank you again for the heads-up on the remaining issues. Yes, let's keep an eye on those and revisit them later. Thanks, SJ
© 2016 - 2025 Red Hat, Inc.