This patch refactors the DMA map benchmark framework to make it more
flexible and adaptable to other mapping modes in the future, by
abstracting the framework into five interfaces: prepare, unprepare,
prepare_data, do_map, and do_unmap. A new map mode can then be
introduced without major modifications to the existing code structure.
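For example, a hypothetical scatter-gather mode would only need to
supply its own ops and register them in the mode table (a sketch only:
DMA_MAP_BENCH_SG_MODE and the dma_sg_* callbacks below are not part of
this patch):

	static struct map_benchmark_ops dma_sg_map_benchmark_ops = {
		.prepare	= dma_sg_map_benchmark_prepare,
		.unprepare	= dma_sg_map_benchmark_unprepare,
		.do_map		= dma_sg_map_benchmark_do_map,
		.do_unmap	= dma_sg_map_benchmark_do_unmap,
		/* .prepare_data is optional and may stay NULL */
	};

	static struct map_benchmark_ops *dma_map_benchmark_ops[DMA_MAP_BENCH_MODE_MAX] = {
		[DMA_MAP_BENCH_SINGLE_MODE] = &dma_single_map_benchmark_ops,
		[DMA_MAP_BENCH_SG_MODE]	    = &dma_sg_map_benchmark_ops,
	};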
Reviewed-by: Barry Song <baohua@kernel.org>
Signed-off-by: Qinxin Xia <xiaqinxin@huawei.com>
---
include/uapi/linux/map_benchmark.h | 8 +-
kernel/dma/map_benchmark.c | 130 ++++++++++++++++++++++++-----
2 files changed, 115 insertions(+), 23 deletions(-)
diff --git a/include/uapi/linux/map_benchmark.h b/include/uapi/linux/map_benchmark.h
index c2d91088a40d..e076748f2120 100644
--- a/include/uapi/linux/map_benchmark.h
+++ b/include/uapi/linux/map_benchmark.h
@@ -17,6 +17,11 @@
#define DMA_MAP_TO_DEVICE 1
#define DMA_MAP_FROM_DEVICE 2
+enum {
+ DMA_MAP_BENCH_SINGLE_MODE,
+ DMA_MAP_BENCH_MODE_MAX
+};
+
struct map_benchmark {
__u64 avg_map_100ns; /* average map latency in 100ns */
__u64 map_stddev; /* standard deviation of map latency */
@@ -29,7 +34,8 @@ struct map_benchmark {
__u32 dma_dir; /* DMA data direction */
__u32 dma_trans_ns; /* time for DMA transmission in ns */
__u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */
- __u8 expansion[76]; /* For future use */
+ __u8 map_mode; /* the mode of dma map */
+ __u8 expansion[75]; /* For future use */
};
#endif /* _UAPI_DMA_BENCHMARK_H */
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index 794041a39e65..a6345c10901c 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -31,17 +32,105 @@ struct map_benchmark_data {
atomic64_t loops;
};
+struct map_benchmark_ops {
+ void *(*prepare)(struct map_benchmark_data *map);
+ void (*unprepare)(void *mparam);
+ void (*prepare_data)(void *mparam);
+ int (*do_map)(void *mparam);
+ void (*do_unmap)(void *mparam);
+};
+
+struct dma_single_map_param {
+ struct device *dev;
+ dma_addr_t addr;
+ void *xbuf;
+ u32 npages;
+ u32 dma_dir;
+};
+
+static void *dma_single_map_benchmark_prepare(struct map_benchmark_data *map)
+{
+ struct dma_single_map_param *params __free(kfree) = kzalloc(sizeof(*params),
+ GFP_KERNEL);
+ if (!params)
+ return NULL;
+
+ params->npages = map->bparam.granule;
+ params->dma_dir = map->bparam.dma_dir;
+ params->dev = map->dev;
+ params->xbuf = alloc_pages_exact(params->npages * PAGE_SIZE, GFP_KERNEL);
+ if (!params->xbuf)
+ return NULL;
+
+ return_ptr(params);
+}
+
+static void dma_single_map_benchmark_unprepare(void *mparam)
+{
+ struct dma_single_map_param *params = mparam;
+
+ free_pages_exact(params->xbuf, params->npages * PAGE_SIZE);
+ kfree(params);
+}
+
+static void dma_single_map_benchmark_prepare_data(void *mparam)
+{
+ struct dma_single_map_param *params = mparam;
+
+ /*
+ * for a non-coherent device, if we don't stain them in the
+ * cache, this will give an underestimate of the real-world
+ * overhead of BIDIRECTIONAL or TO_DEVICE mappings;
+ * 66 means everything goes well! 66 is lucky.
+ */
+ if (params->dma_dir != DMA_FROM_DEVICE)
+ memset(params->xbuf, 0x66, params->npages * PAGE_SIZE);
+}
+
+static int dma_single_map_benchmark_do_map(void *mparam)
+{
+ struct dma_single_map_param *params = mparam;
+
+ params->addr = dma_map_single(params->dev, params->xbuf,
+ params->npages * PAGE_SIZE, params->dma_dir);
+ if (unlikely(dma_mapping_error(params->dev, params->addr))) {
+ pr_err("dma_map_single failed on %s\n", dev_name(params->dev));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dma_single_map_benchmark_do_unmap(void *mparam)
+{
+ struct dma_single_map_param *params = mparam;
+
+ dma_unmap_single(params->dev, params->addr,
+ params->npages * PAGE_SIZE, params->dma_dir);
+}
+
+static struct map_benchmark_ops dma_single_map_benchmark_ops = {
+ .prepare = dma_single_map_benchmark_prepare,
+ .unprepare = dma_single_map_benchmark_unprepare,
+ .prepare_data = dma_single_map_benchmark_prepare_data,
+ .do_map = dma_single_map_benchmark_do_map,
+ .do_unmap = dma_single_map_benchmark_do_unmap,
+};
+
+static struct map_benchmark_ops *dma_map_benchmark_ops[DMA_MAP_BENCH_MODE_MAX] = {
+ [DMA_MAP_BENCH_SINGLE_MODE] = &dma_single_map_benchmark_ops,
+};
+
static int map_benchmark_thread(void *data)
{
- void *buf;
- dma_addr_t dma_addr;
struct map_benchmark_data *map = data;
- int npages = map->bparam.granule;
- u64 size = npages * PAGE_SIZE;
+ __u8 map_mode = map->bparam.map_mode;
int ret = 0;
- buf = alloc_pages_exact(size, GFP_KERNEL);
- if (!buf)
+ struct map_benchmark_ops *mb_ops = dma_map_benchmark_ops[map_mode];
+ void *mparam = mb_ops->prepare(map);
+
+ if (!mparam)
return -ENOMEM;
while (!kthread_should_stop()) {
@@ -49,23 +138,14 @@ static int map_benchmark_thread(void *data)
ktime_t map_stime, map_etime, unmap_stime, unmap_etime;
ktime_t map_delta, unmap_delta;
- /*
- * for a non-coherent device, if we don't stain them in the
- * cache, this will give an underestimate of the real-world
- * overhead of BIDIRECTIONAL or TO_DEVICE mappings;
- * 66 means evertything goes well! 66 is lucky.
- */
- if (map->dir != DMA_FROM_DEVICE)
- memset(buf, 0x66, size);
+ if (!mb_ops->prepare_data)
+ mb_ops->prepare_data(mparam);
map_stime = ktime_get();
- dma_addr = dma_map_single(map->dev, buf, size, map->dir);
- if (unlikely(dma_mapping_error(map->dev, dma_addr))) {
- pr_err("dma_map_single failed on %s\n",
- dev_name(map->dev));
- ret = -ENOMEM;
+ ret = mb_ops->do_map(mparam);
+ if (ret)
goto out;
- }
+
map_etime = ktime_get();
map_delta = ktime_sub(map_etime, map_stime);
@@ -73,7 +153,8 @@ static int map_benchmark_thread(void *data)
ndelay(map->bparam.dma_trans_ns);
unmap_stime = ktime_get();
- dma_unmap_single(map->dev, dma_addr, size, map->dir);
+ mb_ops->do_unmap(mparam);
+
unmap_etime = ktime_get();
unmap_delta = ktime_sub(unmap_etime, unmap_stime);
@@ -108,7 +189,7 @@ static int map_benchmark_thread(void *data)
}
out:
- free_pages_exact(buf, size);
+ mb_ops->unprepare(mparam);
return ret;
}
@@ -209,6 +290,11 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case DMA_MAP_BENCHMARK:
+ if (map->bparam.map_mode >= DMA_MAP_BENCH_MODE_MAX) {
+ pr_err("invalid map mode\n");
+ return -EINVAL;
+ }
+
if (map->bparam.threads == 0 ||
map->bparam.threads > DMA_MAP_MAX_THREADS) {
pr_err("invalid thread number\n");
--
2.33.0
On Tue, Dec 23, 2025 at 4:33 AM Qinxin Xia <xiaqinxin@huawei.com> wrote:
>
> [...]
> diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
> index 794041a39e65..a6345c10901c 100644
> --- a/kernel/dma/map_benchmark.c
> +++ b/kernel/dma/map_benchmark.c
> @@ -5,6 +5,7 @@
>
> #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
>
> +#include <linux/cleanup.h>
Are you still using scope-api and need cleanup.h?
> [...]
> @@ -49,23 +138,14 @@ static int map_benchmark_thread(void *data)
> ktime_t map_stime, map_etime, unmap_stime, unmap_etime;
> ktime_t map_delta, unmap_delta;
>
> - /*
> - * for a non-coherent device, if we don't stain them in the
> - * cache, this will give an underestimate of the real-world
> - * overhead of BIDIRECTIONAL or TO_DEVICE mappings;
> - * 66 means evertything goes well! 66 is lucky.
> - */
> - if (map->dir != DMA_FROM_DEVICE)
> - memset(buf, 0x66, size);
> + if (!mb_ops->prepare_data)
> + mb_ops->prepare_data(mparam);
Did you actually test it? And why don't you need prepare_data() sometimes?
Thanks
Barry
On 2025/12/23 05:03:46, Barry Song <21cnbao@gmail.com> wrote:
> On Tue, Dec 23, 2025 at 4:33 AM Qinxin Xia <xiaqinxin@huawei.com> wrote:
>> [...]
>> diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
>> index 794041a39e65..a6345c10901c 100644
>> --- a/kernel/dma/map_benchmark.c
>> +++ b/kernel/dma/map_benchmark.c
>> @@ -5,6 +5,7 @@
>>
>> #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
>>
>> +#include <linux/cleanup.h>
>
> Are you still using scope-api and need cleanup.h?
>
Yes, dma_single_map_benchmark_prepare() still uses the scope-based API:

	struct dma_single_map_param *params __free(kfree)
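A minimal annotated sketch of the same flow:

	struct dma_single_map_param *params __free(kfree) =
		kzalloc(sizeof(*params), GFP_KERNEL);

	if (!params)
		return NULL;	/* nothing allocated, nothing to free */

	params->xbuf = alloc_pages_exact(map->bparam.granule * PAGE_SIZE,
					 GFP_KERNEL);
	if (!params->xbuf)
		return NULL;	/* 'params' is kfree()d automatically here */

	return_ptr(params);	/* expands to: return no_free_ptr(params); */

__free(kfree) arms kfree() to run when 'params' goes out of scope, so
the early-return error paths need no explicit kfree(); return_ptr()
disarms the cleanup and hands ownership back to the caller.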
>> [...]
>> @@ -49,23 +138,14 @@ static int map_benchmark_thread(void *data)
>> ktime_t map_stime, map_etime, unmap_stime, unmap_etime;
>> ktime_t map_delta, unmap_delta;
>>
>> - /*
>> - * for a non-coherent device, if we don't stain them in the
>> - * cache, this will give an underestimate of the real-world
>> - * overhead of BIDIRECTIONAL or TO_DEVICE mappings;
>> - * 66 means evertything goes well! 66 is lucky.
>> - */
>> - if (map->dir != DMA_FROM_DEVICE)
>> - memset(buf, 0x66, size);
>> + if (!mb_ops->prepare_data)
>> + mb_ops->prepare_data(mparam);
>
> Did you actually test it? And why don't you need prepare_data() sometimes?
>
> Thanks
> Barry
'if (mb_ops->prepare_data)' is the correct condition.
If you want to run tests before I send the next version, you can modify
the patch this way. I have tested it and it works.

To stay compatible with future modes, 'prepare_data' may not always be
necessary, so I added a NULL check here. We can drop it from the
current version and reintroduce it once such a mode appears.
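The corrected hunk for the next version would read:

	/* prepare_data is optional; call it only when the mode provides one */
	if (mb_ops->prepare_data)
		mb_ops->prepare_data(mparam);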
--
Thanks,
Qinxin
> >> + if (!mb_ops->prepare_data)
> >> + mb_ops->prepare_data(mparam);
> >
> > Did you actually test it? And why don't you need prepare_data() sometimes?
> >
> > Thanks
> > Barry
> 'if (mb_ops->prepare_data)' is the correct condition.
> If you want to run tests before I send the next version, you can modify
> the patch this way. I have tested it and it works.
I’ve reviewed it many times. Your patch reads:

+	if (!mb_ops->prepare_data)
+		mb_ops->prepare_data(mparam);

not:

	if (mb_ops->prepare_data)
		mb_ops->prepare_data(mparam);

Am I crazy?

Thanks
Barry
On 2025/12/23 10:58:28, Barry Song <21cnbao@gmail.com> wrote:
> I’ve reviewed it many times. Your patch reads:
>
> +	if (!mb_ops->prepare_data)
> +		mb_ops->prepare_data(mparam);
>
> not:
>
> 	if (mb_ops->prepare_data)
> 		mb_ops->prepare_data(mparam);
>
> Am I crazy?
>
> Thanks
> Barry

Yes, I've found the mistake...

In my previous reply I gave the correct condition,
"if (mb_ops->prepare_data)", and suggested that if you want to test
before the next version, you can modify the patch that way.

Sorry for the confusion.

--
Thanks,
Qinxin
On 2025/12/23 11:13:32, Qinxin Xia <xiaqinxin@huawei.com> wrote:
> On 2025/12/23 10:58:28, Barry Song <21cnbao@gmail.com> wrote:
>> I’ve reviewed it many times. Your patch reads:
>>
>> +	if (!mb_ops->prepare_data)
>> +		mb_ops->prepare_data(mparam);
>>
>> Am I crazy?
> [...]
> Sorry for the confusion.

if (!mb_ops->prepare_data) is a bug. Thank you for catching it. I've
tested the correct version and it works.

--
Thanks,
Qinxin
> To stay compatible with future modes, 'prepare_data' may not always be
> necessary, so I added a NULL check here. We can drop it from the
> current version and reintroduce it once such a mode appears.

Right, feel free to drop it. I guess __GFP_ZERO for the allocation is
sufficient.
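I.e. something like (a sketch of the suggested simplification, not a
tested change):

	/* zero-fill at allocation time instead of staining the buffer in
	 * a separate prepare_data() step */
	params->xbuf = alloc_pages_exact(params->npages * PAGE_SIZE,
					 GFP_KERNEL | __GFP_ZERO);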