Add support for DMA scatter-gather mapping, intended for testing
mapping performance. This is achieved by introducing the dma_sg_map_param
structure and related functions, which implement the scatter-gather
mapping preparation, mapping, and unmapping operations.
Additionally, the dma_map_benchmark_ops array is updated to include
the scatter-gather operations. This provides a wider range of mapping
performance tests to cater to different scenarios.
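
For illustration, the new mode can be driven from userspace roughly as in
the sketch below. This is only an example, not part of the patch: it assumes
the map_mode field of struct map_benchmark (added earlier in this series),
an installed uapi header that carries it, and a device already bound to the
dma_map_benchmark driver.

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/map_benchmark.h>

  int main(void)
  {
          struct map_benchmark bp;
          int fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }

          memset(&bp, 0, sizeof(bp));
          bp.threads  = 1;
          bp.seconds  = 10;
          bp.node     = -1;                       /* no NUMA binding */
          bp.dma_bits = 32;
          bp.dma_dir  = DMA_MAP_BIDIRECTIONAL;
          bp.granule  = 8;                        /* 8 sg entries, one page each */
          bp.map_mode = DMA_MAP_BENCH_SG_MODE;    /* mode added by this patch */

          if (ioctl(fd, DMA_MAP_BENCHMARK, &bp)) {
                  perror("ioctl DMA_MAP_BENCHMARK");
                  close(fd);
                  return 1;
          }

          printf("avg map %llu ns, avg unmap %llu ns\n",
                 (unsigned long long)bp.avg_map_100ns * 100,
                 (unsigned long long)bp.avg_unmap_100ns * 100);
          close(fd);
          return 0;
  }
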
Reviewed-by: Barry Song <baohua@kernel.org>
Signed-off-by: Qinxin Xia <xiaqinxin@huawei.com>
---
include/uapi/linux/map_benchmark.h | 1 +
kernel/dma/map_benchmark.c | 117 ++++++++++++++++++++++++++++-
2 files changed, 117 insertions(+), 1 deletion(-)
diff --git a/include/uapi/linux/map_benchmark.h b/include/uapi/linux/map_benchmark.h
index e076748f2120..5fa3745e1651 100644
--- a/include/uapi/linux/map_benchmark.h
+++ b/include/uapi/linux/map_benchmark.h
@@ -19,6 +19,7 @@
enum {
DMA_MAP_BENCH_SINGLE_MODE,
+ DMA_MAP_BENCH_SG_MODE,
DMA_MAP_BENCH_MODE_MAX
};
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index a6345c10901c..76c0e1d5ff08 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
#include <uapi/linux/map_benchmark.h>
@@ -117,8 +118,121 @@ static struct map_benchmark_ops dma_single_map_benchmark_ops = {
.do_unmap = dma_single_map_benchmark_do_unmap,
};
+struct dma_sg_map_param {
+ struct sg_table sgt;
+ struct device *dev;
+ void **buf;
+ u32 npages;
+ u32 dma_dir;
+};
+
+static void *dma_sg_map_benchmark_prepare(struct map_benchmark_data *map)
+{
+ struct scatterlist *sg;
+ int i;
+
+ struct dma_sg_map_param *params __free(kfree) = kzalloc(sizeof(*params), GFP_KERNEL);
+
+ if (!params)
+ return NULL;
+ /*
+ * Set the number of scatterlist entries based on the granule.
+ * In SG mode, 'granule' represents the number of scatterlist entries.
+ * Each scatterlist entry corresponds to a single page.
+ */
+ params->npages = map->bparam.granule;
+ params->dma_dir = map->bparam.dma_dir;
+ params->dev = map->dev;
+ params->buf = kmalloc_array(params->npages, sizeof(*params->buf),
+ GFP_KERNEL);
+ if (!params->buf)
+ goto out;
+
+ if (sg_alloc_table(&params->sgt, params->npages, GFP_KERNEL))
+ goto free_buf;
+
+ for_each_sgtable_sg(&params->sgt, sg, i) {
+ params->buf[i] = (void *)__get_free_page(GFP_KERNEL);
+ if (!params->buf[i])
+ goto free_page;
+
+ sg_set_buf(sg, params->buf[i], PAGE_SIZE);
+ }
+
+ return_ptr(params);
+
+free_page:
+ while (i-- > 0)
+ free_page((unsigned long)params->buf[i]);
+
+ sg_free_table(&params->sgt);
+free_buf:
+ kfree(params->buf);
+out:
+ return NULL;
+}
+
+static void dma_sg_map_benchmark_unprepare(void *mparam)
+{
+ struct dma_sg_map_param *params = mparam;
+ int i;
+
+ for (i = 0; i < params->npages; i++)
+ free_page((unsigned long)params->buf[i]);
+
+ sg_free_table(&params->sgt);
+
+ kfree(params->buf);
+ kfree(params);
+}
+
+static void dma_sg_map_benchmark_prepare_data(void *mparam)
+{
+ struct dma_sg_map_param *params = mparam;
+ struct scatterlist *sg;
+ int i = 0;
+
+ if (params->dma_dir == DMA_FROM_DEVICE)
+ return;
+
+ for_each_sgtable_sg(&params->sgt, sg, i)
+ memset(params->buf[i], 0x66, PAGE_SIZE);
+}
+
+static int dma_sg_map_benchmark_do_map(void *mparam)
+{
+ struct dma_sg_map_param *params = mparam;
+ int ret = 0;
+
+ int sg_mapped = dma_map_sg(params->dev, params->sgt.sgl,
+ params->npages, params->dma_dir);
+ if (!sg_mapped) {
+ pr_err("dma_map_sg failed on %s\n", dev_name(params->dev));
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+static void dma_sg_map_benchmark_do_unmap(void *mparam)
+{
+ struct dma_sg_map_param *params = mparam;
+
+ dma_unmap_sg(params->dev, params->sgt.sgl, params->npages,
+ params->dma_dir);
+}
+
+static struct map_benchmark_ops dma_sg_map_benchmark_ops = {
+ .prepare = dma_sg_map_benchmark_prepare,
+ .unprepare = dma_sg_map_benchmark_unprepare,
+ .prepare_data = dma_sg_map_benchmark_prepare_data,
+ .do_map = dma_sg_map_benchmark_do_map,
+ .do_unmap = dma_sg_map_benchmark_do_unmap,
+};
+
static struct map_benchmark_ops *dma_map_benchmark_ops[DMA_MAP_BENCH_MODE_MAX] = {
[DMA_MAP_BENCH_SINGLE_MODE] = &dma_single_map_benchmark_ops,
+ [DMA_MAP_BENCH_SG_MODE] = &dma_sg_map_benchmark_ops,
};
static int map_benchmark_thread(void *data)
@@ -290,7 +404,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case DMA_MAP_BENCHMARK:
- if (map->bparam.map_mode >= DMA_MAP_BENCH_MODE_MAX) {
+ if (map->bparam.threads < 0 ||
+ map->bparam.map_mode >= DMA_MAP_BENCH_MODE_MAX) {
pr_err("invalid map mode\n");
return -EINVAL;
}
--
2.33.0
On Tue, Dec 23, 2025 at 4:33 AM Qinxin Xia <xiaqinxin@huawei.com> wrote:
>
> +static struct map_benchmark_ops dma_sg_map_benchmark_ops = {
> + .prepare = dma_sg_map_benchmark_prepare,
> + .unprepare = dma_sg_map_benchmark_unprepare,
> + .prepare_data = dma_sg_map_benchmark_prepare_data,
Instead of using both prepare and prepare_data, which are ambiguous,
it would be better to rename prepare_data to initialize_data.
Thanks
Barry