From nobody Thu Dec 18 20:34:44 2025 Received: from szxga07-in.huawei.com (szxga07-in.huawei.com [45.249.212.35]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1BABC1C5490 for ; Wed, 12 Feb 2025 02:27:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.35 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739327244; cv=none; b=JIXemmHB+CCUePcV8365Ir2Y4umwvLtLanYY/Nn2tTn0Z/19yczg8toegut7GdgTpkYNREqP1+vDlhssxidjLVAogCQbRU9fGVj95jy8Sh0rSRMxZTVbKA8o7fRVLAj8LigozFbP4qhC33fNyaSEw+Ve8L+70rkzGpSEx6ltkU0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1739327244; c=relaxed/simple; bh=yXGqKcJon/u8mbx1iNo+l7Z19hSzslXlgUOdkIET56U=; h=From:To:CC:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version:Content-Type; b=Zqtzz712kXLklCpIhkWV13jz9XOGhJWHwWkGj6G9UneMTqwlu4AGL22V4cvEwHXJrupT617baU+0cmmBNXUlsYDteusFS6A/2o23pBePI5eluTOfoIOtkHUxffQVoAbUmwZSbmIsKD67W4dgS2v4b1D98fkH9j9mpSabGsIjedg= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=huawei.com; spf=pass smtp.mailfrom=huawei.com; arc=none smtp.client-ip=45.249.212.35 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=quarantine dis=none) header.from=huawei.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huawei.com Received: from mail.maildlp.com (unknown [172.19.88.214]) by szxga07-in.huawei.com (SkyGuard) with ESMTP id 4Yt2DQ5XNvz1V6b9; Wed, 12 Feb 2025 10:23:34 +0800 (CST) Received: from kwepemj200003.china.huawei.com (unknown [7.202.194.15]) by mail.maildlp.com (Postfix) with ESMTPS id C02C41A016C; Wed, 12 Feb 2025 10:27:20 +0800 (CST) Received: from localhost.huawei.com (10.90.30.45) by kwepemj200003.china.huawei.com (7.202.194.15) with Microsoft SMTP Server (version=TLS1_2, 
cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1544.11; Wed, 12 Feb 2025 10:27:20 +0800 From: Qinxin Xia To: , CC: , , , , , , , Subject: [PATCH 2/3] dma-mapping: benchmark: add support for dma_map_sg Date: Wed, 12 Feb 2025 10:27:17 +0800 Message-ID: <20250212022718.1995504-3-xiaqinxin@huawei.com> X-Mailer: git-send-email 2.33.0 In-Reply-To: <20250212022718.1995504-1-xiaqinxin@huawei.com> References: <20250212022718.1995504-1-xiaqinxin@huawei.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-ClientProxiedBy: dggems704-chm.china.huawei.com (10.3.19.181) To kwepemj200003.china.huawei.com (7.202.194.15) Content-Type: text/plain; charset="utf-8" Add support for DMA scatter-gather mapping, intended for testing mapping performance. This is achieved by introducing the dma_sg_map_param structure and related functions, which implement the scatter-gather mapping preparation, mapping, and unmapping operations. Additionally, the dma_map_benchmark_ops array is updated to include the operations for scatter-gather mapping. This commit aims to provide a wider range of mapping performance tests to cater to different scenarios. 
Signed-off-by: Qinxin Xia --- include/linux/map_benchmark.h | 1 + kernel/dma/map_benchmark.c | 102 ++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+) diff --git a/include/linux/map_benchmark.h b/include/linux/map_benchmark.h index 054db02a03a7..a9c1a104ba4f 100644 --- a/include/linux/map_benchmark.h +++ b/include/linux/map_benchmark.h @@ -17,6 +17,7 @@ =20 enum { DMA_MAP_SINGLE_MODE, + DMA_MAP_SG_MODE, DMA_MAP_MODE_MAX }; =20 diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c index d8ec0ce058d8..b5828eeb3db7 100644 --- a/kernel/dma/map_benchmark.c +++ b/kernel/dma/map_benchmark.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include =20 @@ -111,8 +112,109 @@ static struct map_benchmark_ops dma_single_map_benchm= ark_ops =3D { .do_unmap =3D dma_single_map_benchmark_do_unmap, }; =20 +struct dma_sg_map_param { + struct sg_table sgt; + struct device *dev; + void **buf; + u32 npages; + u32 dma_dir; +}; + +static void *dma_sg_map_benchmark_prepare(struct map_benchmark_data *map) +{ + struct scatterlist *sg; + int i =3D 0; + + struct dma_sg_map_param *mparam __free(kfree) =3D kzalloc(sizeof(*mparam)= , GFP_KERNEL); + if (!mparam) + return NULL; + + mparam->npages =3D map->bparam.granule; + mparam->dma_dir =3D map->bparam.dma_dir; + mparam->dev =3D map->dev; + mparam->buf =3D kmalloc_array(mparam->npages, sizeof(*mparam->buf), + GFP_KERNEL); + if (!mparam->buf) + goto err1; + + if (sg_alloc_table(&mparam->sgt, mparam->npages, GFP_KERNEL)) + goto err2; + + for_each_sgtable_sg(&mparam->sgt, sg, i) { + mparam->buf[i] =3D (void *)__get_free_page(GFP_KERNEL); + if (!mparam->buf[i]) + goto err3; + + if (mparam->dma_dir !=3D DMA_FROM_DEVICE) + memset(mparam->buf[i], 0x66, PAGE_SIZE); + + sg_set_buf(sg, mparam->buf[i], PAGE_SIZE); + } + + return_ptr(mparam); + +err3: + while (i-- > 0) + free_page((unsigned long)mparam->buf[i]); + + pr_err("dma_map_sg failed get free page on %s\n", dev_name(mparam->dev)); + 
sg_free_table(&mparam->sgt); +err2: + pr_err("dma_map_sg failed alloc sg table on %s\n", dev_name(mparam->dev)); + kfree(mparam->buf); +err1: + pr_err("dma_map_sg failed alloc mparam buf on %s\n", dev_name(mparam->dev= )); + return NULL; +} + +static void dma_sg_map_benchmark_unprepare(void *arg) +{ + struct dma_sg_map_param *mparam =3D arg; + int i; + + for (i =3D 0; i < mparam->npages; i++) + free_page((unsigned long)mparam->buf[i]); + + sg_free_table(&mparam->sgt); + + kfree(mparam->buf); + kfree(mparam); +} + +static int dma_sg_map_benchmark_do_map(void *arg) +{ + struct dma_sg_map_param *mparam =3D arg; + + int sg_mapped =3D dma_map_sg(mparam->dev, mparam->sgt.sgl, + mparam->npages, mparam->dma_dir); + if (!sg_mapped) { + pr_err("dma_map_sg failed on %s\n", dev_name(mparam->dev)); + return -ENOMEM; + } + + return 0; +} + +static int dma_sg_map_benchmark_do_unmap(void *arg) +{ + struct dma_sg_map_param *mparam =3D arg; + + dma_unmap_sg(mparam->dev, mparam->sgt.sgl, mparam->npages, + mparam->dma_dir); + + return 0; +} + +static struct map_benchmark_ops dma_sg_map_benchmark_ops =3D { + .prepare =3D dma_sg_map_benchmark_prepare, + .unprepare =3D dma_sg_map_benchmark_unprepare, + .do_map =3D dma_sg_map_benchmark_do_map, + .do_unmap =3D dma_sg_map_benchmark_do_unmap, +}; + static struct map_benchmark_ops *dma_map_benchmark_ops[DMA_MAP_MODE_MAX] = =3D { [DMA_MAP_SINGLE_MODE] =3D &dma_single_map_benchmark_ops, + [DMA_MAP_SG_MODE] =3D &dma_sg_map_benchmark_ops, }; =20 static int map_benchmark_thread(void *data) --=20 2.33.0