From: Maxime Ripard <mripard@kernel.org>
Date: Tue, 01 Apr 2025 17:12:22 +0200
Subject: [PATCH v2 2/2] dma-buf: heaps: Introduce a new heap for reserved
 memory
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
Message-Id: <20250401-dma-buf-ecc-heap-v2-2-043fd006a1af@kernel.org>
References: <20250401-dma-buf-ecc-heap-v2-0-043fd006a1af@kernel.org>
In-Reply-To: <20250401-dma-buf-ecc-heap-v2-0-043fd006a1af@kernel.org>
To: Rob Herring <robh@kernel.org>, Saravana Kannan <saravanak@google.com>,
 Sumit Semwal <sumit.semwal@linaro.org>,
 Benjamin Gaignard <benjamin.gaignard@collabora.com>,
 Brian Starkey <Brian.Starkey@arm.com>, John Stultz <jstultz@google.com>,
 "T.J. Mercier" <tjmercier@google.com>,
 Christian König <christian.koenig@amd.com>
Cc: Mattijs Korpershoek <mkorpershoek@kernel.org>,
 devicetree@vger.kernel.org, linux-kernel@vger.kernel.org,
 linux-media@vger.kernel.org, dri-devel@lists.freedesktop.org,
 linaro-mm-sig@lists.linaro.org, Maxime Ripard <mripard@kernel.org>
X-Mailer: b4 0.14.2

Some reserved memory regions might have a particular memory setup or
attributes that make them good candidates for heaps.

Let's provide a new heap type that creates one heap for each reserved
memory region flagged as exportable.
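
For instance, a region would opt in with something along these lines
(node name, addresses and sizes are made up for the example; "export"
is the flag this patch scans for):

    reserved-memory {
        #address-cells = <1>;
        #size-cells = <1>;
        ranges;

        framebuffer@60000000 {
            reg = <0x60000000 0x800000>;
            export;
        };
    };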

Signed-off-by: Maxime Ripard <mripard@kernel.org>
---
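Once registered, each region shows up as a regular dma-buf heap
character device, so allocations should work like with any other heap.
A minimal userspace sketch, for illustration only (the device name
derives from the reserved-memory node name, so the path below is just
an example):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    static int alloc_carveout_buf(__u64 len)
    {
        struct dma_heap_allocation_data data = {
            .len = len,
            .fd_flags = O_RDWR | O_CLOEXEC,
        };
        int heap = open("/dev/dma_heap/framebuffer@60000000",
                        O_RDONLY | O_CLOEXEC);

        if (heap < 0)
            return -1;

        if (ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
            close(heap);
            return -1;
        }

        close(heap);

        /* data.fd now holds the new dma-buf file descriptor */
        return data.fd;
    }
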
 drivers/dma-buf/heaps/Kconfig         |   8 +
 drivers/dma-buf/heaps/Makefile        |   1 +
 drivers/dma-buf/heaps/carveout_heap.c | 380 ++++++++++++++++++++++++++++++++++
 3 files changed, 389 insertions(+)

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06c422644e8aadaf5aff2bd9a33c49c1ba3..c6981d696733b4d8d0c3f6f5a37d967fd6a1a4a2 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -1,5 +1,13 @@
+config DMABUF_HEAPS_CARVEOUT
+	bool "Carveout Heaps"
+	depends on DMABUF_HEAPS
+	help
+	  Choose this option to enable the carveout dmabuf heap. The carveout
+	  heap is backed by pages from reserved memory regions flagged as
+	  exportable. If in doubt, say Y.
+
 config DMABUF_HEAPS_SYSTEM
 	bool "DMA-BUF System Heap"
 	depends on DMABUF_HEAPS
 	help
 	  Choose this option to enable the system dmabuf heap. The system heap
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 974467791032ffb8a7aba17b1407d9a19b3f3b44..b734647ad5c84f449106748160258e372f153df2 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DMABUF_HEAPS_CARVEOUT)	+= carveout_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
diff --git a/drivers/dma-buf/heaps/carveout_heap.c b/drivers/dma-buf/heaps/carveout_heap.c
new file mode 100644
index 0000000000000000000000000000000000000000..f7198b781ea57f4f60e554d917c9277e9a716b16
--- /dev/null
+++ b/drivers/dma-buf/heaps/carveout_heap.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/of_reserved_mem.h>
+
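+/*
+ * Per-heap private data: the dma-heap handle plus a gen_pool covering
+ * the whole reserved memory region backing this heap.
+ */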
+struct carveout_heap_priv {
+	struct dma_heap *heap;
+	struct gen_pool *pool;
+};
+
+struct carveout_heap_buffer_priv {
+	struct mutex lock;
+	struct list_head attachments;
+
+	unsigned long num_pages;
+	struct carveout_heap_priv *heap;
+	dma_addr_t daddr;
+	void *vaddr;
+	unsigned int vmap_cnt;
+};
+
+struct carveout_heap_attachment {
+	struct list_head head;
+	struct sg_table table;
+
+	struct device *dev;
+	bool mapped;
+};
+
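+/*
+ * The carveout region is physically contiguous, so a single-entry
+ * scatterlist spanning the whole buffer is enough per attachment.
+ */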
+static int carveout_heap_attach(struct dma_buf *buf,
+				struct dma_buf_attachment *attachment)
+{
+	struct carveout_heap_buffer_priv *priv = buf->priv;
+	struct carveout_heap_attachment *a;
+	struct sg_table *sgt;
+	unsigned long len = priv->num_pages * PAGE_SIZE;
+	int ret;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&a->head);
+	a->dev = attachment->dev;
+	attachment->priv = a;
+
+	sgt = &a->table;
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (ret)
+		goto err_cleanup_attach;
+
+	/* dma_map_sgtable() sets up the actual DMA address at map time */
+	sg_set_page(sgt->sgl, virt_to_page(priv->vaddr), len, 0);
+
+	mutex_lock(&priv->lock);
+	list_add(&a->head, &priv->attachments);
+	mutex_unlock(&priv->lock);
+
+	return 0;
+
+err_cleanup_attach:
+	kfree(a);
+	return ret;
+}
+
+static void carveout_heap_detach(struct dma_buf *dmabuf,
+				 struct dma_buf_attachment *attachment)
+{
+	struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+	struct carveout_heap_attachment *a = attachment->priv;
+
+	mutex_lock(&priv->lock);
+	list_del(&a->head);
+	mutex_unlock(&priv->lock);
+
+	sg_free_table(&a->table);
+	kfree(a);
+}
+
+static struct sg_table *
+carveout_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+			  enum dma_data_direction direction)
+{
+	struct carveout_heap_attachment *a = attachment->priv;
+	struct sg_table *table = &a->table;
+	int ret;
+
+	ret = dma_map_sgtable(a->dev, table, direction, 0);
+	if (ret)
+		return ERR_PTR(ret);
+
+	a->mapped = true;
+
+	return table;
+}
+
+static void carveout_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+					struct sg_table *table,
+					enum dma_data_direction direction)
+{
+	struct carveout_heap_attachment *a = attachment->priv;
+
+	a->mapped = false;
+	dma_unmap_sgtable(a->dev, table, direction, 0);
+}
+
+static int
+carveout_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+				       enum dma_data_direction direction)
+{
+	struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+	struct carveout_heap_attachment *a;
+	unsigned long len = priv->num_pages * PAGE_SIZE;
+
+	mutex_lock(&priv->lock);
+
+	if (priv->vmap_cnt > 0)
+		invalidate_kernel_vmap_range(priv->vaddr, len);
+
+	list_for_each_entry(a, &priv->attachments, head) {
+		if (!a->mapped)
+			continue;
+
+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+	}
+
+	mutex_unlock(&priv->lock);
+
+	return 0;
+}
+
+static int
+carveout_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+				     enum dma_data_direction direction)
+{
+	struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+	struct carveout_heap_attachment *a;
+	unsigned long len = priv->num_pages * PAGE_SIZE;
+
+	mutex_lock(&priv->lock);
+
+	if (priv->vmap_cnt > 0)
+		flush_kernel_vmap_range(priv->vaddr, len);
+
+	list_for_each_entry(a, &priv->attachments, head) {
+		if (!a->mapped)
+			continue;
+
+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+	}
+
+	mutex_unlock(&priv->lock);
+
+	return 0;
+}
+
+static int carveout_heap_mmap(struct dma_buf *dmabuf,
+			      struct vm_area_struct *vma)
+{
+	struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+	unsigned long len = priv->num_pages * PAGE_SIZE;
+	struct page *page = virt_to_page(priv->vaddr);
+
+	return remap_pfn_range(vma, vma->vm_start, page_to_pfn(page),
+			       len, vma->vm_page_prot);
+}
+
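+/*
+ * Buffers live in the memremap()ed carveout, so a kernel mapping
+ * already exists; vmap hands out that address and counts users.
+ */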
+static int carveout_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+	struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+
+	mutex_lock(&priv->lock);
+
+	iosys_map_set_vaddr(map, priv->vaddr);
+	priv->vmap_cnt++;
+
+	mutex_unlock(&priv->lock);
+
+	return 0;
+}
+
+static void carveout_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+	struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+
+	mutex_lock(&priv->lock);
+
+	priv->vmap_cnt--;
+	mutex_unlock(&priv->lock);
+
+	iosys_map_clear(map);
+}
+
+static void carveout_heap_dma_buf_release(struct dma_buf *buf)
+{
+	struct carveout_heap_buffer_priv *buffer_priv = buf->priv;
+	struct carveout_heap_priv *heap_priv = buffer_priv->heap;
+	unsigned long len = buffer_priv->num_pages * PAGE_SIZE;
+
+	gen_pool_free(heap_priv->pool, (unsigned long)buffer_priv->vaddr, len);
+	kfree(buffer_priv);
+}
+
+static const struct dma_buf_ops carveout_heap_buf_ops = {
+	.attach		= carveout_heap_attach,
+	.detach		= carveout_heap_detach,
+	.map_dma_buf	= carveout_heap_map_dma_buf,
+	.unmap_dma_buf	= carveout_heap_unmap_dma_buf,
+	.begin_cpu_access	= carveout_heap_dma_buf_begin_cpu_access,
+	.end_cpu_access	= carveout_heap_dma_buf_end_cpu_access,
+	.mmap		= carveout_heap_mmap,
+	.vmap		= carveout_heap_vmap,
+	.vunmap		= carveout_heap_vunmap,
+	.release	= carveout_heap_dma_buf_release,
+};
+
+static struct dma_buf *carveout_heap_allocate(struct dma_heap *heap,
+					      unsigned long len,
+					      u32 fd_flags,
+					      u64 heap_flags)
+{
+	struct carveout_heap_priv *heap_priv = dma_heap_get_drvdata(heap);
+	struct carveout_heap_buffer_priv *buffer_priv;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *buf;
+	dma_addr_t daddr;
+	size_t size = PAGE_ALIGN(len);
+	void *vaddr;
+	int ret;
+
+	buffer_priv = kzalloc(sizeof(*buffer_priv), GFP_KERNEL);
+	if (!buffer_priv)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&buffer_priv->attachments);
+	mutex_init(&buffer_priv->lock);
+
+	vaddr = gen_pool_dma_zalloc(heap_priv->pool, size, &daddr);
+	if (!vaddr) {
+		ret = -ENOMEM;
+		goto err_free_buffer_priv;
+	}
+
+	buffer_priv->vaddr = vaddr;
+	buffer_priv->daddr = daddr;
+	buffer_priv->heap = heap_priv;
+	buffer_priv->num_pages = size >> PAGE_SHIFT;
+
+	/* create the dmabuf */
+	exp_info.exp_name = dma_heap_get_name(heap);
+	exp_info.ops = &carveout_heap_buf_ops;
+	exp_info.size = size;
+	exp_info.flags = fd_flags;
+	exp_info.priv = buffer_priv;
+
+	buf = dma_buf_export(&exp_info);
+	if (IS_ERR(buf)) {
+		ret = PTR_ERR(buf);
+		goto err_free_buffer;
+	}
+
+	return buf;
+
+err_free_buffer:
+	gen_pool_free(heap_priv->pool, (unsigned long)vaddr, size);
+err_free_buffer_priv:
+	kfree(buffer_priv);
+
+	return ERR_PTR(ret);
+}
+
+static const struct dma_heap_ops carveout_heap_ops = {
+	.allocate = carveout_heap_allocate,
+};
+
+static int __init carveout_heap_setup(struct device_node *node)
+{
+	struct dma_heap_export_info exp_info = {};
+	const struct reserved_mem *rmem;
+	struct carveout_heap_priv *priv;
+	struct dma_heap *heap;
+	struct gen_pool *pool;
+	void *base;
+	int ret;
+
+	rmem = of_reserved_mem_lookup(node);
+	if (!rmem)
+		return -EINVAL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
+	if (!pool) {
+		ret = -ENOMEM;
+		goto err_free_priv;
+	}
+	priv->pool = pool;
+
+	base = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+	if (!base) {
+		ret = -ENOMEM;
+		goto err_destroy_pool;
+	}
+
+	ret = gen_pool_add_virt(pool, (unsigned long)base, rmem->base,
+				rmem->size, NUMA_NO_NODE);
+	if (ret)
+		goto err_unmap;
+
+	exp_info.name = node->full_name;
+	exp_info.ops = &carveout_heap_ops;
+	exp_info.priv = priv;
+
+	heap = dma_heap_add(&exp_info);
+	if (IS_ERR(heap)) {
+		ret = PTR_ERR(heap);
+		goto err_unmap;
+	}
+	priv->heap = heap;
+
+	return 0;
+
+err_unmap:
+	memunmap(base);
+err_destroy_pool:
+	gen_pool_destroy(pool);
+err_free_priv:
+	kfree(priv);
+	return ret;
+}
+
+static int __init carveout_heap_init(void)
+{
+	struct device_node *rmem_node;
+	struct device_node *node;
+	int ret;
+
+	rmem_node = of_find_node_by_path("/reserved-memory");
+	if (!rmem_node)
+		return 0;
+
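+	/*
+	 * Reserved memory regions opt in through the "export" device
+	 * tree property; register one heap per flagged region.
+	 */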
+	for_each_child_of_node(rmem_node, node) {
+		if (!of_property_read_bool(node, "export"))
+			continue;
+
+		ret = carveout_heap_setup(node);
+		if (ret) {
+			of_node_put(node);
+			of_node_put(rmem_node);
+			return ret;
+		}
+	}
+
+	of_node_put(rmem_node);
+
+	return 0;
+}
+
+module_init(carveout_heap_init);

-- 
2.49.0