Add tee_shm_alloc_dma_mem() to allocate DMA memory. The memory is
represented by a tee_shm object using the new flag TEE_SHM_DMA_MEM to
identify it as DMA memory. The allocated memory will later be lent to
the TEE to be used as protected memory.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
---
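A minimal usage sketch (not part of the patch) of how a TEE driver might
drive the new helper, for review context: example_lend_to_tee() and
example_reclaim_from_tee() are hypothetical placeholders for the driver's
own lend/reclaim protocol, while tee_shm_free() is the existing tee_shm
release helper.

#include <linux/err.h>
#include <linux/tee_core.h>

/* Hypothetical driver-specific lend/reclaim protocol (assumptions). */
int example_lend_to_tee(struct tee_shm *shm);
void example_reclaim_from_tee(struct tee_shm *shm);

static int example_protmem_cycle(struct tee_context *ctx, size_t page_count)
{
	struct tee_shm *shm;
	int rc;

	/* Allocate page_count pages of DMA memory wrapped in a tee_shm. */
	shm = tee_shm_alloc_dma_mem(ctx, page_count);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* Lend the pages to the TEE; the kernel must not touch them now. */
	rc = example_lend_to_tee(shm);		/* hypothetical driver helper */
	if (rc)
		goto out_free;

	/* ... the TEE uses the memory as protected memory ... */

	/* Return the pages so the kernel may access them again. */
	example_reclaim_from_tee(shm);		/* hypothetical driver helper */

out_free:
	tee_shm_free(shm);
	return rc;
}
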
drivers/tee/tee_shm.c | 85 +++++++++++++++++++++++++++++++++++++++-
include/linux/tee_core.h | 5 +++
2 files changed, 88 insertions(+), 2 deletions(-)

diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 76195a398c89..e195c892431d 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -5,6 +5,8 @@
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/mm.h>
@@ -13,9 +15,14 @@
#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
-#include <linux/highmem.h>
#include "tee_private.h"
+struct tee_shm_dma_mem {
+ struct tee_shm shm;
+ dma_addr_t dma_addr;
+ struct page *page;
+};
+
static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
size_t n;
@@ -48,7 +55,16 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
void *p = shm;
- if (shm->flags & TEE_SHM_DMA_BUF) {
+ if (shm->flags & TEE_SHM_DMA_MEM) {
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+ struct tee_shm_dma_mem *dma_mem;
+
+ dma_mem = container_of(shm, struct tee_shm_dma_mem, shm);
+ p = dma_mem;
+ dma_free_pages(&teedev->dev, shm->size, dma_mem->page,
+ dma_mem->dma_addr, DMA_BIDIRECTIONAL);
+#endif
+ } else if (shm->flags & TEE_SHM_DMA_BUF) {
struct tee_shm_dmabuf_ref *ref;
ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
@@ -268,6 +284,71 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);

+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+/**
+ * tee_shm_alloc_dma_mem() - Allocate DMA memory as shared memory object
+ * @ctx: Context that allocates the shared memory
+ * @page_count: Number of pages
+ *
+ * The allocated memory is expected to be lent (made inaccessible to the
+ * kernel) to the TEE while it's used and returned (accessible to the
+ * kernel again) before it's freed.
+ *
+ * This function should normally only be used internally by TEE
+ * drivers.
+ *
+ * @returns a pointer to 'struct tee_shm' on success, or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm_dma_mem *dma_mem;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
+ &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!page)
+ goto err_put_teedev;
+
+ dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
+ if (!dma_mem)
+ goto err_free_pages;
+
+ refcount_set(&dma_mem->shm.refcount, 1);
+ dma_mem->shm.ctx = ctx;
+ dma_mem->shm.paddr = page_to_phys(page);
+ dma_mem->dma_addr = dma_addr;
+ dma_mem->page = page;
+ dma_mem->shm.size = page_count * PAGE_SIZE;
+ dma_mem->shm.flags = TEE_SHM_DMA_MEM;
+
+ teedev_ctx_get(ctx);
+
+ return &dma_mem->shm;
+
+err_free_pages:
+ dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
+ DMA_BIDIRECTIONAL);
+err_put_teedev:
+ tee_device_put(teedev);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#else
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#endif
+
int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
int (*shm_register)(struct tee_context *ctx,
struct tee_shm *shm,
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
index b6c54b34a8b5..7b0c1da2ca6c 100644
--- a/include/linux/tee_core.h
+++ b/include/linux/tee_core.h
@@ -29,6 +29,8 @@
#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */
#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */
#define TEE_SHM_DMA_BUF BIT(4) /* Memory with dma-buf handle */
+#define TEE_SHM_DMA_MEM BIT(5) /* Memory allocated with */
+ /* dma_alloc_pages() */
#define TEE_DEVICE_FLAG_REGISTERED 0x1
#define TEE_MAX_DEV_NAME_LEN 32
@@ -298,6 +300,9 @@ void *tee_get_drvdata(struct tee_device *teedev);
*/
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);

+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count);
+
int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
int (*shm_register)(struct tee_context *ctx,
struct tee_shm *shm,
--
2.43.0
On Wed, Aug 13, 2025 at 08:02:55AM +0200, Jens Wiklander wrote:
> Add tee_shm_alloc_dma_mem() to allocate DMA memory. The memory is
> represented by a tee_shm object using the new flag TEE_SHM_DMA_MEM to
> identify it as DMA memory. The allocated memory will later be lent to
> the TEE to be used as protected memory.
>
> Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
> ---
> drivers/tee/tee_shm.c | 85 +++++++++++++++++++++++++++++++++++++++-
> include/linux/tee_core.h | 5 +++
> 2 files changed, 88 insertions(+), 2 deletions(-)
Reviewed-by: Sumit Garg <sumit.garg@oss.qualcomm.com>
-Sumit