The kernel pages used by shm_get_kernel_pages() are allocated using
GFP_KERNEL through the following call stack:
trusted_instantiate()
trusted_payload_alloc() -> GFP_KERNEL
<trusted key op>
tee_shm_register_kernel_buf()
register_shm_helper()
shm_get_kernel_pages()
Where <trusted key op> is one of:
trusted_key_unseal()
trusted_key_get_random()
trusted_key_seal()
Remove the vmalloc page support from shm_get_kernel_pages(). Replace it
with a WARN_ON_ONCE() that returns -EINVAL.
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: "Fabio M. De Francesco" <fmdefrancesco@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
drivers/tee/tee_shm.c | 36 ++++++++++++------------------------
1 file changed, 12 insertions(+), 24 deletions(-)
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 27295bda3e0b..527a6eabc03e 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -24,37 +24,25 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count)
static int shm_get_kernel_pages(unsigned long start, size_t page_count,
struct page **pages)
{
+ struct kvec *kiov;
size_t n;
int rc;
- if (is_vmalloc_addr((void *)start)) {
- struct page *page;
-
- for (n = 0; n < page_count; n++) {
- page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
- if (!page)
- return -ENOMEM;
-
- get_page(page);
- pages[n] = page;
- }
- rc = page_count;
- } else {
- struct kvec *kiov;
-
- kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
- if (!kiov)
- return -ENOMEM;
+ if (WARN_ON_ONCE(is_vmalloc_addr((void *)start)))
+ return -EINVAL;
- for (n = 0; n < page_count; n++) {
- kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
- kiov[n].iov_len = PAGE_SIZE;
- }
+ kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
+ if (!kiov)
+ return -ENOMEM;
- rc = get_kernel_pages(kiov, page_count, 0, pages);
- kfree(kiov);
+ for (n = 0; n < page_count; n++) {
+ kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
+ kiov[n].iov_len = PAGE_SIZE;
}
+ rc = get_kernel_pages(kiov, page_count, 0, pages);
+ kfree(kiov);
+
return rc;
}
--
2.39.1
On Fri, Feb 03, 2023 at 08:06:33PM -0800, Ira Weiny wrote: > The kernel pages used by shm_get_kernel_pages() are allocated using > GFP_KERNEL through the following call stack: > > trusted_instantiate() > trusted_payload_alloc() -> GFP_KERNEL > <trusted key op> > tee_shm_register_kernel_buf() > register_shm_helper() > shm_get_kernel_pages() > > Where <trusted key op> is one of: > > trusted_key_unseal() > trusted_key_get_random() > trusted_key_seal() > > Remove the vmalloc page support from shm_get_kernel_pages(). Replace > with a warn on once. > > Cc: Al Viro <viro@zeniv.linux.org.uk> > Cc: "Fabio M. De Francesco" <fmdefrancesco@gmail.com> > Cc: Christoph Hellwig <hch@lst.de> > Cc: Linus Torvalds <torvalds@linux-foundation.org> > Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org> > Signed-off-by: Ira Weiny <ira.weiny@intel.com> > --- > drivers/tee/tee_shm.c | 36 ++++++++++++------------------------ > 1 file changed, 12 insertions(+), 24 deletions(-) Added to https://git.linaro.org/people/jens.wiklander/linux-tee.git/log/?h=get_kernel_pages-for-v6.4 Thanks, Jens > > diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c > index 27295bda3e0b..527a6eabc03e 100644 > --- a/drivers/tee/tee_shm.c > +++ b/drivers/tee/tee_shm.c > @@ -24,37 +24,25 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count) > static int shm_get_kernel_pages(unsigned long start, size_t page_count, > struct page **pages) > { > + struct kvec *kiov; > size_t n; > int rc; > > - if (is_vmalloc_addr((void *)start)) { > - struct page *page; > - > - for (n = 0; n < page_count; n++) { > - page = vmalloc_to_page((void *)(start + PAGE_SIZE * n)); > - if (!page) > - return -ENOMEM; > - > - get_page(page); > - pages[n] = page; > - } > - rc = page_count; > - } else { > - struct kvec *kiov; > - > - kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL); > - if (!kiov) > - return -ENOMEM; > + if (WARN_ON_ONCE(is_vmalloc_addr((void *)start))) > + return -EINVAL; > > - for (n = 0; n < 
page_count; n++) { > - kiov[n].iov_base = (void *)(start + n * PAGE_SIZE); > - kiov[n].iov_len = PAGE_SIZE; > - } > + kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL); > + if (!kiov) > + return -ENOMEM; > > - rc = get_kernel_pages(kiov, page_count, 0, pages); > - kfree(kiov); > + for (n = 0; n < page_count; n++) { > + kiov[n].iov_base = (void *)(start + n * PAGE_SIZE); > + kiov[n].iov_len = PAGE_SIZE; > } > > + rc = get_kernel_pages(kiov, page_count, 0, pages); > + kfree(kiov); > + > return rc; > } > > > -- > 2.39.1
On Sat, 4 Feb 2023 at 09:36, Ira Weiny <ira.weiny@intel.com> wrote: > > The kernel pages used by shm_get_kernel_pages() are allocated using > GFP_KERNEL through the following call stack: > > trusted_instantiate() > trusted_payload_alloc() -> GFP_KERNEL > <trusted key op> > tee_shm_register_kernel_buf() > register_shm_helper() > shm_get_kernel_pages() > > Where <trusted key op> is one of: > > trusted_key_unseal() > trusted_key_get_random() > trusted_key_seal() > > Remove the vmalloc page support from shm_get_kernel_pages(). Replace > with a warn on once. > > Cc: Al Viro <viro@zeniv.linux.org.uk> > Cc: "Fabio M. De Francesco" <fmdefrancesco@gmail.com> > Cc: Christoph Hellwig <hch@lst.de> > Cc: Linus Torvalds <torvalds@linux-foundation.org> > Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org> > Signed-off-by: Ira Weiny <ira.weiny@intel.com> > --- > drivers/tee/tee_shm.c | 36 ++++++++++++------------------------ > 1 file changed, 12 insertions(+), 24 deletions(-) > Reviewed-by: Sumit Garg <sumit.garg@linaro.org> -Sumit > diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c > index 27295bda3e0b..527a6eabc03e 100644 > --- a/drivers/tee/tee_shm.c > +++ b/drivers/tee/tee_shm.c > @@ -24,37 +24,25 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count) > static int shm_get_kernel_pages(unsigned long start, size_t page_count, > struct page **pages) > { > + struct kvec *kiov; > size_t n; > int rc; > > - if (is_vmalloc_addr((void *)start)) { > - struct page *page; > - > - for (n = 0; n < page_count; n++) { > - page = vmalloc_to_page((void *)(start + PAGE_SIZE * n)); > - if (!page) > - return -ENOMEM; > - > - get_page(page); > - pages[n] = page; > - } > - rc = page_count; > - } else { > - struct kvec *kiov; > - > - kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL); > - if (!kiov) > - return -ENOMEM; > + if (WARN_ON_ONCE(is_vmalloc_addr((void *)start))) > + return -EINVAL; > > - for (n = 0; n < page_count; n++) { > - kiov[n].iov_base = (void 
*)(start + n * PAGE_SIZE); > - kiov[n].iov_len = PAGE_SIZE; > - } > + kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL); > + if (!kiov) > + return -ENOMEM; > > - rc = get_kernel_pages(kiov, page_count, 0, pages); > - kfree(kiov); > + for (n = 0; n < page_count; n++) { > + kiov[n].iov_base = (void *)(start + n * PAGE_SIZE); > + kiov[n].iov_len = PAGE_SIZE; > } > > + rc = get_kernel_pages(kiov, page_count, 0, pages); > + kfree(kiov); > + > return rc; > } > > > -- > 2.39.1
© 2016 - 2025 Red Hat, Inc.