Let's also implement HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE, so we can convert
arch_make_page_accessible() to be a simple wrapper around
arch_make_folio_accessible(). Unfortunately, we cannot do that in the
header.
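
(For illustration only, not part of the patch: a static-inline wrapper in
asm/page.h would have to look roughly like the sketch below, but page_folio()
is provided by core-MM headers that asm/page.h cannot pull in, which is why
the wrapper is defined out of line in uv.c.)

	/*
	 * Hypothetical inline variant in asm/page.h -- would not build,
	 * as page_folio() is not visible from this header.
	 */
	static inline int arch_make_page_accessible(struct page *page)
	{
		return arch_make_folio_accessible(page_folio(page));
	}
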
There are only two arch_make_page_accessible() calls remaining in gup.c.
We can now drop HAVE_ARCH_MAKE_PAGE_ACCESSIBLE completely from core-MM.
We'll handle that separately, once the s390x part has landed.
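
(For context, a sketch of the core-MM side, assuming the new define follows
the same pattern the existing HAVE_ARCH_MAKE_PAGE_ACCESSIBLE hook uses in
include/linux/mm.h: architectures that define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
supply the real implementation, everyone else falls back to a no-op.)

	#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
	/* Without the arch hook, making a folio accessible always succeeds. */
	static inline int arch_make_folio_accessible(struct folio *folio)
	{
		return 0;
	}
	#endif
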
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 arch/s390/include/asm/page.h |  3 +++
 arch/s390/kernel/uv.c        | 18 +++++++++++-------
 arch/s390/mm/fault.c         | 14 ++++++++------
 3 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index b64384872c0f..03bbc782e286 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -162,6 +162,7 @@ static inline int page_reset_referenced(unsigned long addr)
 #define _PAGE_ACC_BITS		0xf0	/* HW access control bits */
 
 struct page;
+struct folio;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
 
@@ -174,6 +175,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
 #define HAVE_ARCH_ALLOC_PAGE
 
 #if IS_ENABLED(CONFIG_PGSTE)
+int arch_make_folio_accessible(struct folio *folio);
+#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
 int arch_make_page_accessible(struct page *page);
 #define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
 #endif
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index b456066d72da..fa62fa0e369f 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -498,14 +498,13 @@ int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
 EXPORT_SYMBOL_GPL(gmap_destroy_page);
 
 /*
- * To be called with the page locked or with an extra reference! This will
- * prevent gmap_make_secure from touching the page concurrently. Having 2
- * parallel make_page_accessible is fine, as the UV calls will become a
- * no-op if the page is already exported.
+ * To be called with the folio locked or with an extra reference! This will
+ * prevent gmap_make_secure from touching the folio concurrently. Having 2
+ * parallel arch_make_folio_accessible is fine, as the UV calls will become a
+ * no-op if the folio is already exported.
  */
-int arch_make_page_accessible(struct page *page)
+int arch_make_folio_accessible(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	int rc = 0;
 
 	/* See gmap_make_secure(): large folios cannot be secure */
@@ -537,8 +536,13 @@ int arch_make_page_accessible(struct page *page)
 
 	return rc;
 }
-EXPORT_SYMBOL_GPL(arch_make_page_accessible);
+EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
 
+int arch_make_page_accessible(struct page *page)
+{
+	return arch_make_folio_accessible(page_folio(page));
+}
+EXPORT_SYMBOL_GPL(arch_make_page_accessible);
 #endif
 
 #if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index c421dd44ffbe..a1ba58460593 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -491,6 +491,7 @@ void do_secure_storage_access(struct pt_regs *regs)
 	unsigned long addr = get_fault_address(regs);
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
+	struct folio *folio;
 	struct page *page;
 	struct gmap *gmap;
 	int rc;
@@ -538,17 +539,18 @@ void do_secure_storage_access(struct pt_regs *regs)
 			mmap_read_unlock(mm);
 			break;
 		}
-		if (arch_make_page_accessible(page))
+		folio = page_folio(page);
+		if (arch_make_folio_accessible(folio))
 			send_sig(SIGSEGV, current, 0);
-		put_page(page);
+		folio_put(folio);
 		mmap_read_unlock(mm);
 		break;
 	case KERNEL_FAULT:
-		page = phys_to_page(addr);
-		if (unlikely(!try_get_page(page)))
+		folio = phys_to_folio(addr);
+		if (unlikely(!folio_try_get(folio)))
 			break;
-		rc = arch_make_page_accessible(page);
-		put_page(page);
+		rc = arch_make_folio_accessible(folio);
+		folio_put(folio);
 		if (rc)
 			BUG();
 		break;
--
2.44.0

On Fri, 12 Apr 2024 16:21:19 +0200
David Hildenbrand <david@redhat.com> wrote:
> Let's also implement HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE, so we can convert
> arch_make_page_accessible() to be a simple wrapper around
> arch_make_folio_accessible(). Unfortunately, we cannot do that in the
> header.
>
> There are only two arch_make_page_accessible() calls remaining in gup.c.
> We can now drop HAVE_ARCH_MAKE_PAGE_ACCESSIBLE completely from core-MM.
> We'll handle that separately, once the s390x part has landed.
>
> Suggested-by: Matthew Wilcox <willy@infradead.org>
> Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>