Move the suitable huge orders validation into shmem_suitable_orders() for
tmpfs, so that the existing code can be reused and the logic simplified.
In addition, the -E2BIG error code returned when checking for conflicts
with PMD-sized THP in the pagecache for tmpfs has no special handling;
callers simply fall back to order-0 allocations, which is exactly what
this patch does, so this simplification introduces no functional change.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/shmem.c | 39 +++++++++++++++------------------------
1 file changed, 15 insertions(+), 24 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index f24dfbd387ba..db7e9808830f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1685,19 +1685,29 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
struct address_space *mapping, pgoff_t index,
unsigned long orders)
{
- struct vm_area_struct *vma = vmf->vma;
+ struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
unsigned long pages;
int order;

- orders = thp_vma_suitable_orders(vma, vmf->address, orders);
- if (!orders)
- return 0;
+ if (vma) {
+ orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+ if (!orders)
+ return 0;
+ }

/* Find the highest order that can add into the page cache */
order = highest_order(orders);
while (orders) {
pages = 1UL << order;
index = round_down(index, pages);
+ /*
+ * Check for conflict before waiting on a huge allocation.
+ * Conflict might be that a huge page has just been allocated
+ * and added to page cache by a racing thread, or that there
+ * is already at least one small page in the huge extent.
+ * Be careful to retry when appropriate, but not forever!
+ * Elsewhere -EEXIST would be the right code, but not here.
+ */
if (!xa_find(&mapping->i_pages, &index,
index + pages - 1, XA_PRESENT))
break;
@@ -1735,7 +1745,6 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
- struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
unsigned long suitable_orders = 0;
struct folio *folio = NULL;
long pages;
@@ -1745,26 +1754,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
orders = 0;

if (orders > 0) {
- if (vma && vma_is_anon_shmem(vma)) {
- suitable_orders = shmem_suitable_orders(inode, vmf,
+ suitable_orders = shmem_suitable_orders(inode, vmf,
mapping, index, orders);
- } else if (orders & BIT(HPAGE_PMD_ORDER)) {
- pages = HPAGE_PMD_NR;
- suitable_orders = BIT(HPAGE_PMD_ORDER);
- index = round_down(index, HPAGE_PMD_NR);
-
- /*
- * Check for conflict before waiting on a huge allocation.
- * Conflict might be that a huge page has just been allocated
- * and added to page cache by a racing thread, or that there
- * is already at least one small page in the huge extent.
- * Be careful to retry when appropriate, but not forever!
- * Elsewhere -EEXIST would be the right code, but not here.
- */
- if (xa_find(&mapping->i_pages, &index,
- index + HPAGE_PMD_NR - 1, XA_PRESENT))
- return ERR_PTR(-E2BIG);
- }

order = highest_order(suitable_orders);
while (suitable_orders) {
--
2.39.3
On 13.07.24 15:24, Baolin Wang wrote:
> Move the suitable huge orders validation into shmem_suitable_orders() for
> tmpfs, so that the existing code can be reused and the logic simplified.
>
> In addition, the -E2BIG error code returned when checking for conflicts
> with PMD-sized THP in the pagecache for tmpfs has no special handling;
> callers simply fall back to order-0 allocations, which is exactly what
> this patch does, so this simplification introduces no functional change.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---

Acked-by: David Hildenbrand <david@redhat.com>

--
Cheers,

David / dhildenb
On 13/07/2024 14:24, Baolin Wang wrote:
> Move the suitable huge orders validation into shmem_suitable_orders() for
> tmpfs, so that the existing code can be reused and the logic simplified.
>
> In addition, the -E2BIG error code returned when checking for conflicts
> with PMD-sized THP in the pagecache for tmpfs has no special handling;
> callers simply fall back to order-0 allocations, which is exactly what
> this patch does, so this simplification introduces no functional change.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
> mm/shmem.c | 39 +++++++++++++++------------------------
> 1 file changed, 15 insertions(+), 24 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index f24dfbd387ba..db7e9808830f 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1685,19 +1685,29 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
> struct address_space *mapping, pgoff_t index,
> unsigned long orders)
> {
> - struct vm_area_struct *vma = vmf->vma;
> + struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
> unsigned long pages;
> int order;
>
> - orders = thp_vma_suitable_orders(vma, vmf->address, orders);
> - if (!orders)
> - return 0;
> + if (vma) {
> + orders = thp_vma_suitable_orders(vma, vmf->address, orders);
> + if (!orders)
> + return 0;
> + }
>
> /* Find the highest order that can add into the page cache */
> order = highest_order(orders);
> while (orders) {
> pages = 1UL << order;
> index = round_down(index, pages);
> + /*
> + * Check for conflict before waiting on a huge allocation.
> + * Conflict might be that a huge page has just been allocated
> + * and added to page cache by a racing thread, or that there
> + * is already at least one small page in the huge extent.
> + * Be careful to retry when appropriate, but not forever!
> + * Elsewhere -EEXIST would be the right code, but not here.
> + */
> if (!xa_find(&mapping->i_pages, &index,
> index + pages - 1, XA_PRESENT))
> break;
> @@ -1735,7 +1745,6 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
> {
> struct address_space *mapping = inode->i_mapping;
> struct shmem_inode_info *info = SHMEM_I(inode);
> - struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
> unsigned long suitable_orders = 0;
> struct folio *folio = NULL;
> long pages;
> @@ -1745,26 +1754,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
> orders = 0;
>
> if (orders > 0) {
> - if (vma && vma_is_anon_shmem(vma)) {
> - suitable_orders = shmem_suitable_orders(inode, vmf,
> + suitable_orders = shmem_suitable_orders(inode, vmf,
> mapping, index, orders);
> - } else if (orders & BIT(HPAGE_PMD_ORDER)) {
> - pages = HPAGE_PMD_NR;
> - suitable_orders = BIT(HPAGE_PMD_ORDER);
> - index = round_down(index, HPAGE_PMD_NR);
> -
> - /*
> - * Check for conflict before waiting on a huge allocation.
> - * Conflict might be that a huge page has just been allocated
> - * and added to page cache by a racing thread, or that there
> - * is already at least one small page in the huge extent.
> - * Be careful to retry when appropriate, but not forever!
> - * Elsewhere -EEXIST would be the right code, but not here.
> - */
> - if (xa_find(&mapping->i_pages, &index,
> - index + HPAGE_PMD_NR - 1, XA_PRESENT))
> - return ERR_PTR(-E2BIG);
> - }
>
> order = highest_order(suitable_orders);
> while (suitable_orders) {