linux-next: manual merge of the drm-misc tree with the drm tree

Mark Brown posted 1 patch 1 day, 16 hours ago
linux-next: manual merge of the drm-misc tree with the drm tree
Posted by Mark Brown 1 day, 16 hours ago
Hi all,

Today's linux-next merge of the drm-misc tree got a conflict in:

  drivers/gpu/drm/drm_gem_shmem_helper.c

between commits:

  d9a4a2021d4a5 ("Merge tag 'drm-misc-next-2026-03-12' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next")

from the drm tree and commit:

  6fdfd24017756 ("Merge drm/drm-fixes into drm-misc-next-fixes")

from the drm-misc tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

diff --combined drivers/gpu/drm/drm_gem_shmem_helper.c
index 044f0d4ebf555,2062ca6078330..0000000000000
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@@ -554,20 -554,24 +554,20 @@@ int drm_gem_shmem_dumb_create(struct dr
  }
  EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
  
 -static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
 -				 unsigned long pfn)
 +static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn)
  {
 -	if (!order) {
 -		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
  #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
 -	} else if (order == PMD_ORDER) {
 -		unsigned long paddr = pfn << PAGE_SHIFT;
 -		bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
 +	unsigned long paddr = pfn << PAGE_SHIFT;
 +	bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
  
 -		if (aligned &&
 -		    folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) {
 -			pfn &= PMD_MASK >> PAGE_SHIFT;
 -			return vmf_insert_pfn_pmd(vmf, pfn, false);
 -		}
 -#endif
 +	if (aligned && pmd_none(*vmf->pmd)) {
 +		/* Read-only mapping; split upon write fault */
 +		pfn &= PMD_MASK >> PAGE_SHIFT;
 +		return vmf_insert_pfn_pmd(vmf, pfn, false);
  	}
 -	return VM_FAULT_FALLBACK;
 +#endif
 +
 +	return 0;
  }
  
  static vm_fault_t drm_gem_shmem_any_fault(struct vm_fault *vmf, unsigned int order)
@@@ -584,6 -588,9 +584,9 @@@
  	struct folio *folio;
  	unsigned long pfn;
  
+ 	if (order && order != PMD_ORDER)
+ 		return VM_FAULT_FALLBACK;
+ 
  	dma_resv_lock(obj->resv, NULL);
  
  	if (page_offset >= num_pages || drm_WARN_ON_ONCE(dev, !shmem->pages) ||
@@@ -597,11 -604,7 +600,11 @@@
  
  	pfn = page_to_pfn(page);
  
 -	ret = try_insert_pfn(vmf, order, pfn);
 +	if (folio_test_pmd_mappable(folio))
 +		ret = drm_gem_shmem_try_insert_pfn_pmd(vmf, pfn);
 +	if (ret != VM_FAULT_NOPAGE)
 +		ret = vmf_insert_pfn(vma, vmf->address, pfn);
 +
  	if (ret == VM_FAULT_NOPAGE)
  		folio_mark_accessed(folio);