Hi all,

Today's linux-next merge of the drm tree got a conflict in:

  drivers/gpu/drm/drm_gem_shmem_helper.c

between commit:

  fc3bbf34e643f ("drm/shmem-helper: Fix huge page mapping in fault handler")

from the drm-misc-fixes tree and commits:

  5cf8de6cd1620 ("drm/gem-shmem: Return vm_fault_t from drm_gem_shmem_try_map_pmd()")
  06f3662cb3ba9 ("drm/gem-shmem: Refactor drm_gem_shmem_try_map_pmd()")

from the drm tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging. You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.
diff --cc drivers/gpu/drm/drm_gem_shmem_helper.c
index c549293b5bb61,4500deef41278..0000000000000
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@@ -550,27 -554,23 +554,23 @@@ int drm_gem_shmem_dumb_create(struct dr
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
- static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
- unsigned long pfn)
+ static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn)
{
- if (!order) {
- return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
- } else if (order == PMD_ORDER) {
- unsigned long paddr = pfn << PAGE_SHIFT;
- bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
+ unsigned long paddr = pfn << PAGE_SHIFT;
+ bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
- if (aligned &&
- folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) {
- pfn &= PMD_MASK >> PAGE_SHIFT;
- return vmf_insert_pfn_pmd(vmf, pfn, false);
- }
- #endif
+ if (aligned && pmd_none(*vmf->pmd)) {
+ /* Read-only mapping; split upon write fault */
+ pfn &= PMD_MASK >> PAGE_SHIFT;
+ return vmf_insert_pfn_pmd(vmf, pfn, false);
}
- return VM_FAULT_FALLBACK;
+ #endif
+
+ return 0;
}
-static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+static vm_fault_t drm_gem_shmem_any_fault(struct vm_fault *vmf, unsigned int order)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@@ -644,13 -645,29 +650,32 @@@ static void drm_gem_shmem_vm_close(stru
drm_gem_vm_close(vma);
}
+ static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ loff_t num_pages = obj->size >> PAGE_SHIFT;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+
+ if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
+ return VM_FAULT_SIGBUS;
+
+ file_update_time(vma->vm_file);
+
+ folio_mark_dirty(page_folio(shmem->pages[page_offset]));
+
+ return 0;
+ }
+
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+ .huge_fault = drm_gem_shmem_any_fault,
+#endif
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
+ .pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
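
For readers who find the combined-diff format hard to follow, the helper
from the first hunk ends up reading roughly as follows after this
resolution (reconstructed by hand from the diff above, so take it as a
sketch of the merged result rather than the authoritative file contents):

static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf,
						   unsigned long pfn)
{
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
	unsigned long paddr = pfn << PAGE_SHIFT;
	bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);

	/* Only take the PMD path if nothing is mapped there yet. */
	if (aligned && pmd_none(*vmf->pmd)) {
		/* Read-only mapping; split upon write fault */
		pfn &= PMD_MASK >> PAGE_SHIFT;
		return vmf_insert_pfn_pmd(vmf, pfn, false);
	}
#endif

	/* No PMD mapping inserted; the caller is expected to fall back to PTEs. */
	return 0;
}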
Hello Mark,

On Fri, 20 Mar 2026 14:17:46 +0000
Mark Brown <broonie@kernel.org> wrote:
> Hi all,
>
> Today's linux-next merge of the drm tree got a conflict in:
>
> drivers/gpu/drm/drm_gem_shmem_helper.c
>
> between commit:
>
> fc3bbf34e643f ("drm/shmem-helper: Fix huge page mapping in fault handler")
>
> from the drm-misc-fixes tree and commits:
>
> 5cf8de6cd1620 ("drm/gem-shmem: Return vm_fault_t from drm_gem_shmem_try_map_pmd()")
> 06f3662cb3ba9 ("drm/gem-shmem: Refactor drm_gem_shmem_try_map_pmd()")
>
> from the drm tree.
>
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
I have a slightly different conflict resolution (it's the one we currently
have in drm-tip[1]).

Regards,

Boris

[1] https://gitlab.freedesktop.org/drm/tip
--->8---
diff --cc drivers/gpu/drm/drm_gem_shmem_helper.c
index c549293b5bb6,4500deef4127..2062ca607833
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@@ -574,33 -574,39 +578,38 @@@ static vm_fault_t drm_gem_shmem_any_fau
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
- vm_fault_t ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
struct page **pages = shmem->pages;
- pgoff_t page_offset;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+ struct page *page;
+ struct folio *folio;
unsigned long pfn;
+ if (order && order != PMD_ORDER)
+ return VM_FAULT_FALLBACK;
+
- /* Offset to faulty address in the VMA. */
- page_offset = vmf->pgoff - vma->vm_pgoff;
+ dma_resv_lock(obj->resv, NULL);
- dma_resv_lock(shmem->base.resv, NULL);
-
- if (page_offset >= num_pages ||
- drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
- shmem->madv < 0) {
- ret = VM_FAULT_SIGBUS;
+ if (page_offset >= num_pages || drm_WARN_ON_ONCE(dev, !shmem->pages) ||
+ shmem->madv < 0)
goto out;
- }
- pfn = page_to_pfn(pages[page_offset]);
+ page = pages[page_offset];
+ if (drm_WARN_ON_ONCE(dev, !page))
+ goto out;
+ folio = page_folio(page);
+
+ pfn = page_to_pfn(page);
+
- if (folio_test_pmd_mappable(folio))
- ret = drm_gem_shmem_try_insert_pfn_pmd(vmf, pfn);
- if (ret != VM_FAULT_NOPAGE)
- ret = vmf_insert_pfn(vma, vmf->address, pfn);
-
+ ret = try_insert_pfn(vmf, order, pfn);
+ if (ret == VM_FAULT_NOPAGE)
+ folio_mark_accessed(folio);
- out:
- dma_resv_unlock(shmem->base.resv);
+ out:
+ dma_resv_unlock(obj->resv);
return ret;
}
@@@ -644,13 -645,29 +653,32 @@@ static void drm_gem_shmem_vm_close(stru
drm_gem_vm_close(vma);
}
+ static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ loff_t num_pages = obj->size >> PAGE_SHIFT;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+
+ if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
+ return VM_FAULT_SIGBUS;
+
+ file_update_time(vma->vm_file);
+
+ folio_mark_dirty(page_folio(shmem->pages[page_offset]));
+
+ return 0;
+ }
+
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+ .huge_fault = drm_gem_shmem_any_fault,
+#endif
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
+ .pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
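
For the same reason, here is roughly how drm_gem_shmem_any_fault() reads
with the drm-tip resolution applied (again reconstructed from the combined
diff above, not the authoritative file; try_insert_pfn() here is the
order-aware helper kept from the drm-misc-fixes side):

static vm_fault_t drm_gem_shmem_any_fault(struct vm_fault *vmf, unsigned int order)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	struct page **pages = shmem->pages;
	pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
	struct page *page;
	struct folio *folio;
	unsigned long pfn;

	/* Only PTE (order 0) and PMD-sized faults are handled here. */
	if (order && order != PMD_ORDER)
		return VM_FAULT_FALLBACK;

	dma_resv_lock(obj->resv, NULL);

	if (page_offset >= num_pages || drm_WARN_ON_ONCE(dev, !shmem->pages) ||
	    shmem->madv < 0)
		goto out;

	page = pages[page_offset];
	if (drm_WARN_ON_ONCE(dev, !page))
		goto out;
	folio = page_folio(page);

	pfn = page_to_pfn(page);

	ret = try_insert_pfn(vmf, order, pfn);
	if (ret == VM_FAULT_NOPAGE)
		folio_mark_accessed(folio);

out:
	dma_resv_unlock(obj->resv);
	return ret;
}

The visible differences from the resolution earlier in the thread are that
ret starts out as VM_FAULT_SIGBUS, a NULL-page check is kept, and
folio_mark_accessed() is called only after a successful insert.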