Hi all,
Today's linux-next merge of the drm-misc tree got a conflict in:
drivers/gpu/drm/drm_gem_shmem_helper.c
between commits:
d9a4a2021d4a5 ("Merge tag 'drm-misc-next-2026-03-12' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next")
f6225b546dfcc ("BackMerge tag 'v7.0-rc6' into drm-next")
from the drm tree and commit:
6fdfd24017756 ("Merge drm/drm-fixes into drm-misc-next-fixes")
from the drm-misc tree.
I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging. You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.
diff --combined drivers/gpu/drm/drm_gem_shmem_helper.c
index 2062ca6078330,545933c7f7121..0000000000000
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@@ -554,6 -554,21 +554,21 @@@ int drm_gem_shmem_dumb_create(struct dr
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
+ static void drm_gem_shmem_record_mkwrite(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ loff_t num_pages = obj->size >> PAGE_SHIFT;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+
+ if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
+ return;
+
+ file_update_time(vma->vm_file);
+ folio_mark_dirty(page_folio(shmem->pages[page_offset]));
+ }
+
static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order,
unsigned long pfn)
{
@@@ -566,8 -581,23 +581,23 @@@
if (aligned &&
folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) {
+ vm_fault_t ret;
+
pfn &= PMD_MASK >> PAGE_SHIFT;
- return vmf_insert_pfn_pmd(vmf, pfn, false);
+
+ /* Unlike PTEs which are automatically upgraded to
+ * writeable entries, the PMD upgrades go through
+ * .huge_fault(). Make sure we pass the "write" info
+ * along in that case.
+ * This also means we have to record the write fault
+ * here, instead of in .pfn_mkwrite().
+ */
+ ret = vmf_insert_pfn_pmd(vmf, pfn,
+ vmf->flags & FAULT_FLAG_WRITE);
+ if (ret == VM_FAULT_NOPAGE && (vmf->flags & FAULT_FLAG_WRITE))
+ drm_gem_shmem_record_mkwrite(vmf);
+
+ return ret;
}
#endif
}
@@@ -655,19 -685,7 +685,7 @@@ static void drm_gem_shmem_vm_close(stru
static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- loff_t num_pages = obj->size >> PAGE_SHIFT;
- pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
-
- if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
- return VM_FAULT_SIGBUS;
-
- file_update_time(vma->vm_file);
-
- folio_mark_dirty(page_folio(shmem->pages[page_offset]));
-
+ drm_gem_shmem_record_mkwrite(vmf);
return 0;
}
© 2016 - 2026 Red Hat, Inc.