mmap() calls on the drm file pointer currently always end up using
mm_get_unmapped_area() to get a free mapping region. On builds with
CONFIG_TRANSPARENT_HUGEPAGE enabled, this isn't ideal for GEM objects
backed by shmem buffers on mount points setting the 'huge=' option
because mm_get_unmapped_area() can't figure out the huge page
alignment these mappings may require.
This commit introduces the drm_gem_get_unmapped_area() function which
is meant to be used as a get_unmapped_area file operation on the drm
file pointer to look up GEM objects based on their fake offsets and get
a properly aligned region by calling shmem_get_unmapped_area() with
the right file pointer. If a GEM object isn't available at the given
offset or if the caller isn't granted access to it, the function falls
back to mm_get_unmapped_area().
This also makes drm_gem_get_unmapped_area() part of the default GEM
file operations so that all the drm drivers can benefit from more
efficient mappings thanks to the huge page fault handler introduced in
the previous commit 'drm/shmem-helper: Add huge page fault handler'.
The shmem_get_unmapped_area() function needs to be exported so that
it can be used from the drm subsystem.
Signed-off-by: Loïc Molinari <loic.molinari@collabora.com>
---
drivers/gpu/drm/drm_gem.c | 110 ++++++++++++++++++++++++++++++--------
include/drm/drm_gem.h | 4 ++
mm/shmem.c | 1 +
3 files changed, 93 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index cbeb76b2124f..d027db462c2d 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1187,36 +1187,27 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
-/**
- * drm_gem_mmap - memory map routine for GEM objects
- * @filp: DRM file pointer
- * @vma: VMA for the area to be mapped
- *
- * If a driver supports GEM object mapping, mmap calls on the DRM file
- * descriptor will end up here.
- *
- * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
- * contain the fake offset we created when the GTT map ioctl was called on
- * the object) and map it with a call to drm_gem_mmap_obj().
- *
- * If the caller is not granted access to the buffer object, the mmap will fail
- * with EACCES. Please see the vma manager for more information.
+/*
+ * Look up a GEM object in offset space based on the exact start address. The
+ * caller must be granted access to the object. Returns a GEM object on success
+ * or a negative error code on failure. The returned GEM object needs to be
+ * released with drm_gem_object_put().
*/
-int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static struct drm_gem_object *
+drm_gem_object_lookup_from_offset(struct file *filp, unsigned long start,
+ unsigned long pages)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_object *obj = NULL;
struct drm_vma_offset_node *node;
- int ret;
if (drm_dev_is_unplugged(dev))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
+ start, pages);
if (likely(node)) {
obj = container_of(node, struct drm_gem_object, vma_node);
/*
@@ -1235,14 +1226,89 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
if (!obj)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (!drm_vma_node_is_allowed(node, priv)) {
drm_gem_object_put(obj);
- return -EACCES;
+ return ERR_PTR(-EACCES);
}
- ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
+ return obj;
+}
+
+/**
+ * drm_gem_get_unmapped_area - get memory mapping region routine for GEM objects
+ * @filp: DRM file pointer
+ * @uaddr: User address hint
+ * @len: Mapping length
+ * @pgoff: Offset (in pages)
+ * @flags: Mapping flags
+ *
+ * If a driver supports GEM object mapping, before ending up in drm_gem_mmap(),
+ * mmap calls on the DRM file descriptor will first try to find a free linear
+ * address space large enough for a mapping. Since GEM objects are backed by
+ * shmem buffers, this should preferably be handled by the shmem virtual memory
+ * filesystem which can appropriately align addresses to huge page sizes when
+ * needed.
+ *
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created) and call shmem_get_unmapped_area() with
+ * the right file pointer.
+ *
+ * If a GEM object is not available at the given offset or if the caller is not
+ * granted access to it, fall back to mm_get_unmapped_area().
+ */
+unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct drm_gem_object *obj;
+ unsigned long ret;
+
+ obj = drm_gem_object_lookup_from_offset(filp, pgoff, len >> PAGE_SHIFT);
+ if (IS_ERR(obj))
+ return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0,
+ flags);
+
+ ret = shmem_get_unmapped_area(obj->filp, uaddr, len, 0, flags);
+
+ drm_gem_object_put(obj);
+
+ return ret;
+#else
+ return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0, flags);
+#endif
+}
+EXPORT_SYMBOL(drm_gem_get_unmapped_area);
+
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created) and map it with a call to
+ * drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup_from_offset(filp, vma->vm_pgoff,
+ vma_pages(vma));
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = drm_gem_mmap_obj(obj,
+ drm_vma_node_size(&obj->vma_node) << PAGE_SHIFT,
vma);
drm_gem_object_put(obj);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 8d48d2af2649..7c8bd67d087c 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -469,6 +469,7 @@ struct drm_gem_object {
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek,\
+ .get_unmapped_area = drm_gem_get_unmapped_area,\
.mmap = drm_gem_mmap, \
.fop_flags = FOP_UNSIGNED_OFFSET
@@ -506,6 +507,9 @@ void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
/**
* drm_gem_object_get - acquire a GEM buffer object reference
diff --git a/mm/shmem.c b/mm/shmem.c
index e2c76a30802b..b2f41b430daa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2915,6 +2915,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
return addr;
return inflated_addr;
}
+EXPORT_SYMBOL_GPL(shmem_get_unmapped_area);
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
--
2.47.3
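
As an illustration of the path this patch optimizes, a minimal userspace
sketch (not part of the patch): it assumes a dumb buffer was already
created with DRM_IOCTL_MODE_CREATE_DUMB on drm_fd, and whether the
returned address actually ends up huge page aligned depends on the shmem
THP configuration of the kernel it runs on.

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <xf86drm.h>

	/* Map a dumb buffer through its fake offset and report the
	 * alignment of the address picked by the kernel. Error handling
	 * is kept minimal for brevity. */
	static int map_dumb_and_check(int drm_fd, uint32_t handle, size_t size)
	{
		struct drm_mode_map_dumb map = { .handle = handle };
		void *ptr;

		/* Ask the kernel for the fake mmap offset of the GEM object. */
		if (drmIoctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
			return -1;

		/* This mmap() is the call that now goes through
		 * drm_gem_get_unmapped_area() before any page is faulted in. */
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   drm_fd, map.offset);
		if (ptr == MAP_FAILED)
			return -1;

		printf("mapped at %p, 2MiB aligned: %s\n", ptr,
		       ((uintptr_t)ptr & ((2UL << 20) - 1)) ? "no" : "yes");
		munmap(ptr, size);
		return 0;
	}
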
On Mon, 29 Sep 2025 22:03:10 +0200 Loïc Molinari <loic.molinari@collabora.com> wrote:

[...]

> +unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
> +					unsigned long len, unsigned long pgoff,
> +					unsigned long flags)
> +{
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +	struct drm_gem_object *obj;
> +	unsigned long ret;
> +
> +	obj = drm_gem_object_lookup_from_offset(filp, pgoff, len >> PAGE_SHIFT);
> +	if (IS_ERR(obj))

Is this supposed to happen? If not, I'd be tempted to add a
WARN_ON_ONCE().

> +		return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0,
> +					    flags);
> +
> +	ret = shmem_get_unmapped_area(obj->filp, uaddr, len, 0, flags);
> +
> +	drm_gem_object_put(obj);
> +
> +	return ret;
> +#else
> +	return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0, flags);

Looks like the above code covers the non-THP case too, do we really need
to specialize for !CONFIG_TRANSPARENT_HUGEPAGE here?

> +#endif
> +}
> +EXPORT_SYMBOL(drm_gem_get_unmapped_area);

[...]
On 30/09/2025 12:30, Boris Brezillon wrote:
> On Mon, 29 Sep 2025 22:03:10 +0200
> Loïc Molinari <loic.molinari@collabora.com> wrote:
>> +	return ret;
>> +#else
>> +	return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0, flags);
>
> Looks like the above code covers the non-THP case too, do we really need
> to specialize for !CONFIG_TRANSPARENT_HUGEPAGE here?

It does cover the !CONFIG_TRANSPARENT_HUGEPAGE case
(shmem_get_unmapped_area() would just call and return the
mm_get_unmapped_area() address) but the idea here is to avoid the GEM
object lookup cost by calling mm_get_unmapped_area() directly.

>> +#endif
>> +}
>> +EXPORT_SYMBOL(drm_gem_get_unmapped_area);

Loïc
On Tue, 30 Sep 2025 18:09:37 +0200 Loïc Molinari <loic.molinari@collabora.com> wrote:
> On 30/09/2025 12:30, Boris Brezillon wrote:
>> Looks like the above code covers the non-THP case too, do we really need
>> to specialize for !CONFIG_TRANSPARENT_HUGEPAGE here?
>
> It does cover the !CONFIG_TRANSPARENT_HUGEPAGE case
> (shmem_get_unmapped_area() would just call and return the
> mm_get_unmapped_area() address) but the idea here is to avoid the GEM
> object lookup cost by calling mm_get_unmapped_area() directly.

I'd expect the extra GEM lookup to be negligible compared to the overall
mmap() operation to be honest, but I guess if we really want to avoid
the overhead, we could still write it without this ifdef.

	if (!IS_ENABLED(TRANSPARENT_HUGEPAGE))
		return mm_get_unmapped_area(current->mm, filp, uaddr,
					    len, 0, flags);

	...

My main concern is that shmem_get_unmapped_area() evolves with more
!TRANSPARENT_HUGEPAGE cases, and by calling mm_get_unmapped_area()
directly, we miss the opportunity to get optimizations for these cases,
just like we missed them by not forwarding the ->get_unmapped_area()
requests to the shmem layer so far.
On 30/09/2025 18:29, Boris Brezillon wrote:
> On Tue, 30 Sep 2025 18:09:37 +0200
> Loïc Molinari <loic.molinari@collabora.com> wrote:
>> It does cover the !CONFIG_TRANSPARENT_HUGEPAGE case
>> (shmem_get_unmapped_area() would just call and return the
>> mm_get_unmapped_area() address) but the idea here is to avoid the GEM
>> object lookup cost by calling mm_get_unmapped_area() directly.
>
> I'd expect the extra GEM lookup to be negligible compared to the overall
> mmap() operation to be honest, but I guess if we really want to avoid
> the overhead, we could still write it without this ifdef.
>
> 	if (!IS_ENABLED(TRANSPARENT_HUGEPAGE))
> 		return mm_get_unmapped_area(current->mm, filp, uaddr,
> 					    len, 0, flags);
>
> 	...
>
> My main concern is that shmem_get_unmapped_area() evolves with more
> !TRANSPARENT_HUGEPAGE cases, and by calling mm_get_unmapped_area()
> directly, we miss the opportunity to get optimizations for these cases,
> just like we missed them by not forwarding the ->get_unmapped_area()
> requests to the shmem layer so far.

Yes, sounds like a very good point. I'll remove the ifdef and forward to
the shmem layer unconditionally.

Loïc
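
For reference, the direction agreed on above would look roughly like the
sketch below. This is reconstructed from the thread as an illustration,
not the actual follow-up revision: the ifdef is dropped, the lookup is
kept, shmem_get_unmapped_area() handles both the THP and non-THP cases,
and mm_get_unmapped_area() remains only as the fallback for non-GEM
backed mappings.

	unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
						unsigned long len, unsigned long pgoff,
						unsigned long flags)
	{
		struct drm_gem_object *obj;
		unsigned long ret;

		obj = drm_gem_object_lookup_from_offset(filp, pgoff, len >> PAGE_SHIFT);
		if (IS_ERR(obj))
			/* Non-GEM backed mappings (e.g. userland IOMEM) fall
			 * back to the generic allocator. */
			return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0,
						    flags);

		/* Always forward to shmem so any non-THP optimization it
		 * grows is picked up as well. */
		ret = shmem_get_unmapped_area(obj->filp, uaddr, len, 0, flags);

		drm_gem_object_put(obj);

		return ret;
	}
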
On Tue, 30 Sep 2025 12:30:03 +0200 Boris Brezillon <boris.brezillon@collabora.com> wrote:
>> +	obj = drm_gem_object_lookup_from_offset(filp, pgoff, len >> PAGE_SHIFT);
>> +	if (IS_ERR(obj))
>
> Is this supposed to happen? If not, I'd be tempted to add a
> WARN_ON_ONCE().

Taking that back. I think you need it for non-GEM backed mappings, like
userland IOMEM mappings.

>> +		return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0,
>> +					    flags);
>> +
Hi Loïc,

kernel test robot noticed the following build errors:

[auto build test ERROR on drm-misc/drm-misc-next]
[also build test ERROR on linus/master v6.17 next-20250929]
[cannot apply to akpm-mm/mm-everything]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Lo-c-Molinari/drm-shmem-helper-Add-huge-page-fault-handler/20250930-040600
base:   git://anongit.freedesktop.org/drm/drm-misc drm-misc-next
patch link:    https://lore.kernel.org/r/20250929200316.18417-3-loic.molinari%40collabora.com
patch subject: [PATCH 2/8] drm/gem: Introduce drm_gem_get_unmapped_area() fop
config: arm-randconfig-002-20250930 (https://download.01.org/0day-ci/archive/20250930/202509301702.DL6CeU62-lkp@intel.com/config)
compiler: arm-linux-gnueabi-gcc (GCC) 8.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250930/202509301702.DL6CeU62-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202509301702.DL6CeU62-lkp@intel.com/

All errors (new ones prefixed by >>):

   drivers/gpu/drm/drm_gem.c: In function 'drm_gem_get_unmapped_area':
>> drivers/gpu/drm/drm_gem.c:1280:9: error: implicit declaration of function 'mm_get_unmapped_area'; did you mean 'shmem_get_unmapped_area'? [-Werror=implicit-function-declaration]
     return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0, flags);
            ^~~~~~~~~~~~~~~~~~~~
            shmem_get_unmapped_area
   cc1: some warnings being treated as errors


vim +1280 drivers/gpu/drm/drm_gem.c

  1260	 */
  1261	unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
  1262						unsigned long len, unsigned long pgoff,
  1263						unsigned long flags)
  1264	{
  1265	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
  1266		struct drm_gem_object *obj;
  1267		unsigned long ret;
  1268	
  1269		obj = drm_gem_object_lookup_from_offset(filp, pgoff, len >> PAGE_SHIFT);
  1270		if (IS_ERR(obj))
  1271			return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0,
  1272						    flags);
  1273	
  1274		ret = shmem_get_unmapped_area(obj->filp, uaddr, len, 0, flags);
  1275	
  1276		drm_gem_object_put(obj);
  1277	
  1278		return ret;
  1279	#else
> 1280		return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0, flags);
  1281	#endif
  1282	}
  1283	EXPORT_SYMBOL(drm_gem_get_unmapped_area);
  1284	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki