From: Rob Clark <robdclark@chromium.org>
Buffers that are not shared between contexts can share a single resv
object. This way drm_gpuvm will not track them as external objects, and
submit-time validation overhead will be O(1) for all N non-shared BOs,
instead of O(N).
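
For context, drm_gpuvm treats a GEM object as "external" when its dma_resv
differs from the VM's, and each external object needs its own tracking and
locking at submit time.  Roughly (an illustrative paraphrase of the gpuvm
test, assuming the drm_gpuvm_resv() helper, not the exact in-tree code):

    /* A BO whose resv *is* the VM's resv is covered by locking the VM's
     * resv once, instead of being tracked and locked per-BO at submit.
     */
    static bool bo_is_external(struct drm_gpuvm *gpuvm,
                               struct drm_gem_object *obj)
    {
        return obj && obj->resv != drm_gpuvm_resv(gpuvm);
    }

With MSM_BO_NO_SHARE pointing obj->resv at the VM's resv, such BOs fall on
the cheap side of that test.
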
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
drivers/gpu/drm/msm/msm_drv.h | 1 +
drivers/gpu/drm/msm/msm_gem.c | 23 +++++++++++++++++++++++
drivers/gpu/drm/msm/msm_gem_prime.c | 15 +++++++++++++++
include/uapi/drm/msm_drm.h | 14 ++++++++++++++
4 files changed, 53 insertions(+)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b77fd2c531c3..b0add236cbb3 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -246,6 +246,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
+struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3708d4579203..d0f44c981351 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -532,6 +532,9 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
msm_gem_assert_locked(obj);
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
+
vma = get_vma_locked(obj, vm, range_start, range_end);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1060,6 +1063,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
+ if (msm_obj->flags & MSM_BO_NO_SHARE) {
+ struct drm_gem_object *r_obj =
+ container_of(obj->resv, struct drm_gem_object, _resv);
+
+ BUG_ON(obj->resv == &obj->_resv);
+
+ /* Drop reference we hold to shared resv obj: */
+ drm_gem_object_put(r_obj);
+ }
+
drm_gem_object_release(obj);
kfree(msm_obj->metadata);
@@ -1092,6 +1105,15 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
if (name)
msm_gem_object_set_name(obj, "%s", name);
+ if (flags & MSM_BO_NO_SHARE) {
+ struct msm_context *ctx = file->driver_priv;
+ struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
+
+ drm_gem_object_get(r_obj);
+
+ obj->resv = r_obj->resv;
+ }
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
@@ -1124,6 +1146,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.open = msm_gem_open,
.close = msm_gem_close,
+ .export = msm_gem_prime_export,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index ee267490c935..1a6d8099196a 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -16,6 +16,9 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int npages = obj->size >> PAGE_SHIFT;
+ if (msm_obj->flags & MSM_BO_NO_SHARE)
+ return ERR_PTR(-EINVAL);
+
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return ERR_PTR(-ENOMEM);
@@ -45,6 +48,15 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
return msm_gem_import(dev, attach->dmabuf, sg);
}
+
+struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags)
+{
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return ERR_PTR(-EPERM);
+
+ return drm_gem_prime_export(obj, flags);
+}
+
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
struct page **pages;
@@ -53,6 +65,9 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
if (obj->import_attach)
return 0;
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
+ return -EINVAL;
+
pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
ret = PTR_ERR(pages);
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index b974f5a24dbc..1bccc347945c 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -140,6 +140,19 @@ struct drm_msm_param {
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
#define MSM_BO_GPU_READONLY 0x00000002
+/* Private buffers do not need to be explicitly listed in the SUBMIT
+ * ioctl, unless referenced by a drm_msm_gem_submit_cmd.  Private
+ * buffers may NOT be imported/exported or used for scanout (or any
+ * other situation where buffers can be indefinitely pinned; cases
+ * other than scanout are all kernel-owned BOs which are not visible
+ * to userspace).
+ *
+ * In exchange for those constraints, all private BOs associated with
+ * a single context (drm_file) share a single dma_resv, and if there
+ * has been no eviction since the last submit, there is no per-BO
+ * bookkeeping to do, significantly cutting the SUBMIT overhead.
+ */
+#define MSM_BO_NO_SHARE 0x00000004
#define MSM_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define MSM_BO_CACHED 0x00010000
@@ -149,6 +162,7 @@ struct drm_msm_param {
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
MSM_BO_GPU_READONLY | \
+ MSM_BO_NO_SHARE | \
MSM_BO_CACHE_MASK)
struct drm_msm_gem_new {
--
2.49.0
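
For reference, expected userspace usage of the new flag would look roughly
like the sketch below.  This is an illustration only; the device-open path,
error handling, and the choice of cache mode are assumptions, not part of
the patch:

    #include <stdint.h>
    #include <xf86drm.h>        /* drmIoctl(), from libdrm */
    #include <drm/msm_drm.h>    /* struct drm_msm_gem_new; header path may vary */

    /* Allocate a context-private BO: it does not need to appear in the
     * SUBMIT ioctl's bos[] table unless a submit cmd references it, and
     * it cannot be exported or used for scanout.
     */
    static int alloc_private_bo(int drm_fd, uint64_t size, uint32_t *handle)
    {
        struct drm_msm_gem_new req = {
            .size  = size,
            .flags = MSM_BO_WC | MSM_BO_NO_SHARE,
        };
        int ret = drmIoctl(drm_fd, DRM_IOCTL_MSM_GEM_NEW, &req);

        if (ret)
            return ret;

        *handle = req.handle;
        return 0;
    }
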
On 5/2/25 18:56, Rob Clark wrote:
> From: Rob Clark <robdclark@chromium.org>
>
> Buffers that are not shared between contexts can share a single resv
> object. This way drm_gpuvm will not track them as external objects, and
> submit-time validating overhead will be O(1) for all N non-shared BOs,
> instead of O(n).
>
> Signed-off-by: Rob Clark <robdclark@chromium.org>
> ---
> drivers/gpu/drm/msm/msm_drv.h | 1 +
> drivers/gpu/drm/msm/msm_gem.c | 23 +++++++++++++++++++++++
> drivers/gpu/drm/msm/msm_gem_prime.c | 15 +++++++++++++++
> include/uapi/drm/msm_drm.h | 14 ++++++++++++++
> 4 files changed, 53 insertions(+)
>
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index b77fd2c531c3..b0add236cbb3 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -246,6 +246,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
> void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
> struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
> struct dma_buf_attachment *attach, struct sg_table *sg);
> +struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
> int msm_gem_prime_pin(struct drm_gem_object *obj);
> void msm_gem_prime_unpin(struct drm_gem_object *obj);
>
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 3708d4579203..d0f44c981351 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -532,6 +532,9 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
>
> msm_gem_assert_locked(obj);
>
> + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
> + return -EINVAL;
> +
> vma = get_vma_locked(obj, vm, range_start, range_end);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
> @@ -1060,6 +1063,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
> put_pages(obj);
> }
>
> + if (msm_obj->flags & MSM_BO_NO_SHARE) {
> + struct drm_gem_object *r_obj =
> + container_of(obj->resv, struct drm_gem_object, _resv);
> +
> + BUG_ON(obj->resv == &obj->_resv);
> +
> + /* Drop reference we hold to shared resv obj: */
> + drm_gem_object_put(r_obj);
> + }
> +
> drm_gem_object_release(obj);
>
> kfree(msm_obj->metadata);
> @@ -1092,6 +1105,15 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
> if (name)
> msm_gem_object_set_name(obj, "%s", name);
>
> + if (flags & MSM_BO_NO_SHARE) {
> + struct msm_context *ctx = file->driver_priv;
> + struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
> +
> + drm_gem_object_get(r_obj);
> +
> + obj->resv = r_obj->resv;
> + }
> +
> ret = drm_gem_handle_create(file, obj, handle);
>
> /* drop reference from allocate - handle holds it now */
> @@ -1124,6 +1146,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
> .free = msm_gem_free_object,
> .open = msm_gem_open,
> .close = msm_gem_close,
> + .export = msm_gem_prime_export,
> .pin = msm_gem_prime_pin,
> .unpin = msm_gem_prime_unpin,
> .get_sg_table = msm_gem_prime_get_sg_table,
> diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
> index ee267490c935..1a6d8099196a 100644
> --- a/drivers/gpu/drm/msm/msm_gem_prime.c
> +++ b/drivers/gpu/drm/msm/msm_gem_prime.c
> @@ -16,6 +16,9 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
> struct msm_gem_object *msm_obj = to_msm_bo(obj);
> int npages = obj->size >> PAGE_SHIFT;
>
> + if (msm_obj->flags & MSM_BO_NO_SHARE)
> + return ERR_PTR(-EINVAL);
> +
> if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
> return ERR_PTR(-ENOMEM);
>
> @@ -45,6 +48,15 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
> return msm_gem_import(dev, attach->dmabuf, sg);
> }
>
> +
> +struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags)
> +{
> + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
> + return ERR_PTR(-EPERM);
> +
> + return drm_gem_prime_export(obj, flags);
> +}
> +
> int msm_gem_prime_pin(struct drm_gem_object *obj)
> {
> struct page **pages;
> @@ -53,6 +65,9 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
> if (obj->import_attach)
> return 0;
>
> + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
> + return -EINVAL;
> +
> pages = msm_gem_pin_pages_locked(obj);
> if (IS_ERR(pages))
> ret = PTR_ERR(pages);
> diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
> index b974f5a24dbc..1bccc347945c 100644
> --- a/include/uapi/drm/msm_drm.h
> +++ b/include/uapi/drm/msm_drm.h
> @@ -140,6 +140,19 @@ struct drm_msm_param {
>
> #define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
> #define MSM_BO_GPU_READONLY 0x00000002
> +/* Private buffers do not need to be explicitly listed in the SUBMIT
> + * ioctl, unless referenced by a drm_msm_gem_submit_cmd. Private
> + * buffers may NOT be imported/exported or used for scanout (or any
> + * other situation where buffers can be indefinitely pinned, but
> + * cases other than scanout are all kernel owned BOs which are not
> + * visible to userspace).
Why is pinning for scanout a problem with those?
Maybe I missed something, but for other drivers that doesn't seem to be a problem.
Regards,
Christian.
> + *
> + * In exchange for those constraints, all private BOs associated with
> + * a single context (drm_file) share a single dma_resv, and if there
> + * has been no eviction since the last submit, there are no per-BO
> + * bookeeping to do, significantly cutting the SUBMIT overhead.
> + */
> +#define MSM_BO_NO_SHARE 0x00000004
> #define MSM_BO_CACHE_MASK 0x000f0000
> /* cache modes */
> #define MSM_BO_CACHED 0x00010000
> @@ -149,6 +162,7 @@ struct drm_msm_param {
>
> #define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
> MSM_BO_GPU_READONLY | \
> + MSM_BO_NO_SHARE | \
> MSM_BO_CACHE_MASK)
>
> struct drm_msm_gem_new {
On Mon, May 5, 2025 at 12:54 AM Christian König
<christian.koenig@amd.com> wrote:
>
> On 5/2/25 18:56, Rob Clark wrote:
> > From: Rob Clark <robdclark@chromium.org>
> >
> > Buffers that are not shared between contexts can share a single resv
> > object. This way drm_gpuvm will not track them as external objects, and
> > submit-time validating overhead will be O(1) for all N non-shared BOs,
> > instead of O(n).
> >
> > Signed-off-by: Rob Clark <robdclark@chromium.org>
> > ---
> > drivers/gpu/drm/msm/msm_drv.h | 1 +
> > drivers/gpu/drm/msm/msm_gem.c | 23 +++++++++++++++++++++++
> > drivers/gpu/drm/msm/msm_gem_prime.c | 15 +++++++++++++++
> > include/uapi/drm/msm_drm.h | 14 ++++++++++++++
> > 4 files changed, 53 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> > index b77fd2c531c3..b0add236cbb3 100644
> > --- a/drivers/gpu/drm/msm/msm_drv.h
> > +++ b/drivers/gpu/drm/msm/msm_drv.h
> > @@ -246,6 +246,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
> > void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
> > struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
> > struct dma_buf_attachment *attach, struct sg_table *sg);
> > +struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
> > int msm_gem_prime_pin(struct drm_gem_object *obj);
> > void msm_gem_prime_unpin(struct drm_gem_object *obj);
> >
> > diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> > index 3708d4579203..d0f44c981351 100644
> > --- a/drivers/gpu/drm/msm/msm_gem.c
> > +++ b/drivers/gpu/drm/msm/msm_gem.c
> > @@ -532,6 +532,9 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
> >
> > msm_gem_assert_locked(obj);
> >
> > + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
> > + return -EINVAL;
> > +
> > vma = get_vma_locked(obj, vm, range_start, range_end);
> > if (IS_ERR(vma))
> > return PTR_ERR(vma);
> > @@ -1060,6 +1063,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
> > put_pages(obj);
> > }
> >
> > + if (msm_obj->flags & MSM_BO_NO_SHARE) {
> > + struct drm_gem_object *r_obj =
> > + container_of(obj->resv, struct drm_gem_object, _resv);
> > +
> > + BUG_ON(obj->resv == &obj->_resv);
> > +
> > + /* Drop reference we hold to shared resv obj: */
> > + drm_gem_object_put(r_obj);
> > + }
> > +
> > drm_gem_object_release(obj);
> >
> > kfree(msm_obj->metadata);
> > @@ -1092,6 +1105,15 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
> > if (name)
> > msm_gem_object_set_name(obj, "%s", name);
> >
> > + if (flags & MSM_BO_NO_SHARE) {
> > + struct msm_context *ctx = file->driver_priv;
> > + struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
> > +
> > + drm_gem_object_get(r_obj);
> > +
> > + obj->resv = r_obj->resv;
> > + }
> > +
> > ret = drm_gem_handle_create(file, obj, handle);
> >
> > /* drop reference from allocate - handle holds it now */
> > @@ -1124,6 +1146,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
> > .free = msm_gem_free_object,
> > .open = msm_gem_open,
> > .close = msm_gem_close,
> > + .export = msm_gem_prime_export,
> > .pin = msm_gem_prime_pin,
> > .unpin = msm_gem_prime_unpin,
> > .get_sg_table = msm_gem_prime_get_sg_table,
> > diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
> > index ee267490c935..1a6d8099196a 100644
> > --- a/drivers/gpu/drm/msm/msm_gem_prime.c
> > +++ b/drivers/gpu/drm/msm/msm_gem_prime.c
> > @@ -16,6 +16,9 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
> > struct msm_gem_object *msm_obj = to_msm_bo(obj);
> > int npages = obj->size >> PAGE_SHIFT;
> >
> > + if (msm_obj->flags & MSM_BO_NO_SHARE)
> > + return ERR_PTR(-EINVAL);
> > +
> > if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
> > return ERR_PTR(-ENOMEM);
> >
> > @@ -45,6 +48,15 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
> > return msm_gem_import(dev, attach->dmabuf, sg);
> > }
> >
> > +
> > +struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags)
> > +{
> > + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
> > + return ERR_PTR(-EPERM);
> > +
> > + return drm_gem_prime_export(obj, flags);
> > +}
> > +
> > int msm_gem_prime_pin(struct drm_gem_object *obj)
> > {
> > struct page **pages;
> > @@ -53,6 +65,9 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
> > if (obj->import_attach)
> > return 0;
> >
> > + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
> > + return -EINVAL;
> > +
> > pages = msm_gem_pin_pages_locked(obj);
> > if (IS_ERR(pages))
> > ret = PTR_ERR(pages);
> > diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
> > index b974f5a24dbc..1bccc347945c 100644
> > --- a/include/uapi/drm/msm_drm.h
> > +++ b/include/uapi/drm/msm_drm.h
> > @@ -140,6 +140,19 @@ struct drm_msm_param {
> >
> > #define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
> > #define MSM_BO_GPU_READONLY 0x00000002
> > +/* Private buffers do not need to be explicitly listed in the SUBMIT
> > + * ioctl, unless referenced by a drm_msm_gem_submit_cmd. Private
> > + * buffers may NOT be imported/exported or used for scanout (or any
> > + * other situation where buffers can be indefinitely pinned, but
> > + * cases other than scanout are all kernel owned BOs which are not
> > + * visible to userspace).
>
> Why is pinning for scanout a problem with those?
>
> Maybe I missed something but for other drivers that doesn't seem to be a problem.
I guess _technically_ it could be ok, because we track pin-count
separately from dma_resv. But the motivation for that statement was
simply that _NO_SHARE buffers share a resv obj with the VM, so they
should not be used in a different VM (in this case, the display, which
has its own VM).
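
Concretely, the rule the flag implies could be written as a guard like
this (purely hypothetical, not something in this patch, assuming the
drm_gpuvm_resv() helper):

    /* Before mapping a BO into some VM (e.g. the display's), reject
     * BOs that borrowed a different VM's resv.
     */
    static int check_vm_for_bo(struct drm_gpuvm *vm, struct drm_gem_object *obj)
    {
        if ((to_msm_bo(obj)->flags & MSM_BO_NO_SHARE) &&
            (obj->resv != drm_gpuvm_resv(vm)))
            return -EINVAL;   /* BO is private to another VM */
        return 0;
    }
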
BR,
-R
> Regards,
> Christian.
>
>
> > + *
> > + * In exchange for those constraints, all private BOs associated with
> > + * a single context (drm_file) share a single dma_resv, and if there
> > + * has been no eviction since the last submit, there are no per-BO
> > + * bookeeping to do, significantly cutting the SUBMIT overhead.
> > + */
> > +#define MSM_BO_NO_SHARE 0x00000004
> > #define MSM_BO_CACHE_MASK 0x000f0000
> > /* cache modes */
> > #define MSM_BO_CACHED 0x00010000
> > @@ -149,6 +162,7 @@ struct drm_msm_param {
> >
> > #define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
> > MSM_BO_GPU_READONLY | \
> > + MSM_BO_NO_SHARE | \
> > MSM_BO_CACHE_MASK)
> >
> > struct drm_msm_gem_new {
>
On 5/5/25 16:15, Rob Clark wrote:
> On Mon, May 5, 2025 at 12:54 AM Christian König
> <christian.koenig@amd.com> wrote:
>>
>> On 5/2/25 18:56, Rob Clark wrote:
>>> From: Rob Clark <robdclark@chromium.org>
>>>
>>> Buffers that are not shared between contexts can share a single resv
>>> object. This way drm_gpuvm will not track them as external objects, and
>>> submit-time validating overhead will be O(1) for all N non-shared BOs,
>>> instead of O(n).
>>>
>>> Signed-off-by: Rob Clark <robdclark@chromium.org>
>>> ---
>>> drivers/gpu/drm/msm/msm_drv.h | 1 +
>>> drivers/gpu/drm/msm/msm_gem.c | 23 +++++++++++++++++++++++
>>> drivers/gpu/drm/msm/msm_gem_prime.c | 15 +++++++++++++++
>>> include/uapi/drm/msm_drm.h | 14 ++++++++++++++
>>> 4 files changed, 53 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
>>> index b77fd2c531c3..b0add236cbb3 100644
>>> --- a/drivers/gpu/drm/msm/msm_drv.h
>>> +++ b/drivers/gpu/drm/msm/msm_drv.h
>>> @@ -246,6 +246,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
>>> void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
>>> struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
>>> struct dma_buf_attachment *attach, struct sg_table *sg);
>>> +struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
>>> int msm_gem_prime_pin(struct drm_gem_object *obj);
>>> void msm_gem_prime_unpin(struct drm_gem_object *obj);
>>>
>>> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
>>> index 3708d4579203..d0f44c981351 100644
>>> --- a/drivers/gpu/drm/msm/msm_gem.c
>>> +++ b/drivers/gpu/drm/msm/msm_gem.c
>>> @@ -532,6 +532,9 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
>>>
>>> msm_gem_assert_locked(obj);
>>>
>>> + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
>>> + return -EINVAL;
>>> +
>>> vma = get_vma_locked(obj, vm, range_start, range_end);
>>> if (IS_ERR(vma))
>>> return PTR_ERR(vma);
>>> @@ -1060,6 +1063,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
>>> put_pages(obj);
>>> }
>>>
>>> + if (msm_obj->flags & MSM_BO_NO_SHARE) {
>>> + struct drm_gem_object *r_obj =
>>> + container_of(obj->resv, struct drm_gem_object, _resv);
>>> +
>>> + BUG_ON(obj->resv == &obj->_resv);
>>> +
>>> + /* Drop reference we hold to shared resv obj: */
>>> + drm_gem_object_put(r_obj);
>>> + }
>>> +
>>> drm_gem_object_release(obj);
>>>
>>> kfree(msm_obj->metadata);
>>> @@ -1092,6 +1105,15 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
>>> if (name)
>>> msm_gem_object_set_name(obj, "%s", name);
>>>
>>> + if (flags & MSM_BO_NO_SHARE) {
>>> + struct msm_context *ctx = file->driver_priv;
>>> + struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
>>> +
>>> + drm_gem_object_get(r_obj);
>>> +
>>> + obj->resv = r_obj->resv;
>>> + }
>>> +
>>> ret = drm_gem_handle_create(file, obj, handle);
>>>
>>> /* drop reference from allocate - handle holds it now */
>>> @@ -1124,6 +1146,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
>>> .free = msm_gem_free_object,
>>> .open = msm_gem_open,
>>> .close = msm_gem_close,
>>> + .export = msm_gem_prime_export,
>>> .pin = msm_gem_prime_pin,
>>> .unpin = msm_gem_prime_unpin,
>>> .get_sg_table = msm_gem_prime_get_sg_table,
>>> diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
>>> index ee267490c935..1a6d8099196a 100644
>>> --- a/drivers/gpu/drm/msm/msm_gem_prime.c
>>> +++ b/drivers/gpu/drm/msm/msm_gem_prime.c
>>> @@ -16,6 +16,9 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
>>> struct msm_gem_object *msm_obj = to_msm_bo(obj);
>>> int npages = obj->size >> PAGE_SHIFT;
>>>
>>> + if (msm_obj->flags & MSM_BO_NO_SHARE)
>>> + return ERR_PTR(-EINVAL);
>>> +
>>> if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
>>> return ERR_PTR(-ENOMEM);
>>>
>>> @@ -45,6 +48,15 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
>>> return msm_gem_import(dev, attach->dmabuf, sg);
>>> }
>>>
>>> +
>>> +struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags)
>>> +{
>>> + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
>>> + return ERR_PTR(-EPERM);
>>> +
>>> + return drm_gem_prime_export(obj, flags);
>>> +}
>>> +
>>> int msm_gem_prime_pin(struct drm_gem_object *obj)
>>> {
>>> struct page **pages;
>>> @@ -53,6 +65,9 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
>>> if (obj->import_attach)
>>> return 0;
>>>
>>> + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
>>> + return -EINVAL;
>>> +
>>> pages = msm_gem_pin_pages_locked(obj);
>>> if (IS_ERR(pages))
>>> ret = PTR_ERR(pages);
>>> diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
>>> index b974f5a24dbc..1bccc347945c 100644
>>> --- a/include/uapi/drm/msm_drm.h
>>> +++ b/include/uapi/drm/msm_drm.h
>>> @@ -140,6 +140,19 @@ struct drm_msm_param {
>>>
>>> #define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
>>> #define MSM_BO_GPU_READONLY 0x00000002
>>> +/* Private buffers do not need to be explicitly listed in the SUBMIT
>>> + * ioctl, unless referenced by a drm_msm_gem_submit_cmd. Private
>>> + * buffers may NOT be imported/exported or used for scanout (or any
>>> + * other situation where buffers can be indefinitely pinned, but
>>> + * cases other than scanout are all kernel owned BOs which are not
>>> + * visible to userspace).
>>
>> Why is pinning for scanout a problem with those?
>>
>> Maybe I missed something but for other drivers that doesn't seem to be a problem.
>
> I guess _technically_ it could be ok because we track pin-count
> separately from dma_resv. But the motivation for that statement was
> simply that _NO_SHARE buffers share a resv obj with the VM, so they
> should not be used in a different VM (in this case, the display, which
> has it's own VM).
Ah, yes that makes perfect sense.
You should indeed avoid importing the BO into a different VM when it shares the reservation object with it. That will only cause trouble.
But at least amdgpu/radeon, and I think i915 as well, don't need to do that. Scanout is just separate from all VMs.
Regards,
Christian.
>
> BR,
> -R
>
>> Regards,
>> Christian.
>>
>>
>>> + *
>>> + * In exchange for those constraints, all private BOs associated with
>>> + * a single context (drm_file) share a single dma_resv, and if there
>>> + * has been no eviction since the last submit, there are no per-BO
>>> + * bookeeping to do, significantly cutting the SUBMIT overhead.
>>> + */
>>> +#define MSM_BO_NO_SHARE 0x00000004
>>> #define MSM_BO_CACHE_MASK 0x000f0000
>>> /* cache modes */
>>> #define MSM_BO_CACHED 0x00010000
>>> @@ -149,6 +162,7 @@ struct drm_msm_param {
>>>
>>> #define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
>>> MSM_BO_GPU_READONLY | \
>>> + MSM_BO_NO_SHARE | \
>>> MSM_BO_CACHE_MASK)
>>>
>>> struct drm_msm_gem_new {
>>