From: Dmitry Osipenko
To: David Airlie, Gerd Hoffmann, Gurchetan Singh, Chia-I Wu, Daniel Vetter,
    Daniel Almeida, Gustavo Padovan, Daniel Stone, Tomeu Vizoso,
    Maarten Lankhorst, Maxime Ripard, Thomas Zimmermann, Rob Clark,
    Sumit Semwal, Christian König, Qiang Yu, Steven Price,
    Alyssa Rosenzweig, Rob Herring, Sean Paul, Dmitry Baryshkov, Abhinav Kumar
Cc: dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org,
    Dmitry Osipenko, kernel@collabora.com, virtualization@lists.linux-foundation.org
Subject: [PATCH v8 3/7] drm/shmem-helper: Switch to reservation lock
Date: Sun, 6 Nov 2022 02:27:15 +0300
Message-Id: <20221105232719.302619-4-dmitry.osipenko@collabora.com>
In-Reply-To: <20221105232719.302619-1-dmitry.osipenko@collabora.com>
References: <20221105232719.302619-1-dmitry.osipenko@collabora.com>

Replace all drm-shmem locks with a GEM reservation lock. This makes the
locking consistent with the dma-buf locking convention, where importers
are responsible for holding the reservation lock for all operations
performed over dma-bufs, preventing deadlocks between dma-buf importers
and exporters.
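For driver authors, the practical effect is that the caller must now hold
the GEM object's reservation lock around the helper entry points that
previously took pages_lock/vmap_lock internally. A minimal, hypothetical
caller-side sketch of the convention (the function name and usage below
are illustrative and not part of this patch):

#include <linux/dma-resv.h>
#include <linux/iosys-map.h>
#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical example: map a shmem GEM object for CPU access while
 * holding the reservation lock that now guards the pages/vmap state.
 */
static int example_vmap_access(struct drm_gem_shmem_object *shmem)
{
        struct iosys_map map;
        int ret;

        ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
        if (ret)
                return ret;

        ret = drm_gem_shmem_vmap(shmem, &map);
        if (!ret) {
                /* ... access map.vaddr here ... */
                drm_gem_shmem_vunmap(shmem, &map);
        }

        dma_resv_unlock(shmem->base.resv);

        return ret;
}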
Suggested-by: Daniel Vetter
Signed-off-by: Dmitry Osipenko
---
 drivers/gpu/drm/drm_gem_shmem_helper.c        | 184 +++++++-----------
 drivers/gpu/drm/lima/lima_gem.c               |   8 +-
 drivers/gpu/drm/panfrost/panfrost_drv.c       |   7 +-
 .../gpu/drm/panfrost/panfrost_gem_shrinker.c  |   6 +-
 drivers/gpu/drm/panfrost/panfrost_mmu.c       |  19 +-
 include/drm/drm_gem_shmem_helper.h            |  14 +-
 6 files changed, 95 insertions(+), 143 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 801033b48893..d6e62f228989 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -86,8 +86,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 	if (ret)
 		goto err_release;
 
-	mutex_init(&shmem->pages_lock);
-	mutex_init(&shmem->vmap_lock);
 	INIT_LIST_HEAD(&shmem->madv_list);
 
 	if (!private) {
@@ -139,11 +137,13 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	WARN_ON(shmem->vmap_use_count);
-
 	if (obj->import_attach) {
 		drm_prime_gem_destroy(obj, shmem->sgt);
 	} else {
+		dma_resv_lock(shmem->base.resv, NULL);
+
+		WARN_ON(shmem->vmap_use_count);
+
 		if (shmem->sgt) {
 			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
 					  DMA_BIDIRECTIONAL, 0);
@@ -152,18 +152,18 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 		}
 		if (shmem->pages)
 			drm_gem_shmem_put_pages(shmem);
-	}
 
-	WARN_ON(shmem->pages_use_count);
+		WARN_ON(shmem->pages_use_count);
+
+		dma_resv_unlock(shmem->base.resv);
+	}
 
 	drm_gem_object_release(obj);
-	mutex_destroy(&shmem->pages_lock);
-	mutex_destroy(&shmem->vmap_lock);
 	kfree(shmem);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
 
-static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	struct page **pages;
@@ -194,35 +194,17 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 }
 
 /*
- * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
  * @shmem: shmem GEM object
  *
- * This function makes sure that backing pages exists for the shmem GEM object
- * and increases the use count.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function decreases the use count and puts the backing pages when use drops to zero.
  */
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
-{
-	int ret;
-
-	WARN_ON(shmem->base.import_attach);
-
-	ret = mutex_lock_interruptible(&shmem->pages_lock);
-	if (ret)
-		return ret;
-	ret = drm_gem_shmem_get_pages_locked(shmem);
-	mutex_unlock(&shmem->pages_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_get_pages);
-
-static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
+	dma_resv_assert_held(shmem->base.resv);
+
 	if (WARN_ON_ONCE(!shmem->pages_use_count))
 		return;
 
@@ -239,19 +221,6 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
 			  shmem->pages_mark_accessed_on_put);
 	shmem->pages = NULL;
 }
-
-/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
- * @shmem: shmem GEM object
- *
- * This function decreases the use count and puts the backing pages when use drops to zero.
- */
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
-{
-	mutex_lock(&shmem->pages_lock);
-	drm_gem_shmem_put_pages_locked(shmem);
-	mutex_unlock(&shmem->pages_lock);
-}
 EXPORT_SYMBOL(drm_gem_shmem_put_pages);
 
 /**
@@ -266,6 +235,8 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
  */
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
 {
+	dma_resv_assert_held(shmem->base.resv);
+
 	WARN_ON(shmem->base.import_attach);
 
 	return drm_gem_shmem_get_pages(shmem);
@@ -281,14 +252,31 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
  */
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 {
+	dma_resv_assert_held(shmem->base.resv);
+
 	WARN_ON(shmem->base.import_attach);
 
 	drm_gem_shmem_put_pages(shmem);
 }
 EXPORT_SYMBOL(drm_gem_shmem_unpin);
 
-static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
-				     struct iosys_map *map)
+/*
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
+ *       store.
+ *
+ * This function makes sure that a contiguous kernel virtual address mapping
+ * exists for the buffer backing the shmem GEM object. It hides the differences
+ * between dma-buf imported and natively allocated objects.
+ *
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
+		       struct iosys_map *map)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	int ret = 0;
@@ -304,6 +292,8 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
+		dma_resv_assert_held(shmem->base.resv);
+
 		if (shmem->vmap_use_count++ > 0) {
 			iosys_map_set_vaddr(map, shmem->vaddr);
 			return 0;
@@ -338,45 +328,30 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
 
 /*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
- * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
- *       store.
- *
- * This function makes sure that a contiguous kernel virtual address mapping
- * exists for the buffer backing the shmem GEM object. It hides the differences
- * between dma-buf imported and natively allocated objects.
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
  *
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * This function cleans up a kernel virtual address mapping acquired by
+ * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
+ * zero.
  *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function hides the differences between dma-buf imported and natively
+ * allocated objects.
  */
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
-		       struct iosys_map *map)
-{
-	int ret;
-
-	ret = mutex_lock_interruptible(&shmem->vmap_lock);
-	if (ret)
-		return ret;
-	ret = drm_gem_shmem_vmap_locked(shmem, map);
-	mutex_unlock(&shmem->vmap_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
-
-static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
-					struct iosys_map *map)
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
+			  struct iosys_map *map)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
 	if (obj->import_attach) {
 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
 	} else {
+		dma_resv_assert_held(shmem->base.resv);
+
 		if (WARN_ON_ONCE(!shmem->vmap_use_count))
 			return;
 
@@ -389,26 +364,6 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 
 	shmem->vaddr = NULL;
 }
-
-/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
- * @shmem: shmem GEM object
- * @map: Kernel virtual address where the SHMEM GEM object was mapped
- *
- * This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
- *
- * This function hides the differences between dma-buf imported and natively
- * allocated objects.
- */
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
-			  struct iosys_map *map)
-{
-	mutex_lock(&shmem->vmap_lock);
-	drm_gem_shmem_vunmap_locked(shmem, map);
-	mutex_unlock(&shmem->vmap_lock);
-}
 EXPORT_SYMBOL(drm_gem_shmem_vunmap);
 
 static struct drm_gem_shmem_object *
@@ -441,24 +396,24 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
  */
 int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
 {
-	mutex_lock(&shmem->pages_lock);
+	dma_resv_assert_held(shmem->base.resv);
 
 	if (shmem->madv >= 0)
 		shmem->madv = madv;
 
 	madv = shmem->madv;
 
-	mutex_unlock(&shmem->pages_lock);
-
 	return (madv >= 0);
 }
 EXPORT_SYMBOL(drm_gem_shmem_madvise);
 
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	struct drm_device *dev = obj->dev;
 
+	dma_resv_assert_held(shmem->base.resv);
+
 	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
 
 	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
@@ -466,7 +421,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 	kfree(shmem->sgt);
 	shmem->sgt = NULL;
 
-	drm_gem_shmem_put_pages_locked(shmem);
+	drm_gem_shmem_put_pages(shmem);
 
 	shmem->madv = -1;
 
@@ -482,17 +437,6 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
 }
-EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
-
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
-{
-	if (!mutex_trylock(&shmem->pages_lock))
-		return false;
-	drm_gem_shmem_purge_locked(shmem);
-	mutex_unlock(&shmem->pages_lock);
-
-	return true;
-}
 EXPORT_SYMBOL(drm_gem_shmem_purge);
 
 /**
@@ -548,7 +492,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
-	mutex_lock(&shmem->pages_lock);
+	dma_resv_lock(shmem->base.resv, NULL);
 
 	if (page_offset >= num_pages ||
 	    WARN_ON_ONCE(!shmem->pages) ||
@@ -560,7 +504,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
 	}
 
-	mutex_unlock(&shmem->pages_lock);
+	dma_resv_unlock(shmem->base.resv);
 
 	return ret;
 }
@@ -573,8 +517,10 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 
 	WARN_ON(shmem->base.import_attach);
 
+	dma_resv_lock(shmem->base.resv, NULL);
 	ret = drm_gem_shmem_get_pages(shmem);
 	WARN_ON_ONCE(ret != 0);
+	dma_resv_unlock(shmem->base.resv);
 
 	drm_gem_vm_open(vma);
 }
@@ -584,7 +530,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+	dma_resv_lock(shmem->base.resv, NULL);
 	drm_gem_shmem_put_pages(shmem);
+	dma_resv_unlock(shmem->base.resv);
+
 	drm_gem_vm_close(vma);
 }
 
@@ -619,7 +568,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 		return dma_buf_mmap(obj->dma_buf, vma, 0);
 	}
 
+	dma_resv_lock(shmem->base.resv, NULL);
 	ret = drm_gem_shmem_get_pages(shmem);
+	dma_resv_unlock(shmem->base.resv);
+
 	if (ret) {
 		drm_gem_vm_close(vma);
 		return ret;
@@ -707,9 +659,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
 
 	WARN_ON(obj->import_attach);
 
+	dma_resv_lock(shmem->base.resv, NULL);
+
 	ret = drm_gem_shmem_get_pages(shmem);
 	if (ret)
-		return ERR_PTR(ret);
+		goto err_unlock;
 
 	sgt = drm_gem_shmem_get_sg_table(shmem);
 	if (IS_ERR(sgt)) {
@@ -723,6 +677,8 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
 
 	shmem->sgt = sgt;
 
+	dma_resv_unlock(shmem->base.resv);
+
 	return sgt;
 
 err_free_sgt:
@@ -730,6 +686,8 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
 	kfree(sgt);
 err_put_pages:
 	drm_gem_shmem_put_pages(shmem);
+err_unlock:
+	dma_resv_unlock(shmem->base.resv);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 0f1ca0b0db49..5008f0c2428f 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
 
 	new_size = min(new_size, bo->base.base.size);
 
-	mutex_lock(&bo->base.pages_lock);
+	dma_resv_lock(bo->base.base.resv, NULL);
 
 	if (bo->base.pages) {
 		pages = bo->base.pages;
@@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
 		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
 				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
 		if (!pages) {
-			mutex_unlock(&bo->base.pages_lock);
+			dma_resv_unlock(bo->base.base.resv);
 			return -ENOMEM;
 		}
 
@@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
 		struct page *page = shmem_read_mapping_page(mapping, i);
 
 		if (IS_ERR(page)) {
-			mutex_unlock(&bo->base.pages_lock);
+			dma_resv_unlock(bo->base.base.resv);
 			return PTR_ERR(page);
 		}
 		pages[i] = page;
 	}
 
-	mutex_unlock(&bo->base.pages_lock);
+	dma_resv_unlock(bo->base.base.resv);
 
 	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
 					new_size, GFP_KERNEL);
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 2fa5afe21288..94b8e6de34b8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -405,6 +405,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 
 	bo = to_panfrost_bo(gem_obj);
 
+	ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
+	if (ret)
+		goto out_put_object;
+
 	mutex_lock(&pfdev->shrinker_lock);
 	mutex_lock(&bo->mappings.lock);
 	if (args->madv == PANFROST_MADV_DONTNEED) {
@@ -442,7 +446,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 out_unlock_mappings:
 	mutex_unlock(&bo->mappings.lock);
 	mutex_unlock(&pfdev->shrinker_lock);
-
+	dma_resv_unlock(bo->base.base.resv);
+out_put_object:
 	drm_gem_object_put(gem_obj);
 	return ret;
 }
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index bf0170782f25..6a71a2555f85 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
 	if (!mutex_trylock(&bo->mappings.lock))
 		return false;
 
-	if (!mutex_trylock(&shmem->pages_lock))
+	if (!dma_resv_trylock(shmem->base.resv))
 		goto unlock_mappings;
 
 	panfrost_gem_teardown_mappings_locked(bo);
-	drm_gem_shmem_purge_locked(&bo->base);
+	drm_gem_shmem_purge(&bo->base);
 	ret = true;
 
-	mutex_unlock(&shmem->pages_lock);
+	dma_resv_unlock(shmem->base.resv);
 
 unlock_mappings:
 	mutex_unlock(&bo->mappings.lock);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index e246d914e7f6..0d5a75b23ed2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -434,6 +434,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	struct panfrost_gem_mapping *bomapping;
 	struct panfrost_gem_object *bo;
 	struct address_space *mapping;
+	struct drm_gem_object *obj;
 	pgoff_t page_offset;
 	struct sg_table *sgt;
 	struct page **pages;
@@ -456,15 +457,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	page_offset = addr >> PAGE_SHIFT;
 	page_offset -= bomapping->mmnode.start;
 
-	mutex_lock(&bo->base.pages_lock);
+	obj = &bo->base.base;
+
+	dma_resv_lock(obj->resv, NULL);
 
 	if (!bo->base.pages) {
 		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
 				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
 		if (!bo->sgts) {
-			mutex_unlock(&bo->base.pages_lock);
 			ret = -ENOMEM;
-			goto err_bo;
+			goto err_unlock;
 		}
 
 		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -472,9 +474,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 		if (!pages) {
 			kvfree(bo->sgts);
 			bo->sgts = NULL;
-			mutex_unlock(&bo->base.pages_lock);
 			ret = -ENOMEM;
-			goto err_bo;
+			goto err_unlock;
 		}
 		bo->base.pages = pages;
 		bo->base.pages_use_count = 1;
@@ -482,7 +483,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 		pages = bo->base.pages;
 		if (pages[page_offset]) {
 			/* Pages are already mapped, bail out. */
-			mutex_unlock(&bo->base.pages_lock);
 			goto out;
 		}
 	}
@@ -493,14 +493,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
 		pages[i] = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(pages[i])) {
-			mutex_unlock(&bo->base.pages_lock);
 			ret = PTR_ERR(pages[i]);
 			goto err_pages;
 		}
 	}
 
-	mutex_unlock(&bo->base.pages_lock);
-
 	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
 	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
 					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
@@ -519,6 +516,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
 out:
+	dma_resv_unlock(obj->resv);
+
 	panfrost_gem_mapping_put(bomapping);
 
 	return 0;
@@ -527,6 +526,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	sg_free_table(sgt);
 err_pages:
 	drm_gem_shmem_put_pages(&bo->base);
+err_unlock:
+	dma_resv_unlock(obj->resv);
 err_bo:
 	panfrost_gem_mapping_put(bomapping);
 	return ret;
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index a2201b2488c5..3b055d238584 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -26,11 +26,6 @@ struct drm_gem_shmem_object {
 	 */
 	struct drm_gem_object base;
 
-	/**
-	 * @pages_lock: Protects the page table and use count
-	 */
-	struct mutex pages_lock;
-
 	/**
 	 * @pages: Page table
 	 */
@@ -79,11 +74,6 @@ struct drm_gem_shmem_object {
 	 */
 	struct sg_table *sgt;
 
-	/**
-	 * @vmap_lock: Protects the vmap address and use count
-	 */
-	struct mutex vmap_lock;
-
 	/**
 	 * @vaddr: Kernel virtual address of the backing memory
 	 */
@@ -109,7 +99,6 @@ struct drm_gem_shmem_object {
 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
 
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
@@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
 		!shmem->base.dma_buf && !shmem->base.import_attach;
 }
 
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
 
 struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
 struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
-- 
2.37.3
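A usage note: reclaim-context callers must not block on the reservation
lock, so shrinkers are expected to use the trylock variant, as the
panfrost change above does. A hypothetical sketch of that pattern (the
function name is illustrative, not part of this patch):

/* Hypothetical shrinker-side sketch: skip the object on lock contention
 * instead of blocking, mirroring panfrost_gem_purge() above.
 */
static bool example_try_purge(struct drm_gem_shmem_object *shmem)
{
        if (!dma_resv_trylock(shmem->base.resv))
                return false;

        if (drm_gem_shmem_is_purgeable(shmem))
                drm_gem_shmem_purge(shmem);

        dma_resv_unlock(shmem->base.resv);

        return true;
}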