Make use of the new drm_gem_huge_mnt_create() and
drm_gem_get_huge_mnt() helpers to avoid code duplication. Now that
it's just a few lines long, the single function in i915_gemfs.c is
moved into i915_gem_shmem.c.

v3:
- use huge tmpfs mountpoint in drm_device
- move i915_gemfs.c into i915_gem_shmem.c

v4:
- clean up mountpoint creation error handling

v5:
- use drm_gem_has_huge_mnt() helper

v7:
- include <drm/drm_print.h> in i915_gem_shmem.c

v8:
- keep logging notice message with CONFIG_TRANSPARENT_HUGEPAGE=n
- don't access huge_mnt field with CONFIG_TRANSPARENT_HUGEPAGE=n

v9:
- replace drm_gem_has_huge_mnt() by drm_gem_get_huge_mnt()
- remove useless ternary op test in selftests/huge_pages.c

v12:
- fix layering violation in selftests (Tvrtko)
- fix incorrect filename in commit message

v13:
- add Tvrtko A-b

Signed-off-by: Loïc Molinari <loic.molinari@collabora.com>
Acked-by: Tvrtko Ursulin <tursulin@ursulin.net>
---
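Note for reviewers skimming the diff: below is a condensed, illustrative
sketch of the shmem allocation path this patch ends up with. It assumes
drm_gem_get_huge_mnt() (added earlier in this series) returns the
drm_device's huge tmpfs mount, or NULL when none was created; the wrapper
name i915_shmem_file_sketch() is made up for this sketch and does not
exist in the patch.

#include <linux/shmem_fs.h>
#include <drm/drm_gem.h>

#include "i915_drv.h"
#include "gem/i915_gem_object_types.h"

/* Illustrative only; condensed from the __create_shmem() hunk below. */
static struct file *i915_shmem_file_sketch(struct drm_i915_private *i915,
					   resource_size_t size,
					   unsigned int flags)
{
	unsigned long shmem_flags = VM_NORESERVE;
	struct vfsmount *huge_mnt = drm_gem_get_huge_mnt(&i915->drm);

	/*
	 * I915_BO_ALLOC_NOTHP (new in this patch) lets callers such as the
	 * igt_tmpfs_fallback() selftest opt out of the huge tmpfs mount.
	 */
	if (!(flags & I915_BO_ALLOC_NOTHP) && huge_mnt)
		return shmem_file_setup_with_mnt(huge_mnt, "i915", size,
						 shmem_flags);

	/* Fall back to the kernel-internal tmpfs mount. */
	return shmem_file_setup("i915", size, shmem_flags);
}

The real patch keeps this logic inside __create_shmem() and threads the
allocation flags through shmem_object_init(), so the selftest can request
a plain tmpfs object via I915_BO_ALLOC_NOTHP instead of poking at driver
internals.
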
drivers/gpu/drm/i915/Makefile | 3 +-
.../gpu/drm/i915/gem/i915_gem_object_types.h | 9 +--
drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 58 ++++++++++-----
drivers/gpu/drm/i915/gem/i915_gemfs.c | 71 -------------------
drivers/gpu/drm/i915/gem/i915_gemfs.h | 14 ----
.../gpu/drm/i915/gem/selftests/huge_pages.c | 15 +---
drivers/gpu/drm/i915/i915_drv.h | 5 --
7 files changed, 50 insertions(+), 125 deletions(-)
delete mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.c
delete mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.h
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 84ec79b64960..b5a8c0a6b747 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -169,8 +169,7 @@ gem-y += \
gem/i915_gem_ttm_move.o \
gem/i915_gem_ttm_pm.o \
gem/i915_gem_userptr.o \
- gem/i915_gem_wait.o \
- gem/i915_gemfs.o
+ gem/i915_gem_wait.o
i915-y += \
$(gem-y) \
i915_active.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 64600aa8227f..f94409e8ec4c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -348,12 +348,13 @@ struct drm_i915_gem_object {
*/
#define I915_BO_ALLOC_GPU_ONLY BIT(6)
#define I915_BO_ALLOC_CCS_AUX BIT(7)
+#define I915_BO_ALLOC_NOTHP BIT(8)
/*
* Object is allowed to retain its initial data and will not be cleared on first
* access if used along with I915_BO_ALLOC_USER. This is mainly to keep
* preallocated framebuffer data intact while transitioning it to i915drmfb.
*/
-#define I915_BO_PREALLOC BIT(8)
+#define I915_BO_PREALLOC BIT(9)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_CPU_CLEAR | \
@@ -363,9 +364,9 @@ struct drm_i915_gem_object {
I915_BO_ALLOC_GPU_ONLY | \
I915_BO_ALLOC_CCS_AUX | \
I915_BO_PREALLOC)
-#define I915_BO_READONLY BIT(9)
-#define I915_TILING_QUIRK_BIT 10 /* unknown swizzling; do not release! */
-#define I915_BO_PROTECTED BIT(11)
+#define I915_BO_READONLY BIT(10)
+#define I915_TILING_QUIRK_BIT 11 /* unknown swizzling; do not release! */
+#define I915_BO_PROTECTED BIT(12)
/**
* @mem_flags - Mutable placement-related flags
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 26dda55a07ff..6ad1d6f99363 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -9,14 +9,16 @@
#include <linux/uio.h>
#include <drm/drm_cache.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
-#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
+#include "i915_utils.h"
/*
* Move folios to appropriate lru and release the batch, decrementing the
@@ -494,9 +496,11 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
static int __create_shmem(struct drm_i915_private *i915,
struct drm_gem_object *obj,
- resource_size_t size)
+ resource_size_t size,
+ unsigned int flags)
{
- unsigned long flags = VM_NORESERVE;
+ unsigned long shmem_flags = VM_NORESERVE;
+ struct vfsmount *huge_mnt;
struct file *filp;
drm_gem_private_object_init(&i915->drm, obj, size);
@@ -515,11 +519,12 @@ static int __create_shmem(struct drm_i915_private *i915,
if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
return -E2BIG;
- if (i915->mm.gemfs)
- filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
- flags);
+ huge_mnt = drm_gem_get_huge_mnt(&i915->drm);
+ if (!(flags & I915_BO_ALLOC_NOTHP) && huge_mnt)
+ filp = shmem_file_setup_with_mnt(huge_mnt, "i915", size,
+ shmem_flags);
else
- filp = shmem_file_setup("i915", size, flags);
+ filp = shmem_file_setup("i915", size, shmem_flags);
if (IS_ERR(filp))
return PTR_ERR(filp);
@@ -548,7 +553,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
gfp_t mask;
int ret;
- ret = __create_shmem(i915, &obj->base, size);
+ ret = __create_shmem(i915, &obj->base, size, flags);
if (ret)
return ret;
@@ -644,21 +649,40 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
static int init_shmem(struct intel_memory_region *mem)
{
- i915_gemfs_init(mem->i915);
- intel_memory_region_set_name(mem, "system");
+ struct drm_i915_private *i915 = mem->i915;
- return 0; /* We have fallback to the kernel mnt if gemfs init failed. */
-}
+ /*
+ * By creating our own shmemfs mountpoint, we can pass in
+ * mount flags that better match our usecase.
+ *
+ * One example, although it is probably better with a per-file
+ * control, is selecting huge page allocations ("huge=within_size").
+ * However, we only do so on platforms which benefit from it, or to
+ * offset the overhead of iommu lookups, where with the latter it is a
+ * net win even on platforms which would otherwise see some performance
+ * regressions, such as the slow reads issue on Broadwell and Skylake.
+ */
-static int release_shmem(struct intel_memory_region *mem)
-{
- i915_gemfs_fini(mem->i915);
- return 0;
+ if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915))
+ goto no_thp;
+
+ drm_gem_huge_mnt_create(&i915->drm, "within_size");
+ if (drm_gem_get_huge_mnt(&i915->drm))
+ drm_info(&i915->drm, "Using Transparent Hugepages\n");
+ else
+ drm_notice(&i915->drm,
+ "Transparent Hugepage support is recommended for optimal performance%s\n",
+ GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
+ " when IOMMU is enabled!");
+
+ no_thp:
+ intel_memory_region_set_name(mem, "system");
+
+ return 0; /* We fall back to the kernel mnt if huge mnt creation failed. */
}
static const struct intel_memory_region_ops shmem_region_ops = {
.init = init_shmem,
- .release = release_shmem,
.init_object = shmem_object_init,
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
deleted file mode 100644
index 1f1290214031..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2017 Intel Corporation
- */
-
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/fs_context.h>
-
-#include <drm/drm_print.h>
-
-#include "i915_drv.h"
-#include "i915_gemfs.h"
-#include "i915_utils.h"
-
-void i915_gemfs_init(struct drm_i915_private *i915)
-{
- struct file_system_type *type;
- struct fs_context *fc;
- struct vfsmount *gemfs;
- int ret;
-
- /*
- * By creating our own shmemfs mountpoint, we can pass in
- * mount flags that better match our usecase.
- *
- * One example, although it is probably better with a per-file
- * control, is selecting huge page allocations ("huge=within_size").
- * However, we only do so on platforms which benefit from it, or to
- * offset the overhead of iommu lookups, where with latter it is a net
- * win even on platforms which would otherwise see some performance
- * regressions such a slow reads issue on Broadwell and Skylake.
- */
-
- if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915))
- return;
-
- if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
- goto err;
-
- type = get_fs_type("tmpfs");
- if (!type)
- goto err;
-
- fc = fs_context_for_mount(type, SB_KERNMOUNT);
- if (IS_ERR(fc))
- goto err;
- ret = vfs_parse_fs_string(fc, "source", "tmpfs");
- if (!ret)
- ret = vfs_parse_fs_string(fc, "huge", "within_size");
- if (!ret)
- gemfs = fc_mount_longterm(fc);
- put_fs_context(fc);
- if (ret)
- goto err;
-
- i915->mm.gemfs = gemfs;
- drm_info(&i915->drm, "Using Transparent Hugepages\n");
- return;
-
-err:
- drm_notice(&i915->drm,
- "Transparent Hugepage support is recommended for optimal performance%s\n",
- GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
- " when IOMMU is enabled!");
-}
-
-void i915_gemfs_fini(struct drm_i915_private *i915)
-{
- kern_unmount(i915->mm.gemfs);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
deleted file mode 100644
index 16d4333c9a4e..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2017 Intel Corporation
- */
-
-#ifndef __I915_GEMFS_H__
-#define __I915_GEMFS_H__
-
-struct drm_i915_private;
-
-void i915_gemfs_init(struct drm_i915_private *i915);
-void i915_gemfs_fini(struct drm_i915_private *i915);
-
-#endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index bd08605a1611..02e9bf87f654 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1316,7 +1316,7 @@ typedef struct drm_i915_gem_object *
static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
- return i915->mm.gemfs && has_transparent_hugepage();
+ return !!drm_gem_get_huge_mnt(&i915->drm);
}
static struct drm_i915_gem_object *
@@ -1761,7 +1761,6 @@ static int igt_tmpfs_fallback(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
- struct vfsmount *gemfs = i915->mm.gemfs;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
struct file *file;
@@ -1779,15 +1778,8 @@ static int igt_tmpfs_fallback(void *arg)
}
vm = i915_gem_context_get_eb_vm(ctx);
- /*
- * Make sure that we don't burst into a ball of flames upon falling back
- * to tmpfs, which we rely on if on the off-chance we encounter a failure
- * when setting up gemfs.
- */
-
- i915->mm.gemfs = NULL;
-
- obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+ PAGE_SIZE, 0, I915_BO_ALLOC_NOTHP);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_restore;
@@ -1819,7 +1811,6 @@ static int igt_tmpfs_fallback(void *arg)
out_put:
i915_gem_object_put(obj);
out_restore:
- i915->mm.gemfs = gemfs;
i915_vm_put(vm);
out:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 95f9ddf22ce4..93a5af3de334 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -141,11 +141,6 @@ struct i915_gem_mm {
*/
atomic_t free_count;
- /**
- * tmpfs instance used for shmem backed objects
- */
- struct vfsmount *gemfs;
-
struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
struct notifier_block oom_notifier;
--
2.47.3

On 05/12/2025 19:22, Loïc Molinari wrote:
> Make use of the new drm_gem_huge_mnt_create() and
> drm_gem_get_huge_mnt() helpers to avoid code duplication. Now that
> it's just a few lines long, the single function in i915_gemfs.c is
> moved into i915_gem_shmem.c.
>
> v3:
> - use huge tmpfs mountpoint in drm_device
> - move i915_gemfs.c into i915_gem_shmem.c
>
> v4:
> - clean up mountpoint creation error handling
>
> v5:
> - use drm_gem_has_huge_mnt() helper
>
> v7:
> - include <drm/drm_print.h> in i915_gem_shmem.c
>
> v8:
> - keep logging notice message with CONFIG_TRANSPARENT_HUGEPAGE=n
> - don't access huge_mnt field with CONFIG_TRANSPARENT_HUGEPAGE=n
>
> v9:
> - replace drm_gem_has_huge_mnt() by drm_gem_get_huge_mnt()
> - remove useless ternary op test in selftests/huge_pages.c
>
> v12:
> - fix layering violation in selftests (Tvrtko)
> - fix incorrect filename in commit message
>
> v13:
> - add Tvrtko A-b
>
> Signed-off-by: Loïc Molinari <loic.molinari@collabora.com>
> Acked-by: Tvrtko Ursulin <tursulin@ursulin.net>

Forgot to say - from my point of view it is okay to land this via
drm-misc. I don't think there should be any conflicts in this area.

Regards,

Tvrtko