On 10/11/2025 15:49, Loïc Molinari wrote:
Make use of the new drm_gem_huge_mnt_create() and
drm_gem_has_huge_mnt() helpers to avoid code duplication. Now that
it's just a few lines long, the single function in i915_gemfs.c is
moved into i915_gem_shmem.c.

v3:
- use huge tmpfs mountpoint in drm_device
- move i915_gemfs.c into i915_gem_shmem.c

v4:
- clean up mountpoint creation error handling

v5:
- use drm_gem_has_huge_mnt() helper

v7:
- include <drm/drm_print.h> in i915_gem_shmem.c

Signed-off-by: Loïc Molinari <[email protected]>
---
  drivers/gpu/drm/i915/Makefile                 |  3 +-
  drivers/gpu/drm/i915/gem/i915_gem_shmem.c     | 48 +++++++++----
  drivers/gpu/drm/i915/gem/i915_gemfs.c         | 71 -------------------
  drivers/gpu/drm/i915/gem/i915_gemfs.h         | 14 ----
  .../gpu/drm/i915/gem/selftests/huge_pages.c   | 11 +--
  drivers/gpu/drm/i915/i915_drv.h               |  5 --
  6 files changed, 42 insertions(+), 110 deletions(-)
  delete mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.c
  delete mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 84ec79b64960..b5a8c0a6b747 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -169,8 +169,7 @@ gem-y += \
        gem/i915_gem_ttm_move.o \
        gem/i915_gem_ttm_pm.o \
        gem/i915_gem_userptr.o \
-       gem/i915_gem_wait.o \
-       gem/i915_gemfs.o
+       gem/i915_gem_wait.o
  i915-y += \
        $(gem-y) \
        i915_active.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 26dda55a07ff..9bba6f8cdee2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -9,14 +9,16 @@
  #include <linux/uio.h>
#include <drm/drm_cache.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_region.h"
  #include "i915_drv.h"
  #include "i915_gem_object.h"
  #include "i915_gem_tiling.h"
-#include "i915_gemfs.h"
  #include "i915_scatterlist.h"
  #include "i915_trace.h"
+#include "i915_utils.h"
/*
   * Move folios to appropriate lru and release the batch, decrementing the
@@ -515,9 +517,9 @@ static int __create_shmem(struct drm_i915_private *i915,
        if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
                return -E2BIG;
- if (i915->mm.gemfs)
-               filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
-                                                flags);
+       if (drm_gem_has_huge_mnt(&i915->drm))
+               filp = shmem_file_setup_with_mnt(i915->drm.huge_mnt, "i915",
+                                                size, flags);
        else
                filp = shmem_file_setup("i915", size, flags);
        if (IS_ERR(filp))
@@ -644,21 +646,41 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
static int init_shmem(struct intel_memory_region *mem)
  {
-       i915_gemfs_init(mem->i915);
-       intel_memory_region_set_name(mem, "system");
+       struct drm_i915_private *i915 = mem->i915;
+       int err;
- return 0; /* We have fallback to the kernel mnt if gemfs init failed. */
-}
+       /*
+        * By creating our own shmemfs mountpoint, we can pass in
+        * mount flags that better match our usecase.
+        *
+        * One example, although it is probably better with a per-file
+        * control, is selecting huge page allocations ("huge=within_size").
+        * However, we only do so on platforms which benefit from it, or to
+        * offset the overhead of iommu lookups, where with latter it is a net
+        * win even on platforms which would otherwise see some performance
+        * regressions such a slow reads issue on Broadwell and Skylake.
+        */
-static int release_shmem(struct intel_memory_region *mem)
-{
-       i915_gemfs_fini(mem->i915);
-       return 0;
+       if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915))
+               goto no_thp;
+
+       err = drm_gem_huge_mnt_create(&i915->drm, "within_size");
+       if (drm_gem_has_huge_mnt(&i915->drm))
+               drm_info(&i915->drm, "Using Transparent Hugepages\n");
+       else if (err)
+               drm_notice(&i915->drm,
+                          "Transparent Hugepage support is recommended for optimal performance%s\n",
+                          GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
+                                                     " when IOMMU is enabled!");


When CONFIG_TRANSPARENT_HUGEPAGE=n, drm_gem_huge_mnt_create() will return 0 and drm_gem_has_huge_mnt() will return false.

So looking at it from the i915 perspective, the notice message above is currently logged in that case, but after this change it will not be.

To preserve the current behaviour it might be that "else if (err)" just needs to become a plain "else"?
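
I.e. roughly something like this (untested sketch, just the hunk from above with the err check dropped):

	err = drm_gem_huge_mnt_create(&i915->drm, "within_size");
	if (drm_gem_has_huge_mnt(&i915->drm))
		drm_info(&i915->drm, "Using Transparent Hugepages\n");
	else	/* also reached when CONFIG_TRANSPARENT_HUGEPAGE=n and err == 0 */
		drm_notice(&i915->drm,
			   "Transparent Hugepage support is recommended for optimal performance%s\n",
			   GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
						      " when IOMMU is enabled!");

With that, err may end up unused and could perhaps be dropped as well.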

Regards,

Tvrtko

+
+ no_thp:
+       intel_memory_region_set_name(mem, "system");
+
+       return 0; /* We have fallback to the kernel mnt if huge mnt failed. */
  }
static const struct intel_memory_region_ops shmem_region_ops = {
        .init = init_shmem,
-       .release = release_shmem,
        .init_object = shmem_object_init,
  };
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
deleted file mode 100644
index 1f1290214031..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2017 Intel Corporation
- */
-
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/fs_context.h>
-
-#include <drm/drm_print.h>
-
-#include "i915_drv.h"
-#include "i915_gemfs.h"
-#include "i915_utils.h"
-
-void i915_gemfs_init(struct drm_i915_private *i915)
-{
-       struct file_system_type *type;
-       struct fs_context *fc;
-       struct vfsmount *gemfs;
-       int ret;
-
-       /*
-        * By creating our own shmemfs mountpoint, we can pass in
-        * mount flags that better match our usecase.
-        *
-        * One example, although it is probably better with a per-file
-        * control, is selecting huge page allocations ("huge=within_size").
-        * However, we only do so on platforms which benefit from it, or to
-        * offset the overhead of iommu lookups, where with latter it is a net
-        * win even on platforms which would otherwise see some performance
-        * regressions such a slow reads issue on Broadwell and Skylake.
-        */
-
-       if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915))
-               return;
-
-       if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
-               goto err;
-
-       type = get_fs_type("tmpfs");
-       if (!type)
-               goto err;
-
-       fc = fs_context_for_mount(type, SB_KERNMOUNT);
-       if (IS_ERR(fc))
-               goto err;
-       ret = vfs_parse_fs_string(fc, "source", "tmpfs");
-       if (!ret)
-               ret = vfs_parse_fs_string(fc, "huge", "within_size");
-       if (!ret)
-               gemfs = fc_mount_longterm(fc);
-       put_fs_context(fc);
-       if (ret)
-               goto err;
-
-       i915->mm.gemfs = gemfs;
-       drm_info(&i915->drm, "Using Transparent Hugepages\n");
-       return;
-
-err:
-       drm_notice(&i915->drm,
-                  "Transparent Hugepage support is recommended for optimal performance%s\n",
-                  GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
-                                             " when IOMMU is enabled!");
-}
-
-void i915_gemfs_fini(struct drm_i915_private *i915)
-{
-       kern_unmount(i915->mm.gemfs);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
deleted file mode 100644
index 16d4333c9a4e..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2017 Intel Corporation
- */
-
-#ifndef __I915_GEMFS_H__
-#define __I915_GEMFS_H__
-
-struct drm_i915_private;
-
-void i915_gemfs_init(struct drm_i915_private *i915);
-void i915_gemfs_fini(struct drm_i915_private *i915);
-
-#endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index bd08605a1611..2b9f7d86b46e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1316,7 +1316,7 @@ typedef struct drm_i915_gem_object *
static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
  {
-       return i915->mm.gemfs && has_transparent_hugepage();
+       return drm_gem_has_huge_mnt(&i915->drm);
  }
static struct drm_i915_gem_object *
@@ -1761,7 +1761,8 @@ static int igt_tmpfs_fallback(void *arg)
        struct drm_i915_private *i915 = arg;
        struct i915_address_space *vm;
        struct i915_gem_context *ctx;
-       struct vfsmount *gemfs = i915->mm.gemfs;
+       struct vfsmount *huge_mnt =
+               drm_gem_has_huge_mnt(&i915->drm) ? i915->drm.huge_mnt : NULL;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        struct file *file;
@@ -1782,10 +1783,10 @@ static int igt_tmpfs_fallback(void *arg)
        /*
         * Make sure that we don't burst into a ball of flames upon falling back
         * to tmpfs, which we rely on if on the off-chance we encounter a failure
-        * when setting up gemfs.
+        * when setting up a huge mountpoint.
         */
- i915->mm.gemfs = NULL;
+       i915->drm.huge_mnt = NULL;
obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
@@ -1819,7 +1820,7 @@ static int igt_tmpfs_fallback(void *arg)
  out_put:
        i915_gem_object_put(obj);
  out_restore:
-       i915->mm.gemfs = gemfs;
+       i915->drm.huge_mnt = huge_mnt;
i915_vm_put(vm);
  out:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 95f9ddf22ce4..93a5af3de334 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -141,11 +141,6 @@ struct i915_gem_mm {
         */
        atomic_t free_count;
- /**
-        * tmpfs instance used for shmem backed objects
-        */
-       struct vfsmount *gemfs;
-
        struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
struct notifier_block oom_notifier;
