Make use of the new drm_gem_huge_mnt_create() helper to avoid code
duplication. Now that the remaining logic is just a few lines long, it is
moved from i915_gemfs.c into i915_gem_shmem.c and the i915_gemfs.[ch]
files are deleted.

v3:
- use huge tmpfs mountpoint in drm_device
- move i915_gemfs.c into i915_gem_shmem.c

v4:
- clean up mountpoint creation error handling

Signed-off-by: Loïc Molinari <[email protected]>
---
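For reviewers: below is a rough, hypothetical sketch of what the
drm_gem_huge_mnt_create() helper is assumed to do, reconstructed from the
logic this patch removes from i915_gemfs.c. The authoritative
implementation is the one added to the DRM core earlier in the series;
the convention of returning 0 while leaving drm_device::huge_mnt NULL
when THP is compiled out is inferred from the caller in init_shmem().

/*
 * Hypothetical reconstruction, for illustration only. It mirrors the
 * tmpfs mount dance previously open-coded in i915_gemfs_init().
 */
int drm_gem_huge_mnt_create(struct drm_device *dev, const char *huge_opt)
{
        struct file_system_type *type;
        struct fs_context *fc;
        struct vfsmount *mnt;
        int ret;

        /* Silently fall back to the default kernel mount without THP. */
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return 0;

        type = get_fs_type("tmpfs");
        if (!type)
                return -ENOENT;

        fc = fs_context_for_mount(type, SB_KERNMOUNT);
        if (IS_ERR(fc))
                return PTR_ERR(fc);

        /* Same mount options i915_gemfs_init() used to pass by hand. */
        ret = vfs_parse_fs_string(fc, "source", "tmpfs");
        if (!ret)
                ret = vfs_parse_fs_string(fc, "huge", huge_opt);
        if (!ret) {
                mnt = fc_mount_longterm(fc);
                if (IS_ERR(mnt))
                        ret = PTR_ERR(mnt);
                else
                        dev->huge_mnt = mnt;
        }
        put_fs_context(fc);

        return ret;
}

The DRM core is then presumably responsible for kern_unmount()ing
dev->huge_mnt on device release, which is why release_shmem() and
i915_gemfs_fini() can go away below.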
 drivers/gpu/drm/i915/Makefile                 |  3 +-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c     | 47 +++++++++----
 drivers/gpu/drm/i915/gem/i915_gemfs.c         | 69 -------------------
 drivers/gpu/drm/i915/gem/i915_gemfs.h         | 14 ----
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 10 +--
 drivers/gpu/drm/i915/i915_drv.h               |  5 --
 6 files changed, 40 insertions(+), 108 deletions(-)
 delete mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.c
 delete mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e58c0c158b3a..e22393a7cf6f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -163,8 +163,7 @@ gem-y += \
        gem/i915_gem_ttm_move.o \
        gem/i915_gem_ttm_pm.o \
        gem/i915_gem_userptr.o \
-       gem/i915_gem_wait.o \
-       gem/i915_gemfs.o
+       gem/i915_gem_wait.o
 i915-y += \
        $(gem-y) \
        i915_active.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index b9dae15c1d16..a72d98f1202f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -9,14 +9,15 @@
 #include <linux/uio.h>
 
 #include <drm/drm_cache.h>
+#include <drm/drm_gem.h>
 
 #include "gem/i915_gem_region.h"
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_gem_tiling.h"
-#include "i915_gemfs.h"
 #include "i915_scatterlist.h"
 #include "i915_trace.h"
+#include "i915_utils.h"
 
 /*
  * Move folios to appropriate lru and release the batch, decrementing the
@@ -506,9 +507,9 @@ static int __create_shmem(struct drm_i915_private *i915,
        if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
                return -E2BIG;
 
-       if (i915->mm.gemfs)
-               filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
-                                                flags);
+       if (i915->drm.huge_mnt)
+               filp = shmem_file_setup_with_mnt(i915->drm.huge_mnt, "i915",
+                                                size, flags);
        else
                filp = shmem_file_setup("i915", size, flags);
        if (IS_ERR(filp))
@@ -635,21 +636,41 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
 
 static int init_shmem(struct intel_memory_region *mem)
 {
-       i915_gemfs_init(mem->i915);
-       intel_memory_region_set_name(mem, "system");
+       struct drm_i915_private *i915 = mem->i915;
+       int err;
 
-       return 0; /* We have fallback to the kernel mnt if gemfs init failed. */
-}
+       /*
+        * By creating our own shmemfs mountpoint, we can pass in
+        * mount flags that better match our usecase.
+        *
+        * One example, although it is probably better with a per-file
+        * control, is selecting huge page allocations ("huge=within_size").
+        * However, we only do so on platforms which benefit from it, or to
+        * offset the overhead of iommu lookups, where with the latter it is
+        * a net win even on platforms which would otherwise see performance
+        * regressions, such as the slow reads issue on Broadwell and Skylake.
+        */
 
-static int release_shmem(struct intel_memory_region *mem)
-{
-       i915_gemfs_fini(mem->i915);
-       return 0;
+       if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915))
+               goto no_thp;
+
+       err = drm_gem_huge_mnt_create(&i915->drm, "within_size");
+       if (i915->drm.huge_mnt)
+               drm_info(&i915->drm, "Using Transparent Hugepages\n");
+       else if (err)
+               drm_notice(&i915->drm,
+                          "Transparent Hugepage support is recommended for optimal performance%s\n",
+                          GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
+                                                     " when IOMMU is enabled!");
+
+ no_thp:
+       intel_memory_region_set_name(mem, "system");
+
+       return 0; /* We fall back to the kernel mnt if the huge mnt failed. */
 }
 
 static const struct intel_memory_region_ops shmem_region_ops = {
        .init = init_shmem,
-       .release = release_shmem,
        .init_object = shmem_object_init,
 };
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
deleted file mode 100644
index 8f13ec4ff0d0..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2017 Intel Corporation
- */
-
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/fs_context.h>
-
-#include "i915_drv.h"
-#include "i915_gemfs.h"
-#include "i915_utils.h"
-
-void i915_gemfs_init(struct drm_i915_private *i915)
-{
-       struct file_system_type *type;
-       struct fs_context *fc;
-       struct vfsmount *gemfs;
-       int ret;
-
-       /*
-        * By creating our own shmemfs mountpoint, we can pass in
-        * mount flags that better match our usecase.
-        *
-        * One example, although it is probably better with a per-file
-        * control, is selecting huge page allocations ("huge=within_size").
-        * However, we only do so on platforms which benefit from it, or to
-        * offset the overhead of iommu lookups, where with latter it is a net
-        * win even on platforms which would otherwise see some performance
-        * regressions such a slow reads issue on Broadwell and Skylake.
-        */
-
-       if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915))
-               return;
-
-       if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
-               goto err;
-
-       type = get_fs_type("tmpfs");
-       if (!type)
-               goto err;
-
-       fc = fs_context_for_mount(type, SB_KERNMOUNT);
-       if (IS_ERR(fc))
-               goto err;
-       ret = vfs_parse_fs_string(fc, "source", "tmpfs");
-       if (!ret)
-               ret = vfs_parse_fs_string(fc, "huge", "within_size");
-       if (!ret)
-               gemfs = fc_mount_longterm(fc);
-       put_fs_context(fc);
-       if (ret)
-               goto err;
-
-       i915->mm.gemfs = gemfs;
-       drm_info(&i915->drm, "Using Transparent Hugepages\n");
-       return;
-
-err:
-       drm_notice(&i915->drm,
-                  "Transparent Hugepage support is recommended for optimal performance%s\n",
-                  GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
-                                             " when IOMMU is enabled!");
-}
-
-void i915_gemfs_fini(struct drm_i915_private *i915)
-{
-       kern_unmount(i915->mm.gemfs);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
deleted file mode 100644
index 16d4333c9a4e..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2017 Intel Corporation
- */
-
-#ifndef __I915_GEMFS_H__
-#define __I915_GEMFS_H__
-
-struct drm_i915_private;
-
-void i915_gemfs_init(struct drm_i915_private *i915);
-void i915_gemfs_fini(struct drm_i915_private *i915);
-
-#endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index bd08605a1611..b41a38af63fd 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1316,7 +1316,7 @@ typedef struct drm_i915_gem_object *
 
 static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
 {
-       return i915->mm.gemfs && has_transparent_hugepage();
+       return i915->drm.huge_mnt && has_transparent_hugepage();
 }
 
 static struct drm_i915_gem_object *
@@ -1761,7 +1761,7 @@ static int igt_tmpfs_fallback(void *arg)
        struct drm_i915_private *i915 = arg;
        struct i915_address_space *vm;
        struct i915_gem_context *ctx;
-       struct vfsmount *gemfs = i915->mm.gemfs;
+       struct vfsmount *huge_mnt = i915->drm.huge_mnt;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        struct file *file;
@@ -1782,10 +1782,10 @@ static int igt_tmpfs_fallback(void *arg)
        /*
         * Make sure that we don't burst into a ball of flames upon falling back
         * to tmpfs, which we rely on if on the off-chance we encounter a failure
-        * when setting up gemfs.
+        * when setting up a huge mountpoint.
         */
 
-       i915->mm.gemfs = NULL;
+       i915->drm.huge_mnt = NULL;
 
        obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
@@ -1819,7 +1819,7 @@ static int igt_tmpfs_fallback(void *arg)
 out_put:
        i915_gem_object_put(obj);
 out_restore:
-       i915->mm.gemfs = gemfs;
+       i915->drm.huge_mnt = huge_mnt;
 
        i915_vm_put(vm);
 out:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6a768aad8edd..1bfee23e64a3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -141,11 +141,6 @@ struct i915_gem_mm {
         */
        atomic_t free_count;
 
-       /**
-        * tmpfs instance used for shmem backed objects
-        */
-       struct vfsmount *gemfs;
-
        struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
 
        struct notifier_block oom_notifier;
-- 
2.47.3
