> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -22,6 +22,8 @@
>   *
>   */
> 
> +#include <linux/sched/mm.h>
> +
>  #include "display/intel_frontbuffer.h"
>  #include "gt/intel_gt.h"
>  #include "i915_drv.h"
> @@ -52,6 +54,14 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>  {
>       __mutex_init(&obj->mm.lock, "obj->mm.lock", key);
> 
> +     if (IS_ENABLED(CONFIG_LOCKDEP)) {
> +             mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
> +             fs_reclaim_acquire(GFP_KERNEL);
> +             might_lock(&obj->mm.lock);
> +             fs_reclaim_release(GFP_KERNEL);
> +             mutex_unlock(&obj->mm.lock);
> +     }
> +

I looked at the upstream code in drm-tip; I see the other changes, but not the
hunk above. Is this correct?
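
For context, my reading of that hunk is the usual lockdep "priming" trick:
take the locks once at init time in the order the driver intends to use them
later, so lockdep records the dependency chain (mm.lock[GET_PAGES] ->
fs_reclaim -> mm.lock[0]) up front and reports any violation even if the
shrinker never actually runs against this object. A minimal sketch of the
pattern, using hypothetical names (my_lock, MY_SUBCLASS_INIT,
my_prime_lockdep) rather than the i915 code:

    #include <linux/gfp.h>
    #include <linux/lockdep.h>
    #include <linux/mutex.h>
    #include <linux/sched/mm.h>

    enum { MY_SUBCLASS_INIT = 1 };          /* hypothetical subclass */

    static DEFINE_MUTEX(my_lock);           /* hypothetical lock */

    static void my_prime_lockdep(void)
    {
            if (!IS_ENABLED(CONFIG_LOCKDEP))
                    return;

            /* "allocation" side: hold the lock at the init-time subclass */
            mutex_lock_nested(&my_lock, MY_SUBCLASS_INIT);
            /* pretend we enter direct reclaim while holding it */
            fs_reclaim_acquire(GFP_KERNEL);
            /* reclaim may want the lock again, at subclass 0 */
            might_lock(&my_lock);
            fs_reclaim_release(GFP_KERNEL);
            mutex_unlock(&my_lock);
    }

Keeping the whole dance under IS_ENABLED(CONFIG_LOCKDEP) makes it a no-op on
production configs.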

--CQ


>       spin_lock_init(&obj->vma.lock);
>       INIT_LIST_HEAD(&obj->vma.list);
> 
> @@ -186,7 +196,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
>               GEM_BUG_ON(!list_empty(&obj->lut_list));
> 
>               atomic_set(&obj->mm.pages_pin_count, 0);
> -             __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +             __i915_gem_object_put_pages(obj);
>               GEM_BUG_ON(i915_gem_object_has_pages(obj));
>               bitmap_free(obj->bit_17);
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index 458cd51331f1..edaf7126a84d 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -319,11 +319,22 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
> 
>  enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
>       I915_MM_NORMAL = 0,
> -     I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
> +     /*
> +      * Only used by struct_mutex, when called "recursively" from
> +      * direct-reclaim-esque. Safe because there is only every one
> +      * struct_mutex in the entire system.
> +      */
> +     I915_MM_SHRINKER = 1,
> +     /*
> +      * Used for obj->mm.lock when allocating pages. Safe because the object
> +      * isn't yet on any LRU, and therefore the shrinker can't deadlock on
> +      * it. As soon as the object has pages, obj->mm.lock nests within
> +      * fs_reclaim.
> +      */
> +     I915_MM_GET_PAGES = 1,
>  };
> 
> -int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
> -                             enum i915_mm_subclass subclass);
> +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
>  void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
>  void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index 96008374a412..15f8297dc34e 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -162,7 +162,11 @@ struct drm_i915_gem_object {
>       atomic_t bind_count;
> 
>       struct {
> -             struct mutex lock; /* protects the pages and their use */
> +             /*
> +              * Protects the pages and their use. Do not use directly, but
> +              * instead go through the pin/unpin interfaces.
> +              */
> +             struct mutex lock;
>               atomic_t pages_pin_count;
>               atomic_t shrink_pin;
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
> index 29f4c2850745..f402c2c415c2 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
> @@ -106,7 +106,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>  {
>       int err;
> 
> -     err = mutex_lock_interruptible(&obj->mm.lock);
> +     err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
>       if (err)
>               return err;
> 
> @@ -190,8 +190,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
>       return pages;
>  }
> 
> -int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
> -                             enum i915_mm_subclass subclass)
> +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>  {
>       struct sg_table *pages;
>       int err;
> @@ -202,7 +201,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
>       GEM_BUG_ON(atomic_read(&obj->bind_count));
> 
>       /* May be called by shrinker from within get_pages() (on another bo) */
> -     mutex_lock_nested(&obj->mm.lock, subclass);
> +     mutex_lock(&obj->mm.lock);
>       if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
>               err = -EBUSY;
>               goto unlock;
> @@ -308,7 +307,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
>       if (!i915_gem_object_type_has(obj, flags))
>               return ERR_PTR(-ENXIO);
> 
> -     err = mutex_lock_interruptible(&obj->mm.lock);
> +     err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
>       if (err)
>               return ERR_PTR(err);
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
> index 8043ff63d73f..b1b7c1b3038a 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
> @@ -164,7 +164,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
>       if (err)
>               return err;
> 
> -     mutex_lock(&obj->mm.lock);
> +     mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
> 
>       if (obj->mm.madv != I915_MADV_WILLNEED) {
>               err = -EFAULT;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> index fd3ce6da8497..066b3df677e8 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> @@ -57,7 +57,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
>               flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
> 
>       if (i915_gem_object_unbind(obj, flags) == 0)
> -             __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
> +             __i915_gem_object_put_pages(obj);
> 
>       return !i915_gem_object_has_pages(obj);
>  }
> 
> @@ -209,8 +209,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
> 
>                       if (unsafe_drop_pages(obj, shrink)) {
>                               /* May arrive from get_pages on another bo */
> -                             mutex_lock_nested(&obj->mm.lock,
> -                                               I915_MM_SHRINKER);
> +                             mutex_lock(&obj->mm.lock);
>                               if (!i915_gem_object_has_pages(obj)) {
>                                       try_to_writeback(obj, shrink);
>                                       count += obj->base.size >> PAGE_SHIFT;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> index 1e045c337044..ee65c6acf0e2 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> @@ -131,7 +131,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
>               ret = i915_gem_object_unbind(obj,
>                                            I915_GEM_OBJECT_UNBIND_ACTIVE);
>               if (ret == 0)
> -                     ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
> +                     ret = __i915_gem_object_put_pages(obj);
>               i915_gem_object_put(obj);
>               if (ret)
>                       return ret;
> @@ -483,7 +483,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>               }
>       }
> 
> -     mutex_lock(&obj->mm.lock);
> +     mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
>       if (obj->userptr.work == &work->work) {
>               struct sg_table *pages = ERR_PTR(ret);
> 
> diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> index 688c49a24f32..5c9583349077 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> @@ -517,7 +517,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
>                       i915_vma_unpin(vma);
>                       i915_vma_close(vma);
> 
> -                     __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +                     __i915_gem_object_put_pages(obj);
>                       i915_gem_object_put(obj);
>               }
>       }
> @@ -650,7 +650,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
>               i915_vma_close(vma);
> 
>               i915_gem_object_unpin_pages(obj);
> -             __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +             __i915_gem_object_put_pages(obj);
>               i915_gem_object_put(obj);
>       }
> 
> @@ -678,7 +678,7 @@ static void close_object_list(struct list_head *objects,
> 
>               list_del(&obj->st_link);
>               i915_gem_object_unpin_pages(obj);
> -             __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +             __i915_gem_object_put_pages(obj);
>               i915_gem_object_put(obj);
>       }
>  }
> @@ -948,7 +948,7 @@ static int igt_mock_ppgtt_64K(void *arg)
>                       i915_vma_close(vma);
> 
>                       i915_gem_object_unpin_pages(obj);
> -                     __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +                     __i915_gem_object_put_pages(obj);
>                       i915_gem_object_put(obj);
>               }
>       }
> @@ -1301,7 +1301,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
>                       }
> 
>                       i915_gem_object_unpin_pages(obj);
> -                     __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +                     __i915_gem_object_put_pages(obj);
>                       i915_gem_object_put(obj);
>               }
>       }
> @@ -1442,7 +1442,7 @@ static int igt_ppgtt_smoke_huge(void *arg)
>               }
>  out_unpin:
>               i915_gem_object_unpin_pages(obj);
> -             __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +             __i915_gem_object_put_pages(obj);
>  out_put:
>               i915_gem_object_put(obj);
> 
> @@ -1530,7 +1530,7 @@ static int igt_ppgtt_sanity_check(void *arg)
>                       err = igt_write_huge(ctx, obj);
> 
>                       i915_gem_object_unpin_pages(obj);
> -                     __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +                     __i915_gem_object_put_pages(obj);
>                       i915_gem_object_put(obj);
> 
>                       if (err) {
> diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> index 19e1cca8f143..95d609abd39b 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> @@ -32,7 +32,7 @@ static void close_objects(struct intel_memory_region *mem,
>               if (i915_gem_object_has_pinned_pages(obj))
>                       i915_gem_object_unpin_pages(obj);
>               /* No polluting the memory region between tests */
> -             __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +             __i915_gem_object_put_pages(obj);
>               list_del(&obj->st_link);
>               i915_gem_object_put(obj);
>       }
> @@ -122,7 +122,7 @@ igt_object_create(struct intel_memory_region *mem,
>  static void igt_object_release(struct drm_i915_gem_object *obj)
>  {
>       i915_gem_object_unpin_pages(obj);
> -     __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
> +     __i915_gem_object_put_pages(obj);
>       list_del(&obj->st_link);
>       i915_gem_object_put(obj);
>  }
> --
> 2.24.0.rc2
