[email protected] writes:

> From: Ankitprasad Sharma <[email protected]>
>
> In pwrite_fast, map an object page by page if obj_ggtt_pin fails. First,
> we try a nonblocking pin for the whole object (since that is fastest if
> reused), then failing that we try to grab one page in the mappable
> aperture. It also allows us to handle objects larger than the mappable
> aperture (e.g. if we need to pwrite with vGPU restricting the aperture
> to a measly 8MiB or something like that).
>
> v2: Pin pages before starting pwrite, Combined duplicate loops (Chris)
>
> v3: Combined loops based on local patch by Chris (Chris)
>
> Signed-off-by: Ankitprasad Sharma <[email protected]>
> Signed-off-by: Chris Wilson <[email protected]>
> ---
>  drivers/gpu/drm/i915/i915_gem.c | 75 +++++++++++++++++++++++++++++------------
>  1 file changed, 53 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index f1e3fde..9d2e6e3 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -760,20 +760,33 @@ fast_user_write(struct io_mapping *mapping,
>   * user into the GTT, uncached.
>   */
>  static int
> -i915_gem_gtt_pwrite_fast(struct drm_device *dev,
> +i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
>                        struct drm_i915_gem_object *obj,
>                        struct drm_i915_gem_pwrite *args,
>                        struct drm_file *file)
>  {
> -     struct drm_i915_private *dev_priv = dev->dev_private;
> -     ssize_t remain;
> -     loff_t offset, page_base;
> +     struct drm_mm_node node;
> +     uint64_t remain, offset;
>       char __user *user_data;
> -     int page_offset, page_length, ret;
> +     int ret;
>  
>       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
> -     if (ret)
> -             goto out;
> +     if (ret) {
> +             memset(&node, 0, sizeof(node));
> +             ret = drm_mm_insert_node_in_range_generic(&i915->gtt.base.mm,
> +                                                       &node, 4096, 0,
> +                                                       I915_CACHE_NONE, 0,
> +                                                       i915->gtt.mappable_end,
> +                                                       DRM_MM_SEARCH_DEFAULT,
> +                                                       DRM_MM_CREATE_DEFAULT);
> +             if (ret)
> +                     goto out;
> +
> +             i915_gem_object_pin_pages(obj);
> +     } else {
> +             node.start = i915_gem_obj_ggtt_offset(obj);
> +             node.allocated = false;
> +     }
>  
>       ret = i915_gem_object_set_to_gtt_domain(obj, true);
>       if (ret)
> @@ -783,31 +796,39 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
>       if (ret)
>               goto out_unpin;
>  
> -     user_data = to_user_ptr(args->data_ptr);
> -     remain = args->size;
> -
> -     offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
> -
>       intel_fb_obj_invalidate(obj, ORIGIN_GTT);
> +     obj->dirty = true;
>  
> -     while (remain > 0) {
> +     user_data = to_user_ptr(args->data_ptr);
> +     offset = args->offset;
> +     remain = args->size;
> +     while (remain) {
>               /* Operation in this page
>                *
>                * page_base = page offset within aperture
>                * page_offset = offset within page
>                * page_length = bytes to copy for this page
>                */
> -             page_base = offset & PAGE_MASK;
> -             page_offset = offset_in_page(offset);
> -             page_length = remain;
> -             if ((page_offset + remain) > PAGE_SIZE)
> -                     page_length = PAGE_SIZE - page_offset;
> -
> +             u32 page_base = node.start;

You truncate here, as node.start is a 64-bit offset into the vm area.

-Mika


> +             unsigned page_offset = offset_in_page(offset);
> +             unsigned page_length = PAGE_SIZE - page_offset;
> +             page_length = remain < page_length ? remain : page_length;
> +             if (node.allocated) {
> +                     wmb();
> +                     i915->gtt.base.insert_page(&i915->gtt.base,
> +                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
> +                                                node.start,
> +                                                I915_CACHE_NONE,
> +                                                0);
> +                     wmb();
> +             } else {
> +                     page_base += offset & PAGE_MASK;
> +             }
>               /* If we get a fault while copying data, then (presumably) our
>                * source page isn't available.  Return the error and we'll
>                * retry in the slow path.
>                */
> -             if (fast_user_write(dev_priv->gtt.mappable, page_base,
> +             if (fast_user_write(i915->gtt.mappable, page_base,
>                                   page_offset, user_data, page_length)) {
>                       ret = -EFAULT;
>                       goto out_flush;
> @@ -821,7 +842,17 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
>  out_flush:
>       intel_fb_obj_flush(obj, false, ORIGIN_GTT);
>  out_unpin:
> -     i915_gem_object_ggtt_unpin(obj);
> +     if (node.allocated) {
> +             wmb();
> +             i915->gtt.base.clear_range(&i915->gtt.base,
> +                             node.start, node.size,
> +                             true);
> +             drm_mm_remove_node(&node);
> +             i915_gem_object_unpin_pages(obj);
> +     }
> +     else {
> +             i915_gem_object_ggtt_unpin(obj);
> +     }
>  out:
>       return ret;
>  }
> @@ -1086,7 +1117,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
>       if (obj->tiling_mode == I915_TILING_NONE &&
>           obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
>           cpu_write_needs_clflush(obj)) {
> -             ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
> +             ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
>               /* Note that the gtt paths might fail with non-page-backed user
>                * pointers (e.g. gtt mappings when moving data between
>                * textures). Fallback to the shmem path in that case. */
> -- 
> 1.9.1
>
> _______________________________________________
> Intel-gfx mailing list
> [email protected]
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
[email protected]
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to