From: Tvrtko Ursulin <[email protected]>

Instead of sprinkling the usage and initialization of
i915_execbuffer_params throughout the function, we can consolidate
it just before execbuf_submit for maintainability and readability.

That way we can also drop the memset, since it becomes
easy to spot that we initialize all the fields.

     text    data     bss     dec     hex filename
  1085466   26398    2628 1114492  11017c i915.ko.0
  1085402   26398    2628 1114428  11013c i915.ko.1

Signed-off-by: Tvrtko Ursulin <[email protected]>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 87 +++++++++++++++---------------
 1 file changed, 44 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 91c2393199a3..ee7b7bd17e29 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -50,14 +50,14 @@
 #define BATCH_OFFSET_BIAS (256*1024)
 
 struct i915_execbuffer_params {
-       struct drm_device               *dev;
-       struct drm_file                 *file;
-       struct i915_vma                 *batch;
-       u32                             dispatch_flags;
-       u32                             args_batch_start_offset;
-       struct intel_engine_cs          *engine;
-       struct i915_gem_context         *ctx;
-       struct drm_i915_gem_request     *request;
+       struct drm_device           *dev;
+       struct drm_file             *file;
+       struct i915_vma             *batch;
+       u32                         dispatch_flags;
+       u32                         batch_start;
+       struct intel_engine_cs      *engine;
+       struct i915_gem_context     *ctx;
+       struct drm_i915_gem_request *request;
 };
 
 struct eb_vmas {
@@ -1484,11 +1484,10 @@ execbuf_submit(struct i915_execbuffer_params *params,
        }
 
        exec_len   = args->batch_len;
-       exec_start = params->batch->node.start +
-                    params->args_batch_start_offset;
+       exec_start = params->batch->node.start + params->batch_start;
 
        if (exec_len == 0)
-               exec_len = params->batch->size - 
params->args_batch_start_offset;
+               exec_len = params->batch->size - params->batch_start;
 
        ret = params->engine->emit_bb_start(params->request,
                                            exec_start, exec_len,
@@ -1592,8 +1591,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void 
*data,
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        struct i915_address_space *vm;
-       struct i915_execbuffer_params params_master; /* XXX: will be removed 
later */
-       struct i915_execbuffer_params *params = &params_master;
+       struct drm_i915_gem_request *req;
+       struct i915_vma *batch;
+       u32 batch_start_offset;
+       struct i915_execbuffer_params params;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u32 dispatch_flags;
        struct dma_fence *in_fence = NULL;
@@ -1685,8 +1686,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        else
                vm = &ggtt->base;
 
-       memset(&params_master, 0x00, sizeof(params_master));
-
        eb = eb_create(dev_priv, args);
        if (eb == NULL) {
                i915_gem_context_put(ctx);
@@ -1701,7 +1700,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
 
        /* take note of the batch buffer before we might reorder the lists */
-       params->batch = eb_get_batch(eb);
+       batch = eb_get_batch(eb);
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1725,24 +1724,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void 
*data,
        }
 
        /* Set the pending read domains for the batch buffer to COMMAND */
-       if (params->batch->obj->base.pending_write_domain) {
+       if (batch->obj->base.pending_write_domain) {
                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
-       if (args->batch_start_offset > params->batch->size ||
-           args->batch_len > params->batch->size - args->batch_start_offset) {
+       if (args->batch_start_offset > batch->size ||
+           args->batch_len > batch->size - args->batch_start_offset) {
                DRM_DEBUG("Attempting to use out-of-bounds batch\n");
                ret = -EINVAL;
                goto err;
        }
 
-       params->args_batch_start_offset = args->batch_start_offset;
+       batch_start_offset = args->batch_start_offset;
        if (engine->needs_cmd_parser && args->batch_len) {
                struct i915_vma *vma;
 
                vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
-                                               params->batch->obj,
+                                               batch->obj,
                                                eb,
                                                args->batch_start_offset,
                                                args->batch_len,
@@ -1763,18 +1762,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void 
*data,
                         * command parser has accepted.
                         */
                        dispatch_flags |= I915_DISPATCH_SECURE;
-                       params->args_batch_start_offset = 0;
-                       params->batch = vma;
+                       batch_start_offset = 0;
+                       batch = vma;
                }
        }
 
-       params->batch->obj->base.pending_read_domains |= 
I915_GEM_DOMAIN_COMMAND;
+       batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
        if (dispatch_flags & I915_DISPATCH_SECURE) {
-               struct drm_i915_gem_object *obj = params->batch->obj;
+               struct drm_i915_gem_object *obj = batch->obj;
                struct i915_vma *vma;
 
                /*
@@ -1793,25 +1792,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void 
*data,
                        goto err;
                }
 
-               params->batch = vma;
+               batch = vma;
        }
 
        /* Allocate a request for this batch buffer nice and early. */
-       params->request = i915_gem_request_alloc(engine, ctx);
-       if (IS_ERR(params->request)) {
-               ret = PTR_ERR(params->request);
+       req = i915_gem_request_alloc(engine, ctx);
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
                goto err_batch_unpin;
        }
 
        if (in_fence) {
-               ret = i915_gem_request_await_dma_fence(params->request,
-                                                      in_fence);
+               ret = i915_gem_request_await_dma_fence(req, in_fence);
                if (ret < 0)
                        goto err_request;
        }
 
        if (out_fence_fd != -1) {
-               out_fence = sync_file_create(&params->request->fence);
+               out_fence = sync_file_create(&req->fence);
                if (!out_fence) {
                        ret = -ENOMEM;
                        goto err_request;
@@ -1824,9 +1822,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * inactive_list and lose its active reference. Hence we do not need
         * to explicitly hold another reference here.
         */
-       params->request->batch = params->batch;
+       req->batch = batch;
 
-       ret = i915_gem_request_add_to_client(params->request, file);
+       ret = i915_gem_request_add_to_client(req, file);
        if (ret)
                goto err_request;
 
@@ -1836,15 +1834,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void 
*data,
         * kept around beyond the duration of the IOCTL once the GPU
         * scheduler arrives.
         */
-       params->dev                     = dev;
-       params->file                    = file;
-       params->engine                    = engine;
-       params->dispatch_flags          = dispatch_flags;
-       params->ctx                     = ctx;
-
-       ret = execbuf_submit(params, args, &eb->vmas);
+       params.dev            = dev;
+       params.file           = file;
+       params.engine         = engine;
+       params.dispatch_flags = dispatch_flags;
+       params.ctx            = ctx;
+       params.batch          = batch;
+       params.batch_start    = batch_start_offset;
+       params.request        = req;
+
+       ret = execbuf_submit(&params, args, &eb->vmas);
 err_request:
-       __i915_add_request(params->request, ret == 0);
+       __i915_add_request(req, ret == 0);
        if (out_fence) {
                if (ret == 0) {
                        fd_install(out_fence_fd, out_fence->file);
@@ -1864,7 +1865,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * active.
         */
        if (dispatch_flags & I915_DISPATCH_SECURE)
-               i915_vma_unpin(params->batch);
+               i915_vma_unpin(batch);
 err:
        /* the request owns the ref now */
        i915_gem_context_put(ctx);
-- 
2.7.4

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to