On Thu, Mar 7, 2019 at 8:15 AM Dave Airlie <[email protected]> wrote:
>
> > +#endif
> > diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h
> > new file mode 100644
> > index 000000000000..05f8c910d7fb
> > --- /dev/null
> > +++ b/include/uapi/drm/lima_drm.h
> > @@ -0,0 +1,164 @@
> > +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
> > +/* Copyright 2017-2018 Qiang Yu <[email protected]> */
> > +
> > +#ifndef __LIMA_DRM_H__
> > +#define __LIMA_DRM_H__
> > +
> > +#include "drm.h"
> > +
> > +#if defined(__cplusplus)
> > +extern "C" {
> > +#endif
> > +
> > +enum drm_lima_param_gpu_id {
> > +        DRM_LIMA_PARAM_GPU_ID_UNKNOWN,
> > +        DRM_LIMA_PARAM_GPU_ID_MALI400,
> > +        DRM_LIMA_PARAM_GPU_ID_MALI450,
> > +};
> > +
> > +enum drm_lima_param {
> > +        DRM_LIMA_PARAM_GPU_ID,
> > +        DRM_LIMA_PARAM_NUM_PP,
> > +        DRM_LIMA_PARAM_GP_VERSION,
> > +        DRM_LIMA_PARAM_PP_VERSION,
> > +};
> > +
> > +/**
> > + * get various information of the GPU
> > + */
> > +struct drm_lima_get_param {
> > +        __u32 param;   /* in, value in enum drm_lima_param */
> > +        __u32 pad;     /* pad, must be zero */
> > +        __u64 value;   /* out, parameter value */
> > +};
> > +
> > +/**
> > + * create a buffer for used by GPU
> > + */
> > +struct drm_lima_gem_create {
> > +        __u32 size;    /* in, buffer size */
> > +        __u32 flags;   /* in, currently no flags, must be zero */
> > +        __u32 handle;  /* out, GEM buffer handle */
> > +        __u32 pad;     /* pad, must be zero */
> > +};
> > +
> > +/**
> > + * get information of a buffer
> > + */
> > +struct drm_lima_gem_info {
> > +        __u32 handle;  /* in, GEM buffer handle */
> > +        __u32 va;      /* out, virtual address mapped into GPU MMU */
> > +        __u64 offset;  /* out, used to mmap this buffer to CPU */
> > +};
> > +
> > +#define LIMA_SUBMIT_BO_READ   0x01
> > +#define LIMA_SUBMIT_BO_WRITE  0x02
> > +
> > +/* buffer information used by one task */
> > +struct drm_lima_gem_submit_bo {
> > +        __u32 handle;  /* in, GEM buffer handle */
> > +        __u32 flags;   /* in, buffer read/write by GPU */
> > +};
> > +
> > +#define LIMA_GP_FRAME_REG_NUM 6
> > +
> > +/* frame used to setup GP for each task */
> > +struct drm_lima_gp_frame {
> > +        __u32 frame[LIMA_GP_FRAME_REG_NUM];
> > +};
> > +
> > +#define LIMA_PP_FRAME_REG_NUM 23
> > +#define LIMA_PP_WB_REG_NUM 12
> > +
> > +/* frame used to setup mali400 GPU PP for each task */
> > +struct drm_lima_m400_pp_frame {
> > +        __u32 frame[LIMA_PP_FRAME_REG_NUM];
> > +        __u32 num_pp;
> > +        __u32 wb[3 * LIMA_PP_WB_REG_NUM];
> > +        __u32 plbu_array_address[4];
> > +        __u32 fragment_stack_address[4];
> > +};
> > +
> > +/* frame used to setup mali450 GPU PP for each task */
> > +struct drm_lima_m450_pp_frame {
> > +        __u32 frame[LIMA_PP_FRAME_REG_NUM];
> > +        __u32 num_pp;
> > +        __u32 wb[3 * LIMA_PP_WB_REG_NUM];
> > +        __u32 use_dlbu;
> > +        __u32 _pad;
> > +        union {
> > +                __u32 plbu_array_address[8];
> > +                __u32 dlbu_regs[4];
> > +        };
> > +        __u32 fragment_stack_address[8];
> > +};
> > +
> > +#define LIMA_PIPE_GP   0x00
> > +#define LIMA_PIPE_PP   0x01
> > +
> > +#define LIMA_SUBMIT_FLAG_EXPLICIT_FENCE (1 << 0)
> > +
> > +/**
> > + * submit a task to GPU
> > + */
> > +struct drm_lima_gem_submit {
> > +        __u32 ctx;         /* in, context handle task is submitted to */
> > +        __u32 pipe;        /* in, which pipe to use, GP/PP */
> > +        __u32 nr_bos;      /* in, array length of bos field */
> > +        __u32 frame_size;  /* in, size of frame field */
> > +        __u64 bos;         /* in, array of drm_lima_gem_submit_bo */
> > +        __u64 frame;       /* in, GP/PP frame */
> > +        __u32 flags;       /* in, submit flags */
> > +        __u32 out_sync;    /* in, drm_syncobj handle used to wait task finish after submission */
> > +        __u32 in_sync[2];  /* in, drm_syncobj handle used to wait before start this task */
> > +};
>
> This seems a bit limited, is there a reason it's two, at least in
> Vulkan drivers we'd want more than two I suspect (Vulkan may not work
> on this hw anyways), but it might be required in the future to make
> this extensible.

The Mali4xx GPU does not support Vulkan. The reason I picked two is:
one slot is for a drm_syncobj imported from a sync_file fd, and the
other lets a GP task's out_sync be passed directly as a PP task's
in_sync when doing explicit fencing, avoiding a drm_syncobj ->
sync_file -> merge sync_file -> drm_syncobj round trip.
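Roughly, that GP -> PP chaining would look like this from userspace (a
sketch only: it assumes the submit ioctl is exposed as
DRM_IOCTL_LIMA_GEM_SUBMIT, which is outside the hunk quoted above, and
it omits BO/frame setup and all error handling):

    /* Sketch: chain a GP task into a PP task through one drm_syncobj.
     * Adjust include paths to your setup; DRM_IOCTL_LIMA_GEM_SUBMIT is
     * assumed to be defined later in lima_drm.h (not in the quoted hunk).
     */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>       /* DRM_IOCTL_SYNCOBJ_CREATE */
    #include <drm/lima_drm.h>  /* structs from this patch */

    static void run_gp_then_pp(int fd, uint32_t ctx,
                               struct drm_lima_gp_frame *gp_frame,
                               struct drm_lima_m450_pp_frame *pp_frame,
                               struct drm_lima_gem_submit_bo *bos,
                               uint32_t nr_bos)
    {
            struct drm_syncobj_create sc = { 0 };

            /* fence container that the GP submit will signal */
            ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &sc);

            struct drm_lima_gem_submit gp = {
                    .ctx = ctx,
                    .pipe = LIMA_PIPE_GP,
                    .nr_bos = nr_bos,
                    .frame_size = sizeof(*gp_frame),
                    .bos = (uintptr_t)bos,
                    .frame = (uintptr_t)gp_frame,
                    .flags = LIMA_SUBMIT_FLAG_EXPLICIT_FENCE,
                    .out_sync = sc.handle, /* signaled when GP finishes */
            };
            ioctl(fd, DRM_IOCTL_LIMA_GEM_SUBMIT, &gp);

            struct drm_lima_gem_submit pp = {
                    .ctx = ctx,
                    .pipe = LIMA_PIPE_PP,
                    .nr_bos = nr_bos,
                    .frame_size = sizeof(*pp_frame),
                    .bos = (uintptr_t)bos,
                    .frame = (uintptr_t)pp_frame,
                    .flags = LIMA_SUBMIT_FLAG_EXPLICIT_FENCE,
                    /* slot 0 waits for the GP task above; slot 1 stays
                     * free for a fence imported from a sync_file fd */
                    .in_sync = { sc.handle, 0 },
            };
            ioctl(fd, DRM_IOCTL_LIMA_GEM_SUBMIT, &pp);
    }

The second in_sync slot is the one a client would fill with a
drm_syncobj created from an imported sync_file fence (e.g. via
DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE with the IMPORT_SYNC_FILE flag), which
is the other case mentioned above.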
>
> At least a comment stating why 2 was picked is sufficient for current use cases.
>
OK, will add it.

Regards,
Qiang
