When a PF is configured in admin-only mode, it is intended for management only and must not expose workload-facing capabilities to userspace.
Limit the exposed ioctl set in admin-only PF mode to XE_DEVICE_QUERY, and suppress capability-bearing query payloads so userspace cannot discover execution-related device details in this mode. Signed-off-by: Satyanarayana K V P <[email protected]> Cc: Michal Wajdeczko <[email protected]> Cc: Rodrigo Vivi <[email protected]> Cc: Piotr Piórkowski <[email protected]> Cc: Matthew Brost <[email protected]> Cc: Thomas Hellström <[email protected]> Cc: Michał Winiarski <[email protected]> Cc: Dunajski Bartosz <[email protected]> Cc: [email protected] --- V4 -> V5: - Updated commit message (Matt B). - Introduced new driver_admin_only_pf structure (Michal Wajdeczko). - Updated all query configs (Michal Wajdeczko). - Renamed xe_device_is_admin_only() to xe_device_is_admin_only_pf(). - Fixed other review comments (Michal Wajdeczko). V3 -> V4: - Suppressed device capabilities in admin-only PF mode (Michal Wajdeczko). V2 -> V3: - Introduced new helper function xe_debugfs_create_files() to create debugfs entries based on admin_only_pf mode or normal mode. V1 -> V2: - Rebased to latest drm-tip. - Updated update_minor_dev() to debugfs_minor_dev(). 
--- drivers/gpu/drm/xe/xe_device.c | 47 +++++++++++++++++++++++++++-- drivers/gpu/drm/xe/xe_query.c | 55 ++++++++++++++++++++++++---------- drivers/gpu/drm/xe/xe_sriov.h | 11 +++++++ 3 files changed, 94 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 3462645ca13c..76d534491e01 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -25,6 +25,7 @@ #include "regs/xe_regs.h" #include "xe_bo.h" #include "xe_bo_evict.h" +#include "xe_configfs.h" #include "xe_debugfs.h" #include "xe_defaults.h" #include "xe_devcoredump.h" @@ -213,6 +214,10 @@ static const struct drm_ioctl_desc xe_ioctls[] = { DRM_RENDER_ALLOW), }; +static const struct drm_ioctl_desc xe_ioctls_admin_only_pf[] = { + DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW), +}; + static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct drm_file *file_priv = file->private_data; @@ -415,6 +420,29 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static struct drm_driver driver_admin_only_pf = { + .driver_features = + DRIVER_GEM | DRIVER_GEM_GPUVA, + .open = xe_file_open, + .postclose = xe_file_close, + + .gem_prime_import = xe_gem_prime_import, + + .dumb_create = xe_bo_dumb_create, + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, +#ifdef CONFIG_PROC_FS + .show_fdinfo = xe_drm_client_fdinfo, +#endif + .ioctls = xe_ioctls_admin_only_pf, + .num_ioctls = ARRAY_SIZE(xe_ioctls_admin_only_pf), + .fops = &xe_driver_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + static void xe_device_destroy(struct drm_device *dev, void *dummy) { struct xe_device *xe = to_xe_device(dev); @@ -439,16 +467,24 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy) struct xe_device *xe_device_create(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct 
drm_driver *active_drm_driver = &driver; struct xe_device *xe; int err; - xe_display_driver_set_hooks(&driver); + /* + * Since XE device is not initialized yet, read from configfs + * directly to decide whether we are in admin-only PF mode or not. + */ + if (xe_configfs_admin_only_pf(pdev)) + active_drm_driver = &driver_admin_only_pf; + + xe_display_driver_set_hooks(active_drm_driver); - err = aperture_remove_conflicting_pci_devices(pdev, driver.name); + err = aperture_remove_conflicting_pci_devices(pdev, active_drm_driver->name); if (err) return ERR_PTR(err); - xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm); + xe = devm_drm_dev_alloc(&pdev->dev, active_drm_driver, struct xe_device, drm); if (IS_ERR(xe)) return xe; @@ -708,6 +744,11 @@ int xe_device_probe_early(struct xe_device *xe) xe_sriov_probe_early(xe); + if (xe_configfs_admin_only_pf(to_pci_dev(xe->drm.dev)) && !IS_SRIOV_PF(xe)) { + drm_err(&xe->drm, "Admin-only PF mode is enabled in non PF mode\n"); + return -ENODEV; + } + if (IS_SRIOV_VF(xe)) vf_update_device_info(xe); diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 34db266b723f..f46e62797dd6 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -55,6 +55,9 @@ static size_t calc_hw_engine_info_size(struct xe_device *xe) u8 gt_id; int i = 0; + if (xe_device_is_admin_only_pf(xe)) + return 0; + for_each_gt(gt, xe, gt_id) for_each_hw_engine(hwe, gt, id) { if (xe_hw_engine_is_reserved(hwe)) @@ -126,7 +129,10 @@ query_engine_cycles(struct xe_device *xe, if (IS_SRIOV_VF(xe)) return -EOPNOTSUPP; - if (query->size == 0) { + if (xe_device_is_admin_only_pf(xe)) + size = 0; + + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { @@ -190,7 +196,7 @@ static int query_engines(struct xe_device *xe, u8 gt_id; int i = 0; - if (query->size == 0) { + if (query->size == 0 || !size) { query->size = size; return 0; } else if 
(XE_IOCTL_DBG(xe, query->size != size)) { @@ -231,6 +237,9 @@ static size_t calc_mem_regions_size(struct xe_device *xe) u32 num_managers = 1; int i; + if (xe_device_is_admin_only_pf(xe)) + return 0; + for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) if (ttm_manager_type(&xe->ttm, i)) num_managers++; @@ -248,7 +257,7 @@ static int query_mem_regions(struct xe_device *xe, struct ttm_resource_manager *man; int ret, i; - if (query->size == 0) { + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { @@ -309,13 +318,13 @@ static int query_mem_regions(struct xe_device *xe, static int query_config(struct xe_device *xe, struct drm_xe_device_query *query) { const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1; - size_t size = + size_t size = xe_device_is_admin_only_pf(xe) ? 0 : sizeof(struct drm_xe_query_config) + num_params * sizeof(u64); struct drm_xe_query_config __user *query_ptr = u64_to_user_ptr(query->data); struct drm_xe_query_config *config; - if (query->size == 0) { + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { @@ -358,15 +367,15 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query) static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query) { struct xe_gt *gt; - size_t size = sizeof(struct drm_xe_query_gt_list) + - xe->info.gt_count * sizeof(struct drm_xe_gt); + size_t size = xe_device_is_admin_only_pf(xe) ? 
0 : + sizeof(struct drm_xe_query_gt_list) + xe->info.gt_count * sizeof(struct drm_xe_gt); struct drm_xe_query_gt_list __user *query_ptr = u64_to_user_ptr(query->data); struct drm_xe_query_gt_list *gt_list; int iter = 0; u8 id; - if (query->size == 0) { + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { @@ -436,7 +445,10 @@ static int query_hwconfig(struct xe_device *xe, void __user *query_ptr = u64_to_user_ptr(query->data); void *hwconfig; - if (query->size == 0) { + if (xe_device_is_admin_only_pf(xe)) + size = 0; + + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { @@ -464,6 +476,9 @@ static size_t calc_topo_query_size(struct xe_device *xe) size_t query_size = 0; int id; + if (xe_device_is_admin_only_pf(xe)) + return 0; + for_each_gt(gt, xe, id) { query_size += 3 * sizeof(struct drm_xe_query_topology_mask) + sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) + @@ -505,7 +520,7 @@ static int query_gt_topology(struct xe_device *xe, struct xe_gt *gt; int id; - if (query->size == 0) { + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { @@ -559,11 +574,12 @@ static int query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query) { struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data); - size_t size = sizeof(struct drm_xe_query_uc_fw_version); + size_t size = xe_device_is_admin_only_pf(xe) ? 
0 : + sizeof(struct drm_xe_query_uc_fw_version); struct drm_xe_query_uc_fw_version resp; struct xe_uc_fw_version *version = NULL; - if (query->size == 0) { + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { @@ -634,6 +650,9 @@ static size_t calc_oa_unit_query_size(struct xe_device *xe) struct xe_gt *gt; int i, id; + if (xe_device_is_admin_only_pf(xe)) + return 0; + for_each_gt(gt, xe, id) { for (i = 0; i < gt->oa.num_oa_units; i++) { size += sizeof(struct drm_xe_oa_unit); @@ -659,7 +678,7 @@ static int query_oa_units(struct xe_device *xe, struct xe_gt *gt; u8 *pdu; - if (query->size == 0) { + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { @@ -711,11 +730,12 @@ static int query_oa_units(struct xe_device *xe, static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query) { struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data); - size_t size = sizeof(struct drm_xe_query_pxp_status); + size_t size = xe_device_is_admin_only_pf(xe) ? 
0 : + sizeof(struct drm_xe_query_pxp_status); struct drm_xe_query_pxp_status resp = { 0 }; int ret; - if (query->size == 0) { + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { @@ -751,7 +771,10 @@ static int query_eu_stall(struct xe_device *xe, array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates); size = sizeof(struct drm_xe_query_eu_stall) + array_size; - if (query->size == 0) { + if (xe_device_is_admin_only_pf(xe)) + size = 0; + + if (query->size == 0 || !size) { query->size = size; return 0; } else if (XE_IOCTL_DBG(xe, query->size != size)) { diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h index 72e55543c30e..ee84978350aa 100644 --- a/drivers/gpu/drm/xe/xe_sriov.h +++ b/drivers/gpu/drm/xe/xe_sriov.h @@ -37,6 +37,17 @@ static inline bool xe_device_is_sriov_vf(const struct xe_device *xe) return xe_device_sriov_mode(xe) == XE_SRIOV_MODE_VF; } +/** + * xe_device_is_admin_only_pf() - Check whether device is admin only PF or not. + * @xe: the &xe_device to check + * + * Return: true if the device is admin only PF, false otherwise. + */ +static inline bool xe_device_is_admin_only_pf(const struct xe_device *xe) +{ + return xe_device_is_sriov_pf(xe) && xe->sriov.pf.admin_only; +} + #define IS_SRIOV_PF(xe) xe_device_is_sriov_pf(xe) #define IS_SRIOV_VF(xe) xe_device_is_sriov_vf(xe) -- 2.43.0
