Lines matching +full:0 +full:xe

The matches below are non-contiguous source fragments from the xe driver's query ioctl implementation (drivers/gpu/drm/xe/xe_query.c in the Linux kernel), grouped by enclosing function.

In calc_hw_engine_info_size():
    static size_t calc_hw_engine_info_size(struct xe_device *xe)
    int i = 0;
    for_each_gt(gt, xe, gt_id)
In hwe_read_timestamp():
    u32 upper, lower, old_upper, loop = 0;
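The upper/lower/old_upper trio in hwe_read_timestamp() is the standard way to read a 64-bit counter exposed as two 32-bit registers: read the high word, then the low word, then the high word again, and retry if the high word changed in between; the loop counter bounds the retry. A minimal sketch of the idiom, with read_reg32() as a hypothetical accessor standing in for the driver's MMIO helpers:

    /* Torn-read-safe 64-bit counter read from two 32-bit halves.
     * read_reg32() is a hypothetical accessor, not the driver's API. */
    static u64 read_counter64(void __iomem *base, u32 lo, u32 hi)
    {
        u32 upper, lower, old_upper, loop = 0;

        upper = read_reg32(base, hi);
        do {
            old_upper = upper;
            lower = read_reg32(base, lo);  /* low word may wrap here... */
            upper = read_reg32(base, hi);  /* ...so re-sample the high word */
        } while (upper != old_upper && loop++ < 2);  /* bounded retry */

        return ((u64)upper << 32) | lower;
    }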
In query_engine_cycles():
    query_engine_cycles(struct xe_device *xe,
    if (IS_SRIOV_VF(xe))
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    gt = xe_device_get_gt(xe, eci->gt_id);
    if (GRAPHICS_VER(xe) >= 20)
    return 0;
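Every query_*() handler opens with the same size handshake visible above: a call with query->size == 0 makes the kernel write back the required size and return 0, while any other size must match the computed size exactly or the XE_IOCTL_DBG()-wrapped check fails the ioctl with -EINVAL. From userspace this becomes a probe-allocate-fetch helper; a minimal sketch, assuming libdrm's drmIoctl() and the xe uAPI header:

    #include <stdint.h>
    #include <stdlib.h>
    #include <xf86drm.h>
    #include <drm/xe_drm.h>

    /* Probe a query's size, allocate a buffer, then fetch the data.
     * Returns a heap buffer the caller frees, or NULL on error. */
    static void *xe_query_alloc(int fd, uint32_t query_id, uint32_t *size)
    {
        struct drm_xe_device_query q = { .query = query_id };  /* .size = 0 */
        void *buf;

        if (drmIoctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))  /* probe pass */
            return NULL;

        buf = calloc(1, q.size);
        if (!buf)
            return NULL;

        q.data = (uintptr_t)buf;  /* q.size was filled in by the kernel */
        if (drmIoctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) {  /* fetch pass */
            free(buf);
            return NULL;
        }

        if (size)
            *size = q.size;
        return buf;
    }

The sketches further below reuse this helper.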
In query_engines():
    static int query_engines(struct xe_device *xe,
    size_t size = calc_hw_engine_info_size(xe);
    int i = 0;
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    for_each_gt(gt, xe, gt_id)
    return 0;
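query_engines() reports one struct drm_xe_engine per hardware engine, sized up front by calc_hw_engine_info_size()'s walk over every GT. A usage sketch built on the xe_query_alloc() helper above (plus <stdio.h>):

    struct drm_xe_query_engines *engines;
    uint32_t i;

    engines = xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_ENGINES, NULL);
    if (engines) {
        for (i = 0; i < engines->num_engines; i++) {
            struct drm_xe_engine_class_instance *eci =
                &engines->engines[i].instance;

            printf("engine %u: class %u instance %u gt %u\n", i,
                   eci->engine_class, eci->engine_instance, eci->gt_id);
        }
        free(engines);
    }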
In calc_mem_regions_size():
    static size_t calc_mem_regions_size(struct xe_device *xe)
    if (ttm_manager_type(&xe->ttm, i))
In query_mem_regions():
    static int query_mem_regions(struct xe_device *xe,
    size_t size = calc_mem_regions_size(xe);
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    if (XE_IOCTL_DBG(xe, !mem_regions))
    man = ttm_manager_type(&xe->ttm, XE_PL_TT);
    mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
    mem_regions->mem_regions[0].instance = 0;
    mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
    mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
    mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
    man = ttm_manager_type(&xe->ttm, i);
    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
    ret = 0;
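query_mem_regions() always emits the TT (system memory) manager as region 0, then appends one entry per enabled VRAM manager; on platforms with XE_VRAM_FLAGS_NEED64K the VRAM min_page_size is reported as 64K rather than 4K. Reading the result back is a plain array walk; a sketch reusing xe_query_alloc():

    struct drm_xe_query_mem_regions *regions;
    uint32_t i;

    regions = xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_MEM_REGIONS, NULL);
    if (regions) {
        for (i = 0; i < regions->num_mem_regions; i++) {
            struct drm_xe_mem_region *r = &regions->mem_regions[i];

            printf("%s[%u]: total %llu used %llu min_page %u\n",
                   r->mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM ?
                       "sysmem" : "vram",
                   r->instance,
                   (unsigned long long)r->total_size,
                   (unsigned long long)r->used,
                   r->min_page_size);
        }
        free(regions);
    }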
In query_config():
    static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    xe->info.devid | (xe->info.revid << 16);
    if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
    if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_GPUSVM))
    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
    config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
    xe_exec_queue_device_get_max_priority(xe);
    return 0;
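query_config() packs scalar device properties into the info[] array of struct drm_xe_query_config, indexed by the DRM_XE_QUERY_CONFIG_* constants: the PCI device and revision IDs share one entry (devid | revid << 16), the minimum GPU memory alignment is 64K on NEED64K VRAM platforms and 4K otherwise, and so on. Decoding a few entries:

    struct drm_xe_query_config *config;

    config = xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_CONFIG, NULL);
    if (config) {
        uint64_t rev_dev =
            config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID];

        printf("devid 0x%04x rev 0x%02x va_bits %llu min_align %llu\n",
               (unsigned int)(rev_dev & 0xffff),
               (unsigned int)(rev_dev >> 16),
               (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_VA_BITS],
               (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT]);
        free(config);
    }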
In query_gt_list():
    static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
    xe->info.gt_count * sizeof(struct drm_xe_gt);
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    gt_list->num_gt = xe->info.gt_count;
    for_each_gt(gt, xe, id) {
    * Bit 0 -> System Memory
    if (!IS_DGFX(xe))
        gt_list->gt_list[id].near_mem_regions = 0x1;
    gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
    return 0;
In query_hwconfig():
    static int query_hwconfig(struct xe_device *xe,
    struct xe_gt *gt = xe_root_mmio_gt(xe);
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    return 0;
In calc_topo_query_size():
    static size_t calc_topo_query_size(struct xe_device *xe)
    size_t query_size = 0;
    for_each_gt(gt, xe, id) {
In copy_mask():
    return 0;
In query_gt_topology():
    static int query_gt_topology(struct xe_device *xe,
    size_t size = calc_topo_query_size(xe);
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    for_each_gt(gt, xe, id) {
    return 0;
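query_gt_topology() streams variable-length struct drm_xe_query_topology_mask records (one per mask type per GT), each pushed to userspace via copy_mask(). Since every record carries its own num_bytes, the buffer has to be walked record by record rather than indexed as an array; a sketch:

    uint8_t *buf, *p;
    uint32_t size;

    buf = xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, &size);
    if (buf) {
        for (p = buf; p < buf + size; ) {
            struct drm_xe_query_topology_mask *topo = (void *)p;

            printf("gt %u type %u: %u mask bytes\n",
                   topo->gt_id, topo->type, topo->num_bytes);
            p += sizeof(*topo) + topo->num_bytes;  /* next record */
        }
        free(buf);
    }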
In query_uc_fw_version():
    query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
    struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;
    if (MEDIA_VER(xe) >= 13) {
    for_each_tile(tile, xe, gt_id) {
    media_gt = xe->tiles[0].primary_gt;
    resp.branch_ver = 0;
    return 0;
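Unlike the variable-size queries, query_uc_fw_version() is fixed-size and also reads input from userspace: the caller selects a microcontroller in uc_type, and the zero checks on resp.pad/pad2/reserved enforce forward compatibility of the remaining fields. That permits a single-call pattern instead of the probe/fetch dance; a sketch, assuming the XE_QUERY_UC_TYPE_GUC_SUBMISSION selector from the uAPI header:

    struct drm_xe_query_uc_fw_version v = {
        .uc_type = XE_QUERY_UC_TYPE_GUC_SUBMISSION,
    };
    struct drm_xe_device_query q = {
        .query = DRM_XE_DEVICE_QUERY_UC_FW_VERSION,
        .size = sizeof(v),            /* fixed size: no probe call needed */
        .data = (uintptr_t)&v,
    };

    if (!drmIoctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
        printf("GuC %u.%u.%u (branch %u)\n",
               v.major_ver, v.minor_ver, v.patch_ver, v.branch_ver);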
In calc_oa_unit_query_size():
    static size_t calc_oa_unit_query_size(struct xe_device *xe)
    for_each_gt(gt, xe, id) {
    for (i = 0; i < gt->oa.num_oa_units; i++) {
In query_oa_units():
    static int query_oa_units(struct xe_device *xe,
    size_t size = calc_oa_unit_query_size(xe);
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    pdu = (u8 *)&qoa->oa_units[0];
    for_each_gt(gt, xe, gt_id) {
    for (i = 0; i < gt->oa.num_oa_units; i++) {
    j = 0;
    pdu += sizeof(*du) + j * sizeof(du->eci[0]);
    return ret ? -EFAULT : 0;
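query_oa_units() produces the same kind of variable-length stream: pdu advances by sizeof(*du) plus j engine-instance entries for each OA unit, so struct drm_xe_oa_unit records cannot be indexed as a plain array either. A matching userspace walk, mirroring the kernel's pointer arithmetic:

    struct drm_xe_query_oa_units *qoa;
    uint8_t *pdu;
    uint32_t i;

    qoa = xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_OA_UNITS, NULL);
    if (qoa) {
        pdu = (uint8_t *)&qoa->oa_units[0];
        for (i = 0; i < qoa->num_oa_units; i++) {
            struct drm_xe_oa_unit *du = (void *)pdu;

            printf("oa unit %u: %llu engines\n", du->oa_unit_id,
                   (unsigned long long)du->num_engines);
            pdu += sizeof(*du) + du->num_engines * sizeof(du->eci[0]);
        }
        free(qoa);
    }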
In query_pxp_status():
    static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
    struct drm_xe_query_pxp_status resp = { 0 };
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    ret = xe_pxp_get_readiness_status(xe->pxp);
    if (ret < 0)
    return 0;
In query_eu_stall():
    static int query_eu_stall(struct xe_device *xe,
    if (!xe_eu_stall_supported_on_platform(xe)) {
        drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
    if (query->size == 0) {
        return 0;
    } else if (XE_IOCTL_DBG(xe, query->size != size)) {
    info->record_size = xe_eu_stall_data_record_size(xe);
    return ret ? -EFAULT : 0;
In xe_query_funcs[] and xe_query_ioctl():
    static int (* const xe_query_funcs[])(struct xe_device *xe,
    struct xe_device *xe = to_xe_device(dev);
    if (XE_IOCTL_DBG(xe, query->extensions) ||
        XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
    if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
    if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
    return xe_query_funcs[idx](xe, query);
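xe_query_ioctl() itself is a bounds-checked table dispatch: non-zero extensions or reserved fields are rejected, out-of-range query IDs are rejected against ARRAY_SIZE(), and holes in the designated-initializer table are caught by the NULL-slot check. Reconstructed as a sketch consistent with the matched lines (the index clamp via array_index_nospec() is an assumption about the elided code):

    static int (* const xe_query_funcs[])(struct xe_device *xe,
                                          struct drm_xe_device_query *query) = {
        [DRM_XE_DEVICE_QUERY_ENGINES] = query_engines,
        [DRM_XE_DEVICE_QUERY_MEM_REGIONS] = query_mem_regions,
        [DRM_XE_DEVICE_QUERY_CONFIG] = query_config,
        /* ... one slot per DRM_XE_DEVICE_QUERY_* id; gaps stay NULL ... */
    };

    int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
    {
        struct xe_device *xe = to_xe_device(dev);
        struct drm_xe_device_query *query = data;
        u32 idx;

        if (XE_IOCTL_DBG(xe, query->extensions) ||
            XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
            return -EINVAL;

        if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
            return -EINVAL;

        /* assumption: clamp the validated index against speculation */
        idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
        if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
            return -EINVAL;

        return xe_query_funcs[idx](xe, query);
    }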