// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_query.h"

#include <linux/nospec.h>
#include <linux/sched/clock.h>

#include <drm/ttm/ttm_placement.h>
#include <uapi/drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_ttm_vram_mgr.h"

static const u16 xe_to_user_engine_class[] = {
	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
	[XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
	[XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
	[XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

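/* Size of the engines query blob: one drm_xe_engine per non-reserved hw engine. */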
static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;
			i++;
		}

	return sizeof(struct drm_xe_query_engines) +
		i * sizeof(struct drm_xe_engine);
}

typedef u64 (*__ktime_func_t)(void);
static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
{
	/*
	 * Use logic same as the perf subsystem to allow user to select the
	 * reference clock id to be used for timestamps.
	 */
	switch (clk_id) {
	case CLOCK_MONOTONIC:
		return &ktime_get_ns;
	case CLOCK_MONOTONIC_RAW:
		return &ktime_get_raw_ns;
	case CLOCK_REALTIME:
		return &ktime_get_real_ns;
	case CLOCK_BOOTTIME:
		return &ktime_get_boottime_ns;
	case CLOCK_TAI:
		return &ktime_get_clocktai_ns;
	default:
		return NULL;
	}
}

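/*
 * Sample a 64-bit engine timestamp from its split lower/upper 32-bit
 * registers together with a CPU timestamp. The upper dword is re-read to
 * detect a rollover of the lower dword, and cpu_delta reports the
 * local_clock() time spent taking the CPU and engine samples.
 */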
static void
__read_timestamps(struct xe_gt *gt,
		  struct xe_reg lower_reg,
		  struct xe_reg upper_reg,
		  u64 *engine_ts,
		  u64 *cpu_ts,
		  u64 *cpu_delta,
		  __ktime_func_t cpu_clock)
{
	u32 upper, lower, old_upper, loop = 0;

	upper = xe_mmio_read32(gt, upper_reg);
	do {
		*cpu_delta = local_clock();
		*cpu_ts = cpu_clock();
		lower = xe_mmio_read32(gt, lower_reg);
		*cpu_delta = local_clock() - *cpu_delta;
		old_upper = upper;
		upper = xe_mmio_read32(gt, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	*engine_ts = (u64)upper << 32 | lower;
}

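/*
 * Correlate an engine timestamp with the CPU clock selected by the caller.
 * Like all query functions below, a query->size of zero only reports the
 * required size; a second call with a matching size fills in the data.
 */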
static int
query_engine_cycles(struct xe_device *xe,
		    struct drm_xe_device_query *query)
{
	struct drm_xe_query_engine_cycles __user *query_ptr;
	struct drm_xe_engine_class_instance *eci;
	struct drm_xe_query_engine_cycles resp;
	size_t size = sizeof(resp);
	__ktime_func_t cpu_clock;
	struct xe_hw_engine *hwe;
	struct xe_gt *gt;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	query_ptr = u64_to_user_ptr(query->data);
	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	cpu_clock = __clock_id_to_func(resp.clockid);
	if (!cpu_clock)
		return -EINVAL;

	eci = &resp.eci;
	if (eci->gt_id >= XE_MAX_GT_PER_TILE)
		return -EINVAL;

	gt = xe_device_get_gt(xe, eci->gt_id);
	if (!gt)
		return -EINVAL;

	if (eci->engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return -EINVAL;

	hwe = xe_gt_hw_engine(gt, user_to_xe_engine_class[eci->engine_class],
			      eci->engine_instance, true);
	if (!hwe)
		return -EINVAL;

	if (xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL))
		return -EIO;

	__read_timestamps(gt,
			  RING_TIMESTAMP(hwe->mmio_base),
			  RING_TIMESTAMP_UDW(hwe->mmio_base),
			  &resp.engine_cycles,
			  &resp.cpu_timestamp,
			  &resp.cpu_delta,
			  cpu_clock);

	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);

	if (GRAPHICS_VER(xe) >= 20)
		resp.width = 64;
	else
		resp.width = 36;

	/* Only write to the output fields of user query */
	if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp))
		return -EFAULT;

	if (put_user(resp.cpu_delta, &query_ptr->cpu_delta))
		return -EFAULT;

	if (put_user(resp.engine_cycles, &query_ptr->engine_cycles))
		return -EFAULT;

	if (put_user(resp.width, &query_ptr->width))
		return -EFAULT;

	return 0;
}

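/* Report one drm_xe_engine per non-reserved hw engine across all GTs. */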
static int query_engines(struct xe_device *xe,
			 struct drm_xe_device_query *query)
{
	size_t size = calc_hw_engine_info_size(xe);
	struct drm_xe_query_engines __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_engines *engines;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	engines = kzalloc(size, GFP_KERNEL);
	if (!engines)
		return -ENOMEM;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;

			engines->engines[i].instance.engine_class =
				xe_to_user_engine_class[hwe->class];
			engines->engines[i].instance.engine_instance =
				hwe->logical_instance;
			engines->engines[i].instance.gt_id = gt->info.id;

			i++;
		}

	engines->num_engines = i;

	if (copy_to_user(query_ptr, engines, size)) {
		kfree(engines);
		return -EFAULT;
	}
	kfree(engines);

	return 0;
}

static size_t calc_mem_regions_size(struct xe_device *xe)
{
	u32 num_managers = 1;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
		if (ttm_manager_type(&xe->ttm, i))
			num_managers++;

	return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
}

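/*
 * Report memory regions: index 0 is always system memory, any VRAM managers
 * follow. Usage counters are only filled in for perfmon_capable() callers.
 */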
static int query_mem_regions(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	size_t size = calc_mem_regions_size(xe);
	struct drm_xe_query_mem_regions *mem_regions;
	struct drm_xe_query_mem_regions __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct ttm_resource_manager *man;
	int ret, i;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	mem_regions = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_DBG(xe, !mem_regions))
		return -ENOMEM;

	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
	mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
	/*
	 * The instance needs to be a unique number that represents the index
	 * in the placement mask used at xe_gem_create_ioctl() for the
	 * xe_bo_create() placement.
	 */
	mem_regions->mem_regions[0].instance = 0;
	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
	if (perfmon_capable())
		mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
	mem_regions->num_mem_regions = 1;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
				DRM_XE_MEM_REGION_CLASS_VRAM;
			mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
				mem_regions->num_mem_regions;
			mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
				SZ_64K : PAGE_SIZE;
			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
				man->size;

			if (perfmon_capable()) {
				xe_ttm_vram_get_used(man,
						     &mem_regions->mem_regions
						     [mem_regions->num_mem_regions].used,
						     &mem_regions->mem_regions
						     [mem_regions->num_mem_regions].cpu_visible_used);
			}

			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
				xe_ttm_vram_get_cpu_visible_size(man);
			mem_regions->num_mem_regions++;
		}
	}

	if (!copy_to_user(query_ptr, mem_regions, size))
		ret = 0;
	else
		ret = -ENOSPC;

	kfree(mem_regions);
	return ret;
}

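/* Report device configuration as u64 values indexed by DRM_XE_QUERY_CONFIG_*. */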
static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
	const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
	size_t size =
		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
	struct drm_xe_query_config __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_config *config;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->num_params = num_params;
	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
		xe->info.devid | (xe->info.revid << 16);
	if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
	config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
	config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
		xe_exec_queue_device_get_max_priority(xe);

	if (copy_to_user(query_ptr, config, size)) {
		kfree(config);
		return -EFAULT;
	}
	kfree(config);

	return 0;
}

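/*
 * Describe each GT: type (main/media), tile, reference clock, near/far
 * memory region masks and the GMD_ID-derived IP version.
 */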
static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct xe_gt *gt;
	size_t size = sizeof(struct drm_xe_query_gt_list) +
		xe->info.gt_count * sizeof(struct drm_xe_gt);
	struct drm_xe_query_gt_list __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_gt_list *gt_list;
	u8 id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	gt_list = kzalloc(size, GFP_KERNEL);
	if (!gt_list)
		return -ENOMEM;

	gt_list->num_gt = xe->info.gt_count;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
		else
			gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
		gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id;
		gt_list->gt_list[id].gt_id = gt->info.id;
		gt_list->gt_list[id].reference_clock = gt->info.reference_clock;
		/*
		 * The mem_regions indexes in the mask below need to
		 * directly identify the struct
		 * drm_xe_query_mem_regions' instance constructed at
		 * query_mem_regions()
		 *
		 * For our current platforms:
		 * Bit 0 -> System Memory
		 * Bit 1 -> VRAM0 on Tile0
		 * Bit 2 -> VRAM1 on Tile1
		 * However the uAPI is generic and it's userspace's
		 * responsibility to check the mem_class, without any
		 * assumption.
		 */
		if (!IS_DGFX(xe))
			gt_list->gt_list[id].near_mem_regions = 0x1;
		else
			gt_list->gt_list[id].near_mem_regions =
				BIT(gt_to_tile(gt)->id) << 1;
		gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
			gt_list->gt_list[id].near_mem_regions;

		gt_list->gt_list[id].ip_ver_major =
			REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
		gt_list->gt_list[id].ip_ver_minor =
			REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
		gt_list->gt_list[id].ip_ver_rev =
			REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);
	}

	if (copy_to_user(query_ptr, gt_list, size)) {
		kfree(gt_list);
		return -EFAULT;
	}
	kfree(gt_list);

	return 0;
}

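/* Copy the GuC hwconfig blob of the root MMIO GT to userspace. */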
static int query_hwconfig(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
	void __user *query_ptr = u64_to_user_ptr(query->data);
	void *hwconfig;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	hwconfig = kzalloc(size, GFP_KERNEL);
	if (!hwconfig)
		return -ENOMEM;

	xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);

	if (copy_to_user(query_ptr, hwconfig, size)) {
		kfree(hwconfig);
		return -EFAULT;
	}
	kfree(hwconfig);

	return 0;
}

static size_t calc_topo_query_size(struct xe_device *xe)
{
	return xe->info.gt_count *
		(4 * sizeof(struct drm_xe_query_topology_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
}

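/* Write one topology mask header plus its raw mask bytes and advance *ptr past both. */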
static int copy_mask(void __user **ptr,
		     struct drm_xe_query_topology_mask *topo,
		     void *mask, size_t mask_size)
{
	topo->num_bytes = mask_size;

	if (copy_to_user(*ptr, topo, sizeof(*topo)))
		return -EFAULT;
	*ptr += sizeof(*topo);

	if (copy_to_user(*ptr, mask, mask_size))
		return -EFAULT;
	*ptr += mask_size;

	return 0;
}

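/*
 * For each GT, emit four masks: geometry DSS, compute DSS, L3 banks and
 * EUs per DSS.
 */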
static int query_gt_topology(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_topo_query_size(xe);
	struct drm_xe_query_topology_mask topo;
	struct xe_gt *gt;
	int id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	for_each_gt(gt, xe, id) {
		int err;

		topo.gt_id = id;

		topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
				sizeof(gt->fuse_topo.g_dss_mask));
		if (err)
			return err;

		topo.type = DRM_XE_TOPO_DSS_COMPUTE;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
				sizeof(gt->fuse_topo.c_dss_mask));
		if (err)
			return err;

		topo.type = DRM_XE_TOPO_L3_BANK;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
				sizeof(gt->fuse_topo.l3_bank_mask));
		if (err)
			return err;

		topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
			DRM_XE_TOPO_SIMD16_EU_PER_DSS :
			DRM_XE_TOPO_EU_PER_DSS;
		err = copy_mask(&query_ptr, &topo,
				gt->fuse_topo.eu_mask_per_dss,
				sizeof(gt->fuse_topo.eu_mask_per_dss));
		if (err)
			return err;
	}

	return 0;
}

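/*
 * Report the firmware version for the requested microcontroller: the GuC
 * submission (compatibility) version, or the HuC release version when that
 * firmware is running.
 */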
static int
query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_uc_fw_version);
	struct drm_xe_query_uc_fw_version resp;
	struct xe_uc_fw_version *version = NULL;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
		return -EINVAL;

	switch (resp.uc_type) {
	case XE_QUERY_UC_TYPE_GUC_SUBMISSION: {
		struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;

		version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
		break;
	}
	case XE_QUERY_UC_TYPE_HUC: {
		struct xe_gt *media_gt = NULL;
		struct xe_huc *huc;

		if (MEDIA_VER(xe) >= 13) {
			struct xe_tile *tile;
			u8 gt_id;

			for_each_tile(tile, xe, gt_id) {
				if (tile->media_gt) {
					media_gt = tile->media_gt;
					break;
				}
			}
		} else {
			media_gt = xe->tiles[0].primary_gt;
		}

		if (!media_gt)
			break;

		huc = &media_gt->uc.huc;
		if (huc->fw.status == XE_UC_FIRMWARE_RUNNING)
			version = &huc->fw.versions.found[XE_UC_FW_VER_RELEASE];
		break;
	}
	default:
		return -EINVAL;
	}

	if (version) {
		resp.branch_ver = 0;
		resp.major_ver = version->major;
		resp.minor_ver = version->minor;
		resp.patch_ver = version->patch;
	} else {
		return -ENODEV;
	}

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

static size_t calc_oa_unit_query_size(struct xe_device *xe)
{
	size_t size = sizeof(struct drm_xe_query_oa_units);
	struct xe_gt *gt;
	int i, id;

	for_each_gt(gt, xe, id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			size += sizeof(struct drm_xe_oa_unit);
			size += gt->oa.oa_unit[i].num_engines *
				sizeof(struct drm_xe_engine_class_instance);
		}
	}

	return size;
}

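/*
 * Describe each OA unit and the non-reserved engines attached to it. The
 * per-unit entries are variable-sized, so they are packed back to back.
 */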
static int query_oa_units(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_oa_unit_query_size(xe);
	struct drm_xe_query_oa_units *qoa;
	enum xe_hw_engine_id hwe_id;
	struct drm_xe_oa_unit *du;
	struct xe_hw_engine *hwe;
	struct xe_oa_unit *u;
	int gt_id, i, j, ret;
	struct xe_gt *gt;
	u8 *pdu;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	qoa = kzalloc(size, GFP_KERNEL);
	if (!qoa)
		return -ENOMEM;

	pdu = (u8 *)&qoa->oa_units[0];
	for_each_gt(gt, xe, gt_id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			u = &gt->oa.oa_unit[i];
			du = (struct drm_xe_oa_unit *)pdu;

			du->oa_unit_id = u->oa_unit_id;
			du->oa_unit_type = u->type;
			du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
			du->capabilities = DRM_XE_OA_CAPS_BASE;

			j = 0;
			for_each_hw_engine(hwe, gt, hwe_id) {
				if (!xe_hw_engine_is_reserved(hwe) &&
				    xe_oa_unit_id(hwe) == u->oa_unit_id) {
					du->eci[j].engine_class =
						xe_to_user_engine_class[hwe->class];
					du->eci[j].engine_instance = hwe->logical_instance;
					du->eci[j].gt_id = gt->info.id;
					j++;
				}
			}
			du->num_engines = j;
			pdu += sizeof(*du) + j * sizeof(du->eci[0]);
			qoa->num_oa_units++;
		}
	}

	ret = copy_to_user(query_ptr, qoa, size);
	kfree(qoa);

	return ret ? -EFAULT : 0;
}

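/* Dispatch table, indexed by the DRM_XE_DEVICE_QUERY_* value from userspace. */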
static int (* const xe_query_funcs[])(struct xe_device *xe,
				      struct drm_xe_device_query *query) = {
	query_engines,
	query_mem_regions,
	query_config,
	query_gt_list,
	query_hwconfig,
	query_gt_topology,
	query_engine_cycles,
	query_uc_fw_version,
	query_oa_units,
};

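/**
 * xe_query_ioctl - Handle the device query ioctl
 * @dev: DRM device
 * @data: ioctl payload (struct drm_xe_device_query)
 * @file: DRM file
 *
 * Validate the request and dispatch to the matching query function.
 * array_index_nospec() clamps the table index to prevent speculative
 * out-of-bounds access on the user-controlled query number.
 *
 * Return: 0 on success, negative error code on failure.
 */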
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_device_query *query = data;
	u32 idx;

	if (XE_IOCTL_DBG(xe, query->extensions) ||
	    XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
		return -EINVAL;

	idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
	if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
		return -EINVAL;

	return xe_query_funcs[idx](xe, query);
}