Lines matching the query "+full:0 +full:xe" in drivers/gpu/drm/xe/xe_exec_queue.c (Linux kernel drm/xe driver). Each hit below shows the file line number, the matching source line (truncated where the statement continues), and the enclosing function; a trailing "argument" or "local" marks how the matched identifier is used at that point.

28 	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
34 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
48 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, in __xe_exec_queue_alloc() argument
98 err = exec_queue_user_extensions(xe, q, extensions, 0); in __xe_exec_queue_alloc()
119 for (i = 0; i < q->width; ++i) { in __xe_exec_queue_init()
134 return 0; in __xe_exec_queue_init()
140 for (i = i - 1; i >= 0; --i) in __xe_exec_queue_init()
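
The __xe_exec_queue_init() hits above show the kernel's standard partial-initialization rollback: a forward loop sets up one context per q->width slot, and on failure a reverse loop (for (i = i - 1; i >= 0; --i)) tears down only the slots that were already initialized. A minimal userspace sketch of the same idiom, with hypothetical item_init()/item_fini() helpers standing in for the per-slot LRC setup:

    #include <stdio.h>

    #define WIDTH 4

    /* Hypothetical per-slot setup; made to fail at slot 2 to exercise the unwind. */
    static int item_init(int i)  { return i == 2 ? -1 : 0; }
    static void item_fini(int i) { printf("fini %d\n", i); }

    static int init_all(void)
    {
        int i, err = 0;

        for (i = 0; i < WIDTH; ++i) {
            err = item_init(i);
            if (err)
                goto err_unwind;
        }
        return 0;

    err_unwind:
        /* Unwind in reverse, touching only fully initialized slots. */
        for (i = i - 1; i >= 0; --i)
            item_fini(i);
        return err;
    }

    int main(void) { return init_all() ? 1 : 0; }
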
145 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, in xe_exec_queue_create() argument
153 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, in xe_exec_queue_create()
169 struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt, in xe_exec_queue_create_class() argument
176 u32 logical_mask = 0; in xe_exec_queue_create_class()
192 return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions); in xe_exec_queue_create_class()
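
xe_exec_queue_create_class() starts from logical_mask = 0 and, in code elided between these hits, appears to accumulate a bit per usable engine of the requested class before creating a width-1 queue on the first such engine (hwe0). A hedged sketch of that accumulation, using a made-up engine table in place of the GT's engine list:

    #include <stdint.h>
    #include <stdio.h>

    enum engine_class { CLASS_COPY, CLASS_RENDER };

    struct hw_engine {
        enum engine_class class;
        unsigned int logical_instance;
    };

    /* Made-up engine set standing in for iterating a GT's engines. */
    static const struct hw_engine engines[] = {
        { CLASS_COPY, 0 }, { CLASS_RENDER, 0 }, { CLASS_COPY, 1 },
    };

    int main(void)
    {
        uint32_t logical_mask = 0;
        size_t i;

        /* OR in one bit per engine of the class we want a queue on. */
        for (i = 0; i < sizeof(engines) / sizeof(engines[0]); ++i)
            if (engines[i].class == CLASS_COPY)
                logical_mask |= 1u << engines[i].logical_instance;

        printf("logical_mask = 0x%x\n", (unsigned)logical_mask); /* 0x3 */
        return 0;
    }
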
197 * @xe: Xe device.
211 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, in xe_exec_queue_create_bind() argument
220 if (xe->info.has_usm) { in xe_exec_queue_create_bind()
231 q = xe_exec_queue_create(xe, migrate_vm, in xe_exec_queue_create_bind()
235 q = xe_exec_queue_create_class(xe, gt, migrate_vm, in xe_exec_queue_create_bind()
268 for (i = 0; i < q->width; ++i) in xe_exec_queue_fini()
314 xe_exec_queue_device_get_max_priority(struct xe_device *xe) in xe_exec_queue_device_get_max_priority() argument
320 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_priority() argument
323 if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH)) in exec_queue_set_priority()
326 if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe))) in exec_queue_set_priority()
330 return 0; in exec_queue_set_priority()
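
exec_queue_set_priority() rejects a requested priority in two steps: first against the ABI ceiling (XE_EXEC_QUEUE_PRIORITY_HIGH), then against xe_exec_queue_device_get_max_priority(), which presumably caps unprivileged callers lower. XE_IOCTL_DBG() evaluates to its condition and logs when it is true, which is what lets it sit inline in these if chains. A userspace approximation (the privilege rule is an assumption):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { PRIO_LOW, PRIO_NORMAL, PRIO_HIGH };

    /* Stand-in for XE_IOCTL_DBG(): log the failed check, pass the value through. */
    #define IOCTL_DBG(cond) \
        ((cond) ? (printf("check failed: %s\n", #cond), true) : false)

    /* Assumption: unprivileged callers are capped at NORMAL priority. */
    static int device_max_priority(bool privileged)
    {
        return privileged ? PRIO_HIGH : PRIO_NORMAL;
    }

    static int set_priority(int value, bool privileged)
    {
        if (IOCTL_DBG(value > PRIO_HIGH))
            return -EINVAL;            /* not a valid priority at all */
        if (IOCTL_DBG(value > device_max_priority(privileged)))
            return -EPERM;             /* valid, but not for this caller */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", set_priority(PRIO_HIGH, false)); /* expect -EPERM */
        return 0;
    }
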
385 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_timeslice() argument
388 u32 min = 0, max = 0; in exec_queue_set_timeslice()
398 return 0; in exec_queue_set_timeslice()
401 typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
410 static int exec_queue_user_ext_set_property(struct xe_device *xe, in exec_queue_user_ext_set_property() argument
420 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_ext_set_property()
423 if (XE_IOCTL_DBG(xe, ext.property >= in exec_queue_user_ext_set_property()
425 XE_IOCTL_DBG(xe, ext.pad) || in exec_queue_user_ext_set_property()
426 XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY && in exec_queue_user_ext_set_property()
434 return exec_queue_set_property_funcs[idx](xe, q, ext.value); in exec_queue_user_ext_set_property()
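
exec_queue_user_ext_set_property() is a bounds-check-then-dispatch: ext.property is validated against the size of exec_queue_set_property_funcs[] (the kernel also sanitizes the index with array_index_nospec() before use), the pad field must be zero, and the selected function pointer is called with ext.value. The shape of that table dispatch, reduced to a sketch with two hypothetical properties:

    #include <errno.h>
    #include <stdio.h>

    typedef int (*set_property_fn)(unsigned long value);

    static int prop_priority(unsigned long v)  { printf("priority=%lu\n", v);  return 0; }
    static int prop_timeslice(unsigned long v) { printf("timeslice=%lu\n", v); return 0; }

    static const set_property_fn funcs[] = { prop_priority, prop_timeslice };

    static int set_property(unsigned int property, unsigned long value)
    {
        /* Reject out-of-range property ids before indexing the table. */
        if (property >= sizeof(funcs) / sizeof(funcs[0]))
            return -EINVAL;
        return funcs[property](value);
    }

    int main(void) { return set_property(0, 2); }
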
437 typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
446 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_user_extensions() argument
454 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS)) in exec_queue_user_extensions()
458 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_extensions()
461 if (XE_IOCTL_DBG(xe, ext.pad) || in exec_queue_user_extensions()
462 XE_IOCTL_DBG(xe, ext.name >= in exec_queue_user_extensions()
468 err = exec_queue_user_extension_funcs[idx](xe, q, extensions); in exec_queue_user_extensions()
469 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_extensions()
473 return exec_queue_user_extensions(xe, q, ext.next_extension, in exec_queue_user_extensions()
476 return 0; in exec_queue_user_extensions()
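
exec_queue_user_extensions() walks a user-supplied chain of extension structs: each node is copied in from userspace (the copy call sits in the elided lines, hence the err check at 458), its pad checked and its name bounds-checked against a handler table, the handler dispatched, and the walk then recurses on ext.next_extension with ext_number capped at MAX_USER_EXTENSIONS so a hostile chain cannot recurse unboundedly. A self-contained sketch of that bounded walk, with plain pointers standing in for the userspace copies:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_USER_EXTENSIONS 16

    struct user_ext {
        const struct user_ext *next;  /* next_extension in the real ABI */
        unsigned int name;            /* selects the handler */
    };

    static int handle_set_property(const struct user_ext *e)
    {
        printf("set_property extension\n");
        return 0;
    }

    typedef int (*ext_fn)(const struct user_ext *);
    static const ext_fn ext_funcs[] = { handle_set_property };

    static int walk_extensions(const struct user_ext *ext, int ext_number)
    {
        int err;

        if (!ext)
            return 0;
        /* Cap chain length so userspace cannot force unbounded recursion. */
        if (ext_number >= MAX_USER_EXTENSIONS)
            return -E2BIG;
        if (ext->name >= sizeof(ext_funcs) / sizeof(ext_funcs[0]))
            return -EINVAL;

        err = ext_funcs[ext->name](ext);
        if (err)
            return err;

        return walk_extensions(ext->next, ext_number + 1);
    }

    int main(void)
    {
        struct user_ext b = { NULL, 0 }, a = { &b, 0 };
        return walk_extensions(&a, 0);
    }
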
479 static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt, in calc_validate_logical_mask() argument
487 u32 return_mask = 0, prev_mask; in calc_validate_logical_mask()
489 if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) && in calc_validate_logical_mask()
491 return 0; in calc_validate_logical_mask()
493 for (i = 0; i < width; ++i) { in calc_validate_logical_mask()
494 u32 current_mask = 0; in calc_validate_logical_mask()
496 for (j = 0; j < num_placements; ++j) { in calc_validate_logical_mask()
501 hwe = xe_hw_engine_lookup(xe, eci[n]); in calc_validate_logical_mask()
502 if (XE_IOCTL_DBG(xe, !hwe)) in calc_validate_logical_mask()
503 return 0; in calc_validate_logical_mask()
505 if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe))) in calc_validate_logical_mask()
506 return 0; in calc_validate_logical_mask()
508 if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) || in calc_validate_logical_mask()
509 XE_IOCTL_DBG(xe, n && eci[n].engine_class != class)) in calc_validate_logical_mask()
510 return 0; in calc_validate_logical_mask()
521 if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1)) in calc_validate_logical_mask()
522 return 0; in calc_validate_logical_mask()
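
calc_validate_logical_mask() builds, for each of the width submission slots, a mask of the logical engine instances that slot may run on, returning 0 (invalid) if any placement names a missing or reserved engine or strays across GTs or engine classes. The key structural check is current_mask != prev_mask << 1: each slot's placement mask must be the previous slot's shifted left by one, i.e. parallel slots must land on consecutive logical instances. Just that check, sketched with hypothetical per-slot masks:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Slot i's placements must sit one logical instance above slot i-1's:
     * mask[i] == mask[i-1] << 1. */
    static bool masks_are_consecutive(const uint32_t *mask, int width)
    {
        uint32_t prev_mask = 0;
        int i;

        for (i = 0; i < width; ++i) {
            if (i && mask[i] != prev_mask << 1)
                return false;
            prev_mask = mask[i];
        }
        return true;
    }

    int main(void)
    {
        uint32_t ok[]  = { 0x1, 0x2, 0x4 };  /* instances 0, 1, 2 */
        uint32_t bad[] = { 0x1, 0x4, 0x8 };  /* gap after slot 0 */

        printf("%d %d\n", masks_are_consecutive(ok, 3),
               masks_are_consecutive(bad, 3));  /* 1 0 */
        return 0;
    }
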
533 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_create_ioctl() local
549 if (XE_IOCTL_DBG(xe, args->flags) || in xe_exec_queue_create_ioctl()
550 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_create_ioctl()
554 if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE)) in xe_exec_queue_create_ioctl()
560 if (XE_IOCTL_DBG(xe, err)) in xe_exec_queue_create_ioctl()
563 if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count)) in xe_exec_queue_create_ioctl()
566 if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) { in xe_exec_queue_create_ioctl()
567 if (XE_IOCTL_DBG(xe, args->width != 1) || in xe_exec_queue_create_ioctl()
568 XE_IOCTL_DBG(xe, args->num_placements != 1) || in xe_exec_queue_create_ioctl()
569 XE_IOCTL_DBG(xe, eci[0].engine_instance != 0)) in xe_exec_queue_create_ioctl()
572 for_each_tile(tile, xe, id) { in xe_exec_queue_create_ioctl()
579 new = xe_exec_queue_create_bind(xe, tile, flags, in xe_exec_queue_create_ioctl()
587 if (id == 0) in xe_exec_queue_create_ioctl()
594 gt = xe_device_get_gt(xe, eci[0].gt_id); in xe_exec_queue_create_ioctl()
595 logical_mask = calc_validate_logical_mask(xe, gt, eci, in xe_exec_queue_create_ioctl()
598 if (XE_IOCTL_DBG(xe, !logical_mask)) in xe_exec_queue_create_ioctl()
601 hwe = xe_hw_engine_lookup(xe, eci[0]); in xe_exec_queue_create_ioctl()
602 if (XE_IOCTL_DBG(xe, !hwe)) in xe_exec_queue_create_ioctl()
606 if (XE_IOCTL_DBG(xe, !vm)) in xe_exec_queue_create_ioctl()
615 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_exec_queue_create_ioctl()
621 q = xe_exec_queue_create(xe, vm, logical_mask, in xe_exec_queue_create_ioctl()
622 args->width, hwe, 0, in xe_exec_queue_create_ioctl()
633 if (XE_IOCTL_DBG(xe, err)) in xe_exec_queue_create_ioctl()
653 return 0; in xe_exec_queue_create_ioctl()
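
Within xe_exec_queue_create_ioctl(), DRM_XE_ENGINE_CLASS_VM_BIND is special-cased: width, num_placements and engine_instance are pinned to 1/1/0, and the ioctl then loops for_each_tile(), creating one bind queue per tile; judging from the "if (id == 0)" hit, the first tile's queue appears to be the one handed back to userspace, with the rest presumably linked to it. A compressed sketch of that per-tile loop, with placeholder tile and queue types:

    #include <stdio.h>

    #define NR_TILES 2

    struct bind_queue { int tile_id; };

    static struct bind_queue queues[NR_TILES];

    /* Placeholder for xe_exec_queue_create_bind() on one tile. */
    static struct bind_queue *create_bind_queue(int tile_id)
    {
        queues[tile_id].tile_id = tile_id;
        return &queues[tile_id];
    }

    int main(void)
    {
        struct bind_queue *q = NULL, *new_q;
        int id;

        for (id = 0; id < NR_TILES; ++id) {
            new_q = create_bind_queue(id);
            if (!new_q)
                return 1;
            if (id == 0)
                q = new_q;  /* tile 0's queue is the one exposed */
            /* the kernel links later tiles' queues onto q instead */
        }
        printf("primary bind queue on tile %d\n", q->tile_id);
        return 0;
    }
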
665 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_get_property_ioctl() local
671 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_get_property_ioctl()
675 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_get_property_ioctl()
681 ret = 0; in xe_exec_queue_get_property_ioctl()
706 return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1; in xe_exec_queue_num_job_inflight()
717 struct xe_lrc *lrc = q->lrc[0]; in xe_exec_queue_ring_full()
742 for (i = 0; i < q->width; ++i) { in xe_exec_queue_is_idle()
751 return xe_lrc_seqno(q->lrc[0]) == in xe_exec_queue_is_idle()
752 q->lrc[0]->fence_ctx.next_seqno - 1; in xe_exec_queue_is_idle()
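
The seqno arithmetic in xe_exec_queue_num_job_inflight() and xe_exec_queue_is_idle() is plain fence-context bookkeeping: next_seqno is the seqno the next submitted job will take and xe_lrc_seqno() is the last seqno the hardware has completed, so in-flight = next_seqno - completed - 1, and the queue is idle exactly when that count is zero (completed == next_seqno - 1). Worked through on hypothetical numbers:

    #include <stdbool.h>
    #include <stdio.h>

    /* completed: last seqno the HW signalled; next: seqno the next job gets. */
    static int num_inflight(int next, int completed)
    {
        return next - completed - 1;
    }

    static bool is_idle(int next, int completed)
    {
        return completed == next - 1;
    }

    int main(void)
    {
        /* Jobs took seqnos 1..4 (next == 5); HW has completed through 2. */
        printf("inflight=%d idle=%d\n", num_inflight(5, 2), is_idle(5, 2)); /* 2 0 */
        /* HW catches up to seqno 4: nothing left in flight. */
        printf("inflight=%d idle=%d\n", num_inflight(5, 4), is_idle(5, 4)); /* 0 1 */
        return 0;
    }
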
771 * not associated with a user xe file, so avoid accumulating busyness in xe_exec_queue_update_run_ticks()
787 lrc = q->lrc[0]; in xe_exec_queue_update_run_ticks()
818 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_destroy_ioctl() local
823 if (XE_IOCTL_DBG(xe, args->pad) || in xe_exec_queue_destroy_ioctl()
824 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_destroy_ioctl()
830 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_destroy_ioctl()
841 return 0; in xe_exec_queue_destroy_ioctl()
962 int err = 0; in xe_exec_queue_last_fence_test_dep()
967 0 : -ETIME; in xe_exec_queue_last_fence_test_dep()
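
xe_exec_queue_last_fence_test_dep() reduces to a signalled test on the queue's last fence: if the fence has already signalled there is no dependency left and the result is 0, otherwise -ETIME. The same ternary as a sketch, with a boolean standing in for the dma-fence query:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for asking whether the queue's last fence has signalled. */
    static int last_fence_test_dep(bool fence_signaled)
    {
        return fence_signaled ? 0 : -ETIME;
    }

    int main(void)
    {
        printf("%d %d\n", last_fence_test_dep(true),
               last_fence_test_dep(false));  /* 0 and -ETIME */
        return 0;
    }
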