Lines Matching refs:stream
117 static u32 xe_oa_circ_diff(struct xe_oa_stream *stream, u32 tail, u32 head) in xe_oa_circ_diff() argument
120 tail + stream->oa_buffer.circ_size - head; in xe_oa_circ_diff()
123 static u32 xe_oa_circ_incr(struct xe_oa_stream *stream, u32 ptr, u32 n) in xe_oa_circ_incr() argument
125 return ptr + n >= stream->oa_buffer.circ_size ? in xe_oa_circ_incr()
126 ptr + n - stream->oa_buffer.circ_size : ptr + n; in xe_oa_circ_incr()
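
The two helpers above implement wrap-around arithmetic over the OA circular buffer (the listing shows only the matching lines; the full ternary in xe_oa_circ_diff() is `tail >= head ? tail - head : tail + circ_size - head`). A minimal standalone sketch of the same arithmetic, assuming a hypothetical circ_size of 4096 bytes purely for illustration:

	#include <stdio.h>

	/* Illustrative stand-in for stream->oa_buffer.circ_size (hypothetical value). */
	#define CIRC_SIZE 4096u

	/* Wrap-around distance from head to tail, as in xe_oa_circ_diff(). */
	static unsigned int circ_diff(unsigned int tail, unsigned int head)
	{
		return tail >= head ? tail - head : tail + CIRC_SIZE - head;
	}

	/* Wrap-around increment, as in xe_oa_circ_incr(). */
	static unsigned int circ_incr(unsigned int ptr, unsigned int n)
	{
		return ptr + n >= CIRC_SIZE ? ptr + n - CIRC_SIZE : ptr + n;
	}

	int main(void)
	{
		/* head trails tail across the wrap point: 128 bytes of data. */
		printf("%u\n", circ_diff(64, 4032));
		/* advancing past the end wraps back to offset 64. */
		printf("%u\n", circ_incr(4032, 128));
		return 0;
	}
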
172 static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream) in __oa_regs() argument
174 return &stream->hwe->oa_unit->regs; in __oa_regs()
177 static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream) in xe_oa_hw_tail_read() argument
179 return xe_mmio_read32(stream->gt, __oa_regs(stream)->oa_tail_ptr) & in xe_oa_hw_tail_read()
186 static u64 oa_report_id(struct xe_oa_stream *stream, void *report) in oa_report_id() argument
188 return oa_report_header_64bit(stream) ? *(u64 *)report : *(u32 *)report; in oa_report_id()
191 static void oa_report_id_clear(struct xe_oa_stream *stream, u32 *report) in oa_report_id_clear() argument
193 if (oa_report_header_64bit(stream)) in oa_report_id_clear()
199 static u64 oa_timestamp(struct xe_oa_stream *stream, void *report) in oa_timestamp() argument
201 return oa_report_header_64bit(stream) ? in oa_timestamp()
206 static void oa_timestamp_clear(struct xe_oa_stream *stream, u32 *report) in oa_timestamp_clear() argument
208 if (oa_report_header_64bit(stream)) in oa_timestamp_clear()
214 static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream) in xe_oa_buffer_check_unlocked() argument
216 u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); in xe_oa_buffer_check_unlocked()
217 int report_size = stream->oa_buffer.format->size; in xe_oa_buffer_check_unlocked()
223 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in xe_oa_buffer_check_unlocked()
225 hw_tail = xe_oa_hw_tail_read(stream); in xe_oa_buffer_check_unlocked()
233 partial_report_size = xe_oa_circ_diff(stream, hw_tail, stream->oa_buffer.tail); in xe_oa_buffer_check_unlocked()
237 hw_tail = xe_oa_circ_diff(stream, hw_tail, partial_report_size); in xe_oa_buffer_check_unlocked()
249 while (xe_oa_circ_diff(stream, tail, stream->oa_buffer.tail) >= report_size) { in xe_oa_buffer_check_unlocked()
250 void *report = stream->oa_buffer.vaddr + tail; in xe_oa_buffer_check_unlocked()
252 if (oa_report_id(stream, report) || oa_timestamp(stream, report)) in xe_oa_buffer_check_unlocked()
255 tail = xe_oa_circ_diff(stream, tail, report_size); in xe_oa_buffer_check_unlocked()
258 if (xe_oa_circ_diff(stream, hw_tail, tail) > report_size) in xe_oa_buffer_check_unlocked()
259 drm_dbg(&stream->oa->xe->drm, in xe_oa_buffer_check_unlocked()
261 stream->oa_buffer.head, tail, hw_tail); in xe_oa_buffer_check_unlocked()
263 stream->oa_buffer.tail = tail; in xe_oa_buffer_check_unlocked()
265 pollin = xe_oa_circ_diff(stream, stream->oa_buffer.tail, in xe_oa_buffer_check_unlocked()
266 stream->oa_buffer.head) >= report_size; in xe_oa_buffer_check_unlocked()
268 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in xe_oa_buffer_check_unlocked()
275 struct xe_oa_stream *stream = in xe_oa_poll_check_timer_cb() local
276 container_of(hrtimer, typeof(*stream), poll_check_timer); in xe_oa_poll_check_timer_cb()
278 if (xe_oa_buffer_check_unlocked(stream)) { in xe_oa_poll_check_timer_cb()
279 stream->pollin = true; in xe_oa_poll_check_timer_cb()
280 wake_up(&stream->poll_wq); in xe_oa_poll_check_timer_cb()
283 hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns)); in xe_oa_poll_check_timer_cb()
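
The lines above show the poll-check pattern: an hrtimer callback re-checks the OA buffer, latches pollin, wakes the poll waitqueue, and re-arms itself with the stream's poll period. A hedged kernel-side sketch of that pattern, using a hypothetical poll_ctx container and a data_ready() stand-in rather than the driver's real types:

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/hrtimer.h>
	#include <linux/wait.h>

	/* Hypothetical container mirroring the fields the listed lines touch. */
	struct poll_ctx {
		struct hrtimer timer;
		wait_queue_head_t wq;
		bool pollin;
		u64 period_ns;
	};

	/* Stand-in for xe_oa_buffer_check_unlocked(). */
	static bool data_ready(struct poll_ctx *ctx)
	{
		return true;
	}

	static enum hrtimer_restart poll_check_cb(struct hrtimer *t)
	{
		struct poll_ctx *ctx = container_of(t, struct poll_ctx, timer);

		if (data_ready(ctx)) {
			ctx->pollin = true;
			wake_up(&ctx->wq);
		}

		/* Re-arm relative to now, then keep the timer running. */
		hrtimer_forward_now(t, ns_to_ktime(ctx->period_ns));
		return HRTIMER_RESTART;
	}

	static void poll_ctx_start(struct poll_ctx *ctx, u64 period_ns)
	{
		ctx->period_ns = period_ns;
		init_waitqueue_head(&ctx->wq);
		hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		ctx->timer.function = poll_check_cb;
		hrtimer_start(&ctx->timer, ns_to_ktime(period_ns), HRTIMER_MODE_REL);
	}
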
288 static int xe_oa_append_report(struct xe_oa_stream *stream, char __user *buf, in xe_oa_append_report() argument
291 int report_size = stream->oa_buffer.format->size; in xe_oa_append_report()
300 oa_buf_end = stream->oa_buffer.vaddr + stream->oa_buffer.circ_size; in xe_oa_append_report()
308 if (copy_to_user(buf, stream->oa_buffer.vaddr, in xe_oa_append_report()
320 static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf, in xe_oa_append_reports() argument
323 int report_size = stream->oa_buffer.format->size; in xe_oa_append_reports()
324 u8 *oa_buf_base = stream->oa_buffer.vaddr; in xe_oa_append_reports()
325 u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); in xe_oa_append_reports()
331 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in xe_oa_append_reports()
332 head = stream->oa_buffer.head; in xe_oa_append_reports()
333 tail = stream->oa_buffer.tail; in xe_oa_append_reports()
334 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in xe_oa_append_reports()
336 xe_assert(stream->oa->xe, in xe_oa_append_reports()
337 head < stream->oa_buffer.circ_size && tail < stream->oa_buffer.circ_size); in xe_oa_append_reports()
339 for (; xe_oa_circ_diff(stream, tail, head); in xe_oa_append_reports()
340 head = xe_oa_circ_incr(stream, head, report_size)) { in xe_oa_append_reports()
343 ret = xe_oa_append_report(stream, buf, count, offset, report); in xe_oa_append_reports()
347 if (!(stream->oa_buffer.circ_size % report_size)) { in xe_oa_append_reports()
349 oa_report_id_clear(stream, (void *)report); in xe_oa_append_reports()
350 oa_timestamp_clear(stream, (void *)report); in xe_oa_append_reports()
352 u8 *oa_buf_end = stream->oa_buffer.vaddr + stream->oa_buffer.circ_size; in xe_oa_append_reports()
366 struct xe_reg oaheadptr = __oa_regs(stream)->oa_head_ptr; in xe_oa_append_reports()
368 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in xe_oa_append_reports()
369 xe_mmio_write32(stream->gt, oaheadptr, in xe_oa_append_reports()
371 stream->oa_buffer.head = head; in xe_oa_append_reports()
372 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in xe_oa_append_reports()
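
xe_oa_append_reports() snapshots head/tail under oa_buffer.ptr_lock, copies reports to userspace outside the lock, then publishes the new head (and mirrors it to the OA head register) under the lock again. A simplified consumer-side sketch of that protocol, using hypothetical demo_buf/demo_consume names and plain modulo wrap-around in place of xe_oa_circ_incr():

	#include <linux/types.h>
	#include <linux/spinlock.h>
	#include <linux/uaccess.h>

	struct demo_buf {
		spinlock_t ptr_lock;
		u32 head, tail, circ_size;
		u8 *vaddr;
	};

	static int demo_consume(struct demo_buf *b, char __user *ubuf,
				size_t count, size_t *offset, u32 report_size)
	{
		unsigned long flags;
		u32 head, tail;

		/* Snapshot the pointers under the lock, copy outside it. */
		spin_lock_irqsave(&b->ptr_lock, flags);
		head = b->head;
		tail = b->tail;
		spin_unlock_irqrestore(&b->ptr_lock, flags);

		while (head != tail) {
			if (*offset + report_size > count)
				break;
			if (copy_to_user(ubuf + *offset, b->vaddr + head, report_size))
				return -EFAULT;
			*offset += report_size;
			head = (head + report_size) % b->circ_size;
		}

		/* Publish the new head; the real driver also writes the OA
		 * head pointer register at this point. */
		spin_lock_irqsave(&b->ptr_lock, flags);
		b->head = head;
		spin_unlock_irqrestore(&b->ptr_lock, flags);

		return 0;
	}
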
378 static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream) in xe_oa_init_oa_buffer() argument
380 u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); in xe_oa_init_oa_buffer()
384 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in xe_oa_init_oa_buffer()
386 xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_status, 0); in xe_oa_init_oa_buffer()
387 xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_head_ptr, in xe_oa_init_oa_buffer()
389 stream->oa_buffer.head = 0; in xe_oa_init_oa_buffer()
394 xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_buffer, oa_buf); in xe_oa_init_oa_buffer()
395 xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_tail_ptr, in xe_oa_init_oa_buffer()
399 stream->oa_buffer.tail = 0; in xe_oa_init_oa_buffer()
401 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in xe_oa_init_oa_buffer()
404 memset(stream->oa_buffer.vaddr, 0, stream->oa_buffer.bo->size); in xe_oa_init_oa_buffer()
414 static u32 __oa_ccs_select(struct xe_oa_stream *stream) in __oa_ccs_select() argument
418 if (stream->hwe->class != XE_ENGINE_CLASS_COMPUTE) in __oa_ccs_select()
421 val = REG_FIELD_PREP(OAG_OACONTROL_OA_CCS_SELECT_MASK, stream->hwe->instance); in __oa_ccs_select()
422 xe_assert(stream->oa->xe, in __oa_ccs_select()
423 REG_FIELD_GET(OAG_OACONTROL_OA_CCS_SELECT_MASK, val) == stream->hwe->instance); in __oa_ccs_select()
427 static void xe_oa_enable(struct xe_oa_stream *stream) in xe_oa_enable() argument
429 const struct xe_oa_format *format = stream->oa_buffer.format; in xe_oa_enable()
437 xe_oa_init_oa_buffer(stream); in xe_oa_enable()
439 regs = __oa_regs(stream); in xe_oa_enable()
441 __oa_ccs_select(stream) | OAG_OACONTROL_OA_COUNTER_ENABLE; in xe_oa_enable()
443 if (GRAPHICS_VER(stream->oa->xe) >= 20 && in xe_oa_enable()
444 stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG) in xe_oa_enable()
447 xe_mmio_write32(stream->gt, regs->oa_ctrl, val); in xe_oa_enable()
450 static void xe_oa_disable(struct xe_oa_stream *stream) in xe_oa_disable() argument
452 xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, 0); in xe_oa_disable()
453 if (xe_mmio_wait32(stream->gt, __oa_regs(stream)->oa_ctrl, in xe_oa_disable()
455 drm_err(&stream->oa->xe->drm, in xe_oa_disable()
458 if (GRAPHICS_VERx100(stream->oa->xe) <= 1270 && GRAPHICS_VERx100(stream->oa->xe) != 1260) { in xe_oa_disable()
460 xe_mmio_write32(stream->gt, OA_TLB_INV_CR, 1); in xe_oa_disable()
461 if (xe_mmio_wait32(stream->gt, OA_TLB_INV_CR, 1, 0, 50000, NULL, false)) in xe_oa_disable()
462 drm_err(&stream->oa->xe->drm, in xe_oa_disable()
467 static int xe_oa_wait_unlocked(struct xe_oa_stream *stream) in xe_oa_wait_unlocked() argument
470 if (!stream->periodic) in xe_oa_wait_unlocked()
473 return wait_event_interruptible(stream->poll_wq, in xe_oa_wait_unlocked()
474 xe_oa_buffer_check_unlocked(stream)); in xe_oa_wait_unlocked()
480 static int __xe_oa_read(struct xe_oa_stream *stream, char __user *buf, in __xe_oa_read() argument
484 stream->oa_status = xe_mmio_rmw32(stream->gt, __oa_regs(stream)->oa_status, in __xe_oa_read()
490 if (stream->oa_status & OASTATUS_RELEVANT_BITS) in __xe_oa_read()
493 return xe_oa_append_reports(stream, buf, count, offset); in __xe_oa_read()
499 struct xe_oa_stream *stream = file->private_data; in xe_oa_read() local
504 if (!stream->enabled || !stream->sample) in xe_oa_read()
509 ret = xe_oa_wait_unlocked(stream); in xe_oa_read()
513 mutex_lock(&stream->stream_lock); in xe_oa_read()
514 ret = __xe_oa_read(stream, buf, count, &offset); in xe_oa_read()
515 mutex_unlock(&stream->stream_lock); in xe_oa_read()
518 mutex_lock(&stream->stream_lock); in xe_oa_read()
519 ret = __xe_oa_read(stream, buf, count, &offset); in xe_oa_read()
520 mutex_unlock(&stream->stream_lock); in xe_oa_read()
533 stream->pollin = false; in xe_oa_read()
539 static __poll_t xe_oa_poll_locked(struct xe_oa_stream *stream, in xe_oa_poll_locked() argument
544 poll_wait(file, &stream->poll_wq, wait); in xe_oa_poll_locked()
552 if (stream->pollin) in xe_oa_poll_locked()
560 struct xe_oa_stream *stream = file->private_data; in xe_oa_poll() local
563 mutex_lock(&stream->stream_lock); in xe_oa_poll()
564 ret = xe_oa_poll_locked(stream, file, wait); in xe_oa_poll()
565 mutex_unlock(&stream->stream_lock); in xe_oa_poll()
570 static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb) in xe_oa_submit_bb() argument
578 job = xe_bb_create_job(stream->k_exec_q, bb); in xe_oa_submit_bb()
628 static void xe_oa_free_oa_buffer(struct xe_oa_stream *stream) in xe_oa_free_oa_buffer() argument
630 xe_bo_unpin_map_no_vm(stream->oa_buffer.bo); in xe_oa_free_oa_buffer()
633 static void xe_oa_free_configs(struct xe_oa_stream *stream) in xe_oa_free_configs() argument
637 xe_oa_config_put(stream->oa_config); in xe_oa_free_configs()
638 llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) in xe_oa_free_configs()
642 static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, in xe_oa_store_flex() argument
656 static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc, in xe_oa_modify_ctx_image() argument
662 bb = xe_bb_new(stream->gt, 4 * count, false); in xe_oa_modify_ctx_image()
668 xe_oa_store_flex(stream, lrc, bb, flex, count); in xe_oa_modify_ctx_image()
670 err = xe_oa_submit_bb(stream, bb); in xe_oa_modify_ctx_image()
676 static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri) in xe_oa_load_with_lri() argument
681 bb = xe_bb_new(stream->gt, 3, false); in xe_oa_load_with_lri()
689 err = xe_oa_submit_bb(stream, bb); in xe_oa_load_with_lri()
695 static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable) in xe_oa_configure_oar_context() argument
697 const struct xe_oa_format *format = stream->oa_buffer.format; in xe_oa_configure_oar_context()
698 struct xe_lrc *lrc = stream->exec_q->lrc[0]; in xe_oa_configure_oar_context()
705 OACTXCONTROL(stream->hwe->mmio_base), in xe_oa_configure_oar_context()
706 stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1, in xe_oa_configure_oar_context()
710 RING_CONTEXT_CONTROL(stream->hwe->mmio_base), in xe_oa_configure_oar_context()
719 err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0], in xe_oa_configure_oar_context()
725 return xe_oa_load_with_lri(stream, &reg_lri); in xe_oa_configure_oar_context()
728 static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable) in xe_oa_configure_oac_context() argument
730 const struct xe_oa_format *format = stream->oa_buffer.format; in xe_oa_configure_oac_context()
731 struct xe_lrc *lrc = stream->exec_q->lrc[0]; in xe_oa_configure_oac_context()
737 OACTXCONTROL(stream->hwe->mmio_base), in xe_oa_configure_oac_context()
738 stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1, in xe_oa_configure_oac_context()
742 RING_CONTEXT_CONTROL(stream->hwe->mmio_base), in xe_oa_configure_oac_context()
752 xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, __oa_ccs_select(stream)); in xe_oa_configure_oac_context()
755 err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0], in xe_oa_configure_oac_context()
761 return xe_oa_load_with_lri(stream, &reg_lri); in xe_oa_configure_oac_context()
764 static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable) in xe_oa_configure_oa_context() argument
766 switch (stream->hwe->class) { in xe_oa_configure_oa_context()
768 return xe_oa_configure_oar_context(stream, enable); in xe_oa_configure_oa_context()
770 return xe_oa_configure_oac_context(stream, enable); in xe_oa_configure_oa_context()
779 static u32 oag_configure_mmio_trigger(const struct xe_oa_stream *stream, bool enable) in oag_configure_mmio_trigger() argument
782 enable && stream && stream->sample ? in oag_configure_mmio_trigger()
786 static void xe_oa_disable_metric_set(struct xe_oa_stream *stream) in xe_oa_disable_metric_set() argument
794 if (stream->oa->xe->info.platform == XE_DG2) { in xe_oa_disable_metric_set()
795 xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN, in xe_oa_disable_metric_set()
797 xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2, in xe_oa_disable_metric_set()
801 xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug, in xe_oa_disable_metric_set()
802 oag_configure_mmio_trigger(stream, false)); in xe_oa_disable_metric_set()
805 if (stream->exec_q) in xe_oa_disable_metric_set()
806 xe_oa_configure_oa_context(stream, false); in xe_oa_disable_metric_set()
809 xe_mmio_rmw32(stream->gt, RPM_CONFIG1, GT_NOA_ENABLE, 0); in xe_oa_disable_metric_set()
812 (HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0); in xe_oa_disable_metric_set()
815 xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, sqcnt1, 0); in xe_oa_disable_metric_set()
818 static void xe_oa_stream_destroy(struct xe_oa_stream *stream) in xe_oa_stream_destroy() argument
820 struct xe_oa_unit *u = stream->hwe->oa_unit; in xe_oa_stream_destroy()
821 struct xe_gt *gt = stream->hwe->gt; in xe_oa_stream_destroy()
823 if (WARN_ON(stream != u->exclusive_stream)) in xe_oa_stream_destroy()
828 mutex_destroy(&stream->stream_lock); in xe_oa_stream_destroy()
830 xe_oa_disable_metric_set(stream); in xe_oa_stream_destroy()
831 xe_exec_queue_put(stream->k_exec_q); in xe_oa_stream_destroy()
833 xe_oa_free_oa_buffer(stream); in xe_oa_stream_destroy()
836 xe_pm_runtime_put(stream->oa->xe); in xe_oa_stream_destroy()
839 if (stream->override_gucrc) in xe_oa_stream_destroy()
842 xe_oa_free_configs(stream); in xe_oa_stream_destroy()
845 static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream) in xe_oa_alloc_oa_buffer() argument
852 bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL, in xe_oa_alloc_oa_buffer()
858 stream->oa_buffer.bo = bo; in xe_oa_alloc_oa_buffer()
860 xe_assert(stream->oa->xe, bo->vmap.is_iomem == 0); in xe_oa_alloc_oa_buffer()
861 stream->oa_buffer.vaddr = bo->vmap.vaddr; in xe_oa_alloc_oa_buffer()
866 __xe_oa_alloc_config_buffer(struct xe_oa_stream *stream, struct xe_oa_config *oa_config) in __xe_oa_alloc_config_buffer() argument
879 bb = xe_bb_new(stream->gt, config_length, false); in __xe_oa_alloc_config_buffer()
887 llist_add(&oa_bo->node, &stream->oa_config_bos); in __xe_oa_alloc_config_buffer()
896 xe_oa_alloc_config_buffer(struct xe_oa_stream *stream, struct xe_oa_config *oa_config) in xe_oa_alloc_config_buffer() argument
901 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { in xe_oa_alloc_config_buffer()
908 oa_bo = __xe_oa_alloc_config_buffer(stream, oa_config); in xe_oa_alloc_config_buffer()
913 static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config) in xe_oa_emit_oa_config() argument
919 oa_bo = xe_oa_alloc_config_buffer(stream, config); in xe_oa_emit_oa_config()
925 err = xe_oa_submit_bb(stream, oa_bo->bb); in xe_oa_emit_oa_config()
933 static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream) in oag_report_ctx_switches() argument
937 stream->sample ? in oag_report_ctx_switches()
941 static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) in xe_oa_enable_metric_set() argument
951 if (stream->oa->xe->info.platform == XE_DG2) { in xe_oa_enable_metric_set()
952 xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN, in xe_oa_enable_metric_set()
954 xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2, in xe_oa_enable_metric_set()
962 if (GRAPHICS_VER(stream->oa->xe) >= 20) in xe_oa_enable_metric_set()
969 xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug, in xe_oa_enable_metric_set()
971 oag_report_ctx_switches(stream) | in xe_oa_enable_metric_set()
972 oag_configure_mmio_trigger(stream, true)); in xe_oa_enable_metric_set()
974 xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ? in xe_oa_enable_metric_set()
978 stream->period_exponent)) : 0); in xe_oa_enable_metric_set()
986 (HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0); in xe_oa_enable_metric_set()
988 xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, 0, sqcnt1); in xe_oa_enable_metric_set()
991 if (stream->exec_q) { in xe_oa_enable_metric_set()
992 ret = xe_oa_configure_oa_context(stream, true); in xe_oa_enable_metric_set()
997 return xe_oa_emit_oa_config(stream, stream->oa_config); in xe_oa_enable_metric_set()
1000 static void xe_oa_stream_enable(struct xe_oa_stream *stream) in xe_oa_stream_enable() argument
1002 stream->pollin = false; in xe_oa_stream_enable()
1004 xe_oa_enable(stream); in xe_oa_stream_enable()
1006 if (stream->sample) in xe_oa_stream_enable()
1007 hrtimer_start(&stream->poll_check_timer, in xe_oa_stream_enable()
1008 ns_to_ktime(stream->poll_period_ns), in xe_oa_stream_enable()
1012 static void xe_oa_stream_disable(struct xe_oa_stream *stream) in xe_oa_stream_disable() argument
1014 xe_oa_disable(stream); in xe_oa_stream_disable()
1016 if (stream->sample) in xe_oa_stream_disable()
1017 hrtimer_cancel(&stream->poll_check_timer); in xe_oa_stream_disable()
1020 static int xe_oa_enable_preempt_timeslice(struct xe_oa_stream *stream) in xe_oa_enable_preempt_timeslice() argument
1022 struct xe_exec_queue *q = stream->exec_q; in xe_oa_enable_preempt_timeslice()
1026 ret1 = q->ops->set_timeslice(q, stream->hwe->eclass->sched_props.timeslice_us); in xe_oa_enable_preempt_timeslice()
1027 ret2 = q->ops->set_preempt_timeout(q, stream->hwe->eclass->sched_props.preempt_timeout_us); in xe_oa_enable_preempt_timeslice()
1032 drm_dbg(&stream->oa->xe->drm, "%s failed ret1 %d ret2 %d\n", __func__, ret1, ret2); in xe_oa_enable_preempt_timeslice()
1036 static int xe_oa_disable_preempt_timeslice(struct xe_oa_stream *stream) in xe_oa_disable_preempt_timeslice() argument
1038 struct xe_exec_queue *q = stream->exec_q; in xe_oa_disable_preempt_timeslice()
1052 xe_oa_enable_preempt_timeslice(stream); in xe_oa_disable_preempt_timeslice()
1053 drm_dbg(&stream->oa->xe->drm, "%s failed %d\n", __func__, ret); in xe_oa_disable_preempt_timeslice()
1057 static int xe_oa_enable_locked(struct xe_oa_stream *stream) in xe_oa_enable_locked() argument
1059 if (stream->enabled) in xe_oa_enable_locked()
1062 if (stream->no_preempt) { in xe_oa_enable_locked()
1063 int ret = xe_oa_disable_preempt_timeslice(stream); in xe_oa_enable_locked()
1069 xe_oa_stream_enable(stream); in xe_oa_enable_locked()
1071 stream->enabled = true; in xe_oa_enable_locked()
1075 static int xe_oa_disable_locked(struct xe_oa_stream *stream) in xe_oa_disable_locked() argument
1079 if (!stream->enabled) in xe_oa_disable_locked()
1082 xe_oa_stream_disable(stream); in xe_oa_disable_locked()
1084 if (stream->no_preempt) in xe_oa_disable_locked()
1085 ret = xe_oa_enable_preempt_timeslice(stream); in xe_oa_disable_locked()
1087 stream->enabled = false; in xe_oa_disable_locked()
1091 static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg) in xe_oa_config_locked() argument
1094 long ret = stream->oa_config->id; in xe_oa_config_locked()
1099 if (XE_IOCTL_DBG(stream->oa->xe, err)) in xe_oa_config_locked()
1102 if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) || in xe_oa_config_locked()
1103 XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) || in xe_oa_config_locked()
1104 XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) || in xe_oa_config_locked()
1105 XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET)) in xe_oa_config_locked()
1108 config = xe_oa_get_oa_config(stream->oa, ext.value); in xe_oa_config_locked()
1112 if (config != stream->oa_config) { in xe_oa_config_locked()
1113 err = xe_oa_emit_oa_config(stream, config); in xe_oa_config_locked()
1115 config = xchg(&stream->oa_config, config); in xe_oa_config_locked()
1125 static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg) in xe_oa_status_locked() argument
1131 if (stream->oa_status & OASTATUS_REPORT_LOST) in xe_oa_status_locked()
1133 if (stream->oa_status & OASTATUS_BUFFER_OVERFLOW) in xe_oa_status_locked()
1135 if (stream->oa_status & OASTATUS_COUNTER_OVERFLOW) in xe_oa_status_locked()
1137 if (stream->oa_status & OASTATUS_MMIO_TRG_Q_FULL) in xe_oa_status_locked()
1146 static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg) in xe_oa_info_locked() argument
1157 static long xe_oa_ioctl_locked(struct xe_oa_stream *stream, in xe_oa_ioctl_locked() argument
1163 return xe_oa_enable_locked(stream); in xe_oa_ioctl_locked()
1165 return xe_oa_disable_locked(stream); in xe_oa_ioctl_locked()
1167 return xe_oa_config_locked(stream, arg); in xe_oa_ioctl_locked()
1169 return xe_oa_status_locked(stream, arg); in xe_oa_ioctl_locked()
1171 return xe_oa_info_locked(stream, arg); in xe_oa_ioctl_locked()
1181 struct xe_oa_stream *stream = file->private_data; in xe_oa_ioctl() local
1184 mutex_lock(&stream->stream_lock); in xe_oa_ioctl()
1185 ret = xe_oa_ioctl_locked(stream, cmd, arg); in xe_oa_ioctl()
1186 mutex_unlock(&stream->stream_lock); in xe_oa_ioctl()
1191 static void xe_oa_destroy_locked(struct xe_oa_stream *stream) in xe_oa_destroy_locked() argument
1193 if (stream->enabled) in xe_oa_destroy_locked()
1194 xe_oa_disable_locked(stream); in xe_oa_destroy_locked()
1196 xe_oa_stream_destroy(stream); in xe_oa_destroy_locked()
1198 if (stream->exec_q) in xe_oa_destroy_locked()
1199 xe_exec_queue_put(stream->exec_q); in xe_oa_destroy_locked()
1201 kfree(stream); in xe_oa_destroy_locked()
1206 struct xe_oa_stream *stream = file->private_data; in xe_oa_release() local
1207 struct xe_gt *gt = stream->gt; in xe_oa_release()
1211 xe_oa_destroy_locked(stream); in xe_oa_release()
1223 struct xe_oa_stream *stream = file->private_data; in xe_oa_mmap() local
1224 struct xe_bo *bo = stream->oa_buffer.bo; in xe_oa_mmap()
1229 drm_dbg(&stream->oa->xe->drm, "Insufficient privilege to map OA buffer\n"); in xe_oa_mmap()
1235 drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n"); in xe_oa_mmap()
1244 drm_dbg(&stream->oa->xe->drm, "mmap must be read only\n"); in xe_oa_mmap()
1250 xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages == vma_pages(vma)); in xe_oa_mmap()
1299 static u32 xe_oa_context_image_offset(struct xe_oa_stream *stream, u32 reg) in xe_oa_context_image_offset() argument
1301 struct xe_lrc *lrc = stream->exec_q->lrc[0]; in xe_oa_context_image_offset()
1302 u32 len = (xe_gt_lrc_size(stream->gt, stream->hwe->class) + in xe_oa_context_image_offset()
1307 if (drm_WARN_ON(&stream->oa->xe->drm, !state)) in xe_oa_context_image_offset()
1316 drm_WARN_ON(&stream->oa->xe->drm, in xe_oa_context_image_offset()
1329 static int xe_oa_set_ctx_ctrl_offset(struct xe_oa_stream *stream) in xe_oa_set_ctx_ctrl_offset() argument
1331 struct xe_reg reg = OACTXCONTROL(stream->hwe->mmio_base); in xe_oa_set_ctx_ctrl_offset()
1332 u32 offset = stream->oa->ctx_oactxctrl_offset[stream->hwe->class]; in xe_oa_set_ctx_ctrl_offset()
1338 offset = xe_oa_context_image_offset(stream, reg.addr); in xe_oa_set_ctx_ctrl_offset()
1339 stream->oa->ctx_oactxctrl_offset[stream->hwe->class] = offset; in xe_oa_set_ctx_ctrl_offset()
1341 drm_dbg(&stream->oa->xe->drm, "%s oa ctx control at 0x%08x dword offset\n", in xe_oa_set_ctx_ctrl_offset()
1342 stream->hwe->name, offset); in xe_oa_set_ctx_ctrl_offset()
1347 static int xe_oa_stream_init(struct xe_oa_stream *stream, in xe_oa_stream_init() argument
1354 stream->exec_q = param->exec_q; in xe_oa_stream_init()
1355 stream->poll_period_ns = DEFAULT_POLL_PERIOD_NS; in xe_oa_stream_init()
1356 stream->hwe = param->hwe; in xe_oa_stream_init()
1357 stream->gt = stream->hwe->gt; in xe_oa_stream_init()
1358 stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format]; in xe_oa_stream_init()
1360 stream->sample = param->sample; in xe_oa_stream_init()
1361 stream->periodic = param->period_exponent > 0; in xe_oa_stream_init()
1362 stream->period_exponent = param->period_exponent; in xe_oa_stream_init()
1363 stream->no_preempt = param->no_preempt; in xe_oa_stream_init()
1370 if (GRAPHICS_VER(stream->oa->xe) >= 20 && in xe_oa_stream_init()
1371 stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample) in xe_oa_stream_init()
1372 stream->oa_buffer.circ_size = in xe_oa_stream_init()
1373 XE_OA_BUFFER_SIZE - XE_OA_BUFFER_SIZE % stream->oa_buffer.format->size; in xe_oa_stream_init()
1375 stream->oa_buffer.circ_size = XE_OA_BUFFER_SIZE; in xe_oa_stream_init()
1377 if (stream->exec_q && engine_supports_mi_query(stream->hwe)) { in xe_oa_stream_init()
1379 ret = xe_oa_set_ctx_ctrl_offset(stream); in xe_oa_stream_init()
1381 drm_err(&stream->oa->xe->drm, in xe_oa_stream_init()
1383 stream->hwe->name); in xe_oa_stream_init()
1388 stream->oa_config = xe_oa_get_oa_config(stream->oa, param->metric_set); in xe_oa_stream_init()
1389 if (!stream->oa_config) { in xe_oa_stream_init()
1390 drm_dbg(&stream->oa->xe->drm, "Invalid OA config id=%i\n", param->metric_set); in xe_oa_stream_init()
1401 if (stream->oa->xe->info.platform == XE_PVC) { in xe_oa_stream_init()
1407 stream->override_gucrc = true; in xe_oa_stream_init()
1411 xe_pm_runtime_get(stream->oa->xe); in xe_oa_stream_init()
1414 ret = xe_oa_alloc_oa_buffer(stream); in xe_oa_stream_init()
1418 stream->k_exec_q = xe_exec_queue_create(stream->oa->xe, NULL, in xe_oa_stream_init()
1419 BIT(stream->hwe->logical_instance), 1, in xe_oa_stream_init()
1420 stream->hwe, EXEC_QUEUE_FLAG_KERNEL, 0); in xe_oa_stream_init()
1421 if (IS_ERR(stream->k_exec_q)) { in xe_oa_stream_init()
1422 ret = PTR_ERR(stream->k_exec_q); in xe_oa_stream_init()
1423 drm_err(&stream->oa->xe->drm, "gt%d, hwe %s, xe_exec_queue_create failed=%d", in xe_oa_stream_init()
1424 stream->gt->info.id, stream->hwe->name, ret); in xe_oa_stream_init()
1428 ret = xe_oa_enable_metric_set(stream); in xe_oa_stream_init()
1430 drm_dbg(&stream->oa->xe->drm, "Unable to enable metric set\n"); in xe_oa_stream_init()
1434 drm_dbg(&stream->oa->xe->drm, "opening stream oa config uuid=%s\n", in xe_oa_stream_init()
1435 stream->oa_config->uuid); in xe_oa_stream_init()
1437 WRITE_ONCE(u->exclusive_stream, stream); in xe_oa_stream_init()
1439 hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in xe_oa_stream_init()
1440 stream->poll_check_timer.function = xe_oa_poll_check_timer_cb; in xe_oa_stream_init()
1441 init_waitqueue_head(&stream->poll_wq); in xe_oa_stream_init()
1443 spin_lock_init(&stream->oa_buffer.ptr_lock); in xe_oa_stream_init()
1444 mutex_init(&stream->stream_lock); in xe_oa_stream_init()
1449 xe_oa_disable_metric_set(stream); in xe_oa_stream_init()
1450 xe_exec_queue_put(stream->k_exec_q); in xe_oa_stream_init()
1452 xe_oa_free_oa_buffer(stream); in xe_oa_stream_init()
1455 xe_pm_runtime_put(stream->oa->xe); in xe_oa_stream_init()
1456 if (stream->override_gucrc) in xe_oa_stream_init()
1459 xe_oa_free_configs(stream); in xe_oa_stream_init()
1467 struct xe_oa_stream *stream; in xe_oa_stream_open_ioctl_locked() local
1478 stream = kzalloc(sizeof(*stream), GFP_KERNEL); in xe_oa_stream_open_ioctl_locked()
1479 if (!stream) { in xe_oa_stream_open_ioctl_locked()
1484 stream->oa = oa; in xe_oa_stream_open_ioctl_locked()
1485 ret = xe_oa_stream_init(stream, param); in xe_oa_stream_open_ioctl_locked()
1490 ret = xe_oa_enable_locked(stream); in xe_oa_stream_open_ioctl_locked()
1495 stream_fd = anon_inode_getfd("[xe_oa]", &xe_oa_fops, stream, 0); in xe_oa_stream_open_ioctl_locked()
1502 drm_dev_get(&stream->oa->xe->drm); in xe_oa_stream_open_ioctl_locked()
1507 xe_oa_disable_locked(stream); in xe_oa_stream_open_ioctl_locked()
1509 xe_oa_stream_destroy(stream); in xe_oa_stream_open_ioctl_locked()
1511 kfree(stream); in xe_oa_stream_open_ioctl_locked()
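
The open path at the end of the listing allocates the stream, initializes it, optionally enables it, and exposes it through an anonymous-inode fd so the xe_oa_fops read/poll/ioctl/mmap handlers above can reach it via file->private_data. A minimal sketch of that anon_inode_getfd() ownership pattern, with hypothetical demo_* names in place of the driver's:

	#include <linux/anon_inodes.h>
	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	/* Hypothetical per-open state and fops; the real driver uses xe_oa_fops. */
	struct demo_stream {
		int dummy;
	};

	static int demo_release(struct inode *inode, struct file *file)
	{
		kfree(file->private_data);
		return 0;
	}

	static const struct file_operations demo_fops = {
		.owner   = THIS_MODULE,
		.release = demo_release,
	};

	static int demo_open_stream(void)
	{
		struct demo_stream *s;
		int fd;

		s = kzalloc(sizeof(*s), GFP_KERNEL);
		if (!s)
			return -ENOMEM;

		/* Hands 's' to the new fd; the fops callbacks retrieve it as
		 * file->private_data, mirroring how the OA stream is found. */
		fd = anon_inode_getfd("[demo]", &demo_fops, s, 0);
		if (fd < 0)
			kfree(s);

		return fd;
	}
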