/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */

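/*
 * For illustration only, a minimal sketch of userspace usage (not part of
 * the driver): it assumes the uAPI definitions from
 * include/uapi/drm/i915_drm.h, an already-open DRM fd in drm_fd, and a
 * hypothetical metrics set ID of 1 (real IDs are advertised under
 * /sys/class/drm/<card>/metrics/):
 *
 *	u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4,
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *	char buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 * Each read() returns a sequence of records, each framed by a
 * struct drm_i915_perf_record_header.
 */
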
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; a
 * perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration.  For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say: we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics, these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when normalizing. It would be inconvenient to split counters up into
 *   separate events, only to require userspace to recombine them. For Mesa
 *   it's also convenient to be forwarded raw, periodic reports for combining
 *   with the side-band raw reports it captures using MI_REPORT_PERF_COUNT
 *   commands.
 *
 *   - As a side note on perf's grouping feature; there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time.  The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.  Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and made our locking
 *   requirements somewhat complex as we handled the interaction with the rest
 *   of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_rc6.h"
#include "gt/intel_ring.h"
#include "gt/uc/intel_guc_slpc.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"

/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently generally designed assuming the largest 16M size is
 * used such that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

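/*
 * A worked example of the mask arithmetic (a sketch, assuming the 16M
 * buffer size above, i.e. a mask of 0xffffff): with head = 0xffff00 and
 * tail = 0x000100 the subtraction wraps, but masking recovers the 0x200
 * bytes available between the two offsets:
 *
 *	OA_TAKEN(0x000100, 0xffff00)
 *		== (0x000100 - 0xffff00) & 0xffffff
 *		== 0x200
 */
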
/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the
 * reports in the OA buffer, starting from the tail reported by the HW, until
 * we find a report with its first 2 dwords not 0, meaning its previous report
 * is completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the report reason while the second is the timestamp, making
 * the chances of having those 2 fields at 0 fairly unlikely. A more detailed
 * explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

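/*
 * A usage sketch for the sysctl above: an administrator can allow
 * unprivileged access to system-wide metrics at runtime with:
 *
 *	# sysctl dev.i915.perf_stream_paranoid=0
 */
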
/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31

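/*
 * A worked example of the exponent arithmetic (a sketch, assuming the
 * period derivation used by this driver of 2^(exponent + 1) timestamp
 * ticks per report, and Haswell's 12.5MHz timestamp, i.e. an 80ns tick):
 * exponent 0 samples every 2 * 80ns = 160ns, while OA_EXPONENT_MAX (31)
 * samples every 2^32 * 80ns, roughly once every 5.7 minutes, which is
 * also the point at which the 32bit OA report timestamps would wrap.
 */
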
#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)

#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OAR_FORMAT_A32u40_A4u32_B8_C8]    = { 5, 256 },
	[I915_OA_FORMAT_A24u40_A14u32_B8_C8]    = { 5, 256 },
	[I915_OAM_FORMAT_MPEC8u64_B8_C8]	= { 1, 192, TYPE_OAM, HDR_64_BIT },
	[I915_OAM_FORMAT_MPEC8u32_B8_C8]	= { 2, 128, TYPE_OAM, HDR_64_BIT },
};

static const u32 mtl_oa_base[] = {
	[PERF_GROUP_OAM_SAMEDIA_0] = 0x393000,
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static inline const
struct i915_perf_regs *__oa_regs(struct i915_perf_stream *stream)
{
	return &stream->engine->oa_group->regs;
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, __oa_regs(stream)->oa_tail_ptr) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

#define oa_report_header_64bit(__s) \
	((__s)->oa_buffer.format->header == HDR_64_BIT)

static u64 oa_report_id(struct i915_perf_stream *stream, void *report)
{
	return oa_report_header_64bit(stream) ? *(u64 *)report : *(u32 *)report;
}

static u64 oa_report_reason(struct i915_perf_stream *stream, void *report)
{
	return (oa_report_id(stream, report) >> OAREPORT_REASON_SHIFT) &
	       (GRAPHICS_VER(stream->perf->i915) == 12 ?
		OAREPORT_REASON_MASK_EXTENDED :
		OAREPORT_REASON_MASK);
}

static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		*(u64 *)report = 0;
	else
		*report = 0;
}

static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
{
	return !(oa_report_id(stream, report) &
	       stream->perf->gen8_valid_ctx_bit);
}

static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
{
	return oa_report_header_64bit(stream) ?
		*((u64 *)report + 1) :
		*((u32 *)report + 1);
}

static void oa_timestamp_clear(struct i915_perf_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		*(u64 *)&report[2] = 0;
	else
		report[1] = 0;
}

static u32 oa_context_id(struct i915_perf_stream *stream, u32 *report)
{
	u32 ctx_id = oa_report_header_64bit(stream) ? report[4] : report[2];

	return ctx_id & stream->specific_ctx_id_mask;
}

static void oa_context_id_squash(struct i915_perf_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		report[4] = INVALID_CTX_ID;
	else
		report[2] = INVALID_CTX_ID;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail in the oa_buffer object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format->size;
	u32 tail, hw_tail;
	unsigned long flags;
	bool pollin;
	u32 partial_report_size;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
	hw_tail -= gtt_offset;

	/* The tail pointer increases in 64 byte increments, not in report_size
	 * steps. Also the report size may not be a power of 2. Compute the
	 * size of any partially landed report at the tail of the OA buffer.
	 */
	partial_report_size = OA_TAKEN(hw_tail, stream->oa_buffer.tail);
	partial_report_size %= report_size;

	/* Subtract partial amount off the tail */
	hw_tail = OA_TAKEN(hw_tail, partial_report_size);

	tail = hw_tail;

	/* Walk the stream backward until we find a report with report
	 * id and timestamp not at 0. Since the circular buffer pointers
	 * progress by increments of 64 bytes and that reports can be up
	 * to 256 bytes long, we can't tell whether a report has fully
	 * landed in memory before the report id and timestamp of the
	 * following report have effectively landed.
	 *
	 * This is assuming that the writes of the OA unit land in
	 * memory in the order they were written to.
	 * If not : (╯°□°)╯︵ ┻━┻
	 */
	while (OA_TAKEN(tail, stream->oa_buffer.tail) >= report_size) {
		void *report = stream->oa_buffer.vaddr + tail;

		if (oa_report_id(stream, report) ||
		    oa_timestamp(stream, report))
			break;

		tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
	}

	if (OA_TAKEN(hw_tail, tail) > report_size &&
	    __ratelimit(&stream->perf->tail_pointer_race))
		drm_notice(&stream->uncore->i915->drm,
			   "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
			   stream->oa_buffer.head, tail, hw_tail);

	stream->oa_buffer.tail = tail;

	pollin = OA_TAKEN(stream->oa_buffer.tail,
			  stream->oa_buffer.head) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format->size;
	struct drm_i915_perf_record_header header;
	int report_size_partial;
	u8 *oa_buf_end;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	oa_buf_end = stream->oa_buffer.vaddr + OA_BUFFER_SIZE;
	report_size_partial = oa_buf_end - report;

	if (report_size_partial < report_size) {
		if (copy_to_user(buf, report, report_size_partial))
			return -EFAULT;
		buf += report_size_partial;

		if (copy_to_user(buf, stream->oa_buffer.vaddr,
				 report_size - report_size_partial))
			return -EFAULT;
	} else if (copy_to_user(buf, report, report_size)) {
		return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size.
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE ||
			  tail > OA_BUFFER_SIZE,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;

	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u64 reason;

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 */
		reason = oa_report_reason(stream, report);
		ctx_id = oa_context_id(stream, report32);

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 *
		 * Update:
		 *
		 * On XEHP platforms the behavior of context id valid bit has
		 * changed compared to prior platforms. To describe this, we
		 * define a few terms:
		 *
		 * context-switch-report: This is a report with the reason type
		 * being context-switch. It is generated when a context switches
		 * out.
		 *
		 * context-valid-bit: A bit that is set in the report ID field
		 * to indicate that a valid context has been loaded.
		 *
		 * gpu-idle: A condition characterized by a
		 * context-switch-report with context-valid-bit set to 0.
		 *
		 * On prior platforms, context-id-valid bit is set to 0 only
		 * when GPU goes idle. In all other reports, it is set to 1.
		 *
		 * On XEHP platforms, context-valid-bit is set to 1 in a context
		 * switch report if a new context switched in. For all other
		 * reports it is set to 0.
		 *
		 * This change in behavior causes an issue with MMIO triggered
		 * reports. MMIO triggered reports have the markers in the
		 * context ID field and the context-valid-bit is 0. The logic
		 * below to squash the context ID would render the report
		 * useless since the user will not be able to find it in the OA
		 * buffer. Since MMIO triggered reports exist only on XEHP,
		 * we should avoid squashing these for XEHP platforms.
		 */

		if (oa_report_ctx_invalid(stream, report) &&
		    GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 55)) {
			ctx_id = INVALID_CTX_ID;
			oa_context_id_squash(stream, report32);
		}

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				oa_context_id_squash(stream, report32);
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		if (is_power_of_2(report_size)) {
			/*
			 * Clear out the report id and timestamp as a means
			 * to detect unlanded reports.
			 */
			oa_report_id_clear(stream, report32);
			oa_timestamp_clear(stream, report32);
		} else {
			u8 *oa_buf_end = stream->oa_buffer.vaddr +
					 OA_BUFFER_SIZE;
			u32 part = oa_buf_end - (u8 *)report32;

			/* Zero out the entire report */
			if (report_size <= part) {
				memset(report32, 0, report_size);
			} else {
				memset(report32, 0, part);
				memset(oa_buf_base, 0, report_size - part);
			}
		}
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    __oa_regs(stream)->oa_head_ptr :
			    GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		intel_uncore_write(uncore, oaheadptr,
				   (head + gtt_offset) & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       __oa_regs(stream)->oa_status :
		       GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;

	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				drm_notice(&uncore->i915->drm,
					   "Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   ((head + gtt_offset) & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

static int
__store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
{
	u32 *cs, cmd;

	cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(rq->i915) >= 8)
		cmd++;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(reg);
	*cs++ = ggtt_offset;
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}

static int
__read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);

	err = __store_reg_to_mem(rq, reg, ggtt_offset);

	i915_request_add(rq);
	if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}

static int
gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
{
	struct i915_vma *scratch;
	u32 *val;
	int err;

	scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	err = i915_vma_sync(scratch);
	if (err)
		goto err_scratch;

	err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
			 i915_ggtt_offset(scratch));
	if (err)
		goto err_scratch;

	val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
	if (IS_ERR(val)) {
		err = PTR_ERR(val);
		goto err_scratch;
	}

	*ctx_id = *val;
	i915_gem_object_unpin_map(scratch->obj);

err_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}
1398 
1399 /*
1400  * For execlist mode of submission, pick an unused context id:
1401  * 0 to (NUM_CONTEXT_TAG - 1) are used by other contexts and
1402  * XXX_MAX_CONTEXT_HW_ID is used by the idle context.
1403  *
1404  * For GuC mode of submission, read the context id from the upper dword of the
1405  * EXECLIST_STATUS register. Note that we read this value only once and expect
1406  * that the value stays fixed for the entire OA use case. There are cases where
1407  * the GuC KMD implementation may deregister a context to reuse its context id,
1408  * but we prevent that from happening to the OA context by pinning it.
1409  */
1410 static int gen12_get_render_context_id(struct i915_perf_stream *stream)
1411 {
1412 	u32 ctx_id, mask;
1413 	int ret;
1414 
1415 	if (intel_engine_uses_guc(stream->engine)) {
1416 		ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
1417 		if (ret)
1418 			return ret;
1419 
1420 		mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
1421 			(GEN12_GUC_SW_CTX_ID_SHIFT - 32);
1422 	} else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 55)) {
1423 		ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
1424 			(XEHP_SW_CTX_ID_SHIFT - 32);
1425 
1426 		mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
1427 			(XEHP_SW_CTX_ID_SHIFT - 32);
1428 	} else {
1429 		ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
1430 			 (GEN11_SW_CTX_ID_SHIFT - 32);
1431 
1432 		mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
1433 			(GEN11_SW_CTX_ID_SHIFT - 32);
1434 	}
1435 	stream->specific_ctx_id = ctx_id & mask;
1436 	stream->specific_ctx_id_mask = mask;
1437 
1438 	return 0;
1439 }
1440 
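/*
 * Scan a single MI_LOAD_REGISTER_IMM packet starting at state[*offset] for
 * the given register. On return, *offset points at the matching register
 * entry inside the packet, or past the packet's register list if not found.
 */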
1441 static bool oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
1442 {
1443 	u32 idx = *offset;
1444 	u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
1445 	bool found = false;
1446 
1447 	idx++;
1448 	for (; idx < len; idx += 2) {
1449 		if (state[idx] == reg) {
1450 			found = true;
1451 			break;
1452 		}
1453 	}
1454 
1455 	*offset = idx;
1456 	return found;
1457 }
1458 
1459 static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
1460 {
1461 	u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
1462 	u32 *state = ce->lrc_reg_state;
1463 
1464 	if (drm_WARN_ON(&ce->engine->i915->drm, !state))
1465 		return U32_MAX;
1466 
1467 	for (offset = 0; offset < len; ) {
1468 		if (IS_MI_LRI_CMD(state[offset])) {
1469 			/*
1470 			 * We expect reg-value pairs in MI_LRI command, so
1471 			 * MI_LRI_LEN() should be even, if not, issue a warning.
1472 			 */
1473 			drm_WARN_ON(&ce->engine->i915->drm,
1474 				    MI_LRI_LEN(state[offset]) & 0x1);
1475 
1476 			if (oa_find_reg_in_lri(state, reg, &offset, len))
1477 				break;
1478 		} else {
1479 			offset++;
1480 		}
1481 	}
1482 
1483 	return offset < len ? offset : U32_MAX;
1484 }
1485 
1486 static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
1487 {
1488 	i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
1489 	struct i915_perf *perf = &ce->engine->i915->perf;
1490 	u32 offset = perf->ctx_oactxctrl_offset;
1491 
1492 	/* Do this only once. Failure is stored as offset of U32_MAX */
1493 	if (offset)
1494 		goto exit;
1495 
1496 	offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
1497 	perf->ctx_oactxctrl_offset = offset;
1498 
1499 	drm_dbg(&ce->engine->i915->drm,
1500 		"%s oa ctx control at 0x%08x dword offset\n",
1501 		ce->engine->name, offset);
1502 
1503 exit:
1504 	return offset && offset != U32_MAX ? 0 : -ENODEV;
1505 }
1506 
1507 static bool engine_supports_mi_query(struct intel_engine_cs *engine)
1508 {
1509 	return engine->class == RENDER_CLASS;
1510 }
1511 
1512 /**
1513  * oa_get_render_ctx_id - determine and hold ctx hw id
1514  * @stream: An i915-perf stream opened for OA metrics
1515  *
1516  * Determine the render context hw id, and ensure it remains fixed for the
1517  * lifetime of the stream. This ensures that we don't have to worry about
1518  * updating the context ID in OACONTROL on the fly.
1519  *
1520  * Returns: zero on success or a negative error code
1521  */
1522 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1523 {
1524 	struct intel_context *ce;
1525 	int ret = 0;
1526 
1527 	ce = oa_pin_context(stream);
1528 	if (IS_ERR(ce))
1529 		return PTR_ERR(ce);
1530 
1531 	if (engine_supports_mi_query(stream->engine) &&
1532 	    HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
1533 		/*
1534 		 * We are enabling perf query here. If we can't find the context
1535 		 * control offset, just return an error.
1536 		 */
1537 		ret = set_oa_ctx_ctrl_offset(ce);
1538 		if (ret) {
1539 			intel_context_unpin(ce);
1540 			drm_err(&stream->perf->i915->drm,
1541 				"Enabling perf query failed for %s\n",
1542 				stream->engine->name);
1543 			return ret;
1544 		}
1545 	}
1546 
1547 	switch (GRAPHICS_VER(ce->engine->i915)) {
1548 	case 7: {
1549 		/*
1550 		 * On Haswell we don't do any post processing of the reports
1551 		 * and don't need to use the mask.
1552 		 */
1553 		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
1554 		stream->specific_ctx_id_mask = 0;
1555 		break;
1556 	}
1557 
1558 	case 8:
1559 	case 9:
1560 		if (intel_engine_uses_guc(ce->engine)) {
1561 			/*
1562 			 * When using GuC, the context descriptor we write in
1563 			 * i915 is read by GuC and rewritten before it's
1564 			 * actually written into the hardware. The LRCA is
1565 			 * what is put into the context id field of the
1566 			 * context descriptor by GuC. Because it's aligned to
1567 			 * a page, the lower 12bits are always at 0 and
1568 			 * dropped by GuC. They won't be part of the context
1569 			 * ID in the OA reports, so squash those lower bits.
1570 			 */
1571 			stream->specific_ctx_id = ce->lrc.lrca >> 12;
1572 
1573 			/*
1574 			 * GuC uses the top bit to signal proxy submission, so
1575 			 * ignore that bit.
1576 			 */
1577 			stream->specific_ctx_id_mask =
1578 				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
1579 		} else {
1580 			stream->specific_ctx_id_mask =
1581 				(1U << GEN8_CTX_ID_WIDTH) - 1;
1582 			stream->specific_ctx_id = stream->specific_ctx_id_mask;
1583 		}
1584 		break;
1585 
1586 	case 11:
1587 	case 12:
1588 		ret = gen12_get_render_context_id(stream);
1589 		break;
1590 
1591 	default:
1592 		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
1593 	}
1594 
1595 	ce->tag = stream->specific_ctx_id;
1596 
1597 	drm_dbg(&stream->perf->i915->drm,
1598 		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
1599 		stream->specific_ctx_id,
1600 		stream->specific_ctx_id_mask);
1601 
1602 	return ret;
1603 }
1604 
1605 /**
1606  * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
1607  * @stream: An i915-perf stream opened for OA metrics
1608  *
1609  * Anything that was done in oa_get_render_ctx_id() to keep the context HW ID
1610  * valid for the lifetime of the stream is undone here.
1611  */
1612 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1613 {
1614 	struct intel_context *ce;
1615 
1616 	ce = fetch_and_zero(&stream->pinned_ctx);
1617 	if (ce) {
1618 		ce->tag = 0; /* recomputed on next submission after parking */
1619 		intel_context_unpin(ce);
1620 	}
1621 
1622 	stream->specific_ctx_id = INVALID_CTX_ID;
1623 	stream->specific_ctx_id_mask = 0;
1624 }
1625 
1626 static void
1627 free_oa_buffer(struct i915_perf_stream *stream)
1628 {
1629 	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
1630 				   I915_VMA_RELEASE_MAP);
1631 
1632 	stream->oa_buffer.vaddr = NULL;
1633 }
1634 
1635 static void
1636 free_oa_configs(struct i915_perf_stream *stream)
1637 {
1638 	struct i915_oa_config_bo *oa_bo, *tmp;
1639 
1640 	i915_oa_config_put(stream->oa_config);
1641 	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
1642 		free_oa_config_bo(oa_bo);
1643 }
1644 
1645 static void
1646 free_noa_wait(struct i915_perf_stream *stream)
1647 {
1648 	i915_vma_unpin_and_release(&stream->noa_wait, 0);
1649 }
1650 
1651 static bool engine_supports_oa(const struct intel_engine_cs *engine)
1652 {
1653 	return engine->oa_group;
1654 }
1655 
1656 static bool engine_supports_oa_format(struct intel_engine_cs *engine, int type)
1657 {
1658 	return engine->oa_group && engine->oa_group->type == type;
1659 }
1660 
1661 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1662 {
1663 	struct i915_perf *perf = stream->perf;
1664 	struct intel_gt *gt = stream->engine->gt;
1665 	struct i915_perf_group *g = stream->engine->oa_group;
1666 
1667 	if (WARN_ON(stream != g->exclusive_stream))
1668 		return;
1669 
1670 	/*
1671 	 * Unset exclusive_stream first, it will be checked while disabling
1672 	 * the metric set on gen8+.
1673 	 *
1674 	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
1675 	 */
1676 	WRITE_ONCE(g->exclusive_stream, NULL);
1677 	perf->ops.disable_metric_set(stream);
1678 
1679 	free_oa_buffer(stream);
1680 
1681 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
1682 	intel_engine_pm_put(stream->engine);
1683 
1684 	if (stream->ctx)
1685 		oa_put_render_ctx_id(stream);
1686 
1687 	free_oa_configs(stream);
1688 	free_noa_wait(stream);
1689 
1690 	if (perf->spurious_report_rs.missed) {
1691 		gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n",
1692 			  perf->spurious_report_rs.missed);
1693 	}
1694 }
1695 
1696 static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1697 {
1698 	struct intel_uncore *uncore = stream->uncore;
1699 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1700 	unsigned long flags;
1701 
1702 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1703 
1704 	/* Pre-DevBDW: OABUFFER must be set with counters off,
1705 	 * before OASTATUS1, but after OASTATUS2
1706 	 */
1707 	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1708 			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1709 	stream->oa_buffer.head = 0;
1710 
1711 	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1712 
1713 	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1714 			   gtt_offset | OABUFFER_SIZE_16M);
1715 
1716 	/* Mark that we need updated tail pointers to read from... */
1717 	stream->oa_buffer.tail = 0;
1718 
1719 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1720 
1721 	/* On Haswell we have to track which OASTATUS1 flags we've
1722 	 * already seen since they can't be cleared while periodic
1723 	 * sampling is enabled.
1724 	 */
1725 	stream->perf->gen7_latched_oastatus1 = 0;
1726 
1727 	/* NB: although the OA buffer will initially be allocated
1728 	 * zeroed via shmfs (and so this memset is redundant when
1729 	 * first allocating), we may re-init the OA buffer, either
1730 	 * when re-enabling a stream or in error/reset paths.
1731 	 *
1732 	 * The reason we clear the buffer for each re-init is for the
1733 	 * sanity check in gen7_append_oa_reports() that looks at the
1734 	 * report-id field to make sure it's non-zero which relies on
1735 	 * the assumption that new reports are being written to zeroed
1736 	 * memory...
1737 	 */
1738 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1739 }
1740 
1741 static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1742 {
1743 	struct intel_uncore *uncore = stream->uncore;
1744 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1745 	unsigned long flags;
1746 
1747 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1748 
1749 	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1750 	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1751 	stream->oa_buffer.head = 0;
1752 
1753 	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1754 
1755 	/*
1756 	 * PRM says:
1757 	 *
1758 	 *  "This MMIO must be set before the OATAILPTR
1759 	 *  register and after the OAHEADPTR register. This is
1760 	 *  to enable proper functionality of the overflow
1761 	 *  bit."
1762 	 */
1763 	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1764 		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1765 	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1766 
1767 	/* Mark that we need updated tail pointers to read from... */
1768 	stream->oa_buffer.tail = 0;
1769 
1770 	/*
1771 	 * Reset state used to recognise context switches, affecting which
1772 	 * reports we will forward to userspace while filtering for a single
1773 	 * context.
1774 	 */
1775 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1776 
1777 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1778 
1779 	/*
1780 	 * NB: although the OA buffer will initially be allocated
1781 	 * zeroed via shmfs (and so this memset is redundant when
1782 	 * first allocating), we may re-init the OA buffer, either
1783 	 * when re-enabling a stream or in error/reset paths.
1784 	 *
1785 	 * The reason we clear the buffer for each re-init is for the
1786 	 * sanity check in gen8_append_oa_reports() that looks at the
1787 	 * reason field to make sure it's non-zero which relies on
1788 	 * the assumption that new reports are being written to zeroed
1789 	 * memory...
1790 	 */
1791 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1792 }
1793 
1794 static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1795 {
1796 	struct intel_uncore *uncore = stream->uncore;
1797 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1798 	unsigned long flags;
1799 
1800 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1801 
1802 	intel_uncore_write(uncore, __oa_regs(stream)->oa_status, 0);
1803 	intel_uncore_write(uncore, __oa_regs(stream)->oa_head_ptr,
1804 			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1805 	stream->oa_buffer.head = 0;
1806 
1807 	/*
1808 	 * PRM says:
1809 	 *
1810 	 *  "This MMIO must be set before the OATAILPTR
1811 	 *  register and after the OAHEADPTR register. This is
1812 	 *  to enable proper functionality of the overflow
1813 	 *  bit."
1814 	 */
1815 	intel_uncore_write(uncore, __oa_regs(stream)->oa_buffer, gtt_offset |
1816 			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1817 	intel_uncore_write(uncore, __oa_regs(stream)->oa_tail_ptr,
1818 			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1819 
1820 	/* Mark that we need updated tail pointers to read from... */
1821 	stream->oa_buffer.tail = 0;
1822 
1823 	/*
1824 	 * Reset state used to recognise context switches, affecting which
1825 	 * reports we will forward to userspace while filtering for a single
1826 	 * context.
1827 	 */
1828 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1829 
1830 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1831 
1832 	/*
1833 	 * NB: although the OA buffer will initially be allocated
1834 	 * zeroed via shmfs (and so this memset is redundant when
1835 	 * first allocating), we may re-init the OA buffer, either
1836 	 * when re-enabling a stream or in error/reset paths.
1837 	 *
1838 	 * The reason we clear the buffer for each re-init is for the
1839 	 * sanity check in gen8_append_oa_reports() that looks at the
1840 	 * reason field to make sure it's non-zero which relies on
1841 	 * the assumption that new reports are being written to zeroed
1842 	 * memory...
1843 	 */
1844 	memset(stream->oa_buffer.vaddr, 0,
1845 	       stream->oa_buffer.vma->size);
1846 }
1847 
1848 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1849 {
1850 	struct drm_i915_private *i915 = stream->perf->i915;
1851 	struct intel_gt *gt = stream->engine->gt;
1852 	struct drm_i915_gem_object *bo;
1853 	struct i915_vma *vma;
1854 	int ret;
1855 
1856 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1857 		return -ENODEV;
1858 
1859 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1860 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1861 
1862 	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1863 	if (IS_ERR(bo)) {
1864 		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1865 		return PTR_ERR(bo);
1866 	}
1867 
1868 	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1869 
1870 	/* PreHSW required 512K alignment, HSW requires 16M */
1871 	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
1872 	if (IS_ERR(vma)) {
1873 		ret = PTR_ERR(vma);
1874 		goto err_unref;
1875 	}
1876 
1877 	/*
1878 	 * PreHSW required 512K alignment.
1879 	 * HSW and onwards, align to requested size of OA buffer.
1880 	 */
1881 	ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH);
1882 	if (ret) {
1883 		gt_err(gt, "Failed to pin OA buffer %d\n", ret);
1884 		goto err_unref;
1885 	}
1886 
1887 	stream->oa_buffer.vma = vma;
1888 
1889 	stream->oa_buffer.vaddr =
1890 		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
1891 	if (IS_ERR(stream->oa_buffer.vaddr)) {
1892 		ret = PTR_ERR(stream->oa_buffer.vaddr);
1893 		goto err_unpin;
1894 	}
1895 
1896 	return 0;
1897 
1898 err_unpin:
1899 	__i915_vma_unpin(vma);
1900 
1901 err_unref:
1902 	i915_gem_object_put(bo);
1903 
1904 	stream->oa_buffer.vaddr = NULL;
1905 	stream->oa_buffer.vma = NULL;
1906 
1907 	return ret;
1908 }
1909 
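/*
 * Emit one MI_STORE_REGISTER_MEM (save) or MI_LOAD_REGISTER_MEM (restore)
 * per dword of the register, transferring its value to/from the scratch
 * area at @offset inside the noa_wait BO.
 */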
1910 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1911 				  bool save, i915_reg_t reg, u32 offset,
1912 				  u32 dword_count)
1913 {
1914 	u32 cmd;
1915 	u32 d;
1916 
1917 	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1918 	cmd |= MI_SRM_LRM_GLOBAL_GTT;
1919 	if (GRAPHICS_VER(stream->perf->i915) >= 8)
1920 		cmd++;
1921 
1922 	for (d = 0; d < dword_count; d++) {
1923 		*cs++ = cmd;
1924 		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1925 		*cs++ = i915_ggtt_offset(stream->noa_wait) + offset + 4 * d;
1926 		*cs++ = 0;
1927 	}
1928 
1929 	return cs;
1930 }
1931 
1932 static int alloc_noa_wait(struct i915_perf_stream *stream)
1933 {
1934 	struct drm_i915_private *i915 = stream->perf->i915;
1935 	struct intel_gt *gt = stream->engine->gt;
1936 	struct drm_i915_gem_object *bo;
1937 	struct i915_vma *vma;
1938 	const u64 delay_ticks = 0xffffffffffffffff -
1939 		intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
1940 		atomic64_read(&stream->perf->noa_programming_delay));
1941 	const u32 base = stream->engine->mmio_base;
1942 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1943 	u32 *batch, *ts0, *cs, *jump;
1944 	struct i915_gem_ww_ctx ww;
1945 	int ret, i;
1946 	enum {
1947 		START_TS,
1948 		NOW_TS,
1949 		DELTA_TS,
1950 		JUMP_PREDICATE,
1951 		DELTA_TARGET,
1952 		N_CS_GPR
1953 	};
1954 	i915_reg_t mi_predicate_result = HAS_MI_SET_PREDICATE(i915) ?
1955 					  MI_PREDICATE_RESULT_2_ENGINE(base) :
1956 					  MI_PREDICATE_RESULT_1(RENDER_RING_BASE);
1957 
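	/*
	 * The batch constructed below makes the command streamer itself spin
	 * until the requested NOA programming delay has elapsed, roughly:
	 *
	 *     START_TS = RING_TIMESTAMP;
	 * loop:
	 *     NOW_TS = RING_TIMESTAMP;
	 *     DELTA_TS = NOW_TS - START_TS;
	 *     if (carry)          <- 32-bit timestamp rolled over,
	 *             retake START_TS and restart;
	 *     if (!carry(DELTA_TS + delay_ticks))
	 *             goto loop;  <- delay not yet elapsed
	 */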
1958 	/*
1959 	 * gt->scratch was being used to save/restore the GPR registers, but on
1960 	 * MTL the scratch uses stolen lmem. An MI_SRM to this memory region
1961 	 * causes an engine hang. Instead allocate an additional page here to
1962 	 * save/restore GPR registers
1963 	 */
1964 	bo = i915_gem_object_create_internal(i915, 8192);
1965 	if (IS_ERR(bo)) {
1966 		drm_err(&i915->drm,
1967 			"Failed to allocate NOA wait batchbuffer\n");
1968 		return PTR_ERR(bo);
1969 	}
1970 
1971 	i915_gem_ww_ctx_init(&ww, true);
1972 retry:
1973 	ret = i915_gem_object_lock(bo, &ww);
1974 	if (ret)
1975 		goto out_ww;
1976 
1977 	/*
1978 	 * We pin in GGTT because multiple OA config BOs will jump into this
1979 	 * buffer, so its address needs to stay fixed for the lifetime of the
1980 	 * i915/perf stream.
1981 	 */
1982 	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
1983 	if (IS_ERR(vma)) {
1984 		ret = PTR_ERR(vma);
1985 		goto out_ww;
1986 	}
1987 
1988 	ret = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
1989 	if (ret)
1990 		goto out_ww;
1991 
1992 	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1993 	if (IS_ERR(batch)) {
1994 		ret = PTR_ERR(batch);
1995 		goto err_unpin;
1996 	}
1997 
1998 	stream->noa_wait = vma;
1999 
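/*
 * Scratch offsets in the second page of the BO (the batch itself must fit
 * in the first page): the CS GPRs are saved as 64-bit values starting at
 * GPR_SAVE_OFFSET, with the predicate register at PREDICATE_SAVE_OFFSET.
 */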
2000 #define GPR_SAVE_OFFSET 4096
2001 #define PREDICATE_SAVE_OFFSET 4160
2002 
2003 	/* Save registers. */
2004 	for (i = 0; i < N_CS_GPR; i++)
2005 		cs = save_restore_register(
2006 			stream, cs, true /* save */, CS_GPR(i),
2007 			GPR_SAVE_OFFSET + 8 * i, 2);
2008 	cs = save_restore_register(
2009 		stream, cs, true /* save */, mi_predicate_result,
2010 		PREDICATE_SAVE_OFFSET, 1);
2011 
2012 	/* First timestamp snapshot location. */
2013 	ts0 = cs;
2014 
2015 	/*
2016 	 * Initial snapshot of the timestamp register to implement the wait.
2017 	 * We work with 32-bit values, so clear out the upper 32 bits of the
2018 	 * register, since the command streamer ALU operates on 64-bit values.
2019 	 */
2020 	*cs++ = MI_LOAD_REGISTER_IMM(1);
2021 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
2022 	*cs++ = 0;
2023 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2024 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
2025 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
2026 
2027 	/*
2028 	 * This is the location we're going to jump back into until the
2029 	 * required amount of time has passed.
2030 	 */
2031 	jump = cs;
2032 
2033 	/*
2034 	 * Take another snapshot of the timestamp register. Take care to clear
2035 	 * the upper 32 bits of CS_GPR(NOW_TS) as we're using it for other
2036 	 * operations below.
2037 	 */
2038 	*cs++ = MI_LOAD_REGISTER_IMM(1);
2039 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
2040 	*cs++ = 0;
2041 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2042 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
2043 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
2044 
2045 	/*
2046 	 * Do a diff between the 2 timestamps and store the result into
2047 	 * CS_GPR(DELTA_TS).
2048 	 */
2049 	*cs++ = MI_MATH(5);
2050 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
2051 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
2052 	*cs++ = MI_MATH_SUB;
2053 	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
2054 	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
2055 
2056 	/*
2057 	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
2058 	 * timestamp has rolled over the 32 bits) into the predicate register
2059 	 * to be used for the predicated jump.
2060 	 */
2061 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2062 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
2063 	*cs++ = i915_mmio_reg_offset(mi_predicate_result);
2064 
2065 	if (HAS_MI_SET_PREDICATE(i915))
2066 		*cs++ = MI_SET_PREDICATE | 1;
2067 
2068 	/* Restart from the beginning if we had timestamps roll over. */
2069 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
2070 		 MI_BATCH_BUFFER_START :
2071 		 MI_BATCH_BUFFER_START_GEN8) |
2072 		MI_BATCH_PREDICATE;
2073 	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
2074 	*cs++ = 0;
2075 
2076 	if (HAS_MI_SET_PREDICATE(i915))
2077 		*cs++ = MI_SET_PREDICATE;
2078 
2079 	/*
2080 	 * Now take the diff between the two previous timestamps and add it to:
2081 	 *      (((1 << 64) - 1) - delay_ns converted to timestamp ticks)
2082 	 *
2083 	 * When the Carry Flag contains 1 this means the elapsed time is
2084 	 * longer than the expected delay, and we can exit the wait loop.
2085 	 */
2086 	*cs++ = MI_LOAD_REGISTER_IMM(2);
2087 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
2088 	*cs++ = lower_32_bits(delay_ticks);
2089 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
2090 	*cs++ = upper_32_bits(delay_ticks);
2091 
2092 	*cs++ = MI_MATH(4);
2093 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
2094 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
2095 	*cs++ = MI_MATH_ADD;
2096 	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
2097 
2098 	*cs++ = MI_ARB_CHECK;
2099 
2100 	/*
2101 	 * Transfer the result into the predicate register to be used for the
2102 	 * predicated jump.
2103 	 */
2104 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2105 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
2106 	*cs++ = i915_mmio_reg_offset(mi_predicate_result);
2107 
2108 	if (HAS_MI_SET_PREDICATE(i915))
2109 		*cs++ = MI_SET_PREDICATE | 1;
2110 
2111 	/* Predicate the jump.  */
2112 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
2113 		 MI_BATCH_BUFFER_START :
2114 		 MI_BATCH_BUFFER_START_GEN8) |
2115 		MI_BATCH_PREDICATE;
2116 	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
2117 	*cs++ = 0;
2118 
2119 	if (HAS_MI_SET_PREDICATE(i915))
2120 		*cs++ = MI_SET_PREDICATE;
2121 
2122 	/* Restore registers. */
2123 	for (i = 0; i < N_CS_GPR; i++)
2124 		cs = save_restore_register(
2125 			stream, cs, false /* restore */, CS_GPR(i),
2126 			GPR_SAVE_OFFSET + 8 * i, 2);
2127 	cs = save_restore_register(
2128 		stream, cs, false /* restore */, mi_predicate_result,
2129 		PREDICATE_SAVE_OFFSET, 1);
2130 
2131 	/* And return to the ring. */
2132 	*cs++ = MI_BATCH_BUFFER_END;
2133 
2134 	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
2135 
2136 	i915_gem_object_flush_map(bo);
2137 	__i915_gem_object_release_map(bo);
2138 
2139 	goto out_ww;
2140 
2141 err_unpin:
2142 	i915_vma_unpin_and_release(&vma, 0);
2143 out_ww:
2144 	if (ret == -EDEADLK) {
2145 		ret = i915_gem_ww_ctx_backoff(&ww);
2146 		if (!ret)
2147 			goto retry;
2148 	}
2149 	i915_gem_ww_ctx_fini(&ww);
2150 	if (ret)
2151 		i915_gem_object_put(bo);
2152 	return ret;
2153 }
2154 
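/*
 * Emit the given register writes as MI_LOAD_REGISTER_IMM packets, starting
 * a new packet every MI_LOAD_REGISTER_IMM_MAX_REGS registers, since a
 * single MI_LRI only encodes a limited number of (offset, value) pairs.
 */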
2155 static u32 *write_cs_mi_lri(u32 *cs,
2156 			    const struct i915_oa_reg *reg_data,
2157 			    u32 n_regs)
2158 {
2159 	u32 i;
2160 
2161 	for (i = 0; i < n_regs; i++) {
2162 		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
2163 			u32 n_lri = min_t(u32,
2164 					  n_regs - i,
2165 					  MI_LOAD_REGISTER_IMM_MAX_REGS);
2166 
2167 			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
2168 		}
2169 		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
2170 		*cs++ = reg_data[i].value;
2171 	}
2172 
2173 	return cs;
2174 }
2175 
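/*
 * Number of dwords write_cs_mi_lri() emits for num_regs registers: one
 * header dword per MI_LRI packet plus an (offset, value) dword pair per
 * register.
 */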
2176 static int num_lri_dwords(int num_regs)
2177 {
2178 	int count = 0;
2179 
2180 	if (num_regs > 0) {
2181 		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
2182 		count += num_regs * 2;
2183 	}
2184 
2185 	return count;
2186 }
2187 
2188 static struct i915_oa_config_bo *
2189 alloc_oa_config_buffer(struct i915_perf_stream *stream,
2190 		       struct i915_oa_config *oa_config)
2191 {
2192 	struct drm_i915_gem_object *obj;
2193 	struct i915_oa_config_bo *oa_bo;
2194 	struct i915_gem_ww_ctx ww;
2195 	size_t config_length = 0;
2196 	u32 *cs;
2197 	int err;
2198 
2199 	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
2200 	if (!oa_bo)
2201 		return ERR_PTR(-ENOMEM);
2202 
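	/*
	 * Size the BO to hold the MI_LRI packets for all three register
	 * lists plus a terminating MI_BATCH_BUFFER_START that chains into
	 * the shared noa_wait loop, rounded up to a full GTT page.
	 */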
2203 	config_length += num_lri_dwords(oa_config->mux_regs_len);
2204 	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
2205 	config_length += num_lri_dwords(oa_config->flex_regs_len);
2206 	config_length += 3; /* MI_BATCH_BUFFER_START */
2207 	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
2208 
2209 	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
2210 	if (IS_ERR(obj)) {
2211 		err = PTR_ERR(obj);
2212 		goto err_free;
2213 	}
2214 
2215 	i915_gem_ww_ctx_init(&ww, true);
2216 retry:
2217 	err = i915_gem_object_lock(obj, &ww);
2218 	if (err)
2219 		goto out_ww;
2220 
2221 	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
2222 	if (IS_ERR(cs)) {
2223 		err = PTR_ERR(cs);
2224 		goto out_ww;
2225 	}
2226 
2227 	cs = write_cs_mi_lri(cs,
2228 			     oa_config->mux_regs,
2229 			     oa_config->mux_regs_len);
2230 	cs = write_cs_mi_lri(cs,
2231 			     oa_config->b_counter_regs,
2232 			     oa_config->b_counter_regs_len);
2233 	cs = write_cs_mi_lri(cs,
2234 			     oa_config->flex_regs,
2235 			     oa_config->flex_regs_len);
2236 
2237 	/* Jump into the active wait. */
2238 	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
2239 		 MI_BATCH_BUFFER_START :
2240 		 MI_BATCH_BUFFER_START_GEN8);
2241 	*cs++ = i915_ggtt_offset(stream->noa_wait);
2242 	*cs++ = 0;
2243 
2244 	i915_gem_object_flush_map(obj);
2245 	__i915_gem_object_release_map(obj);
2246 
2247 	oa_bo->vma = i915_vma_instance(obj,
2248 				       &stream->engine->gt->ggtt->vm,
2249 				       NULL);
2250 	if (IS_ERR(oa_bo->vma)) {
2251 		err = PTR_ERR(oa_bo->vma);
2252 		goto out_ww;
2253 	}
2254 
2255 	oa_bo->oa_config = i915_oa_config_get(oa_config);
2256 	llist_add(&oa_bo->node, &stream->oa_config_bos);
2257 
2258 out_ww:
2259 	if (err == -EDEADLK) {
2260 		err = i915_gem_ww_ctx_backoff(&ww);
2261 		if (!err)
2262 			goto retry;
2263 	}
2264 	i915_gem_ww_ctx_fini(&ww);
2265 
2266 	if (err)
2267 		i915_gem_object_put(obj);
2268 err_free:
2269 	if (err) {
2270 		kfree(oa_bo);
2271 		return ERR_PTR(err);
2272 	}
2273 	return oa_bo;
2274 }
2275 
2276 static struct i915_vma *
2277 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
2278 {
2279 	struct i915_oa_config_bo *oa_bo;
2280 
2281 	/*
2282 	 * Look for the buffer in the already allocated BOs attached
2283 	 * to the stream.
2284 	 */
2285 	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
2286 		if (oa_bo->oa_config == oa_config &&
2287 		    memcmp(oa_bo->oa_config->uuid,
2288 			   oa_config->uuid,
2289 			   sizeof(oa_config->uuid)) == 0)
2290 			goto out;
2291 	}
2292 
2293 	oa_bo = alloc_oa_config_buffer(stream, oa_config);
2294 	if (IS_ERR(oa_bo))
2295 		return ERR_CAST(oa_bo);
2296 
2297 out:
2298 	return i915_vma_get(oa_bo->vma);
2299 }
2300 
2301 static int
2302 emit_oa_config(struct i915_perf_stream *stream,
2303 	       struct i915_oa_config *oa_config,
2304 	       struct intel_context *ce,
2305 	       struct i915_active *active)
2306 {
2307 	struct i915_request *rq;
2308 	struct i915_vma *vma;
2309 	struct i915_gem_ww_ctx ww;
2310 	int err;
2311 
2312 	vma = get_oa_vma(stream, oa_config);
2313 	if (IS_ERR(vma))
2314 		return PTR_ERR(vma);
2315 
2316 	i915_gem_ww_ctx_init(&ww, true);
2317 retry:
2318 	err = i915_gem_object_lock(vma->obj, &ww);
2319 	if (err)
2320 		goto err;
2321 
2322 	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
2323 	if (err)
2324 		goto err;
2325 
2326 	intel_engine_pm_get(ce->engine);
2327 	rq = i915_request_create(ce);
2328 	intel_engine_pm_put(ce->engine);
2329 	if (IS_ERR(rq)) {
2330 		err = PTR_ERR(rq);
2331 		goto err_vma_unpin;
2332 	}
2333 
2334 	if (!IS_ERR_OR_NULL(active)) {
2335 		/* After all individual context modifications */
2336 		err = i915_request_await_active(rq, active,
2337 						I915_ACTIVE_AWAIT_ACTIVE);
2338 		if (err)
2339 			goto err_add_request;
2340 
2341 		err = i915_active_add_request(active, rq);
2342 		if (err)
2343 			goto err_add_request;
2344 	}
2345 
2346 	err = i915_vma_move_to_active(vma, rq, 0);
2347 	if (err)
2348 		goto err_add_request;
2349 
2350 	err = rq->engine->emit_bb_start(rq,
2351 					i915_vma_offset(vma), 0,
2352 					I915_DISPATCH_SECURE);
2353 	if (err)
2354 		goto err_add_request;
2355 
2356 err_add_request:
2357 	i915_request_add(rq);
2358 err_vma_unpin:
2359 	i915_vma_unpin(vma);
2360 err:
2361 	if (err == -EDEADLK) {
2362 		err = i915_gem_ww_ctx_backoff(&ww);
2363 		if (!err)
2364 			goto retry;
2365 	}
2366 
2367 	i915_gem_ww_ctx_fini(&ww);
2368 	i915_vma_put(vma);
2369 	return err;
2370 }
2371 
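/*
 * The context used to emit OA config batches: the pinned target context
 * when filtering on a single context, otherwise the engine's kernel
 * context.
 */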
2372 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2373 {
2374 	return stream->pinned_ctx ?: stream->engine->kernel_context;
2375 }
2376 
2377 static int
2378 hsw_enable_metric_set(struct i915_perf_stream *stream,
2379 		      struct i915_active *active)
2380 {
2381 	struct intel_uncore *uncore = stream->uncore;
2382 
2383 	/*
2384 	 * PRM:
2385 	 *
2386 	 * OA unit is using “crclk” for its functionality. When trunk
2387 	 * level clock gating takes place, OA clock would be gated,
2388 	 * unable to count the events from non-render clock domain.
2389 	 * Render clock gating must be disabled when OA is enabled to
2390 	 * count the events from non-render domain. Unit level clock
2391 	 * gating for RCS should also be disabled.
2392 	 */
2393 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2394 			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2395 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2396 			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2397 
2398 	return emit_oa_config(stream,
2399 			      stream->oa_config, oa_context(stream),
2400 			      active);
2401 }
2402 
2403 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2404 {
2405 	struct intel_uncore *uncore = stream->uncore;
2406 
2407 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2408 			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2409 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2410 			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2411 
2412 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2413 }
2414 
2415 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2416 			      i915_reg_t reg)
2417 {
2418 	u32 mmio = i915_mmio_reg_offset(reg);
2419 	int i;
2420 
2421 	/*
2422 	 * This arbitrary default will select the 'EU FPU0 Pipeline
2423 	 * Active' event. In the future it's anticipated that there
2424 	 * will be an explicit 'No Event' we can select, but not yet...
2425 	 */
2426 	if (!oa_config)
2427 		return 0;
2428 
2429 	for (i = 0; i < oa_config->flex_regs_len; i++) {
2430 		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2431 			return oa_config->flex_regs[i].value;
2432 	}
2433 
2434 	return 0;
2435 }

2436 /*
2437  * NB: It must always remain pointer safe to run this even if the OA unit
2438  * has been disabled.
2439  *
2440  * It's fine to put out-of-date values into these per-context registers
2441  * in the case that the OA unit has been disabled.
2442  */
2443 static void
2444 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2445 			       const struct i915_perf_stream *stream)
2446 {
2447 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2448 	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2449 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2450 	static const i915_reg_t flex_regs[] = {
2451 		EU_PERF_CNTL0,
2452 		EU_PERF_CNTL1,
2453 		EU_PERF_CNTL2,
2454 		EU_PERF_CNTL3,
2455 		EU_PERF_CNTL4,
2456 		EU_PERF_CNTL5,
2457 		EU_PERF_CNTL6,
2458 	};
2459 	u32 *reg_state = ce->lrc_reg_state;
2460 	int i;
2461 
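	/*
	 * The register state image stores MI_LRI-style (offset, value) pairs,
	 * so the value programmed for a register lives at its offset + 1.
	 */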
2462 	reg_state[ctx_oactxctrl + 1] =
2463 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2464 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2465 		GEN8_OA_COUNTER_RESUME;
2466 
2467 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2468 		reg_state[ctx_flexeu0 + i * 2 + 1] =
2469 			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2470 }
2471 
2472 struct flex {
2473 	i915_reg_t reg;
2474 	u32 offset;
2475 	u32 value;
2476 };
2477 
2478 static int
2479 gen8_store_flex(struct i915_request *rq,
2480 		struct intel_context *ce,
2481 		const struct flex *flex, unsigned int count)
2482 {
2483 	u32 offset;
2484 	u32 *cs;
2485 
2486 	cs = intel_ring_begin(rq, 4 * count);
2487 	if (IS_ERR(cs))
2488 		return PTR_ERR(cs);
2489 
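	/*
	 * Poke the values directly into the target context's register state
	 * image in the GGTT; flex->offset is a dword index into that image.
	 */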
2490 	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2491 	do {
2492 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2493 		*cs++ = offset + flex->offset * sizeof(u32);
2494 		*cs++ = 0;
2495 		*cs++ = flex->value;
2496 	} while (flex++, --count);
2497 
2498 	intel_ring_advance(rq, cs);
2499 
2500 	return 0;
2501 }
2502 
2503 static int
2504 gen8_load_flex(struct i915_request *rq,
2505 	       struct intel_context *ce,
2506 	       const struct flex *flex, unsigned int count)
2507 {
2508 	u32 *cs;
2509 
2510 	GEM_BUG_ON(!count || count > 63);
2511 
2512 	cs = intel_ring_begin(rq, 2 * count + 2);
2513 	if (IS_ERR(cs))
2514 		return PTR_ERR(cs);
2515 
2516 	*cs++ = MI_LOAD_REGISTER_IMM(count);
2517 	do {
2518 		*cs++ = i915_mmio_reg_offset(flex->reg);
2519 		*cs++ = flex->value;
2520 	} while (flex++, --count);
2521 	*cs++ = MI_NOOP;
2522 
2523 	intel_ring_advance(rq, cs);
2524 
2525 	return 0;
2526 }
2527 
2528 static int gen8_modify_context(struct intel_context *ce,
2529 			       const struct flex *flex, unsigned int count)
2530 {
2531 	struct i915_request *rq;
2532 	int err;
2533 
2534 	rq = intel_engine_create_kernel_request(ce->engine);
2535 	if (IS_ERR(rq))
2536 		return PTR_ERR(rq);
2537 
2538 	/* Serialise with the remote context */
2539 	err = intel_context_prepare_remote_request(ce, rq);
2540 	if (err == 0)
2541 		err = gen8_store_flex(rq, ce, flex, count);
2542 
2543 	i915_request_add(rq);
2544 	return err;
2545 }
2546 
2547 static int
2548 gen8_modify_self(struct intel_context *ce,
2549 		 const struct flex *flex, unsigned int count,
2550 		 struct i915_active *active)
2551 {
2552 	struct i915_request *rq;
2553 	int err;
2554 
2555 	intel_engine_pm_get(ce->engine);
2556 	rq = i915_request_create(ce);
2557 	intel_engine_pm_put(ce->engine);
2558 	if (IS_ERR(rq))
2559 		return PTR_ERR(rq);
2560 
2561 	if (!IS_ERR_OR_NULL(active)) {
2562 		err = i915_active_add_request(active, rq);
2563 		if (err)
2564 			goto err_add_request;
2565 	}
2566 
2567 	err = gen8_load_flex(rq, ce, flex, count);
2568 	if (err)
2569 		goto err_add_request;
2570 
2571 err_add_request:
2572 	i915_request_add(rq);
2573 	return err;
2574 }
2575 
2576 static int gen8_configure_context(struct i915_perf_stream *stream,
2577 				  struct i915_gem_context *ctx,
2578 				  struct flex *flex, unsigned int count)
2579 {
2580 	struct i915_gem_engines_iter it;
2581 	struct intel_context *ce;
2582 	int err = 0;
2583 
2584 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2585 		GEM_BUG_ON(ce == ce->engine->kernel_context);
2586 
2587 		if (ce->engine->class != RENDER_CLASS)
2588 			continue;
2589 
2590 		/* Otherwise OA settings will be set upon first use */
2591 		if (!intel_context_pin_if_active(ce))
2592 			continue;
2593 
2594 		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2595 		err = gen8_modify_context(ce, flex, count);
2596 
2597 		intel_context_unpin(ce);
2598 		if (err)
2599 			break;
2600 	}
2601 	i915_gem_context_unlock_engines(ctx);
2602 
2603 	return err;
2604 }
2605 
2606 static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2607 				       struct i915_active *active)
2608 {
2609 	int err;
2610 	struct intel_context *ce = stream->pinned_ctx;
2611 	u32 format = stream->oa_buffer.format->format;
2612 	u32 offset = stream->perf->ctx_oactxctrl_offset;
2613 	struct flex regs_context[] = {
2614 		{
2615 			GEN8_OACTXCONTROL,
2616 			offset + 1,
2617 			active ? GEN8_OA_COUNTER_RESUME : 0,
2618 		},
2619 	};
2620 	/* Offsets in regs_lri are not used since this configuration is only
2621 	 * applied using LRI. Initialize the correct offsets for posterity.
2622 	 */
2623 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2624 	struct flex regs_lri[] = {
2625 		{
2626 			GEN12_OAR_OACONTROL,
2627 			GEN12_OAR_OACONTROL_OFFSET + 1,
2628 			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2629 			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2630 		},
2631 		{
2632 			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2633 			CTX_CONTEXT_CONTROL,
2634 			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2635 				      active ?
2636 				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2637 				      0)
2638 		},
2639 	};
2640 
2641 	/* Modify the context image of pinned context with regs_context */
2642 	err = intel_context_lock_pinned(ce);
2643 	if (err)
2644 		return err;
2645 
2646 	err = gen8_modify_context(ce, regs_context,
2647 				  ARRAY_SIZE(regs_context));
2648 	intel_context_unlock_pinned(ce);
2649 	if (err)
2650 		return err;
2651 
2652 	/* Apply regs_lri using LRI with pinned context */
2653 	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2654 }
2655 
2656 /*
2657  * Manages updating the per-context aspects of the OA stream
2658  * configuration across all contexts.
2659  *
2660  * The awkward consideration here is that OACTXCONTROL controls the
2661  * exponent for periodic sampling which is primarily used for system
2662  * wide profiling where we'd like a consistent sampling period even in
2663  * the face of context switches.
2664  *
2665  * Our approach of updating the register state context (as opposed to
2666  * say using a workaround batch buffer) ensures that the hardware
2667  * won't automatically reload an out-of-date timer exponent even
2668  * transiently before a WA BB could be parsed.
2669  *
2670  * This function needs to:
2671  * - Ensure the currently running context's per-context OA state is
2672  *   updated
2673  * - Ensure that all existing contexts will have the correct per-context
2674  *   OA state if they are scheduled for use.
2675  * - Ensure any new contexts will be initialized with the correct
2676  *   per-context OA state.
2677  *
2678  * Note: it's only the RCS/Render context that has any OA state.
2679  * Note: the first flex register passed must always be R_PWR_CLK_STATE
2680  */
2681 static int
2682 oa_configure_all_contexts(struct i915_perf_stream *stream,
2683 			  struct flex *regs,
2684 			  size_t num_regs,
2685 			  struct i915_active *active)
2686 {
2687 	struct drm_i915_private *i915 = stream->perf->i915;
2688 	struct intel_engine_cs *engine;
2689 	struct intel_gt *gt = stream->engine->gt;
2690 	struct i915_gem_context *ctx, *cn;
2691 	int err;
2692 
2693 	lockdep_assert_held(&gt->perf.lock);
2694 
2695 	/*
2696 	 * The OA register config is setup through the context image. This image
2697 	 * might be written to by the GPU on context switch (in particular on
2698 	 * lite-restore). This means we can't safely update a context's image,
2699 	 * if this context is scheduled/submitted to run on the GPU.
2700 	 *
2701 	 * We could emit the OA register config through the batch buffer but
2702 	 * this might leave a small interval of time where the OA unit is
2703 	 * configured at an invalid sampling period.
2704 	 *
2705 	 * Note that since we emit all requests from a single ring, there
2706 	 * is still an implicit global barrier here that may cause a high
2707 	 * priority context to wait for an otherwise independent low priority
2708 	 * context. Contexts idle at the time of reconfiguration are not
2709 	 * trapped behind the barrier.
2710 	 */
2711 	spin_lock(&i915->gem.contexts.lock);
2712 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2713 		if (!kref_get_unless_zero(&ctx->ref))
2714 			continue;
2715 
2716 		spin_unlock(&i915->gem.contexts.lock);
2717 
2718 		err = gen8_configure_context(stream, ctx, regs, num_regs);
2719 		if (err) {
2720 			i915_gem_context_put(ctx);
2721 			return err;
2722 		}
2723 
2724 		spin_lock(&i915->gem.contexts.lock);
2725 		list_safe_reset_next(ctx, cn, link);
2726 		i915_gem_context_put(ctx);
2727 	}
2728 	spin_unlock(&i915->gem.contexts.lock);
2729 
2730 	/*
2731 	 * After updating all other contexts, we need to modify ourselves.
2732 	 * If we don't modify the kernel_context, we do not get events while
2733 	 * idle.
2734 	 */
2735 	for_each_uabi_engine(engine, i915) {
2736 		struct intel_context *ce = engine->kernel_context;
2737 
2738 		if (engine->class != RENDER_CLASS)
2739 			continue;
2740 
2741 		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2742 
2743 		err = gen8_modify_self(ce, regs, num_regs, active);
2744 		if (err)
2745 			return err;
2746 	}
2747 
2748 	return 0;
2749 }
2750 
2751 static int
2752 lrc_configure_all_contexts(struct i915_perf_stream *stream,
2753 			   const struct i915_oa_config *oa_config,
2754 			   struct i915_active *active)
2755 {
2756 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2757 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2758 	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2759 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2760 	struct flex regs[] = {
2761 		{
2762 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2763 			CTX_R_PWR_CLK_STATE,
2764 		},
2765 		{
2766 			GEN8_OACTXCONTROL,
2767 			ctx_oactxctrl + 1,
2768 		},
2769 		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
2770 		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
2771 		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
2772 		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
2773 		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
2774 		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
2775 		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
2776 	};
2777 #undef ctx_flexeuN
2778 	int i;
2779 
2780 	regs[1].value =
2781 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2782 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2783 		GEN8_OA_COUNTER_RESUME;
2784 
2785 	for (i = 2; i < ARRAY_SIZE(regs); i++)
2786 		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2787 
2788 	return oa_configure_all_contexts(stream,
2789 					 regs, ARRAY_SIZE(regs),
2790 					 active);
2791 }
2792 
2793 static int
2794 gen8_enable_metric_set(struct i915_perf_stream *stream,
2795 		       struct i915_active *active)
2796 {
2797 	struct intel_uncore *uncore = stream->uncore;
2798 	struct i915_oa_config *oa_config = stream->oa_config;
2799 	int ret;
2800 
2801 	/*
2802 	 * We disable slice/unslice clock ratio change reports on SKL since
2803 	 * they are too noisy. The HW generates a lot of redundant reports
2804 	 * where the ratio hasn't really changed causing a lot of redundant
2805 	 * work for processes and increasing the chances we'll hit buffer
2806 	 * overruns.
2807 	 *
2808 	 * Although we don't currently use the 'disable overrun' OABUFFER
2809 	 * feature it's worth noting that clock ratio reports have to be
2810 	 * disabled before considering use of that feature since the HW doesn't
2811 	 * correctly block these reports.
2812 	 *
2813 	 * Currently none of the high-level metrics we have depend on knowing
2814 	 * this ratio to normalize.
2815 	 *
2816 	 * Note: This register is not power context saved and restored, but
2817 	 * that's OK considering that we disable RC6 while the OA unit is
2818 	 * enabled.
2819 	 *
2820 	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2821 	 * be read back from automatically triggered reports, as part of the
2822 	 * RPT_ID field.
2823 	 */
2824 	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
2825 		intel_uncore_write(uncore, GEN8_OA_DEBUG,
2826 				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2827 						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2828 	}
2829 
2830 	/*
2831 	 * Update all contexts prior to writing the mux configurations as we need
2832 	 * to make sure all slices/subslices are ON before writing to NOA
2833 	 * registers.
2834 	 */
2835 	ret = lrc_configure_all_contexts(stream, oa_config, active);
2836 	if (ret)
2837 		return ret;
2838 
2839 	return emit_oa_config(stream,
2840 			      stream->oa_config, oa_context(stream),
2841 			      active);
2842 }
2843 
2844 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2845 {
2846 	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2847 			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
2848 			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2849 }
2850 
2851 static int
2852 gen12_enable_metric_set(struct i915_perf_stream *stream,
2853 			struct i915_active *active)
2854 {
2855 	struct drm_i915_private *i915 = stream->perf->i915;
2856 	struct intel_uncore *uncore = stream->uncore;
2857 	bool periodic = stream->periodic;
2858 	u32 period_exponent = stream->period_exponent;
2859 	u32 sqcnt1;
2860 	int ret;
2861 
2862 	/*
2863 	 * Wa_1508761755
2864 	 * EU NOA signals behave incorrectly if EU clock gating is enabled.
2865 	 * Disable thread stall DOP gating and EU DOP gating.
2866 	 */
2867 	if (IS_DG2(i915)) {
2868 		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
2869 					     _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
2870 		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
2871 				   _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
2872 	}
2873 
2874 	intel_uncore_write(uncore, __oa_regs(stream)->oa_debug,
2875 			   /* Disable clk ratio reports, like previous Gens. */
2876 			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2877 					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2878 			   /*
2879 			    * If the user didn't require OA reports, instruct
2880 			    * the hardware not to emit ctx switch reports.
2881 			    */
2882 			   oag_report_ctx_switches(stream));
2883 
2884 	intel_uncore_write(uncore, __oa_regs(stream)->oa_ctx_ctrl, periodic ?
2885 			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2886 			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2887 			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2888 			    : 0);
2889 
2890 	/*
2891 	 * Initialize Super Queue Internal Cnt Register
2892 	 * Set PMON Enable in order to collect valid metrics.
2893 	 * Enable bytes per clock reporting in OA.
2894 	 */
2895 	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
2896 		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
2897 
2898 	intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
2899 
2900 	/*
2901 	 * For Gen12, performance counters are context
2902 	 * saved/restored. Only enable it for the context that
2903 	 * requested this.
2904 	 */
2905 	if (stream->ctx) {
2906 		ret = gen12_configure_oar_context(stream, active);
2907 		if (ret)
2908 			return ret;
2909 	}
2910 
2911 	return emit_oa_config(stream,
2912 			      stream->oa_config, oa_context(stream),
2913 			      active);
2914 }
2915 
2916 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2917 {
2918 	struct intel_uncore *uncore = stream->uncore;
2919 
2920 	/* Reset all contexts' slices/subslices configurations. */
2921 	lrc_configure_all_contexts(stream, NULL, NULL);
2922 
2923 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2924 }
2925 
2926 static void gen11_disable_metric_set(struct i915_perf_stream *stream)
2927 {
2928 	struct intel_uncore *uncore = stream->uncore;
2929 
2930 	/* Reset all contexts' slices/subslices configurations. */
2931 	lrc_configure_all_contexts(stream, NULL, NULL);
2932 
2933 	/* Make sure we disable noa to save power. */
2934 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2935 }
2936 
2937 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2938 {
2939 	struct intel_uncore *uncore = stream->uncore;
2940 	struct drm_i915_private *i915 = stream->perf->i915;
2941 	u32 sqcnt1;
2942 
2943 	/*
2944 	 * Wa_1508761755: Enable thread stall DOP gating and EU DOP gating.
2945 	 */
2946 	if (IS_DG2(i915)) {
2947 		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
2948 					     _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
2949 		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
2950 				   _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
2951 	}
2952 
2953 	/* disable the context save/restore or OAR counters */
2954 	if (stream->ctx)
2955 		gen12_configure_oar_context(stream, NULL);
2956 
2957 	/* Make sure we disable noa to save power. */
2958 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2959 
2960 	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
2961 		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
2962 
2963 	/* Reset PMON Enable to save power. */
2964 	intel_uncore_rmw(uncore, GEN12_SQCNT1, sqcnt1, 0);
2965 }
2966 
2967 static void gen7_oa_enable(struct i915_perf_stream *stream)
2968 {
2969 	struct intel_uncore *uncore = stream->uncore;
2970 	struct i915_gem_context *ctx = stream->ctx;
2971 	u32 ctx_id = stream->specific_ctx_id;
2972 	bool periodic = stream->periodic;
2973 	u32 period_exponent = stream->period_exponent;
2974 	u32 report_format = stream->oa_buffer.format->format;
2975 
2976 	/*
2977 	 * Reset buf pointers so we don't forward reports from before now.
2978 	 *
2979 	 * Think carefully if considering trying to avoid this, since it
2980 	 * also ensures status flags and the buffer itself are cleared
2981 	 * in error paths, and we have checks for invalid reports based
2982 	 * on the assumption that certain fields are written to zeroed
2983 	 * memory, which this helps maintain.
2984 	 */
2985 	gen7_init_oa_buffer(stream);
2986 
2987 	intel_uncore_write(uncore, GEN7_OACONTROL,
2988 			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2989 			   (period_exponent <<
2990 			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2991 			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2992 			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2993 			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2994 			   GEN7_OACONTROL_ENABLE);
2995 }
2996 
2997 static void gen8_oa_enable(struct i915_perf_stream *stream)
2998 {
2999 	struct intel_uncore *uncore = stream->uncore;
3000 	u32 report_format = stream->oa_buffer.format->format;
3001 
3002 	/*
3003 	 * Reset buf pointers so we don't forward reports from before now.
3004 	 *
3005 	 * Think carefully if considering trying to avoid this, since it
3006 	 * also ensures status flags and the buffer itself are cleared
3007 	 * in error paths, and we have checks for invalid reports based
3008 	 * on the assumption that certain fields are written to zeroed
3009 	 * memory, which this helps maintain.
3010 	 */
3011 	gen8_init_oa_buffer(stream);
3012 
3013 	/*
3014 	 * Note: we don't rely on the hardware to perform single context
3015 	 * filtering and instead filter on the cpu based on the context-id
3016 	 * field of reports
3017 	 */
3018 	intel_uncore_write(uncore, GEN8_OACONTROL,
3019 			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
3020 			   GEN8_OA_COUNTER_ENABLE);
3021 }
3022 
3023 static void gen12_oa_enable(struct i915_perf_stream *stream)
3024 {
3025 	const struct i915_perf_regs *regs;
3026 	u32 val;
3027 
3028 	/*
3029 	 * If we don't want OA reports from the OA buffer, then we don't even
3030 	 * need to program the OAG unit.
3031 	 */
3032 	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
3033 		return;
3034 
3035 	gen12_init_oa_buffer(stream);
3036 
3037 	regs = __oa_regs(stream);
3038 	val = (stream->oa_buffer.format->format << regs->oa_ctrl_counter_format_shift) |
3039 	      GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE;
3040 
3041 	intel_uncore_write(stream->uncore, regs->oa_ctrl, val);
3042 }
3043 
3044 /**
3045  * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
3046  * @stream: An i915 perf stream opened for OA metrics
3047  *
3048  * [Re]enables hardware periodic sampling according to the period configured
3049  * when opening the stream. This also starts a hrtimer that will periodically
3050  * check for data in the circular OA buffer for notifying userspace (e.g.
3051  * during a read() or poll()).
3052  */
3053 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
3054 {
3055 	stream->pollin = false;
3056 
3057 	stream->perf->ops.oa_enable(stream);
3058 
3059 	if (stream->sample_flags & SAMPLE_OA_REPORT)
3060 		hrtimer_start(&stream->poll_check_timer,
3061 			      ns_to_ktime(stream->poll_oa_period),
3062 			      HRTIMER_MODE_REL_PINNED);
3063 }
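
/*
 * Illustrative sketch (not part of the driver): from userspace the above
 * maps onto plain ioctls against the stream fd returned by
 * DRM_IOCTL_I915_PERF_OPEN. Assuming "stream_fd" is such a descriptor:
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);	// start OA sampling
 *	...
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);	// stop OA sampling
 *
 * Neither ioctl carries an argument payload, which is also why a single
 * handler can serve the 32-bit compat path further below.
 */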
3064 
3065 static void gen7_oa_disable(struct i915_perf_stream *stream)
3066 {
3067 	struct intel_uncore *uncore = stream->uncore;
3068 
3069 	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
3070 	if (intel_wait_for_register(uncore,
3071 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
3072 				    50))
3073 		drm_err(&stream->perf->i915->drm,
3074 			"wait for OA to be disabled timed out\n");
3075 }
3076 
3077 static void gen8_oa_disable(struct i915_perf_stream *stream)
3078 {
3079 	struct intel_uncore *uncore = stream->uncore;
3080 
3081 	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
3082 	if (intel_wait_for_register(uncore,
3083 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
3084 				    50))
3085 		drm_err(&stream->perf->i915->drm,
3086 			"wait for OA to be disabled timed out\n");
3087 }
3088 
3089 static void gen12_oa_disable(struct i915_perf_stream *stream)
3090 {
3091 	struct intel_uncore *uncore = stream->uncore;
3092 
3093 	intel_uncore_write(uncore, __oa_regs(stream)->oa_ctrl, 0);
3094 	if (intel_wait_for_register(uncore,
3095 				    __oa_regs(stream)->oa_ctrl,
3096 				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
3097 				    50))
3098 		drm_err(&stream->perf->i915->drm,
3099 			"wait for OA to be disabled timed out\n");
3100 
3101 	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
3102 	if (intel_wait_for_register(uncore,
3103 				    GEN12_OA_TLB_INV_CR,
3104 				    1, 0,
3105 				    50))
3106 		drm_err(&stream->perf->i915->drm,
3107 			"wait for OA tlb invalidate timed out\n");
3108 }
3109 
3110 /**
3111  * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
3112  * @stream: An i915 perf stream opened for OA metrics
3113  *
3114  * Stops the OA unit from periodically writing counter reports into the
3115  * circular OA buffer. This also stops the hrtimer that periodically checks for
3116  * data in the circular OA buffer, for notifying userspace.
3117  */
3118 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
3119 {
3120 	stream->perf->ops.oa_disable(stream);
3121 
3122 	if (stream->sample_flags & SAMPLE_OA_REPORT)
3123 		hrtimer_cancel(&stream->poll_check_timer);
3124 }
3125 
3126 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
3127 	.destroy = i915_oa_stream_destroy,
3128 	.enable = i915_oa_stream_enable,
3129 	.disable = i915_oa_stream_disable,
3130 	.wait_unlocked = i915_oa_wait_unlocked,
3131 	.poll_wait = i915_oa_poll_wait,
3132 	.read = i915_oa_read,
3133 };
3134 
3135 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
3136 {
3137 	struct i915_active *active;
3138 	int err;
3139 
3140 	active = i915_active_create();
3141 	if (!active)
3142 		return -ENOMEM;
3143 
3144 	err = stream->perf->ops.enable_metric_set(stream, active);
3145 	if (err == 0)
3146 		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
3147 
3148 	i915_active_put(active);
3149 	return err;
3150 }
3151 
3152 static void
3153 get_default_sseu_config(struct intel_sseu *out_sseu,
3154 			struct intel_engine_cs *engine)
3155 {
3156 	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
3157 
3158 	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
3159 
3160 	if (GRAPHICS_VER(engine->i915) == 11) {
3161 		/*
3162 		 * We only need the subslice count, so it doesn't matter which
3163 		 * ones we select - just enable the low half of all available
3164 		 * subslices per slice.
3165 		 */
3166 		out_sseu->subslice_mask =
3167 			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
3168 		out_sseu->slice_mask = 0x1;
3169 	}
3170 }
3171 
3172 static int
3173 get_sseu_config(struct intel_sseu *out_sseu,
3174 		struct intel_engine_cs *engine,
3175 		const struct drm_i915_gem_context_param_sseu *drm_sseu)
3176 {
3177 	if (drm_sseu->engine.engine_class != engine->uabi_class ||
3178 	    drm_sseu->engine.engine_instance != engine->uabi_instance)
3179 		return -EINVAL;
3180 
3181 	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
3182 }
3183 
3184 /*
3185  * OA timestamp frequency = CS timestamp frequency in most platforms. On some
3186  * platforms OA unit ignores the CTC_SHIFT and the 2 timestamps differ. In such
3187  * cases, return the adjusted CS timestamp frequency to the user.
3188  */
3189 u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
3190 {
3191 	struct intel_gt *gt = to_gt(i915);
3192 
3193 	/* Wa_18013179988 */
3194 	if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
3195 		intel_wakeref_t wakeref;
3196 		u32 reg, shift;
3197 
3198 		with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref)
3199 			reg = intel_uncore_read(to_gt(i915)->uncore, RPM_CONFIG0);
3200 
3201 		shift = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
3202 				      reg);
3203 
3204 		return to_gt(i915)->clock_frequency << (3 - shift);
3205 	}
3206 
3207 	return to_gt(i915)->clock_frequency;
3208 }
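
/*
 * A worked example of the adjustment above, with hypothetical numbers: for
 * a CS timestamp frequency of 19.2 MHz and a CTC_SHIFT field of 3 read
 * from RPM_CONFIG0, the reported OA frequency is 19200000 << (3 - 3),
 * i.e. unchanged; with CTC_SHIFT == 1 it would be 19200000 << 2 = 76.8 MHz.
 */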
3209 
3210 /**
3211  * i915_oa_stream_init - validate combined props for OA stream and init
3212  * @stream: An i915 perf stream
3213  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3214  * @props: The property state that configures stream (individually validated)
3215  *
3216  * While read_properties_unlocked() validates properties in isolation it
3217  * doesn't ensure that the combination necessarily makes sense.
3218  *
3219  * At this point it has been determined that userspace wants a stream of
3220  * OA metrics, but we still need to validate that the combined
3221  * properties are OK.
3222  *
3223  * If the configuration makes sense then we can allocate memory for
3224  * a circular OA buffer and apply the requested metric set configuration.
3225  *
3226  * Returns: zero on success or a negative error code.
3227  */
3228 static int i915_oa_stream_init(struct i915_perf_stream *stream,
3229 			       struct drm_i915_perf_open_param *param,
3230 			       struct perf_open_properties *props)
3231 {
3232 	struct drm_i915_private *i915 = stream->perf->i915;
3233 	struct i915_perf *perf = stream->perf;
3234 	struct i915_perf_group *g;
3235 	int ret;
3236 
3237 	if (!props->engine) {
3238 		drm_dbg(&stream->perf->i915->drm,
3239 			"OA engine not specified\n");
3240 		return -EINVAL;
3241 	}
3242 	g = props->engine->oa_group;
3243 
3244 	/*
3245 	 * If the sysfs metrics/ directory wasn't registered for some
3246 	 * reason then don't let userspace try their luck with config
3247 	 * IDs
3248 	 */
3249 	if (!perf->metrics_kobj) {
3250 		drm_dbg(&stream->perf->i915->drm,
3251 			"OA metrics weren't advertised via sysfs\n");
3252 		return -EINVAL;
3253 	}
3254 
3255 	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
3256 	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
3257 		drm_dbg(&stream->perf->i915->drm,
3258 			"Only OA report sampling supported\n");
3259 		return -EINVAL;
3260 	}
3261 
3262 	if (!perf->ops.enable_metric_set) {
3263 		drm_dbg(&stream->perf->i915->drm,
3264 			"OA unit not supported\n");
3265 		return -ENODEV;
3266 	}
3267 
3268 	/*
3269 	 * To avoid the complexity of having to accurately filter
3270 	 * counter reports and marshal to the appropriate client
3271 	 * we currently only allow exclusive access
3272 	 */
3273 	if (g->exclusive_stream) {
3274 		drm_dbg(&stream->perf->i915->drm,
3275 			"OA unit already in use\n");
3276 		return -EBUSY;
3277 	}
3278 
3279 	if (!props->oa_format) {
3280 		drm_dbg(&stream->perf->i915->drm,
3281 			"OA report format not specified\n");
3282 		return -EINVAL;
3283 	}
3284 
3285 	stream->engine = props->engine;
3286 	stream->uncore = stream->engine->gt->uncore;
3287 
3288 	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
3289 
3290 	stream->oa_buffer.format = &perf->oa_formats[props->oa_format];
3291 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0))
3292 		return -EINVAL;
3293 
3294 	stream->sample_flags = props->sample_flags;
3295 	stream->sample_size += stream->oa_buffer.format->size;
3296 
3297 	stream->hold_preemption = props->hold_preemption;
3298 
3299 	stream->periodic = props->oa_periodic;
3300 	if (stream->periodic)
3301 		stream->period_exponent = props->oa_period_exponent;
3302 
3303 	if (stream->ctx) {
3304 		ret = oa_get_render_ctx_id(stream);
3305 		if (ret) {
3306 			drm_dbg(&stream->perf->i915->drm,
3307 				"Invalid context id to filter with\n");
3308 			return ret;
3309 		}
3310 	}
3311 
3312 	ret = alloc_noa_wait(stream);
3313 	if (ret) {
3314 		drm_dbg(&stream->perf->i915->drm,
3315 			"Unable to allocate NOA wait batch buffer\n");
3316 		goto err_noa_wait_alloc;
3317 	}
3318 
3319 	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
3320 	if (!stream->oa_config) {
3321 		drm_dbg(&stream->perf->i915->drm,
3322 			"Invalid OA config id=%i\n", props->metrics_set);
3323 		ret = -EINVAL;
3324 		goto err_config;
3325 	}
3326 
3327 	/* PRM - observability performance counters:
3328 	 *
3329 	 *   OACONTROL, performance counter enable, note:
3330 	 *
3331 	 *   "When this bit is set, in order to have coherent counts,
3332 	 *   RC6 power state and trunk clock gating must be disabled.
3333 	 *   This can be achieved by programming MMIO registers as
3334 	 *   0xA094=0 and 0xA090[31]=1"
3335 	 *
3336 	 *   In our case we are expecting that taking pm + FORCEWAKE
3337 	 *   references will effectively disable RC6.
3338 	 */
3339 	intel_engine_pm_get(stream->engine);
3340 	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
3341 
3342 	ret = alloc_oa_buffer(stream);
3343 	if (ret)
3344 		goto err_oa_buf_alloc;
3345 
3346 	stream->ops = &i915_oa_stream_ops;
3347 
3348 	stream->engine->gt->perf.sseu = props->sseu;
3349 	WRITE_ONCE(g->exclusive_stream, stream);
3350 
3351 	ret = i915_perf_stream_enable_sync(stream);
3352 	if (ret) {
3353 		drm_dbg(&stream->perf->i915->drm,
3354 			"Unable to enable metric set\n");
3355 		goto err_enable;
3356 	}
3357 
3358 	drm_dbg(&stream->perf->i915->drm,
3359 		"opening stream oa config uuid=%s\n",
3360 		  stream->oa_config->uuid);
3361 
3362 	hrtimer_init(&stream->poll_check_timer,
3363 		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3364 	stream->poll_check_timer.function = oa_poll_check_timer_cb;
3365 	init_waitqueue_head(&stream->poll_wq);
3366 	spin_lock_init(&stream->oa_buffer.ptr_lock);
3367 	mutex_init(&stream->lock);
3368 
3369 	return 0;
3370 
3371 err_enable:
3372 	WRITE_ONCE(g->exclusive_stream, NULL);
3373 	perf->ops.disable_metric_set(stream);
3374 
3375 	free_oa_buffer(stream);
3376 
3377 err_oa_buf_alloc:
3378 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
3379 	intel_engine_pm_put(stream->engine);
3380 
3381 	free_oa_configs(stream);
3382 
3383 err_config:
3384 	free_noa_wait(stream);
3385 
3386 err_noa_wait_alloc:
3387 	if (stream->ctx)
3388 		oa_put_render_ctx_id(stream);
3389 
3390 	return ret;
3391 }
3392 
3393 void i915_oa_init_reg_state(const struct intel_context *ce,
3394 			    const struct intel_engine_cs *engine)
3395 {
3396 	struct i915_perf_stream *stream;
3397 
3398 	if (engine->class != RENDER_CLASS)
3399 		return;
3400 
3401 	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
3402 	stream = READ_ONCE(engine->oa_group->exclusive_stream);
3403 	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
3404 		gen8_update_reg_state_unlocked(ce, stream);
3405 }
3406 
3407 /**
3408  * i915_perf_read - handles read() FOP for i915 perf stream FDs
3409  * @file: An i915 perf stream file
3410  * @buf: destination buffer given by userspace
3411  * @count: the number of bytes userspace wants to read
3412  * @ppos: (inout) file seek position (unused)
3413  *
3414  * The entry point for handling a read() on a stream file descriptor from
3415  * userspace. Most of the work is left to i915_perf_read_locked() and
3416  * &i915_perf_stream_ops->read, but to save stream implementations (of
3417  * which we might have multiple later) from duplicating the logic,
3418  * blocking reads are handled here.
3418  *
3419  * We can also consistently treat trying to read from a disabled stream
3420  * as an IO error so implementations can assume the stream is enabled
3421  * while reading.
3422  *
3423  * Returns: The number of bytes copied or a negative error code on failure.
3424  */
3425 static ssize_t i915_perf_read(struct file *file,
3426 			      char __user *buf,
3427 			      size_t count,
3428 			      loff_t *ppos)
3429 {
3430 	struct i915_perf_stream *stream = file->private_data;
3431 	size_t offset = 0;
3432 	int ret;
3433 
3434 	/* To ensure it's handled consistently we simply treat all reads of a
3435 	 * disabled stream as an error. In particular it might otherwise lead
3436 	 * to a deadlock for blocking file descriptors...
3437 	 */
3438 	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
3439 		return -EIO;
3440 
3441 	if (!(file->f_flags & O_NONBLOCK)) {
3442 		/* There's the small chance of false positives from
3443 		 * stream->ops->wait_unlocked.
3444 		 *
3445 		 * E.g. with single context filtering, since we only wait
3446 		 * until the OA buffer has >= 1 report, we don't immediately
3447 		 * know whether any reports really belong to the current context.
3448 		 */
3449 		do {
3450 			ret = stream->ops->wait_unlocked(stream);
3451 			if (ret)
3452 				return ret;
3453 
3454 			mutex_lock(&stream->lock);
3455 			ret = stream->ops->read(stream, buf, count, &offset);
3456 			mutex_unlock(&stream->lock);
3457 		} while (!offset && !ret);
3458 	} else {
3459 		mutex_lock(&stream->lock);
3460 		ret = stream->ops->read(stream, buf, count, &offset);
3461 		mutex_unlock(&stream->lock);
3462 	}
3463 
3464 	/* We allow the poll checking to sometimes report false positive EPOLLIN
3465 	 * events where we might actually report EAGAIN on read() if there's
3466 	 * not really any data available. In this situation though we don't
3467 	 * want to enter a busy loop between poll() reporting an EPOLLIN event
3468 	 * and read() returning -EAGAIN. Clearing stream->pollin here
3469 	 * effectively ensures we back off until the next hrtimer callback
3470 	 * before reporting another EPOLLIN event.
3471 	 * The exception to this is if ops->read() returned -ENOSPC which means
3472 	 * that more OA data is available than could fit in the user provided
3473 	 * buffer. In this case we want the next poll() call to not block.
3474 	 */
3475 	if (ret != -ENOSPC)
3476 		stream->pollin = false;
3477 
3478 	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3479 	return offset ?: (ret ?: -EAGAIN);
3480 }
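
/*
 * Illustrative sketch (not part of the driver) of how userspace might
 * consume this read() interface, assuming "stream_fd" came from
 * DRM_IOCTL_I915_PERF_OPEN and <drm/i915_drm.h> is included:
 *
 *	uint8_t buf[16 * 4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (size_t off = 0; len > 0 && off < (size_t)len; ) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)(buf + off);
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			;	// the raw OA report follows the header
 *		off += hdr->size;
 *	}
 *
 * read() returns however many whole records fit in the buffer; the
 * -ENOSPC case described above is what keeps the next poll() from
 * blocking.
 */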
3481 
3482 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3483 {
3484 	struct i915_perf_stream *stream =
3485 		container_of(hrtimer, typeof(*stream), poll_check_timer);
3486 
3487 	if (oa_buffer_check_unlocked(stream)) {
3488 		stream->pollin = true;
3489 		wake_up(&stream->poll_wq);
3490 	}
3491 
3492 	hrtimer_forward_now(hrtimer,
3493 			    ns_to_ktime(stream->poll_oa_period));
3494 
3495 	return HRTIMER_RESTART;
3496 }
3497 
3498 /**
3499  * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3500  * @stream: An i915 perf stream
3501  * @file: An i915 perf stream file
3502  * @wait: poll() state table
3503  *
3504  * For handling userspace polling on an i915 perf stream, this calls through to
3505  * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3506  * will be woken for new stream data.
3507  *
3508  * Returns: any poll events that are ready without sleeping
3509  */
3510 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3511 				      struct file *file,
3512 				      poll_table *wait)
3513 {
3514 	__poll_t events = 0;
3515 
3516 	stream->ops->poll_wait(stream, file, wait);
3517 
3518 	/* Note: we don't explicitly check whether there's something to read
3519 	 * here since this path may be very hot depending on what else
3520 	 * userspace is polling, or on the timeout in use. We rely solely on
3521 	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3522 	 * samples to read.
3523 	 */
3524 	if (stream->pollin)
3525 		events |= EPOLLIN;
3526 
3527 	return events;
3528 }
3529 
3530 /**
3531  * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3532  * @file: An i915 perf stream file
3533  * @wait: poll() state table
3534  *
3535  * For handling userspace polling on an i915 perf stream, this ensures
3536  * poll_wait() gets called with a wait queue that will be woken for new stream
3537  * data.
3538  *
3539  * Note: Implementation deferred to i915_perf_poll_locked()
3540  *
3541  * Returns: any poll events that are ready without sleeping
3542  */
3543 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3544 {
3545 	struct i915_perf_stream *stream = file->private_data;
3546 	__poll_t ret;
3547 
3548 	mutex_lock(&stream->lock);
3549 	ret = i915_perf_poll_locked(stream, file, wait);
3550 	mutex_unlock(&stream->lock);
3551 
3552 	return ret;
3553 }
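
/*
 * Illustrative sketch (not part of the driver): a consumer would typically
 * sleep in poll() and only read() once EPOLLIN is signalled, assuming
 * "stream_fd" is an open, enabled stream:
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		len = read(stream_fd, buf, sizeof(buf));
 *
 * Wakeup granularity is bounded by the stream's poll_oa_period
 * (DRM_I915_PERF_PROP_POLL_OA_PERIOD), not by individual report writes.
 */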
3554 
3555 /**
3556  * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3557  * @stream: A disabled i915 perf stream
3558  *
3559  * [Re]enables the associated capture of data for this stream.
3560  *
3561  * If a stream was previously enabled then there's currently no intention
3562  * to provide userspace any guarantee about the preservation of previously
3563  * buffered data.
3564  */
3565 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3566 {
3567 	if (stream->enabled)
3568 		return;
3569 
3570 	/* Allow stream->ops->enable() to refer to this */
3571 	stream->enabled = true;
3572 
3573 	if (stream->ops->enable)
3574 		stream->ops->enable(stream);
3575 
3576 	if (stream->hold_preemption)
3577 		intel_context_set_nopreempt(stream->pinned_ctx);
3578 }
3579 
3580 /**
3581  * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3582  * @stream: An enabled i915 perf stream
3583  *
3584  * Disables the associated capture of data for this stream.
3585  *
3586  * The intention is that disabling and re-enabling a stream will ideally be
3587  * cheaper than destroying and re-opening a stream with the same configuration,
3588  * though there are no formal guarantees about what state or buffered data
3589  * must be retained between disabling and re-enabling a stream.
3590  *
3591  * Note: while a stream is disabled it's considered an error for userspace
3592  * to attempt to read from the stream (-EIO).
3593  */
3594 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3595 {
3596 	if (!stream->enabled)
3597 		return;
3598 
3599 	/* Allow stream->ops->disable() to refer to this */
3600 	stream->enabled = false;
3601 
3602 	if (stream->hold_preemption)
3603 		intel_context_clear_nopreempt(stream->pinned_ctx);
3604 
3605 	if (stream->ops->disable)
3606 		stream->ops->disable(stream);
3607 }
3608 
3609 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3610 				    unsigned long metrics_set)
3611 {
3612 	struct i915_oa_config *config;
3613 	long ret = stream->oa_config->id;
3614 
3615 	config = i915_perf_get_oa_config(stream->perf, metrics_set);
3616 	if (!config)
3617 		return -EINVAL;
3618 
3619 	if (config != stream->oa_config) {
3620 		int err;
3621 
3622 		/*
3623 		 * If OA is bound to a specific context, emit the
3624 		 * reconfiguration inline from that context. The update
3625 		 * will then be ordered with respect to submission on that
3626 		 * context.
3627 		 *
3628 		 * When set globally, we use a low priority kernel context,
3629 		 * so it will effectively take effect when idle.
3630 		 */
3631 		err = emit_oa_config(stream, config, oa_context(stream), NULL);
3632 		if (!err)
3633 			config = xchg(&stream->oa_config, config);
3634 		else
3635 			ret = err;
3636 	}
3637 
3638 	i915_oa_config_put(config);
3639 
3640 	return ret;
3641 }
3642 
3643 /**
3644  * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3645  * @stream: An i915 perf stream
3646  * @cmd: the ioctl request
3647  * @arg: the ioctl data
3648  *
3649  * Returns: zero on success or a negative error code. Returns -EINVAL for
3650  * an unknown ioctl request.
3651  */
3652 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3653 				   unsigned int cmd,
3654 				   unsigned long arg)
3655 {
3656 	switch (cmd) {
3657 	case I915_PERF_IOCTL_ENABLE:
3658 		i915_perf_enable_locked(stream);
3659 		return 0;
3660 	case I915_PERF_IOCTL_DISABLE:
3661 		i915_perf_disable_locked(stream);
3662 		return 0;
3663 	case I915_PERF_IOCTL_CONFIG:
3664 		return i915_perf_config_locked(stream, arg);
3665 	}
3666 
3667 	return -EINVAL;
3668 }
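
/*
 * Illustrative sketch (not part of the driver): switching metric sets on a
 * live stream, assuming "config_id" was returned earlier by
 * DRM_IOCTL_I915_PERF_ADD_CONFIG:
 *
 *	long prev = ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, config_id);
 *
 * On success the previous configuration's id is returned (so it can be
 * restored later with the same ioctl); an unknown id fails with EINVAL.
 */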
3669 
3670 /**
3671  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3672  * @file: An i915 perf stream file
3673  * @cmd: the ioctl request
3674  * @arg: the ioctl data
3675  *
3676  * Implementation deferred to i915_perf_ioctl_locked().
3677  *
3678  * Returns: zero on success or a negative error code. Returns -EINVAL for
3679  * an unknown ioctl request.
3680  */
3681 static long i915_perf_ioctl(struct file *file,
3682 			    unsigned int cmd,
3683 			    unsigned long arg)
3684 {
3685 	struct i915_perf_stream *stream = file->private_data;
3686 	long ret;
3687 
3688 	mutex_lock(&stream->lock);
3689 	ret = i915_perf_ioctl_locked(stream, cmd, arg);
3690 	mutex_unlock(&stream->lock);
3691 
3692 	return ret;
3693 }
3694 
3695 /**
3696  * i915_perf_destroy_locked - destroy an i915 perf stream
3697  * @stream: An i915 perf stream
3698  *
3699  * Frees all resources associated with the given i915 perf @stream, disabling
3700  * any associated data capture in the process.
3701  *
3702  * Note: The &gt->perf.lock mutex has been taken to serialize
3703  * with any non-file-operation driver hooks.
3704  */
3705 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3706 {
3707 	if (stream->enabled)
3708 		i915_perf_disable_locked(stream);
3709 
3710 	if (stream->ops->destroy)
3711 		stream->ops->destroy(stream);
3712 
3713 	if (stream->ctx)
3714 		i915_gem_context_put(stream->ctx);
3715 
3716 	kfree(stream);
3717 }
3718 
3719 /**
3720  * i915_perf_release - handles userspace close() of a stream file
3721  * @inode: anonymous inode associated with file
3722  * @file: An i915 perf stream file
3723  *
3724  * Cleans up any resources associated with an open i915 perf stream file.
3725  *
3726  * NB: close() can't really fail from the userspace point of view.
3727  *
3728  * Returns: zero on success or a negative error code.
3729  */
3730 static int i915_perf_release(struct inode *inode, struct file *file)
3731 {
3732 	struct i915_perf_stream *stream = file->private_data;
3733 	struct i915_perf *perf = stream->perf;
3734 	struct intel_gt *gt = stream->engine->gt;
3735 
3736 	/*
3737 	 * Within this call, we know that the fd is being closed and we have no
3738 	 * other user of stream->lock. Use the perf lock to destroy the stream
3739 	 * here.
3740 	 */
3741 	mutex_lock(&gt->perf.lock);
3742 	i915_perf_destroy_locked(stream);
3743 	mutex_unlock(&gt->perf.lock);
3744 
3745 	/* Release the reference the perf stream kept on the driver. */
3746 	drm_dev_put(&perf->i915->drm);
3747 
3748 	return 0;
3749 }
3750 
3751 
3752 static const struct file_operations fops = {
3753 	.owner		= THIS_MODULE,
3754 	.release	= i915_perf_release,
3755 	.poll		= i915_perf_poll,
3756 	.read		= i915_perf_read,
3757 	.unlocked_ioctl	= i915_perf_ioctl,
3758 	/* Our ioctls have no arguments, so it's safe to use the same function
3759 	 * to handle 32-bit compatibility.
3760 	 */
3761 	.compat_ioctl   = i915_perf_ioctl,
3762 };
3763 
3764 
3765 /**
3766  * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3767  * @perf: i915 perf instance
3768  * @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
3769  * @props: individually validated u64 property value pairs
3770  * @file: drm file
3771  *
3772  * See i915_perf_ioctl_open() for interface details.
3773  *
3774  * Implements further stream config validation and stream initialization on
3775  * behalf of i915_perf_open_ioctl() with the &gt->perf.lock mutex
3776  * taken to serialize with any non-file-operation driver hooks.
3777  *
3778  * Note: at this point the @props have only been validated in isolation and
3779  * it's still necessary to validate that the combination of properties makes
3780  * sense.
3781  *
3782  * In the case where userspace is interested in OA unit metrics then further
3783  * config validation and stream initialization details will be handled by
3784  * i915_oa_stream_init(). The code here should only validate config state that
3785  * will be relevant to all stream types / backends.
3786  *
3787  * Returns: zero on success or a negative error code.
3788  */
3789 static int
3790 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3791 			    struct drm_i915_perf_open_param *param,
3792 			    struct perf_open_properties *props,
3793 			    struct drm_file *file)
3794 {
3795 	struct i915_gem_context *specific_ctx = NULL;
3796 	struct i915_perf_stream *stream = NULL;
3797 	unsigned long f_flags = 0;
3798 	bool privileged_op = true;
3799 	int stream_fd;
3800 	int ret;
3801 
3802 	if (props->single_context) {
3803 		u32 ctx_handle = props->ctx_handle;
3804 		struct drm_i915_file_private *file_priv = file->driver_priv;
3805 
3806 		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3807 		if (IS_ERR(specific_ctx)) {
3808 			drm_dbg(&perf->i915->drm,
3809 				"Failed to look up context with ID %u for opening perf stream\n",
3810 				  ctx_handle);
3811 			ret = PTR_ERR(specific_ctx);
3812 			goto err;
3813 		}
3814 	}
3815 
3816 	/*
3817 	 * On Haswell the OA unit supports clock gating off for a specific
3818 	 * context and in this mode there's no visibility of metrics for the
3819 	 * rest of the system, which we consider acceptable for a
3820 	 * non-privileged client.
3821 	 *
3822 	 * For Gen8->11 the OA unit no longer supports clock gating off for a
3823 	 * specific context and the kernel can't securely stop the counters
3824 	 * from updating as system-wide / global values. Even though we can
3825 	 * filter reports based on the included context ID we can't block
3826 	 * clients from seeing the raw / global counter values via
3827 	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3828 	 * enable the OA unit by default.
3829 	 *
3830 	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3831 	 * per context basis. So we can relax requirements there if the user
3832 	 * doesn't request global stream access (i.e. query based sampling
3833 	 * using MI_REPORT_PERF_COUNT).
3834 	 */
3835 	if (IS_HASWELL(perf->i915) && specific_ctx)
3836 		privileged_op = false;
3837 	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3838 		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3839 		privileged_op = false;
3840 
3841 	if (props->hold_preemption) {
3842 		if (!props->single_context) {
3843 			drm_dbg(&perf->i915->drm,
3844 				"preemption disable with no context\n");
3845 			ret = -EINVAL;
3846 			goto err;
3847 		}
3848 		privileged_op = true;
3849 	}
3850 
3851 	/*
3852 	 * Asking for SSEU configuration is a privileged operation.
3853 	 */
3854 	if (props->has_sseu)
3855 		privileged_op = true;
3856 	else
3857 		get_default_sseu_config(&props->sseu, props->engine);
3858 
3859 	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3860 	 * we check a dev.i915.perf_stream_paranoid sysctl option
3861 	 * to determine if it's ok to access system wide OA counters
3862 	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3863 	 */
3864 	if (privileged_op &&
3865 	    i915_perf_stream_paranoid && !perfmon_capable()) {
3866 		drm_dbg(&perf->i915->drm,
3867 			"Insufficient privileges to open i915 perf stream\n");
3868 		ret = -EACCES;
3869 		goto err_ctx;
3870 	}
3871 
3872 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3873 	if (!stream) {
3874 		ret = -ENOMEM;
3875 		goto err_ctx;
3876 	}
3877 
3878 	stream->perf = perf;
3879 	stream->ctx = specific_ctx;
3880 	stream->poll_oa_period = props->poll_oa_period;
3881 
3882 	ret = i915_oa_stream_init(stream, param, props);
3883 	if (ret)
3884 		goto err_alloc;
3885 
3886 	/* we avoid simply assigning stream->sample_flags = props->sample_flags
3887 	 * to have _stream_init check the combination of sample flags more
3888 	 * thoroughly, but still this is the expected result at this point.
3889 	 */
3890 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3891 		ret = -ENODEV;
3892 		goto err_flags;
3893 	}
3894 
3895 	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3896 		f_flags |= O_CLOEXEC;
3897 	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3898 		f_flags |= O_NONBLOCK;
3899 
3900 	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3901 	if (stream_fd < 0) {
3902 		ret = stream_fd;
3903 		goto err_flags;
3904 	}
3905 
3906 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
3907 		i915_perf_enable_locked(stream);
3908 
3909 	/* Take a reference on the driver that will be kept with stream_fd
3910 	 * until its release.
3911 	 */
3912 	drm_dev_get(&perf->i915->drm);
3913 
3914 	return stream_fd;
3915 
3916 err_flags:
3917 	if (stream->ops->destroy)
3918 		stream->ops->destroy(stream);
3919 err_alloc:
3920 	kfree(stream);
3921 err_ctx:
3922 	if (specific_ctx)
3923 		i915_gem_context_put(specific_ctx);
3924 err:
3925 	return ret;
3926 }
3927 
3928 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3929 {
3930 	u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
3931 	u32 den = i915_perf_oa_timestamp_frequency(perf->i915);
3932 
3933 	return div_u64(nom + den - 1, den);
3934 }
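
/*
 * A worked example of the conversion above, with hypothetical numbers:
 * given a 19.2 MHz OA timestamp frequency, exponent 5 yields
 * (2 << 5) * NSEC_PER_SEC / 19200000 ~= 3334ns between reports (the
 * division rounds up), and each increment of the exponent doubles it.
 */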
3935 
3936 static __always_inline bool
3937 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
3938 {
3939 	return test_bit(format, perf->format_mask);
3940 }
3941 
3942 static __always_inline void
3943 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
3944 {
3945 	__set_bit(format, perf->format_mask);
3946 }
3947 
3948 /**
3949  * read_properties_unlocked - validate + copy userspace stream open properties
3950  * @perf: i915 perf instance
3951  * @uprops: The array of u64 key value pairs given by userspace
3952  * @n_props: The number of key value pairs expected in @uprops
3953  * @props: The stream configuration built up while validating properties
3954  *
3955  * Note this function only validates properties in isolation it doesn't
3956  * validate that the combination of properties makes sense or that all
3957  * properties necessary for a particular kind of stream have been set.
3958  *
3959  * Note that there currently aren't any ordering requirements for properties so
3960  * we shouldn't validate or assume anything about ordering here. This doesn't
3961  * rule out defining new properties with ordering requirements in the future.
3962  */
3963 static int read_properties_unlocked(struct i915_perf *perf,
3964 				    u64 __user *uprops,
3965 				    u32 n_props,
3966 				    struct perf_open_properties *props)
3967 {
3968 	struct drm_i915_gem_context_param_sseu user_sseu;
3969 	const struct i915_oa_format *f;
3970 	u64 __user *uprop = uprops;
3971 	bool config_instance = false;
3972 	bool config_class = false;
3973 	bool config_sseu = false;
3974 	u8 class, instance;
3975 	u32 i;
3976 	int ret;
3977 
3978 	memset(props, 0, sizeof(struct perf_open_properties));
3979 	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3980 
3981 	/* Considering that ID = 0 is reserved and assuming that we don't
3982 	 * (currently) expect any configurations to ever specify duplicate
3983 	 * values for a particular property ID then the last _PROP_MAX value is
3984 	 * one greater than the maximum number of properties we expect to get
3985 	 * from userspace.
3986 	 */
3987 	if (!n_props || n_props >= DRM_I915_PERF_PROP_MAX) {
3988 		drm_dbg(&perf->i915->drm,
3989 			"Invalid number of i915 perf properties given\n");
3990 		return -EINVAL;
3991 	}
3992 
3993 	/* Defaults when class:instance is not passed */
3994 	class = I915_ENGINE_CLASS_RENDER;
3995 	instance = 0;
3996 
3997 	for (i = 0; i < n_props; i++) {
3998 		u64 oa_period, oa_freq_hz;
3999 		u64 id, value;
4000 
4001 		ret = get_user(id, uprop);
4002 		if (ret)
4003 			return ret;
4004 
4005 		ret = get_user(value, uprop + 1);
4006 		if (ret)
4007 			return ret;
4008 
4009 		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
4010 			drm_dbg(&perf->i915->drm,
4011 				"Unknown i915 perf property ID\n");
4012 			return -EINVAL;
4013 		}
4014 
4015 		switch ((enum drm_i915_perf_property_id)id) {
4016 		case DRM_I915_PERF_PROP_CTX_HANDLE:
4017 			props->single_context = 1;
4018 			props->ctx_handle = value;
4019 			break;
4020 		case DRM_I915_PERF_PROP_SAMPLE_OA:
4021 			if (value)
4022 				props->sample_flags |= SAMPLE_OA_REPORT;
4023 			break;
4024 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
4025 			if (value == 0) {
4026 				drm_dbg(&perf->i915->drm,
4027 					"Unknown OA metric set ID\n");
4028 				return -EINVAL;
4029 			}
4030 			props->metrics_set = value;
4031 			break;
4032 		case DRM_I915_PERF_PROP_OA_FORMAT:
4033 			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
4034 				drm_dbg(&perf->i915->drm,
4035 					"Out-of-range OA report format %llu\n",
4036 					  value);
4037 				return -EINVAL;
4038 			}
4039 			if (!oa_format_valid(perf, value)) {
4040 				drm_dbg(&perf->i915->drm,
4041 					"Unsupported OA report format %llu\n",
4042 					  value);
4043 				return -EINVAL;
4044 			}
4045 			props->oa_format = value;
4046 			break;
4047 		case DRM_I915_PERF_PROP_OA_EXPONENT:
4048 			if (value > OA_EXPONENT_MAX) {
4049 				drm_dbg(&perf->i915->drm,
4050 					"OA timer exponent too high (> %u)\n",
4051 					 OA_EXPONENT_MAX);
4052 				return -EINVAL;
4053 			}
4054 
4055 			/* Theoretically we can program the OA unit to sample
4056 			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
4057 			 * for BXT. We don't allow such high sampling
4058 			 * frequencies by default unless the caller is privileged.
4059 			 */
4060 
4061 			BUILD_BUG_ON(sizeof(oa_period) != 8);
4062 			oa_period = oa_exponent_to_ns(perf, value);
4063 
4064 			/* This check is primarily to ensure that oa_period <=
4065 			 * UINT32_MAX (before passing to do_div which only
4066 			 * accepts a u32 denominator), but we can also skip
4067 			 * checking anything < 1Hz which implicitly can't be
4068 			 * limited via an integer oa_max_sample_rate.
4069 			 */
4070 			if (oa_period <= NSEC_PER_SEC) {
4071 				u64 tmp = NSEC_PER_SEC;
4072 				do_div(tmp, oa_period);
4073 				oa_freq_hz = tmp;
4074 			} else
4075 				oa_freq_hz = 0;
4076 
4077 			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
4078 				drm_dbg(&perf->i915->drm,
4079 					"OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
4080 					  i915_oa_max_sample_rate);
4081 				return -EACCES;
4082 			}
4083 
4084 			props->oa_periodic = true;
4085 			props->oa_period_exponent = value;
4086 			break;
4087 		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
4088 			props->hold_preemption = !!value;
4089 			break;
4090 		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
4091 			if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 55)) {
4092 				drm_dbg(&perf->i915->drm,
4093 					"SSEU config not supported on gfx %x\n",
4094 					GRAPHICS_VER_FULL(perf->i915));
4095 				return -ENODEV;
4096 			}
4097 
4098 			if (copy_from_user(&user_sseu,
4099 					   u64_to_user_ptr(value),
4100 					   sizeof(user_sseu))) {
4101 				drm_dbg(&perf->i915->drm,
4102 					"Unable to copy global sseu parameter\n");
4103 				return -EFAULT;
4104 			}
4105 			config_sseu = true;
4106 			break;
4107 		}
4108 		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
4109 			if (value < 100000 /* 100us */) {
4110 				drm_dbg(&perf->i915->drm,
4111 					"OA availability timer too small (%lluns < 100us)\n",
4112 					  value);
4113 				return -EINVAL;
4114 			}
4115 			props->poll_oa_period = value;
4116 			break;
4117 		case DRM_I915_PERF_PROP_OA_ENGINE_CLASS:
4118 			class = (u8)value;
4119 			config_class = true;
4120 			break;
4121 		case DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE:
4122 			instance = (u8)value;
4123 			config_instance = true;
4124 			break;
4125 		default:
4126 			MISSING_CASE(id);
4127 			return -EINVAL;
4128 		}
4129 
4130 		uprop += 2;
4131 	}
4132 
4133 	if ((config_class && !config_instance) ||
4134 	    (config_instance && !config_class)) {
4135 		drm_dbg(&perf->i915->drm,
4136 			"OA engine-class and engine-instance parameters must be passed together\n");
4137 		return -EINVAL;
4138 	}
4139 
4140 	props->engine = intel_engine_lookup_user(perf->i915, class, instance);
4141 	if (!props->engine) {
4142 		drm_dbg(&perf->i915->drm,
4143 			"OA engine class and instance invalid %d:%d\n",
4144 			class, instance);
4145 		return -EINVAL;
4146 	}
4147 
4148 	if (!engine_supports_oa(props->engine)) {
4149 		drm_dbg(&perf->i915->drm,
4150 			"Engine not supported by OA %d:%d\n",
4151 			class, instance);
4152 		return -EINVAL;
4153 	}
4154 
4155 	/*
4156 	 * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded by
4157 	 * disabling Media C6 in BIOS. Fail if Media C6 is enabled on
4158 	 * steppings where OAM does not work as expected.
4159 	 */
4160 	if (IS_MEDIA_GT_IP_STEP(props->engine->gt, IP_VER(13, 0), STEP_A0, STEP_C0) &&
4161 	    props->engine->oa_group->type == TYPE_OAM &&
4162 	    intel_check_bios_c6_setup(&props->engine->gt->rc6)) {
4163 		drm_dbg(&perf->i915->drm,
4164 			"OAM requires media C6 to be disabled in BIOS\n");
4165 		return -EINVAL;
4166 	}
4167 
4168 	i = array_index_nospec(props->oa_format, I915_OA_FORMAT_MAX);
4169 	f = &perf->oa_formats[i];
4170 	if (!engine_supports_oa_format(props->engine, f->type)) {
4171 		drm_dbg(&perf->i915->drm,
4172 			"Invalid OA format %d for class %d\n",
4173 			f->type, props->engine->class);
4174 		return -EINVAL;
4175 	}
4176 
4177 	if (config_sseu) {
4178 		ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
4179 		if (ret) {
4180 			drm_dbg(&perf->i915->drm,
4181 				"Invalid SSEU configuration\n");
4182 			return ret;
4183 		}
4184 		props->has_sseu = true;
4185 	}
4186 
4187 	return 0;
4188 }
4189 
4190 /**
4191  * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
4192  * @dev: drm device
4193  * @data: ioctl data copied from userspace (unvalidated)
4194  * @file: drm file
4195  *
4196  * Validates the stream open parameters given by userspace including flags
4197  * and an array of u64 key, value pair properties.
4198  *
4199  * Very little is assumed up front about the nature of the stream being
4200  * opened (for instance we don't assume it's for periodic OA unit metrics). An
4201  * i915-perf stream is expected to be a suitable interface for other forms of
4202  * buffered data written by the GPU besides periodic OA metrics.
4203  *
4204  * Note we copy the properties from userspace outside of the i915 perf
4205  * mutex to avoid an awkward lockdep with mmap_lock.
4206  *
4207  * Most of the implementation details are handled by
4208  * i915_perf_open_ioctl_locked() after taking the &gt->perf.lock
4209  * mutex for serializing with any non-file-operation driver hooks.
4210  *
4211  * Return: A newly opened i915 Perf stream file descriptor or negative
4212  * error code on failure.
4213  */
4214 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
4215 			 struct drm_file *file)
4216 {
4217 	struct i915_perf *perf = &to_i915(dev)->perf;
4218 	struct drm_i915_perf_open_param *param = data;
4219 	struct intel_gt *gt;
4220 	struct perf_open_properties props;
4221 	u32 known_open_flags;
4222 	int ret;
4223 
4224 	if (!perf->i915)
4225 		return -ENOTSUPP;
4226 
4227 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
4228 			   I915_PERF_FLAG_FD_NONBLOCK |
4229 			   I915_PERF_FLAG_DISABLED;
4230 	if (param->flags & ~known_open_flags) {
4231 		drm_dbg(&perf->i915->drm,
4232 			"Unknown drm_i915_perf_open_param flag\n");
4233 		return -EINVAL;
4234 	}
4235 
4236 	ret = read_properties_unlocked(perf,
4237 				       u64_to_user_ptr(param->properties_ptr),
4238 				       param->num_properties,
4239 				       &props);
4240 	if (ret)
4241 		return ret;
4242 
4243 	gt = props.engine->gt;
4244 
4245 	mutex_lock(&gt->perf.lock);
4246 	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
4247 	mutex_unlock(&gt->perf.lock);
4248 
4249 	return ret;
4250 }
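
/*
 * Illustrative sketch (not part of the driver) of the uAPI implemented
 * above, assuming a DRM fd "drm_fd" and a valid OA config id
 * "metrics_set" (the format below is a gen8+ one; real code should pick
 * a format and exponent suited to the target platform):
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */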
4251 
4252 /**
4253  * i915_perf_register - exposes i915-perf to userspace
4254  * @i915: i915 device instance
4255  *
4256  * In particular OA metric sets are advertised under a sysfs metrics/
4257  * directory allowing userspace to enumerate valid IDs that can be
4258  * used to open an i915-perf stream.
4259  */
4260 void i915_perf_register(struct drm_i915_private *i915)
4261 {
4262 	struct i915_perf *perf = &i915->perf;
4263 	struct intel_gt *gt = to_gt(i915);
4264 
4265 	if (!perf->i915)
4266 		return;
4267 
4268 	/* To be sure we're synchronized with an attempted
4269 	 * i915_perf_open_ioctl(), considering that we register after
4270 	 * already being exposed to userspace, take the perf lock.
4271 	 */
4272 	mutex_lock(&gt->perf.lock);
4273 
4274 	perf->metrics_kobj =
4275 		kobject_create_and_add("metrics",
4276 				       &i915->drm.primary->kdev->kobj);
4277 
4278 	mutex_unlock(&gt->perf.lock);
4279 }
4280 
4281 /**
4282  * i915_perf_unregister - hide i915-perf from userspace
4283  * @i915: i915 device instance
4284  *
4285  * i915-perf state cleanup is split up into an 'unregister' and
4286  * 'deinit' phase where the interface is first hidden from
4287  * userspace by i915_perf_unregister() before cleaning up
4288  * remaining state in i915_perf_fini().
4289  */
4290 void i915_perf_unregister(struct drm_i915_private *i915)
4291 {
4292 	struct i915_perf *perf = &i915->perf;
4293 
4294 	if (!perf->metrics_kobj)
4295 		return;
4296 
4297 	kobject_put(perf->metrics_kobj);
4298 	perf->metrics_kobj = NULL;
4299 }
4300 
4301 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
4302 {
4303 	static const i915_reg_t flex_eu_regs[] = {
4304 		EU_PERF_CNTL0,
4305 		EU_PERF_CNTL1,
4306 		EU_PERF_CNTL2,
4307 		EU_PERF_CNTL3,
4308 		EU_PERF_CNTL4,
4309 		EU_PERF_CNTL5,
4310 		EU_PERF_CNTL6,
4311 	};
4312 	int i;
4313 
4314 	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
4315 		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
4316 			return true;
4317 	}
4318 	return false;
4319 }
4320 
4321 static bool reg_in_range_table(u32 addr, const struct i915_range *table)
4322 {
4323 	while (table->start || table->end) {
4324 		if (addr >= table->start && addr <= table->end)
4325 			return true;
4326 
4327 		table++;
4328 	}
4329 
4330 	return false;
4331 }
4332 
4333 #define REG_EQUAL(addr, mmio) \
4334 	((addr) == i915_mmio_reg_offset(mmio))
4335 
4336 static const struct i915_range gen7_oa_b_counters[] = {
4337 	{ .start = 0x2710, .end = 0x272c },	/* OASTARTTRIG[1-8] */
4338 	{ .start = 0x2740, .end = 0x275c },	/* OAREPORTTRIG[1-8] */
4339 	{ .start = 0x2770, .end = 0x27ac },	/* OACEC[0-7][0-1] */
4340 	{}
4341 };
4342 
4343 static const struct i915_range gen12_oa_b_counters[] = {
4344 	{ .start = 0x2b2c, .end = 0x2b2c },	/* GEN12_OAG_OA_PESS */
4345 	{ .start = 0xd900, .end = 0xd91c },	/* GEN12_OAG_OASTARTTRIG[1-8] */
4346 	{ .start = 0xd920, .end = 0xd93c },	/* GEN12_OAG_OAREPORTTRIG1[1-8] */
4347 	{ .start = 0xd940, .end = 0xd97c },	/* GEN12_OAG_CEC[0-7][0-1] */
4348 	{ .start = 0xdc00, .end = 0xdc3c },	/* GEN12_OAG_SCEC[0-7][0-1] */
4349 	{ .start = 0xdc40, .end = 0xdc40 },	/* GEN12_OAG_SPCTR_CNF */
4350 	{ .start = 0xdc44, .end = 0xdc44 },	/* GEN12_OAA_DBG_REG */
4351 	{}
4352 };
4353 
4354 static const struct i915_range mtl_oam_b_counters[] = {
4355 	{ .start = 0x393000, .end = 0x39301c },	/* GEN12_OAM_STARTTRIG1[1-8] */
4356 	{ .start = 0x393020, .end = 0x39303c },	/* GEN12_OAM_REPORTTRIG1[1-8] */
4357 	{ .start = 0x393040, .end = 0x39307c },	/* GEN12_OAM_CEC[0-7][0-1] */
4358 	{ .start = 0x393200, .end = 0x39323C },	/* MPES[0-7] */
4359 	{}
4360 };
4361 
4362 static const struct i915_range xehp_oa_b_counters[] = {
4363 	{ .start = 0xdc48, .end = 0xdc48 },	/* OAA_ENABLE_REG */
4364 	{ .start = 0xdd00, .end = 0xdd48 },	/* OAG_LCE0_0 - OAA_LENABLE_REG */
4365 	{}
4366 };
4367 
4368 static const struct i915_range gen7_oa_mux_regs[] = {
4369 	{ .start = 0x91b8, .end = 0x91cc },	/* OA_PERFCNT[1-2], OA_PERFMATRIX */
4370 	{ .start = 0x9800, .end = 0x9888 },	/* MICRO_BP0_0 - NOA_WRITE */
4371 	{ .start = 0xe180, .end = 0xe180 },	/* HALF_SLICE_CHICKEN2 */
4372 	{}
4373 };
4374 
4375 static const struct i915_range hsw_oa_mux_regs[] = {
4376 	{ .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
4377 	{ .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
4378 	{ .start = 0x25100, .end = 0x2ff90 },
4379 	{}
4380 };
4381 
4382 static const struct i915_range chv_oa_mux_regs[] = {
4383 	{ .start = 0x182300, .end = 0x1823a4 },
4384 	{}
4385 };
4386 
4387 static const struct i915_range gen8_oa_mux_regs[] = {
4388 	{ .start = 0x0d00, .end = 0x0d2c },	/* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
4389 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4390 	{}
4391 };
4392 
4393 static const struct i915_range gen11_oa_mux_regs[] = {
4394 	{ .start = 0x91c8, .end = 0x91dc },	/* OA_PERFCNT[3-4] */
4395 	{}
4396 };
4397 
4398 static const struct i915_range gen12_oa_mux_regs[] = {
4399 	{ .start = 0x0d00, .end = 0x0d04 },     /* RPM_CONFIG[0-1] */
4400 	{ .start = 0x0d0c, .end = 0x0d2c },     /* NOA_CONFIG[0-8] */
4401 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
4402 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
4403 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4404 	{}
4405 };
4406 
4407 /*
4408  * Ref: 14010536224:
4409  * 0x20cc is repurposed on MTL, so use a separate array for MTL.
4410  */
4411 static const struct i915_range mtl_oa_mux_regs[] = {
4412 	{ .start = 0x0d00, .end = 0x0d04 },	/* RPM_CONFIG[0-1] */
4413 	{ .start = 0x0d0c, .end = 0x0d2c },	/* NOA_CONFIG[0-8] */
4414 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
4415 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
4416 	{ .start = 0x38d100, .end = 0x38d114},	/* VISACTL */
4417 	{}
4418 };
4419 
4420 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4421 {
4422 	return reg_in_range_table(addr, gen7_oa_b_counters);
4423 }
4424 
4425 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4426 {
4427 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4428 		reg_in_range_table(addr, gen8_oa_mux_regs);
4429 }
4430 
4431 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4432 {
4433 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4434 		reg_in_range_table(addr, gen8_oa_mux_regs) ||
4435 		reg_in_range_table(addr, gen11_oa_mux_regs);
4436 }
4437 
4438 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4439 {
4440 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4441 		reg_in_range_table(addr, hsw_oa_mux_regs);
4442 }
4443 
4444 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4445 {
4446 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4447 		reg_in_range_table(addr, chv_oa_mux_regs);
4448 }
4449 
4450 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4451 {
4452 	return reg_in_range_table(addr, gen12_oa_b_counters);
4453 }
4454 
4455 static bool mtl_is_valid_oam_b_counter_addr(struct i915_perf *perf, u32 addr)
4456 {
4457 	if (HAS_OAM(perf->i915) &&
4458 	    GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
4459 		return reg_in_range_table(addr, mtl_oam_b_counters);
4460 
4461 	return false;
4462 }
4463 
4464 static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4465 {
4466 	return reg_in_range_table(addr, xehp_oa_b_counters) ||
4467 		reg_in_range_table(addr, gen12_oa_b_counters) ||
4468 		mtl_is_valid_oam_b_counter_addr(perf, addr);
4469 }
4470 
4471 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4472 {
4473 	if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
4474 		return reg_in_range_table(addr, mtl_oa_mux_regs);
4475 	else
4476 		return reg_in_range_table(addr, gen12_oa_mux_regs);
4477 }
4478 
4479 static u32 mask_reg_value(u32 reg, u32 val)
4480 {
4481 	/* HALF_SLICE_CHICKEN2 is programmed with the
4482 	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
4483 	 * programmed by userspace doesn't change this.
4484 	 */
4485 	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
4486 		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
4487 
4488 	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
4489 	 * indicated by its name and a bunch of selection fields used by OA
4490 	 * configs.
4491 	 */
4492 	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
4493 		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
4494 
4495 	return val;
4496 }
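
/*
 * Both registers above are "masked" registers: the high 16 bits of a write
 * select which of the low 16 bits actually get updated. E.g.
 * _MASKED_BIT_ENABLE(BIT(0)) expands to 0x00010001 (write bit 0 as 1), so
 * clearing that pattern from a user-supplied value, as done here, stops
 * the write from touching the protected bit at all.
 */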
4497 
4498 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
4499 					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
4500 					 u32 __user *regs,
4501 					 u32 n_regs)
4502 {
4503 	struct i915_oa_reg *oa_regs;
4504 	int err;
4505 	u32 i;
4506 
4507 	if (!n_regs)
4508 		return NULL;
4509 
4510 	/* No is_valid function means we're not allowing any register to be programmed. */
4511 	GEM_BUG_ON(!is_valid);
4512 	if (!is_valid)
4513 		return ERR_PTR(-EINVAL);
4514 
4515 	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4516 	if (!oa_regs)
4517 		return ERR_PTR(-ENOMEM);
4518 
4519 	for (i = 0; i < n_regs; i++) {
4520 		u32 addr, value;
4521 
4522 		err = get_user(addr, regs);
4523 		if (err)
4524 			goto addr_err;
4525 
4526 		if (!is_valid(perf, addr)) {
4527 			drm_dbg(&perf->i915->drm,
4528 				"Invalid oa_reg address: %X\n", addr);
4529 			err = -EINVAL;
4530 			goto addr_err;
4531 		}
4532 
4533 		err = get_user(value, regs + 1);
4534 		if (err)
4535 			goto addr_err;
4536 
4537 		oa_regs[i].addr = _MMIO(addr);
4538 		oa_regs[i].value = mask_reg_value(addr, value);
4539 
4540 		regs += 2;
4541 	}
4542 
4543 	return oa_regs;
4544 
4545 addr_err:
4546 	kfree(oa_regs);
4547 	return ERR_PTR(err);
4548 }
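
/*
 * Illustrative sketch (not part of the driver): the user pointer consumed
 * above is a flat array of (address, value) u32 pairs; the register
 * values here are made up for illustration:
 *
 *	uint32_t b_counter_regs[] = {
 *		0x2710, 0x00100000,	// in the gen7 OASTARTTRIG range
 *		0x2740, 0x00000004,	// in the gen7 OAREPORTTRIG range
 *	};
 *	// referenced from struct drm_i915_perf_oa_config as
 *	//	.boolean_regs_ptr = (uintptr_t)b_counter_regs,
 *	//	.n_boolean_regs = 2,
 *
 * Every address must pass the platform's is_valid callback or the whole
 * config is rejected with -EINVAL.
 */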
4549 
4550 static ssize_t show_dynamic_id(struct kobject *kobj,
4551 			       struct kobj_attribute *attr,
4552 			       char *buf)
4553 {
4554 	struct i915_oa_config *oa_config =
4555 		container_of(attr, typeof(*oa_config), sysfs_metric_id);
4556 
4557 	return sprintf(buf, "%d\n", oa_config->id);
4558 }
4559 
4560 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4561 					 struct i915_oa_config *oa_config)
4562 {
4563 	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4564 	oa_config->sysfs_metric_id.attr.name = "id";
4565 	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4566 	oa_config->sysfs_metric_id.show = show_dynamic_id;
4567 	oa_config->sysfs_metric_id.store = NULL;
4568 
4569 	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4570 	oa_config->attrs[1] = NULL;
4571 
4572 	oa_config->sysfs_metric.name = oa_config->uuid;
4573 	oa_config->sysfs_metric.attrs = oa_config->attrs;
4574 
4575 	return sysfs_create_group(perf->metrics_kobj,
4576 				  &oa_config->sysfs_metric);
4577 }
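
/*
 * The resulting sysfs layout (assuming the primary node is card0) is:
 *
 *	/sys/class/drm/card0/metrics/<uuid>/id
 *
 * where reading "id" yields the integer config id that userspace can pass
 * back via DRM_I915_PERF_PROP_OA_METRICS_SET when opening a stream.
 */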
4578 
4579 /**
4580  * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4581  * @dev: drm device
4582  * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4583  *        userspace (unvalidated)
4584  * @file: drm file
4585  *
4586  * Validates the submitted OA register to be saved into a new OA config that
4587  * can then be used for programming the OA unit and its NOA network.
4588  *
4589  * Returns: A new allocated config number to be used with the perf open ioctl
4590  * or a negative error code on failure.
4591  */
4592 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4593 			       struct drm_file *file)
4594 {
4595 	struct i915_perf *perf = &to_i915(dev)->perf;
4596 	struct drm_i915_perf_oa_config *args = data;
4597 	struct i915_oa_config *oa_config, *tmp;
4598 	struct i915_oa_reg *regs;
4599 	int err, id;
4600 
4601 	if (!perf->i915)
4602 		return -ENOTSUPP;
4603 
4604 	if (!perf->metrics_kobj) {
4605 		drm_dbg(&perf->i915->drm,
4606 			"OA metrics weren't advertised via sysfs\n");
4607 		return -EINVAL;
4608 	}
4609 
4610 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4611 		drm_dbg(&perf->i915->drm,
4612 			"Insufficient privileges to add i915 OA config\n");
4613 		return -EACCES;
4614 	}
4615 
4616 	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4617 	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4618 	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
4619 		drm_dbg(&perf->i915->drm,
4620 			"No OA registers given\n");
4621 		return -EINVAL;
4622 	}
4623 
4624 	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4625 	if (!oa_config) {
4626 		drm_dbg(&perf->i915->drm,
4627 			"Failed to allocate memory for the OA config\n");
4628 		return -ENOMEM;
4629 	}
4630 
4631 	oa_config->perf = perf;
4632 	kref_init(&oa_config->ref);
4633 
4634 	if (!uuid_is_valid(args->uuid)) {
4635 		drm_dbg(&perf->i915->drm,
4636 			"Invalid uuid format for OA config\n");
4637 		err = -EINVAL;
4638 		goto reg_err;
4639 	}
4640 
4641 	/* The last character in oa_config->uuid will be 0 because oa_config
4642 	 * was allocated with kzalloc().
4643 	 */
4644 	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4645 
4646 	oa_config->mux_regs_len = args->n_mux_regs;
4647 	regs = alloc_oa_regs(perf,
4648 			     perf->ops.is_valid_mux_reg,
4649 			     u64_to_user_ptr(args->mux_regs_ptr),
4650 			     args->n_mux_regs);
4651 
4652 	if (IS_ERR(regs)) {
4653 		drm_dbg(&perf->i915->drm,
4654 			"Failed to create OA config for mux_regs\n");
4655 		err = PTR_ERR(regs);
4656 		goto reg_err;
4657 	}
4658 	oa_config->mux_regs = regs;
4659 
4660 	oa_config->b_counter_regs_len = args->n_boolean_regs;
4661 	regs = alloc_oa_regs(perf,
4662 			     perf->ops.is_valid_b_counter_reg,
4663 			     u64_to_user_ptr(args->boolean_regs_ptr),
4664 			     args->n_boolean_regs);
4665 
4666 	if (IS_ERR(regs)) {
4667 		drm_dbg(&perf->i915->drm,
4668 			"Failed to create OA config for b_counter_regs\n");
4669 		err = PTR_ERR(regs);
4670 		goto reg_err;
4671 	}
4672 	oa_config->b_counter_regs = regs;
4673 
4674 	if (GRAPHICS_VER(perf->i915) < 8) {
4675 		if (args->n_flex_regs != 0) {
4676 			err = -EINVAL;
4677 			goto reg_err;
4678 		}
4679 	} else {
4680 		oa_config->flex_regs_len = args->n_flex_regs;
4681 		regs = alloc_oa_regs(perf,
4682 				     perf->ops.is_valid_flex_reg,
4683 				     u64_to_user_ptr(args->flex_regs_ptr),
4684 				     args->n_flex_regs);
4685 
4686 		if (IS_ERR(regs)) {
4687 			drm_dbg(&perf->i915->drm,
4688 				"Failed to create OA config for flex_regs\n");
4689 			err = PTR_ERR(regs);
4690 			goto reg_err;
4691 		}
4692 		oa_config->flex_regs = regs;
4693 	}
4694 
4695 	err = mutex_lock_interruptible(&perf->metrics_lock);
4696 	if (err)
4697 		goto reg_err;
4698 
4699 	/* We shouldn't have too many configs, so this iteration shouldn't be
4700 	 * too costly.
4701 	 */
4702 	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4703 		if (!strcmp(tmp->uuid, oa_config->uuid)) {
4704 			drm_dbg(&perf->i915->drm,
4705 				"OA config already exists with this uuid\n");
4706 			err = -EADDRINUSE;
4707 			goto sysfs_err;
4708 		}
4709 	}
4710 
4711 	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4712 	if (err) {
4713 		drm_dbg(&perf->i915->drm,
4714 			"Failed to create sysfs entry for OA config\n");
4715 		goto sysfs_err;
4716 	}
4717 
4718 	/* Config id 0 is invalid; id 1 is the kernel's stored test config. */
4719 	oa_config->id = idr_alloc(&perf->metrics_idr,
4720 				  oa_config, 2,
4721 				  0, GFP_KERNEL);
4722 	if (oa_config->id < 0) {
4723 		drm_dbg(&perf->i915->drm,
4724 			"Failed to allocate an id for OA config\n");
4725 		err = oa_config->id;
4726 		goto sysfs_err;
4727 	}
4728 	id = oa_config->id;
4729 
4730 	drm_dbg(&perf->i915->drm,
4731 		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4732 	mutex_unlock(&perf->metrics_lock);
4733 
4734 	return id;
4735 
4736 sysfs_err:
4737 	mutex_unlock(&perf->metrics_lock);
4738 reg_err:
4739 	i915_oa_config_put(oa_config);
4740 	drm_dbg(&perf->i915->drm,
4741 		"Failed to add new OA config\n");
4742 	return err;
4743 }
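
/*
 * A minimal, untested userspace sketch of driving this ioctl (drm_fd,
 * n_mux and mux_regs are hypothetical; the uuid must be exactly 36
 * characters with no NUL terminator):
 *
 *	struct drm_i915_perf_oa_config config = { 0 };
 *	int id;
 *
 *	memcpy(config.uuid, "01234567-89ab-cdef-0123-456789abcdef", 36);
 *	config.n_mux_regs = n_mux;
 *	config.mux_regs_ptr = (uintptr_t)mux_regs;
 *
 *	id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *
 * On success the (positive) return value is the new config id.
 */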
4744 
4745 /**
4746  * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4747  * @dev: drm device
4748  * @data: ioctl data (pointer to u64 integer) copied from userspace
4749  * @file: drm file
4750  *
4751  * Configs can be removed while being used; they will stop appearing in sysfs
4752  * and their content will be freed when the stream using the config is closed.
4753  *
4754  * Returns: 0 on success or a negative error code on failure.
4755  */
4756 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4757 				  struct drm_file *file)
4758 {
4759 	struct i915_perf *perf = &to_i915(dev)->perf;
4760 	u64 *arg = data;
4761 	struct i915_oa_config *oa_config;
4762 	int ret;
4763 
4764 	if (!perf->i915)
4765 		return -ENOTSUPP;
4766 
4767 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4768 		drm_dbg(&perf->i915->drm,
4769 			"Insufficient privileges to remove i915 OA config\n");
4770 		return -EACCES;
4771 	}
4772 
4773 	ret = mutex_lock_interruptible(&perf->metrics_lock);
4774 	if (ret)
4775 		return ret;
4776 
4777 	oa_config = idr_find(&perf->metrics_idr, *arg);
4778 	if (!oa_config) {
4779 		drm_dbg(&perf->i915->drm,
4780 			"Failed to remove unknown OA config\n");
4781 		ret = -ENOENT;
4782 		goto err_unlock;
4783 	}
4784 
4785 	GEM_BUG_ON(*arg != oa_config->id);
4786 
4787 	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4788 
4789 	idr_remove(&perf->metrics_idr, *arg);
4790 
4791 	mutex_unlock(&perf->metrics_lock);
4792 
4793 	drm_dbg(&perf->i915->drm,
4794 		"Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4795 
4796 	i915_oa_config_put(oa_config);
4797 
4798 	return 0;
4799 
4800 err_unlock:
4801 	mutex_unlock(&perf->metrics_lock);
4802 	return ret;
4803 }
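
/*
 * The matching removal from userspace, under the same assumptions as the
 * sketch above (config_id holds the value returned by ADD_CONFIG):
 *
 *	u64 config_id = id;
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id))
 *		perror("REMOVE_CONFIG");
 */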
4804 
4805 static struct ctl_table oa_table[] = {
4806 	{
4807 	 .procname = "perf_stream_paranoid",
4808 	 .data = &i915_perf_stream_paranoid,
4809 	 .maxlen = sizeof(i915_perf_stream_paranoid),
4810 	 .mode = 0644,
4811 	 .proc_handler = proc_dointvec_minmax,
4812 	 .extra1 = SYSCTL_ZERO,
4813 	 .extra2 = SYSCTL_ONE,
4814 	 },
4815 	{
4816 	 .procname = "oa_max_sample_rate",
4817 	 .data = &i915_oa_max_sample_rate,
4818 	 .maxlen = sizeof(i915_oa_max_sample_rate),
4819 	 .mode = 0644,
4820 	 .proc_handler = proc_dointvec_minmax,
4821 	 .extra1 = SYSCTL_ZERO,
4822 	 .extra2 = &oa_sample_rate_hard_limit,
4823 	 },
4824 };
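
/*
 * Once i915_perf_sysctl_register() has run, these knobs surface as
 * dev.i915.* sysctls; an illustrative shell session (values may differ):
 *
 *	# sysctl dev.i915.perf_stream_paranoid=0
 *	# sysctl dev.i915.oa_max_sample_rate
 *	dev.i915.oa_max_sample_rate = 100000
 */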
4825 
4826 static u32 num_perf_groups_per_gt(struct intel_gt *gt)
4827 {
4828 	return 1;
4829 }
4830 
4831 static u32 __oam_engine_group(struct intel_engine_cs *engine)
4832 {
4833 	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70)) {
4834 		/*
4835 		 * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
4836 		 * within the gt use the same OAM. All MTL SKUs list 1 SA MEDIA.
4837 		 */
4838 		drm_WARN_ON(&engine->i915->drm,
4839 			    engine->gt->type != GT_MEDIA);
4840 
4841 		return PERF_GROUP_OAM_SAMEDIA_0;
4842 	}
4843 
4844 	return PERF_GROUP_INVALID;
4845 }
4846 
4847 static u32 __oa_engine_group(struct intel_engine_cs *engine)
4848 {
4849 	switch (engine->class) {
4850 	case RENDER_CLASS:
4851 		return PERF_GROUP_OAG;
4852 
4853 	case VIDEO_DECODE_CLASS:
4854 	case VIDEO_ENHANCEMENT_CLASS:
4855 		return __oam_engine_group(engine);
4856 
4857 	default:
4858 		return PERF_GROUP_INVALID;
4859 	}
4860 }
4861 
4862 static struct i915_perf_regs __oam_regs(u32 base)
4863 {
4864 	return (struct i915_perf_regs) {
4865 		base,
4866 		GEN12_OAM_HEAD_POINTER(base),
4867 		GEN12_OAM_TAIL_POINTER(base),
4868 		GEN12_OAM_BUFFER(base),
4869 		GEN12_OAM_CONTEXT_CONTROL(base),
4870 		GEN12_OAM_CONTROL(base),
4871 		GEN12_OAM_DEBUG(base),
4872 		GEN12_OAM_STATUS(base),
4873 		GEN12_OAM_CONTROL_COUNTER_FORMAT_SHIFT,
4874 	};
4875 }
4876 
4877 static struct i915_perf_regs __oag_regs(void)
4878 {
4879 	return (struct i915_perf_regs) {
4880 		0,
4881 		GEN12_OAG_OAHEADPTR,
4882 		GEN12_OAG_OATAILPTR,
4883 		GEN12_OAG_OABUFFER,
4884 		GEN12_OAG_OAGLBCTXCTRL,
4885 		GEN12_OAG_OACONTROL,
4886 		GEN12_OAG_OA_DEBUG,
4887 		GEN12_OAG_OASTATUS,
4888 		GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT,
4889 	};
4890 }
4891 
4892 static void oa_init_groups(struct intel_gt *gt)
4893 {
4894 	int i, num_groups = gt->perf.num_perf_groups;
4895 
4896 	for (i = 0; i < num_groups; i++) {
4897 		struct i915_perf_group *g = &gt->perf.group[i];
4898 
4899 		/* Fused off engines can result in a group with num_engines == 0 */
4900 		if (g->num_engines == 0)
4901 			continue;
4902 
4903 		if (i == PERF_GROUP_OAG && gt->type != GT_MEDIA) {
4904 			g->regs = __oag_regs();
4905 			g->type = TYPE_OAG;
4906 		} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
4907 			g->regs = __oam_regs(mtl_oa_base[i]);
4908 			g->type = TYPE_OAM;
4909 		}
4910 	}
4911 }
4912 
4913 static int oa_init_gt(struct intel_gt *gt)
4914 {
4915 	u32 num_groups = num_perf_groups_per_gt(gt);
4916 	struct intel_engine_cs *engine;
4917 	struct i915_perf_group *g;
4918 	intel_engine_mask_t tmp;
4919 
4920 	g = kcalloc(num_groups, sizeof(*g), GFP_KERNEL);
4921 	if (!g)
4922 		return -ENOMEM;
4923 
4924 	for_each_engine_masked(engine, gt, ALL_ENGINES, tmp) {
4925 		u32 index = __oa_engine_group(engine);
4926 
4927 		engine->oa_group = NULL;
4928 		if (index < num_groups) {
4929 			g[index].num_engines++;
4930 			engine->oa_group = &g[index];
4931 		}
4932 	}
4933 
4934 	gt->perf.num_perf_groups = num_groups;
4935 	gt->perf.group = g;
4936 
4937 	oa_init_groups(gt);
4938 
4939 	return 0;
4940 }
4941 
4942 static int oa_init_engine_groups(struct i915_perf *perf)
4943 {
4944 	struct intel_gt *gt;
4945 	int i, ret;
4946 
4947 	for_each_gt(gt, perf->i915, i) {
4948 		ret = oa_init_gt(gt);
4949 		if (ret)
4950 			return ret;
4951 	}
4952 
4953 	return 0;
4954 }
4955 
4956 static void oa_init_supported_formats(struct i915_perf *perf)
4957 {
4958 	struct drm_i915_private *i915 = perf->i915;
4959 	enum intel_platform platform = INTEL_INFO(i915)->platform;
4960 
4961 	switch (platform) {
4962 	case INTEL_HASWELL:
4963 		oa_format_add(perf, I915_OA_FORMAT_A13);
4965 		oa_format_add(perf, I915_OA_FORMAT_A29);
4966 		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
4967 		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
4968 		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
4969 		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
4970 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4971 		break;
4972 
4973 	case INTEL_BROADWELL:
4974 	case INTEL_CHERRYVIEW:
4975 	case INTEL_SKYLAKE:
4976 	case INTEL_BROXTON:
4977 	case INTEL_KABYLAKE:
4978 	case INTEL_GEMINILAKE:
4979 	case INTEL_COFFEELAKE:
4980 	case INTEL_COMETLAKE:
4981 	case INTEL_ICELAKE:
4982 	case INTEL_ELKHARTLAKE:
4983 	case INTEL_JASPERLAKE:
4984 	case INTEL_TIGERLAKE:
4985 	case INTEL_ROCKETLAKE:
4986 	case INTEL_DG1:
4987 	case INTEL_ALDERLAKE_S:
4988 	case INTEL_ALDERLAKE_P:
4989 		oa_format_add(perf, I915_OA_FORMAT_A12);
4990 		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
4991 		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
4992 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4993 		break;
4994 
4995 	case INTEL_DG2:
4996 		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
4997 		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
4998 		break;
4999 
5000 	case INTEL_METEORLAKE:
5001 		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
5002 		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
5003 		oa_format_add(perf, I915_OAM_FORMAT_MPEC8u64_B8_C8);
5004 		oa_format_add(perf, I915_OAM_FORMAT_MPEC8u32_B8_C8);
5005 		break;
5006 
5007 	default:
5008 		MISSING_CASE(platform);
5009 	}
5010 }
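
/*
 * Userspace selects one of the formats registered above when opening a
 * stream, via DRM_I915_PERF_PROP_OA_FORMAT. A hedged sketch (drm_fd,
 * config_id and the exponent value are assumptions):
 *
 *	u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, config_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(u64)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */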
5011 
5012 static void i915_perf_init_info(struct drm_i915_private *i915)
5013 {
5014 	struct i915_perf *perf = &i915->perf;
5015 
5016 	switch (GRAPHICS_VER(i915)) {
5017 	case 8:
5018 		perf->ctx_oactxctrl_offset = 0x120;
5019 		perf->ctx_flexeu0_offset = 0x2ce;
5020 		perf->gen8_valid_ctx_bit = BIT(25);
5021 		break;
5022 	case 9:
5023 		perf->ctx_oactxctrl_offset = 0x128;
5024 		perf->ctx_flexeu0_offset = 0x3de;
5025 		perf->gen8_valid_ctx_bit = BIT(16);
5026 		break;
5027 	case 11:
5028 		perf->ctx_oactxctrl_offset = 0x124;
5029 		perf->ctx_flexeu0_offset = 0x78e;
5030 		perf->gen8_valid_ctx_bit = BIT(16);
5031 		break;
5032 	case 12:
5033 		perf->gen8_valid_ctx_bit = BIT(16);
5034 		/*
5035 		 * Calculate offset at runtime in oa_pin_context for gen12 and
5036 		 * cache the value in perf->ctx_oactxctrl_offset.
5037 		 */
5038 		break;
5039 	default:
5040 		MISSING_CASE(GRAPHICS_VER(i915));
5041 	}
5042 }
5043 
5044 /**
5045  * i915_perf_init - initialize i915-perf state on module bind
5046  * @i915: i915 device instance
5047  *
5048  * Initializes i915-perf state without exposing anything to userspace.
5049  *
5050  * Note: i915-perf initialization is split into an 'init' and 'register'
5051  * phase, with i915_perf_register() exposing state to userspace.
5052  */
5053 int i915_perf_init(struct drm_i915_private *i915)
5054 {
5055 	struct i915_perf *perf = &i915->perf;
5056 
5057 	perf->oa_formats = oa_formats;
5058 	if (IS_HASWELL(i915)) {
5059 		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
5060 		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
5061 		perf->ops.is_valid_flex_reg = NULL;
5062 		perf->ops.enable_metric_set = hsw_enable_metric_set;
5063 		perf->ops.disable_metric_set = hsw_disable_metric_set;
5064 		perf->ops.oa_enable = gen7_oa_enable;
5065 		perf->ops.oa_disable = gen7_oa_disable;
5066 		perf->ops.read = gen7_oa_read;
5067 		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
5068 	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
5069 		/* Note: although we could theoretically also support the
5070 		 * legacy ringbuffer mode on BDW (and earlier iterations of
5071 		 * this driver did, before upstreaming), it didn't seem
5072 		 * worth the complexity to maintain now that BDW+ enables
5073 		 * execlist mode by default.
5074 		 */
5075 		perf->ops.read = gen8_oa_read;
5076 		i915_perf_init_info(i915);
5077 
5078 		if (IS_GRAPHICS_VER(i915, 8, 9)) {
5079 			perf->ops.is_valid_b_counter_reg =
5080 				gen7_is_valid_b_counter_addr;
5081 			perf->ops.is_valid_mux_reg =
5082 				gen8_is_valid_mux_addr;
5083 			perf->ops.is_valid_flex_reg =
5084 				gen8_is_valid_flex_addr;
5085 
5086 			if (IS_CHERRYVIEW(i915)) {
5087 				perf->ops.is_valid_mux_reg =
5088 					chv_is_valid_mux_addr;
5089 			}
5090 
5091 			perf->ops.oa_enable = gen8_oa_enable;
5092 			perf->ops.oa_disable = gen8_oa_disable;
5093 			perf->ops.enable_metric_set = gen8_enable_metric_set;
5094 			perf->ops.disable_metric_set = gen8_disable_metric_set;
5095 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
5096 		} else if (GRAPHICS_VER(i915) == 11) {
5097 			perf->ops.is_valid_b_counter_reg =
5098 				gen7_is_valid_b_counter_addr;
5099 			perf->ops.is_valid_mux_reg =
5100 				gen11_is_valid_mux_addr;
5101 			perf->ops.is_valid_flex_reg =
5102 				gen8_is_valid_flex_addr;
5103 
5104 			perf->ops.oa_enable = gen8_oa_enable;
5105 			perf->ops.oa_disable = gen8_oa_disable;
5106 			perf->ops.enable_metric_set = gen8_enable_metric_set;
5107 			perf->ops.disable_metric_set = gen11_disable_metric_set;
5108 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
5109 		} else if (GRAPHICS_VER(i915) == 12) {
5110 			perf->ops.is_valid_b_counter_reg =
5111 				HAS_OA_SLICE_CONTRIB_LIMITS(i915) ?
5112 				xehp_is_valid_b_counter_addr :
5113 				gen12_is_valid_b_counter_addr;
5114 			perf->ops.is_valid_mux_reg =
5115 				gen12_is_valid_mux_addr;
5116 			perf->ops.is_valid_flex_reg =
5117 				gen8_is_valid_flex_addr;
5118 
5119 			perf->ops.oa_enable = gen12_oa_enable;
5120 			perf->ops.oa_disable = gen12_oa_disable;
5121 			perf->ops.enable_metric_set = gen12_enable_metric_set;
5122 			perf->ops.disable_metric_set = gen12_disable_metric_set;
5123 			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
5124 		}
5125 	}
5126 
5127 	if (perf->ops.enable_metric_set) {
5128 		struct intel_gt *gt;
5129 		int i, ret;
5130 
5131 		for_each_gt(gt, i915, i)
5132 			mutex_init(&gt->perf.lock);
5133 
5134 		/* Choose a representative limit */
5135 		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
5136 
5137 		mutex_init(&perf->metrics_lock);
5138 		idr_init_base(&perf->metrics_idr, 1);
5139 
5140 		/* We set up some ratelimit state to potentially throttle any
5141 		 * _NOTES about spurious, invalid OA reports which we don't
5142 		 * forward to userspace.
5143 		 *
5144 		 * We print a _NOTE about any throttling when closing the
5145 		 * stream instead of waiting until driver _fini which no one
5146 		 * would ever see.
5147 		 *
5148 		 * Using the same limiting factors as printk_ratelimit()
5149 		 */
5150 		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
5151 		/* Since we use a DRM_NOTE for spurious reports it would be
5152 		 * inconsistent to let __ratelimit() automatically print a
5153 		 * warning for throttling.
5154 		 */
5155 		ratelimit_set_flags(&perf->spurious_report_rs,
5156 				    RATELIMIT_MSG_ON_RELEASE);
5157 
5158 		ratelimit_state_init(&perf->tail_pointer_race,
5159 				     5 * HZ, 10);
5160 		ratelimit_set_flags(&perf->tail_pointer_race,
5161 				    RATELIMIT_MSG_ON_RELEASE);
5162 
5163 		atomic64_set(&perf->noa_programming_delay,
5164 			     500 * 1000 /* 500us */);
5165 
5166 		perf->i915 = i915;
5167 
5168 		ret = oa_init_engine_groups(perf);
5169 		if (ret) {
5170 			drm_err(&i915->drm,
5171 				"OA initialization failed %d\n", ret);
5172 			return ret;
5173 		}
5174 
5175 		oa_init_supported_formats(perf);
5176 	}
5177 
5178 	return 0;
5179 }
5180 
5181 static int destroy_config(int id, void *p, void *data)
5182 {
5183 	i915_oa_config_put(p);
5184 	return 0;
5185 }
5186 
5187 int i915_perf_sysctl_register(void)
5188 {
5189 	sysctl_header = register_sysctl("dev/i915", oa_table);
5190 	return 0;
5191 }
5192 
5193 void i915_perf_sysctl_unregister(void)
5194 {
5195 	unregister_sysctl_table(sysctl_header);
5196 }
5197 
5198 /**
5199  * i915_perf_fini - Counterpart to i915_perf_init()
5200  * @i915: i915 device instance
5201  */
5202 void i915_perf_fini(struct drm_i915_private *i915)
5203 {
5204 	struct i915_perf *perf = &i915->perf;
5205 	struct intel_gt *gt;
5206 	int i;
5207 
5208 	if (!perf->i915)
5209 		return;
5210 
5211 	for_each_gt(gt, perf->i915, i)
5212 		kfree(gt->perf.group);
5213 
5214 	idr_for_each(&perf->metrics_idr, destroy_config, perf);
5215 	idr_destroy(&perf->metrics_idr);
5216 
5217 	memset(&perf->ops, 0, sizeof(perf->ops));
5218 	perf->i915 = NULL;
5219 }
5220 
5221 /**
5222  * i915_perf_ioctl_version - Version of the i915-perf subsystem
5223  * @i915: The i915 device
5224  *
5225  * This version number is used by userspace to detect available features.
5226  */
5227 int i915_perf_ioctl_version(struct drm_i915_private *i915)
5228 {
5229 	/*
5230 	 * 1: Initial version
5231 	 *   I915_PERF_IOCTL_ENABLE
5232 	 *   I915_PERF_IOCTL_DISABLE
5233 	 *
5234 	 * 2: Added runtime modification of OA config.
5235 	 *   I915_PERF_IOCTL_CONFIG
5236 	 *
5237 	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
5238 	 *    preemption on a particular context so that performance data is
5239 	 *    accessible from a delta of MI_RPC reports without looking at the
5240 	 *    OA buffer.
5241 	 *
5242 	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
5243 	 *    be run for the duration of the performance recording based on
5244 	 *    their SSEU configuration.
5245 	 *
5246 	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
5247 	 *    interval for the hrtimer used to check for OA data.
5248 	 *
5249 	 * 6: Add DRM_I915_PERF_PROP_OA_ENGINE_CLASS and
5250 	 *    DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE
5251 	 *
5252 	 * 7: Add support for video decode and enhancement classes.
5253 	 */
5254 
5255 	/*
5256 	 * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded by a Media
5257 	 * C6 disable in BIOS. If Media C6 is enabled in BIOS, return version 6
5258 	 * to indicate that OA media is not supported.
5259 	 */
5260 	if (IS_MEDIA_GT_IP_STEP(i915->media_gt, IP_VER(13, 0), STEP_A0, STEP_C0) &&
5261 	    intel_check_bios_c6_setup(&i915->media_gt->rc6))
5262 		return 6;
5263 
5264 	return 7;
5265 }
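
/*
 * Userspace reads this revision through the i915 getparam interface; a
 * hedged sketch (drm_fd is an assumption):
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &version,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("i915 perf interface revision: %d\n", version);
 */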
5266 
5267 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5268 #include "selftests/i915_perf.c"
5269 #endif
5270