// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_uncore.h"
#include "intel_rps.h"
#include "pxp/intel_pxp_irq.h"
#include "uc/intel_gsc_proxy.h"

static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (unlikely(!guc->interrupts.enabled))
		return;

	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}

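/*
 * Select one pending bit in a GT interrupt bank and read back its identity
 * dword. The hardware needs a moment to latch the data, so poll for the
 * valid flag before acknowledging it; returns 0 if the identity never
 * becomes valid.
 */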
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		gt_err(gt, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
		       bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

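/*
 * Interrupts reported under OTHER_CLASS are not engine interrupts; route
 * them by instance to the GuC, RPS, PXP, GSC and GSC proxy handlers.
 */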
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	struct intel_gt *media_gt = gt->i915->media_gt;

	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(gt_to_guc(gt), iir);
	if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
		return guc_irq_handler(gt_to_guc(media_gt), iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);
	if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt)
		return gen11_rps_irq_handler(&media_gt->rps, iir);

	if (instance == OTHER_KCR_INSTANCE)
		return intel_pxp_irq_handler(gt->i915->pxp, iir);

	if (instance == OTHER_GSC_INSTANCE)
		return intel_gsc_irq_handler(gt, iir);

	if (instance == OTHER_GSC_HECI_2_INSTANCE)
		return intel_gsc_proxy_irq_handler(&gt->uc.gsc, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

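/*
 * Route the interrupt to the GT that owns it: on platforms with a standalone
 * media GT, the video, video-enhancement and (when the media GT has a GSC
 * engine) GSC/KCR interrupts are handled there; everything else stays on the
 * primary GT.
 */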
static struct intel_gt *pick_gt(struct intel_gt *gt, u8 class, u8 instance)
{
	struct intel_gt *media_gt = gt->i915->media_gt;

	/* we expect the non-media gt to be passed in */
	GEM_BUG_ON(gt == media_gt);

	if (!media_gt)
		return gt;

	switch (class) {
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
		return media_gt;
	case OTHER_CLASS:
		if (instance == OTHER_GSC_HECI_2_INSTANCE)
			return media_gt;
		if ((instance == OTHER_GSC_INSTANCE || instance == OTHER_KCR_INSTANCE) &&
		    HAS_ENGINE(media_gt, GSC0))
			return media_gt;
		fallthrough;
	default:
		return gt;
	}
}

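/*
 * Decode the engine class, instance and interrupt bits from an identity
 * dword and dispatch to the owning engine, or to the OTHER_CLASS handler
 * for non-engine units.
 */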
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	/*
	 * Platforms with standalone media have the media and GSC engines in
	 * another GT.
	 */
	gt = pick_gt(gt, class, instance);

	if (class <= MAX_ENGINE_CLASS && instance <= MAX_ENGINE_INSTANCE) {
		struct intel_engine_cs *engine = gt->engine_class[class][instance];

		if (engine)
			return intel_engine_cs_irq(engine, intr);
	}

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

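/*
 * Service one GT interrupt bank: walk the pending bits in GT_INTR_DW,
 * resolve each one through the identity registers, then acknowledge the
 * bank dword.
 */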
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* The clear must come after the shared IIR has been serviced for each engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

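/*
 * Top-level GT interrupt dispatch for gen11+: with the GT irq lock held,
 * service each of the two interrupt banks flagged in the master control
 * register.
 */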
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(gt->irq_lock);
}

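/*
 * Check whether a single bank/bit is still pending in GT_INTR_DW and, if so,
 * service the identity registers (required before the DW bit can be cleared)
 * and clear it. Returns true if an interrupt was pending.
 */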
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 dw;

	lockdep_assert_held(gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
	if (CCS_MASK(gt))
		intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0);
	if (HAS_HECI_GSC(gt->i915) || HAS_ENGINE(gt, GSC0))
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, 0);

	/* Restore the default (fully masked) irq masks on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
	if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
		intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
		intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
		intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
		intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
	if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
		intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK,   ~0);
	if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
		intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK,   ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
	if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
		intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
		intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
		intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0);
	if (HAS_HECI_GSC(gt->i915) || HAS_ENGINE(gt, GSC0))
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);

	intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK,  ~0);
}

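/*
 * Each GEN11 engine interrupt enable/mask register covers a pair of engines,
 * one per 16-bit half: dmask programs both halves, smask only the upper half
 * for registers whose other half is reserved.
 */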
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_RENDER_USER_INTERRUPT;
	u32 guc_mask = intel_uc_wants_guc(&gt->uc) ? GUC_INTR_GUC2HOST : 0;
	u32 gsc_mask = 0;
	u32 heci_mask = 0;
	u32 dmask;
	u32 smask;

	if (!intel_uc_wants_guc_submission(&gt->uc))
		irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
			GT_CONTEXT_SWITCH_INTERRUPT |
			GT_WAIT_SEMAPHORE_INTERRUPT;

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	if (HAS_ENGINE(gt, GSC0)) {
		/*
		 * The HECI2 interrupt is enabled via the same register as the
		 * GSC interrupt, but it has its own mask register.
		 */
		gsc_mask = irqs;
		heci_mask = GSC_IRQ_INTF(1); /* HECI2 IRQ for SW Proxy */
	} else if (HAS_HECI_GSC(gt->i915)) {
		gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1);
	}

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
	if (CCS_MASK(gt))
		intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask);
	if (gsc_mask)
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, gsc_mask | heci_mask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
		intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
		intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
		intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
		intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
		intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
		intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
		intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
		intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
		intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask);
	if (gsc_mask)
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask);
	if (heci_mask)
		intel_uncore_write(uncore, GEN12_HECI2_RSVD_INTR_MASK,
				   ~REG_FIELD_PREP(ENGINE1_MASK, heci_mask));

	if (guc_mask) {
		/* The enable bit is common to both GTs, but the masks are separate. */
		u32 mask = gt->type == GT_MEDIA ?
			REG_FIELD_PREP(ENGINE0_MASK, guc_mask) :
			REG_FIELD_PREP(ENGINE1_MASK, guc_mask);

		intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE,
				   REG_FIELD_PREP(ENGINE1_MASK, guc_mask));

		/* We might not be the first GT to write this reg. */
		intel_uncore_rmw(uncore, MTL_GUC_MGUC_INTR_MASK, mask, 0);
	}

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
}

void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir);
}

static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	queue_work(gt->i915->unordered_wq, &gt->i915->l3_parity.error_work);
}

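/*
 * On gen6/gen7 the BSD and blitter interrupt bits sit 12 and 22 bits up in
 * the GT IIR; shift them down so each engine handler sees its bits in the
 * same positions as the render engine's.
 */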
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir >> 12);

	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
				    gt_iir >> 22);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_CS_MASTER_ERROR_INTERRUPT))
		gt_dbg(gt, "Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}

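/*
 * gen8-style GT interrupts are reported through four IIR dwords, each
 * covering one or two engines (with PM and GuC sharing IIR(2)); read,
 * dispatch and acknowledge every dword flagged in the master control
 * register.
 */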
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 iir;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
					    iir >> GEN8_RCS_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
					    iir >> GEN8_BCS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
		}
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
					    iir >> GEN8_VCS0_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
					    iir >> GEN8_VCS1_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
		}
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
					    iir >> GEN8_VECS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
		}
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(iir)) {
			gen6_rps_irq_handler(&gt->rps, iir);
			guc_irq_handler(gt_to_guc(gt), iir >> 16);
			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
		}
	}
}

void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	/* These are interrupts we'll toggle with the ring mask register */
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	const u32 gt_interrupts[] = {
		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
		0,
		irqs << GEN8_VECS_IRQ_SHIFT,
	};
	struct intel_uncore *uncore = gt->uncore;

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. The same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

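/*
 * Update the cached GTIMR value: within interrupt_mask, unmask the bits in
 * enabled_irq_mask and mask everything else, then write the result to the
 * hardware.
 */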
static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (GRAPHICS_VER(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (GRAPHICS_VER(gt->i915) == 5)
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (GRAPHICS_VER(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}