// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <drm/drm_managed.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_INTF_OFF(intf)				(0x6A000 + 0x800 * (intf))
#define MDP_INTF_INTR_EN(intf)				(MDP_INTF_OFF(intf) + 0x1c0)
#define MDP_INTF_INTR_STATUS(intf)			(MDP_INTF_OFF(intf) + 0x1c4)
#define MDP_INTF_INTR_CLEAR(intf)			(MDP_INTF_OFF(intf) + 0x1c8)
#define MDP_INTF_TEAR_OFF(intf)				(0x6D700 + 0x100 * (intf))
#define MDP_INTF_INTR_TEAR_EN(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_INTR_TEAR_STATUS(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_INTR_TEAR_CLEAR(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x008)
#define MDP_AD4_OFF(ad4)				(0x7C000 + 0x1000 * (ad4))
#define MDP_AD4_INTR_EN_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x41c)
#define MDP_AD4_INTR_CLEAR_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x424)
#define MDP_AD4_INTR_STATUS_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x420)
#define MDP_INTF_REV_7xxx_OFF(intf)			(0x34000 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_EN(intf)			(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf)		(0x34800 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf)	(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)

/**
 * struct dpu_intr_reg - one set of DPU interrupt register offsets
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
 */
static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_INTR_CLEAR(0),
		MDP_INTF_INTR_EN(0),
		MDP_INTF_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_INTR_CLEAR(1),
		MDP_INTF_INTR_EN(1),
		MDP_INTF_INTR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_INTR_CLEAR(2),
		MDP_INTF_INTR_EN(2),
		MDP_INTF_INTR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_INTR_CLEAR(3),
		MDP_INTF_INTR_EN(3),
		MDP_INTF_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_INTR_CLEAR(4),
		MDP_INTF_INTR_EN(4),
		MDP_INTF_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_INTR_CLEAR(5),
		MDP_INTF_INTR_EN(5),
		MDP_INTF_INTR_STATUS(5)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(1),
		MDP_INTF_INTR_TEAR_EN(1),
		MDP_INTF_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(2),
		MDP_INTF_INTR_TEAR_EN(2),
		MDP_INTF_INTR_TEAR_STATUS(2)
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(0),
		MDP_AD4_INTR_EN_OFF(0),
		MDP_AD4_INTR_STATUS_OFF(0),
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(1),
		MDP_AD4_INTR_EN_OFF(1),
		MDP_AD4_INTR_STATUS_OFF(1),
	},
};

/*
 * dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
 */
static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(0),
		MDP_INTF_REV_7xxx_INTR_EN(0),
		MDP_INTF_REV_7xxx_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_EN(1),
		MDP_INTF_REV_7xxx_INTR_STATUS(1)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_EN(2),
		MDP_INTF_REV_7xxx_INTR_STATUS(2)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(3),
		MDP_INTF_REV_7xxx_INTR_EN(3),
		MDP_INTF_REV_7xxx_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(4),
		MDP_INTF_REV_7xxx_INTR_EN(4),
		MDP_INTF_REV_7xxx_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(5),
		MDP_INTF_REV_7xxx_INTR_EN(5),
		MDP_INTF_REV_7xxx_INTR_STATUS(5)
	},
	[MDP_INTF6_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(6),
		MDP_INTF_REV_7xxx_INTR_EN(6),
		MDP_INTF_REV_7xxx_INTR_STATUS(6)
	},
	[MDP_INTF7_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(7),
		MDP_INTF_REV_7xxx_INTR_EN(7),
		MDP_INTF_REV_7xxx_INTR_STATUS(7)
	},
	[MDP_INTF8_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(8),
		MDP_INTF_REV_7xxx_INTR_EN(8),
		MDP_INTF_REV_7xxx_INTR_STATUS(8)
	},
};

#define DPU_IRQ_MASK(irq_idx)	(BIT(DPU_IRQ_BIT(irq_idx)))

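/*
 * irq_idx is 1-based: 0 is reserved to mean "no IRQ". DPU_IRQ_REG() and
 * DPU_IRQ_BIT() split an index into its interrupt register and the bit
 * position within that register's 32-bit word.
 */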
static inline bool dpu_core_irq_is_valid(unsigned int irq_idx)
{
	return irq_idx && irq_idx <= DPU_NUM_IRQS;
}

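/* The dispatch table is 0-based, hence the irq_idx - 1 below. */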
static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
							       unsigned int irq_idx)
{
	return &intr->irq_tbl[irq_idx - 1];
}

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
	struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);

	VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	if (!irq_entry->cb) {
		DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return;
	}

	atomic_inc(&irq_entry->count);

	/* Invoke the registered callback */
	irq_entry->cb(irq_entry->arg);
}

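/**
 * dpu_core_irq - top-level IRQ handler for the DPU core interrupt registers
 * @kms: MSM KMS handle
 *
 * For each enabled interrupt register, read and ack the hardware status,
 * mask it against the enable register, and dispatch the registered callback
 * for every set bit.
 *
 * Return: IRQ_HANDLED, or IRQ_NONE if the interrupt block is not initialized.
 */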
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
				     irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Dispatch a callback for each set bit of the masked status.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * Once the callback finishes, clear the handled bit
			 * from irq_status. The search stops when irq_status
			 * reaches zero.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

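/**
 * dpu_hw_intr_enable_irq_locked - add an IRQ to its register's enable mask
 * @intr: DPU interrupt handler structure
 * @irq_idx: interrupt index to enable
 *
 * Caller must hold @intr->irq_lock.
 *
 * Return: 0 on success, -EINVAL on an invalid or unsupported @irq_idx.
 */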
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr,
					 unsigned int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n",
		       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	/* Is this interrupt register supported on the platform? */
	if (WARN_ON(!reg->en_off))
		return -EINVAL;

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupts */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ=[%d, %d] %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
		 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
		 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

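/**
 * dpu_hw_intr_disable_irq_locked - drop an IRQ from its register's enable mask
 * @intr: DPU interrupt handler structure
 * @irq_idx: interrupt index to disable
 *
 * Caller must hold @intr->irq_lock.
 *
 * Return: 0 on success, -EINVAL on an invalid @irq_idx.
 */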
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr,
					  unsigned int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n",
		       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupts */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ=[%d, %d] %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
		 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
		 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

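/* Write all-ones to each supported CLEAR register to ack any pending IRQs. */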
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					intr->intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

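/* Zero each supported ENABLE register so no core interrupt can fire. */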
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					intr->intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

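/**
 * dpu_core_irq_read - read and ack the current status of an interrupt
 * @dpu_kms: DPU KMS handle
 * @irq_idx: interrupt index to query
 *
 * Return: the masked status bit for @irq_idx, or zero if the interrupt is
 * not pending or the index is invalid. A pending bit is cleared in hardware
 * before returning.
 */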
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
		      unsigned int irq_idx)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			intr->intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status)
		DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

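/**
 * dpu_hw_intr_init - create the DRM-managed interrupt handler state
 * @dev: DRM device the allocation is tied to
 * @addr: base of the mapped MDSS register region
 * @m: catalog description of the hardware
 *
 * Pick the interrupt register set matching the DPU major revision, and build
 * irq_mask from the TOP interrupt registers plus every INTF (and tear-check
 * register) present in the catalog.
 *
 * Return: pointer to the interrupt handler, or an ERR_PTR() on failure.
 */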
struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
				     void __iomem *addr,
				     const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;
	unsigned int i;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	if (m->mdss_ver->core_major_ver >= 7)
		intr->intr_set = dpu_intr_set_7xxx;
	else
		intr->intr_set = dpu_intr_set_legacy;

	intr->hw.blk_addr = addr + m->mdp[0].base;

	intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
			 BIT(MDP_SSPP_TOP0_INTR2) |
			 BIT(MDP_SSPP_TOP0_HIST_INTR);
	for (i = 0; i < m->intf_count; i++) {
		const struct dpu_intf_cfg *intf = &m->intf[i];

		if (intf->type == INTF_NONE)
			continue;

		intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));

		if (intf->intr_tear_rd_ptr)
			intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
	}

	spin_lock_init(&intr->irq_lock);

	return intr;
}

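/**
 * dpu_core_irq_register_callback - attach a callback to an interrupt and
 * enable it
 * @dpu_kms: DPU KMS handle
 * @irq_idx: interrupt index the callback fires on
 * @irq_cb: callback invoked from dpu_core_irq(); runs with irq_lock held
 * @irq_arg: opaque pointer passed back to @irq_cb
 *
 * Only one callback may be registered per interrupt at a time.
 *
 * Return: 0 on success, -EINVAL on bad arguments, -EBUSY if a callback is
 * already registered for @irq_idx.
 *
 * Illustrative pairing with the unregister path (the callback name here is
 * hypothetical, not taken from this file):
 *
 *	dpu_core_irq_register_callback(dpu_kms, irq_idx,
 *				       my_vblank_callback, phys_enc);
 *	...
 *	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
 */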
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
				   unsigned int irq_idx,
				   void (*irq_cb)(void *arg),
				   void *irq_arg)
{
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int ret;

	if (!irq_cb) {
		DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	if (!dpu_core_irq_is_valid(irq_idx)) {
		DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
		return -EINVAL;
	}

	VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
	     DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
	if (unlikely(WARN_ON(irq_entry->cb))) {
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		return -EBUSY;
	}

	trace_dpu_core_irq_register_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
	irq_entry->arg = irq_arg;
	irq_entry->cb = irq_cb;

	ret = dpu_hw_intr_enable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
	if (ret)
		DPU_ERROR("Failed to enable IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_register_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	return 0;
}

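/**
 * dpu_core_irq_unregister_callback - disable an interrupt and drop its callback
 * @dpu_kms: DPU KMS handle
 * @irq_idx: interrupt index to release
 *
 * Return: 0 on success, -EINVAL on an invalid @irq_idx.
 */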
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms,
				     unsigned int irq_idx)
{
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int ret;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		DPU_ERROR("invalid IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
	     DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
	if (ret)
		DPU_ERROR("Failed to disable IRQ=[%d, %d]: %d\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), ret);

	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
	irq_entry->cb = NULL;
	irq_entry->arg = NULL;

	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_unregister_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	return 0;
}

#ifdef CONFIG_DEBUG_FS
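/* Dump the dispatch count and callback of every interrupt that has either. */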
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int i, irq_count;
	void *cb;

	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		irq_count = atomic_read(&irq_entry->count);
		cb = irq_entry->cb;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb)
			seq_printf(s, "IRQ=[%d, %d] count:%d cb:%ps\n",
				   DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
#endif

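/*
 * Ack and mask every core interrupt before the IRQ line is installed, and
 * reset the per-interrupt dispatch counters.
 */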
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr_entry *irq_entry;
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		atomic_set(&irq_entry->count, 0);
	}
}

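/*
 * Warn about callbacks that were never unregistered, then ack and mask all
 * core interrupts.
 */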
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr_entry *irq_entry;
	int i;

	if (!dpu_kms->hw_intr)
		return;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		if (irq_entry->cb)
			DPU_ERROR("IRQ=[%d, %d] still enabled/registered\n",
				  DPU_IRQ_REG(i), DPU_IRQ_BIT(i));
	}

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}