/* SPDX-License-Identifier: MIT */
/* Copyright 2024 Advanced Micro Devices, Inc. */

#include "dm_services.h"
#include "include/logger_interface.h"
#include "../dce110/irq_service_dce110.h"


#include "dcn/dcn_3_5_1_offset.h"
#include "dcn/dcn_3_5_1_sh_mask.h"

#include "irq_service_dcn351.h"

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

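/*
 * Translate an interrupt vector's src_id/ext_id pair, as delivered by the
 * interrupt handler, into the DAL dc_irq_source enum. HPD and HPD_RX share a
 * single src_id and are distinguished by ext_id; anything unrecognized maps
 * to DC_IRQ_SOURCE_INVALID.
 */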
static enum dc_irq_source to_dal_irq_source_dcn351(
	struct irq_service *irq_service,
	uint32_t src_id,
	uint32_t ext_id)
{
	switch (src_id) {
	case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK1;
	case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK2;
	case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK3;
	case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK4;
	case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK5;
	case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
		return DC_IRQ_SOURCE_VBLANK6;
	case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC1_VLINE0;
	case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC2_VLINE0;
	case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC3_VLINE0;
	case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC4_VLINE0;
	case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC5_VLINE0;
	case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
		return DC_IRQ_SOURCE_DC6_VLINE0;
	case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP1;
	case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP2;
	case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP3;
	case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP4;
	case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP5;
	case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
		return DC_IRQ_SOURCE_PFLIP6;
	case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE1;
	case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE2;
	case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE3;
	case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE4;
	case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE5;
	case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
		return DC_IRQ_SOURCE_VUPDATE6;
	case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
		return DC_IRQ_SOURCE_DMCUB_OUTBOX;
	case DCN_1_0__SRCID__DC_HPD1_INT:
		/* generic src_id for all HPD and HPDRX interrupts */
		switch (ext_id) {
		case DCN_1_0__CTXID__DC_HPD1_INT:
			return DC_IRQ_SOURCE_HPD1;
		case DCN_1_0__CTXID__DC_HPD2_INT:
			return DC_IRQ_SOURCE_HPD2;
		case DCN_1_0__CTXID__DC_HPD3_INT:
			return DC_IRQ_SOURCE_HPD3;
		case DCN_1_0__CTXID__DC_HPD4_INT:
			return DC_IRQ_SOURCE_HPD4;
		case DCN_1_0__CTXID__DC_HPD5_INT:
			return DC_IRQ_SOURCE_HPD5;
		case DCN_1_0__CTXID__DC_HPD6_INT:
			return DC_IRQ_SOURCE_HPD6;
		case DCN_1_0__CTXID__DC_HPD1_RX_INT:
			return DC_IRQ_SOURCE_HPD1RX;
		case DCN_1_0__CTXID__DC_HPD2_RX_INT:
			return DC_IRQ_SOURCE_HPD2RX;
		case DCN_1_0__CTXID__DC_HPD3_RX_INT:
			return DC_IRQ_SOURCE_HPD3RX;
		case DCN_1_0__CTXID__DC_HPD4_RX_INT:
			return DC_IRQ_SOURCE_HPD4RX;
		case DCN_1_0__CTXID__DC_HPD5_RX_INT:
			return DC_IRQ_SOURCE_HPD5RX;
		case DCN_1_0__CTXID__DC_HPD6_RX_INT:
			return DC_IRQ_SOURCE_HPD6RX;
		default:
			return DC_IRQ_SOURCE_INVALID;
		}
		break;

	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

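/*
 * Acknowledge an HPD interrupt and re-arm it: sample the delayed sense
 * status, clear the pending bit via the generic ack, then flip the interrupt
 * polarity so the next connect/disconnect transition fires again.
 */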
static bool hpd_ack(
	struct irq_service *irq_service,
	const struct irq_source_info *info)
{
	uint32_t addr = info->status_reg;
	uint32_t value = dm_read_reg(irq_service->ctx, addr);
	uint32_t current_status =
		get_reg_field_value(
			value,
			HPD0_DC_HPD_INT_STATUS,
			DC_HPD_SENSE_DELAYED);

	dal_irq_service_ack_generic(irq_service, info);

	value = dm_read_reg(irq_service->ctx, info->enable_reg);

	set_reg_field_value(
		value,
		current_status ? 0 : 1,
		HPD0_DC_HPD_INT_CONTROL,
		DC_HPD_INT_POLARITY);

	dm_write_reg(irq_service->ctx, info->enable_reg, value);

	return true;
}

static struct irq_source_info_funcs hpd_irq_info_funcs = {
	.set = NULL,
	.ack = hpd_ack
};

static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs pflip_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs vblank_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs outbox_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

static struct irq_source_info_funcs vline0_irq_info_funcs = {
	.set = NULL,
	.ack = NULL
};

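/*
 * Register address helpers. On DCN 3.5.1 the block base addresses are not
 * hard-coded; BASE_INNER() indexes the per-segment offsets provided at
 * runtime through ctx->dcn_reg_offsets.
 */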
#undef BASE_INNER
#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]

/* Expand a segment index into its runtime-resolved base address. */
#define BASE(seg) \
	BASE_INNER(seg)

#define SRI(reg_name, block, id)\
	BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
		reg ## block ## id ## _ ## reg_name

#define SRI_DMUB(reg_name)\
	BASE(reg ## reg_name ## _BASE_IDX) + \
		reg ## reg_name

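/*
 * Fill one irq_source_info slot in REG_STRUCT: the enable register/mask with
 * its set/clear values, and the ack register/mask used to clear the pending
 * bit. The _DMUB variant addresses the un-numbered DMCUB registers.
 */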
#define IRQ_REG_ENTRY(base, block, reg_num, reg1, mask1, reg2, mask2)\
	REG_STRUCT[base + reg_num].enable_reg = SRI(reg1, block, reg_num),\
	REG_STRUCT[base + reg_num].enable_mask = \
		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
	REG_STRUCT[base + reg_num].enable_value[0] = \
		block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
	REG_STRUCT[base + reg_num].enable_value[1] = \
		~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
	REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\
	REG_STRUCT[base + reg_num].ack_mask = \
		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
	REG_STRUCT[base + reg_num].ack_value = \
		block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \

#define IRQ_REG_ENTRY_DMUB(base, reg1, mask1, reg2, mask2)\
	REG_STRUCT[base].enable_reg = SRI_DMUB(reg1),\
	REG_STRUCT[base].enable_mask = \
		reg1 ## __ ## mask1 ## _MASK,\
	REG_STRUCT[base].enable_value[0] = \
		reg1 ## __ ## mask1 ## _MASK,\
	REG_STRUCT[base].enable_value[1] = \
		~reg1 ## __ ## mask1 ## _MASK, \
	REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\
	REG_STRUCT[base].ack_mask = \
		reg2 ## __ ## mask2 ## _MASK,\
	REG_STRUCT[base].ack_value = \
		reg2 ## __ ## mask2 ## _MASK \

#define hpd_int_entry(reg_num)\
	IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1, HPD, reg_num,\
		DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
		DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
	REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].funcs = &hpd_irq_info_funcs;\
	REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\

#define hpd_rx_int_entry(reg_num)\
	IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1RX, HPD, reg_num,\
		DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
		DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
	REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\
	REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].funcs = &hpd_rx_irq_info_funcs;\

#define pflip_int_entry(reg_num)\
	IRQ_REG_ENTRY(DC_IRQ_SOURCE_PFLIP1, HUBPREQ, reg_num,\
		DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
		DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
	REG_STRUCT[DC_IRQ_SOURCE_PFLIP1 + reg_num].funcs = &pflip_irq_info_funcs\

/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
 * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
 */
#define vupdate_no_lock_int_entry(reg_num)\
	IRQ_REG_ENTRY(DC_IRQ_SOURCE_VUPDATE1, OTG, reg_num,\
		OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
		OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
	REG_STRUCT[DC_IRQ_SOURCE_VUPDATE1 + reg_num].funcs = &vupdate_no_lock_irq_info_funcs\

#define vblank_int_entry(reg_num)\
	IRQ_REG_ENTRY(DC_IRQ_SOURCE_VBLANK1, OTG, reg_num,\
		OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
		OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
	REG_STRUCT[DC_IRQ_SOURCE_VBLANK1 + reg_num].funcs = &vblank_irq_info_funcs\

#define vline0_int_entry(reg_num)\
	IRQ_REG_ENTRY(DC_IRQ_SOURCE_DC1_VLINE0, OTG, reg_num,\
		OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
		OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
	REG_STRUCT[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num].funcs = &vline0_irq_info_funcs\

#define dmub_outbox_int_entry()\
	IRQ_REG_ENTRY_DMUB(DC_IRQ_SOURCE_DMCUB_OUTBOX, \
		DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
		DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
	REG_STRUCT[DC_IRQ_SOURCE_DMCUB_OUTBOX].funcs = &outbox_irq_info_funcs

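/*
 * Interrupt sources that are present in the dc_irq_source enum but have no
 * register programming on DCN 3.5.1 (I2C, DP sink, GPIO pad, underflow, ...)
 * get the dummy set/ack handlers instead of real enable/ack registers.
 */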
#define dummy_irq_entry(irqno) \
	REG_STRUCT[irqno].funcs = &dummy_irq_info_funcs\

#define i2c_int_entry(reg_num) \
	dummy_irq_entry(DC_IRQ_SOURCE_I2C_DDC ## reg_num)

#define dp_sink_int_entry(reg_num) \
	dummy_irq_entry(DC_IRQ_SOURCE_DPSINK ## reg_num)

#define gpio_pad_int_entry(reg_num) \
	dummy_irq_entry(DC_IRQ_SOURCE_GPIOPAD ## reg_num)

#define dc_underflow_int_entry(reg_num) \
	dummy_irq_entry(DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW)

static struct irq_source_info_funcs dummy_irq_info_funcs = {
	.set = dal_irq_service_dummy_set,
	.ack = dal_irq_service_dummy_ack
};

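/*
 * Build the REG_STRUCT irq_source_info table. The initializer is split into
 * two helper macros; dcn351_irq_init() below expands both parts.
 */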
#define dcn351_irq_init_part_1() {\
	dummy_irq_entry(DC_IRQ_SOURCE_INVALID); \
	hpd_int_entry(0); \
	hpd_int_entry(1); \
	hpd_int_entry(2); \
	hpd_int_entry(3); \
	hpd_int_entry(4); \
	hpd_rx_int_entry(0); \
	hpd_rx_int_entry(1); \
	hpd_rx_int_entry(2); \
	hpd_rx_int_entry(3); \
	hpd_rx_int_entry(4); \
	i2c_int_entry(1); \
	i2c_int_entry(2); \
	i2c_int_entry(3); \
	i2c_int_entry(4); \
	i2c_int_entry(5); \
	i2c_int_entry(6); \
	dp_sink_int_entry(1); \
	dp_sink_int_entry(2); \
	dp_sink_int_entry(3); \
	dp_sink_int_entry(4); \
	dp_sink_int_entry(5); \
	dp_sink_int_entry(6); \
	dummy_irq_entry(DC_IRQ_SOURCE_TIMER); \
	pflip_int_entry(0); \
	pflip_int_entry(1); \
	pflip_int_entry(2); \
	pflip_int_entry(3); \
	dummy_irq_entry(DC_IRQ_SOURCE_PFLIP5); \
	dummy_irq_entry(DC_IRQ_SOURCE_PFLIP6); \
	dummy_irq_entry(DC_IRQ_SOURCE_PFLIP_UNDERLAY0); \
	gpio_pad_int_entry(0); \
	gpio_pad_int_entry(1); \
	gpio_pad_int_entry(2); \
	gpio_pad_int_entry(3); \
	gpio_pad_int_entry(4); \
	gpio_pad_int_entry(5); \
	gpio_pad_int_entry(6); \
	gpio_pad_int_entry(7); \
	gpio_pad_int_entry(8); \
	gpio_pad_int_entry(9); \
	gpio_pad_int_entry(10); \
	gpio_pad_int_entry(11); \
	gpio_pad_int_entry(12); \
	gpio_pad_int_entry(13); \
	gpio_pad_int_entry(14); \
	gpio_pad_int_entry(15); \
	gpio_pad_int_entry(16); \
	gpio_pad_int_entry(17); \
	gpio_pad_int_entry(18); \
	gpio_pad_int_entry(19); \
	gpio_pad_int_entry(20); \
	gpio_pad_int_entry(21); \
	gpio_pad_int_entry(22); \
	gpio_pad_int_entry(23); \
	gpio_pad_int_entry(24); \
	gpio_pad_int_entry(25); \
	gpio_pad_int_entry(26); \
	gpio_pad_int_entry(27); \
	gpio_pad_int_entry(28); \
	gpio_pad_int_entry(29); \
	gpio_pad_int_entry(30); \
	dc_underflow_int_entry(1); \
	dc_underflow_int_entry(2); \
	dc_underflow_int_entry(3); \
	dc_underflow_int_entry(4); \
	dc_underflow_int_entry(5); \
	dc_underflow_int_entry(6); \
	dummy_irq_entry(DC_IRQ_SOURCE_DMCU_SCP); \
	dummy_irq_entry(DC_IRQ_SOURCE_VBIOS_SW); \
}

#define dcn351_irq_init_part_2() {\
	vupdate_no_lock_int_entry(0); \
	vupdate_no_lock_int_entry(1); \
	vupdate_no_lock_int_entry(2); \
	vupdate_no_lock_int_entry(3); \
	vblank_int_entry(0); \
	vblank_int_entry(1); \
	vblank_int_entry(2); \
	vblank_int_entry(3); \
	vline0_int_entry(0); \
	vline0_int_entry(1); \
	vline0_int_entry(2); \
	vline0_int_entry(3); \
	dummy_irq_entry(DC_IRQ_SOURCE_DC5_VLINE1); \
	dummy_irq_entry(DC_IRQ_SOURCE_DC6_VLINE1); \
	dmub_outbox_int_entry(); \
}

#define dcn351_irq_init() {\
	dcn351_irq_init_part_1(); \
	dcn351_irq_init_part_2(); \
}

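/*
 * The table starts zero-initialized and is filled in at construct time, once
 * the runtime register offsets in the dc_context are available.
 */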
static struct irq_source_info irq_source_info_dcn351[DAL_IRQ_SOURCES_NUMBER] = {0};

static struct irq_service_funcs irq_service_funcs_dcn351 = {
	.to_dal_irq_source = to_dal_irq_source_dcn351
};

static void dcn351_irq_construct(
	struct irq_service *irq_service,
	struct irq_service_init_data *init_data)
{
	struct dc_context *ctx = init_data->ctx;

#define REG_STRUCT irq_source_info_dcn351
	dcn351_irq_init();

	dal_irq_service_construct(irq_service, init_data);

	irq_service->info = irq_source_info_dcn351;
	irq_service->funcs = &irq_service_funcs_dcn351;
}

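/*
 * Allocate and construct the DCN 3.5.1 IRQ service; returns NULL if the
 * allocation fails.
 */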
struct irq_service *dal_irq_service_dcn351_create(
	struct irq_service_init_data *init_data)
{
	struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
						  GFP_KERNEL);

	if (!irq_service)
		return NULL;

	dcn351_irq_construct(irq_service, init_data);
	return irq_service;
}