1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4
5 #include "dm_services.h"
6 #include "dm_helpers.h"
7 #include "core_types.h"
8 #include "resource.h"
9 #include "dccg.h"
10 #include "dce/dce_hwseq.h"
11 #include "reg_helper.h"
12 #include "abm.h"
13 #include "hubp.h"
14 #include "dchubbub.h"
15 #include "timing_generator.h"
16 #include "opp.h"
17 #include "ipp.h"
18 #include "mpc.h"
19 #include "mcif_wb.h"
20 #include "dc_dmub_srv.h"
21 #include "link_hwss.h"
22 #include "dpcd_defs.h"
23 #include "clk_mgr.h"
24 #include "dsc.h"
25 #include "link.h"
26
27 #include "dce/dmub_hw_lock_mgr.h"
28 #include "dcn10/dcn10_cm_common.h"
29 #include "dcn20/dcn20_optc.h"
30 #include "dcn30/dcn30_cm_common.h"
31 #include "dcn32/dcn32_hwseq.h"
32 #include "dcn401_hwseq.h"
33 #include "dcn401/dcn401_resource.h"
34 #include "dc_state_priv.h"
35 #include "link_enc_cfg.h"
36
37 #define DC_LOGGER_INIT(logger)
38
39 #define CTX \
40 hws->ctx
41 #define REG(reg)\
42 hws->regs->reg
43 #define DC_LOGGER \
44 dc->ctx->logger
45
46
47 #undef FN
48 #define FN(reg_name, field_name) \
49 hws->shifts->field_name, hws->masks->field_name
50
dcn401_initialize_min_clocks(struct dc * dc)51 static void dcn401_initialize_min_clocks(struct dc *dc)
52 {
53 struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
54
55 clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
56 clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
57 clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
58 clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
59 clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
60 if (dc->debug.disable_boot_optimizations) {
61 clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
62 } else {
63 /* Even though DPG_EN = 1 for the connected display, it still requires the
64 * correct timing so we cannot set DISPCLK to min freq or it could cause
65 * audio corruption. Read current DISPCLK from DENTIST and request the same
66 * freq to ensure that the timing is valid and unchanged.
67 */
68 clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
69 }
70 clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
71 clocks->fclk_p_state_change_support = true;
72 clocks->p_state_change_support = true;
73
74 dc->clk_mgr->funcs->update_clocks(
75 dc->clk_mgr,
76 dc->current_state,
77 true);
78 }
79
/* Program all three MPC gamut-remap (CSC temperature matrix) stages for the
 * pipe's MPCC: the first movable pre-blend remap gets the plane's matrix (or
 * bypass), the second movable remap is forced to bypass, and the post-blend
 * OGAM remap gets the stream's matrix on the top pipe only (DCN3x behavior).
 */
void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
	unsigned int i = 0;
	struct mpc_grph_gamut_adjustment mpc_adjust;
	unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;

	//For now assert if location is not pre-blend
	if (pipe_ctx->plane_state)
		ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);

	// program MPCC_MCM_FIRST_GAMUT_REMAP
	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP;

	/* Use the plane's software matrix when remap is enabled; otherwise the
	 * stage stays in bypass (set above).
	 */
	if (pipe_ctx->plane_state &&
		pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
		mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			mpc_adjust.temperature_matrix[i] =
			pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
	}

	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);

	// program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now
	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP;

	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);

	// program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x
	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP;

	/* Stream-level remap applies post-blend, so only the top pipe programs it. */
	if (pipe_ctx->top_pipe == NULL) {
		if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
			mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
			for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
				mpc_adjust.temperature_matrix[i] =
				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
		}
	}

	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
}
128
/* Read the desired and current power state of one ONO (on/off) power region.
 *
 * Region index -> PGFSM domain mapping (per the case comments below):
 *   0: dccg/dio/dcio, 1: dchubbub/dchvm/dchubbubmem, 2: mpc/opp/optc/dwb,
 *   3: hpo, 4/6/8/10: dchubp+dpp 0..3, 5/7/9/11: dsc 0..3.
 *
 * Returns a zero-initialized state for unknown region indices.
 */
struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc, uint8_t region)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct ips_ono_region_state state = {0, 0};

	switch (region) {
	case 0:
		/* dccg, dio, dcio */
		REG_GET_2(DOMAIN22_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 1:
		/* dchubbub, dchvm, dchubbubmem */
		REG_GET_2(DOMAIN23_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 2:
		/* mpc, opp, optc, dwb */
		REG_GET_2(DOMAIN24_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 3:
		/* hpo */
		REG_GET_2(DOMAIN25_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 4:
		/* dchubp0, dpp0 */
		REG_GET_2(DOMAIN0_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 5:
		/* dsc0 */
		REG_GET_2(DOMAIN16_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 6:
		/* dchubp1, dpp1 */
		REG_GET_2(DOMAIN1_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 7:
		/* dsc1 */
		REG_GET_2(DOMAIN17_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 8:
		/* dchubp2, dpp2 */
		REG_GET_2(DOMAIN2_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 9:
		/* dsc2 */
		REG_GET_2(DOMAIN18_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 10:
		/* dchubp3, dpp3 */
		REG_GET_2(DOMAIN3_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	case 11:
		/* dsc3 */
		REG_GET_2(DOMAIN19_PG_STATUS,
				DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
				DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
		break;
	default:
		break;
	}

	return state;
}
213
/* dcn401_init_hw - one-time hardware bring-up for DCN4.01.
 *
 * Ordered sequence: clock manager init -> DCCG init -> OPTC/VGA memory power
 * -> reference clocks from VBIOS -> link encoder init / active-display
 * detection -> plane power gating -> pipe init or accelerated-mode takeover
 * -> headless power-down -> audio/backlight/ABM init -> clock gating ->
 * HPO control, watermarks, CRB, SDP request limit -> DMCUB capability query.
 * The ordering is part of the hardware programming sequence; do not reorder
 * sections independently.
 */
void dcn401_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	int i;
	int edp_num;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int current_dchub_ref_freq = 0;

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

		// mark dcmode limits present if any clock has distinct AC and DC values from SMU
		dc->caps.dcmode_power_limits_present =
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	// Disable DMUB Initialization until IPS state programming is finalized
	//if (!dcb->funcs->is_accelerated_mode(dcb)) {
	//	hws->funcs.bios_golden_init(dc);
	//}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			/* Remember the pre-init DCHUB ref freq so we can detect a
			 * change later and refresh the bandwidth bounding box.
			 */
			current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* enable_power_gating_plane before dsc_pg_control because
	 * FORCEON = 1 with hw default value on bootup, resume from s3
	 */
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		/* Disable boot optimizations means power down everything including PHY, DIG,
		 * and OTG (i.e. the boot is not optimized because we do a full power down).
		 */
		if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
			dc->hwss.enable_accelerated_mode(dc, dc->current_state);
		else
			hws->funcs.init_pipes(dc, dc->current_state);

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

		dcn401_initialize_min_clocks(dc);

		/* On HW init, allow idle optimizations after pipes have been turned off.
		 *
		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
		 * is reset (i.e. not in idle at the time hw init is called), but software state
		 * still has idle_optimizations = true, so we must disable idle optimizations first
		 * (i.e. set false), then re-enable (set true).
		 */
		dc_allow_idle_optimizations(dc, false);
		dc_allow_idle_optimizations(dc, true);
	}

	/* In headless boot cases, DIG may be turned
	 * on which causes HW/SW discrepancies.
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot not enabled
	 */
	if (!dc->config.seamless_boot_edp_requested) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		struct dc_link *edp_link;

		dc_get_edp_links(dc, edp_links, &edp_num);
		if (edp_num) {
			/* eDP present: blank backlight, power down, cut panel power. */
			for (i = 0; i < edp_num; i++) {
				edp_link = edp_links[i];
				if (edp_link->link_enc->funcs->is_dig_enabled &&
						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
						dc->hwss.edp_backlight_control &&
						hws->funcs.power_down &&
						dc->hwss.edp_power_control) {
					dc->hwss.edp_backlight_control(edp_link, false);
					hws->funcs.power_down(dc);
					dc->hwss.edp_power_control(edp_link, false);
				}
			}
		} else {
			/* No eDP: one global power_down is enough if any DIG is on. */
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						hws->funcs.power_down) {
					hws->funcs.power_down(dc);
					break;
				}

			}
		}
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* Last panel_cntl wins for backlight/user_level used by ABM init below. */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	dcn401_setup_hpo_hw_control(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
		dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		/* FAMS2 requires firmware version 2 exactly. */
		dc->debug.fams2_config.bits.enable &= dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver == 2;
		if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
				|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
			/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
			if (dc->clk_mgr)
				dc->res_pool->funcs->update_bw_bounding_box(dc,
						dc->clk_mgr->bw_params);
		}
	}
}
448
dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc * dc,struct pipe_ctx * pipe_ctx,enum MCM_LUT_XABLE * shaper_xable,enum MCM_LUT_XABLE * lut3d_xable,enum MCM_LUT_XABLE * lut1d_xable)449 static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
450 enum MCM_LUT_XABLE *shaper_xable,
451 enum MCM_LUT_XABLE *lut3d_xable,
452 enum MCM_LUT_XABLE *lut1d_xable)
453 {
454 enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
455 bool lut1d_enable = false;
456 struct mpc *mpc = dc->res_pool->mpc;
457 int mpcc_id = pipe_ctx->plane_res.hubp->inst;
458
459 if (!pipe_ctx->plane_state)
460 return;
461 shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
462 lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
463 mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
464 pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
465
466 *lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;
467
468 switch (shaper_3dlut_setting) {
469 case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
470 *lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
471 break;
472 case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
473 *lut3d_xable = MCM_LUT_DISABLE;
474 *shaper_xable = MCM_LUT_ENABLE;
475 break;
476 case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
477 *lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
478 break;
479 }
480 }
481
/* Program the MPC MCM color-management LUTs (1D LUT, shaper, 3D LUT) for a
 * pipe from the given CM2 LUT set.  The 3D LUT is sourced either from system
 * memory (programmed directly through the MPC) or from video memory via the
 * HUBP 3D-LUT fast-load (FL) path.  lut_bank_a selects the LUT RAM bank to
 * write, allowing double-buffered updates.
 */
void dcn401_populate_mcm_luts(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_cm2_func_luts mcm_luts,
		bool lut_bank_a)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int mpcc_id = hubp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	union mcm_lut_params m_lut_params;
	enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
	enum hubp_3dlut_fl_format format;
	enum hubp_3dlut_fl_mode mode;
	enum hubp_3dlut_fl_width width;
	enum hubp_3dlut_fl_addressing_mode addr_mode;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
	enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
	bool is_17x17x17 = true;
	bool rval;

	dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);

	/* 1D LUT */
	/* NOTE(review): this gates 1D LUT programming on lut3d_xable rather than
	 * lut1d_xable — confirm whether the 1D LUT is intentionally coupled to the
	 * 3D LUT enable here, or whether lut1d_xable was intended.
	 */
	if (mcm_luts.lut1d_func && lut3d_xable != MCM_LUT_DISABLE) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
		else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* Convert distributed points into the hardware PWL format. */
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.lut1d_func,
					&dpp_base->regamma_params, false);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
		}
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable, lut_bank_a, mpcc_id);
	}

	/* Shaper */
	if (mcm_luts.shaper) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.shaper->pwl;
		else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* Distributed-points shapers are not expected here; flag it
			 * but still attempt the translation as a fallback.
			 */
			ASSERT(false);
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.shaper,
					&dpp_base->regamma_params, true);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id);
		}
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id);
	}

	/* 3DLUT */
	switch (lut3d_src) {
	case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
		/* CPU-resident LUT: disable the fast-load path and write the LUT
		 * through the MPC directly.
		 */
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
		if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
			m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
						mpcc_id);
		}
		break;
	case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
		/* GPU-resident LUT: configure the HUBP 3D-LUT fast-load engine
		 * (address, layout, format, crossbar, width) and stream it in.
		 */
		if (mpc->funcs->program_lut_read_write_control)
			mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_3dlut_size)
			mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id);
		if (hubp->funcs->hubp_program_3dlut_fl_addr)
			hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
		switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
			mode = hubp_3dlut_fl_mode_native_1;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
			mode = hubp_3dlut_fl_mode_native_2;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
			mode = hubp_3dlut_fl_mode_transform;
			addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
			break;
		default:
			mode = hubp_3dlut_fl_mode_disable;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_mode)
			hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);

		if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
			hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);

		switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
		default:
			format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
			format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
			format = hubp_3dlut_fl_format_float_fp1_5_10;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_format)
			hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
		if (hubp->funcs->hubp_update_3dlut_fl_bias_scale)
			hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
					mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
					mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);

		/* Map the component order onto the 16-bit crossbar slices. */
		switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
		case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
		default:
			crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
			crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
			crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
			break;
		}

		if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
			hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
					crossbar_bit_slice_y_g,
					crossbar_bit_slice_cb_b,
					crossbar_bit_slice_cr_r);

		switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
		case DC_CM2_GPU_MEM_SIZE_171717:
		default:
			width = hubp_3dlut_fl_width_17;
			break;
		case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
			width = hubp_3dlut_fl_width_transformed;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_width)
			hubp->funcs->hubp_program_3dlut_fl_width(hubp, width);
		if (mpc->funcs->update_3dlut_fast_load_select)
			mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);

		/* If the fast-load hook is missing we cannot source the LUT from
		 * VIDMEM at all, so disable every MCM LUT stage instead of leaving
		 * them pointing at stale data.
		 */
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
		else {
			if (mpc->funcs->program_lut_mode) {
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
			}
		}
		break;

	}
}
657
dcn401_trigger_3dlut_dma_load(struct dc * dc,struct pipe_ctx * pipe_ctx)658 void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
659 {
660 struct hubp *hubp = pipe_ctx->plane_res.hubp;
661
662 if (hubp->funcs->hubp_enable_3dlut_fl) {
663 hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
664 }
665 }
666
/* Program the MCM blend-gamma (1D LUT), input shaper and 3D LUT for a plane
 * through the MPC, pinning the movable CM block to the pre-blend location.
 *
 * Returns true only if every programmed stage reported success.
 *
 * NOTE(review): program_1dlut and program_shaper are called without NULL
 * checks, unlike program_3dlut below — confirm those hooks are always
 * populated on dcn401 MPC.
 */
bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
				const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	bool result;
	const struct pwl_params *lut_params = NULL;
	bool rval;

	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
	// 1D LUT
	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
		lut_params = &plane_state->blend_tf.pwl;
	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
		/* Translate distributed points into hardware PWL format;
		 * fall back to NULL (bypass) on translation failure.
		 */
		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
				&dpp_base->regamma_params, false);
		lut_params = rval ? &dpp_base->regamma_params : NULL;
	}
	result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
	lut_params = NULL;

	// Shaper
	if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
		lut_params = &plane_state->in_shaper_func.pwl;
	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
		// TODO: dpp_base replace
		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
				&dpp_base->shaper_params, true);
		lut_params = rval ? &dpp_base->shaper_params : NULL;
	}
	result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);

	// 3D
	if (mpc->funcs->program_3dlut) {
		/* Uninitialized 3D LUT data means program NULL to bypass the stage. */
		if (plane_state->lut3d_func.state.bits.initialized == 1)
			result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
		else
			result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
	}

	return result;
}
711
/* Program the stream's output transfer function: prefer the MPC shaper/3DLUT
 * path (dcn32 helper); fall back to output gamma (OGAM) only when that path
 * is not used.  Only the OPP head pipe programs these post-blend blocks.
 *
 * Returns the result of the shaper/3DLUT programming attempt (true when the
 * shaper path was taken).  Note that when the shaper path succeeds, params
 * stays NULL and set_output_gamma is called with NULL to bypass OGAM.
 */
bool dcn401_set_output_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	const struct pwl_params *params = NULL;
	bool ret = false;

	/* program OGAM or 3DLUT only for the top pipe*/
	if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
		/*program shaper and 3dlut in MPC*/
		ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
		if (ret == false && mpc->funcs->set_output_gamma) {
			if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
				params = &stream->out_transfer_func.pwl;
			else if (pipe_ctx->stream->out_transfer_func.type ==
					TF_TYPE_DISTRIBUTED_POINTS &&
					cm3_helper_translate_curve_to_hw_format(
							&stream->out_transfer_func,
							&mpc->blender_params, false))
				params = &mpc->blender_params;
			/* there are no ROM LUTs in OUTGAM */
			if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
				BREAK_TO_DEBUGGER();
		}
	}

	/* Runs unconditionally: NULL params puts OGAM in bypass. */
	if (mpc->funcs->set_output_gamma)
		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);

	return ret;
}
745
dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx * pipe_ctx,unsigned int * tmds_div)746 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
747 unsigned int *tmds_div)
748 {
749 struct dc_stream_state *stream = pipe_ctx->stream;
750
751 if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
752 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
753 *tmds_div = PIXEL_RATE_DIV_BY_2;
754 else
755 *tmds_div = PIXEL_RATE_DIV_BY_4;
756 } else {
757 *tmds_div = PIXEL_RATE_DIV_BY_1;
758 }
759
760 if (*tmds_div == PIXEL_RATE_DIV_NA)
761 ASSERT(false);
762
763 }
764
/* Precompute everything dcn401_enable_stream_timing needs: TMDS divider,
 * ODM OPP head list and instances, symclk bookkeeping on the link, and the
 * DRR parameters / surface-update event trigger.
 *
 * NOTE(review): *manual_mode is never written by this helper — the caller's
 * variable stays uninitialized through this call; confirm whether that output
 * parameter is still needed.
 */
static void enable_stream_timing_calc(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc,
		unsigned int *tmds_div,
		int *opp_inst,
		int *opp_cnt,
		struct pipe_ctx *opp_heads[MAX_PIPES],
		bool *manual_mode,
		struct drr_params *params,
		unsigned int *event_triggers)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	int i;

	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);

	/* Collect all OPP heads driven by this OTG master (ODM combine). */
	*opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads);
	for (i = 0; i < *opp_cnt; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;

	/* TMDS needs symclk on for the OTG even before TX is enabled. */
	if (dc_is_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	params->vertical_total_min = stream->adjust.v_total_min;
	params->vertical_total_max = stream->adjust.v_total_max;
	params->vertical_total_mid = stream->adjust.v_total_mid;
	params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		*event_triggers = 0x80;
}
804
/*
 * dcn401_enable_stream_timing - bring up OTG/OPTC timing for a stream.
 *
 * Runs only on the OTG master pipe. Sequence: compute dividers/DRR/ODM
 * parameters, program the pixel rate divider, configure ODM combine (if
 * more than one OPP head), enable the OPTC clock, program the pixel PLL,
 * program the timing generator, enable OPP clocks, blank pixel data, and
 * finally enable the CRTC before arming DRR and static-screen triggers.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if the PLL or CRTC fails
 * to program/enable.
 */
enum dc_status dcn401_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = {0};
	struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
	/* populated by enable_stream_timing_calc() but not consumed here */
	bool manual_mode;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	int odm_slice_width;
	int last_odm_slice_width;
	int i;

	/* Timing is only programmed through the OTG master; secondary
	 * ODM/MPC pipes are covered by the master's programming.
	 */
	if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
		return DC_OK;

	enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst,
			&opp_cnt, opp_heads, &manual_mode, &params, &event_triggers);

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst,
			tmds_div, unused_div);
	}

	/* TODO check if timing_changed, disable stream if timing changed */

	/* More than one OPP head means ODM combine is active */
	if (opp_cnt > 1) {
		odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
		last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	}

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Platform workaround applies to non-DP signals only */
	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->pipe_dlg_param.pstate_keepout,
			pipe_ctx->stream->signal,
			true);

	/* Enable clocks and left-edge pixel handling on every OPP head */
	for (i = 0; i < opp_cnt; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	/* NOTE(review): the master's OPP is also one of opp_heads above, so
	 * this looks redundant with the loop — presumably harmless; confirm.
	 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);

	/* Apply DRR (variable refresh) v_total range computed by the calc helper */
	if (pipe_ctx->stream_res.tg->funcs->set_drr)
		pipe_ctx->stream_res.tg->funcs->set_drr(
			pipe_ctx->stream_res.tg, &params);

	/* Event triggers and num frames initialized for DRR, but can be
	 * later updated for PSR use. Note DRR trigger events are generated
	 * regardless of whether num frames met.
	 */
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers, 2);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	/* SubVP phantom pipes need extra post-enable CRTC handling */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
		if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
			pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
	}

	return DC_OK;
}
926
get_phyd32clk_src(struct dc_link * link)927 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
928 {
929 switch (link->link_enc->transmitter) {
930 case TRANSMITTER_UNIPHY_A:
931 return PHYD32CLKA;
932 case TRANSMITTER_UNIPHY_B:
933 return PHYD32CLKB;
934 case TRANSMITTER_UNIPHY_C:
935 return PHYD32CLKC;
936 case TRANSMITTER_UNIPHY_D:
937 return PHYD32CLKD;
938 case TRANSMITTER_UNIPHY_E:
939 return PHYD32CLKE;
940 default:
941 return PHYD32CLKA;
942 }
943 }
944
/*
 * Compute per-stream values consumed by dcn401_enable_stream():
 * the HPO stream encoder instance (128b/132b only), the PHYD32 clock
 * source, the TMDS pixel-rate divider, and the DIO early-control count.
 *
 * Output pointers are written conditionally; callers initialize them
 * to sane defaults before calling.
 */
static void dcn401_enable_stream_calc(
		struct pipe_ctx *pipe_ctx,
		int *dp_hpo_inst,
		enum phyd32clk_clock_source *phyd32clk,
		unsigned int *tmds_div,
		uint32_t *early_control)
{

	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	enum dc_lane_count lane_count =
		pipe_ctx->stream->link->cur_link_settings.lane_count;
	uint32_t active_total_with_borders;

	/* HPO instance only meaningful for DP 128b/132b links */
	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
		*dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;

	*phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);

	if (dc_is_tmds_signal(pipe_ctx->stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);
	else
		*tmds_div = PIXEL_RATE_DIV_BY_1;

	/* enable early control to avoid corruption on DP monitor*/
	active_total_with_borders =
			timing->h_addressable
				+ timing->h_border_left
				+ timing->h_border_right;

	/* early control = h-active modulo lane count, or the lane count
	 * itself when the division is exact (zero is not a valid value)
	 */
	if (lane_count != 0)
		*early_control = active_total_with_borders % lane_count;

	if (*early_control == 0)
		*early_control = lane_count;

}
982
/*
 * dcn401_enable_stream - enable the front-end stream path for a pipe.
 *
 * Routes and enables the relevant DCCG clocks for the signal type
 * (DP 128b/132b vs. 8b/10b), programs the pixel rate divider, sets up
 * the stream encoder via link_hwss, programs DM metadata and info
 * frames, and finally applies DIO early control on the TG.
 */
void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
{
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dccg *dccg = dc->res_pool->dccg;
	enum phyd32clk_clock_source phyd32clk;
	int dp_hpo_inst = 0;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
				&tmds_div, &early_control);

	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
			/* 128b/132b path: DP stream clock plus SYMCLK32 SE */
			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);

			dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
		} else {
			/* need to set DTBCLK_P source to DPREFCLK for DP8B10B */
			dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst);
			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
						link_enc->transmitter - TRANSMITTER_UNIPHY_A);
		}
	}

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg,
			pipe_ctx->stream_res.tg->inst,
			tmds_div,
			unused_div);
	}

	link_hwss->setup_stream_encoder(pipe_ctx);

	/* DM metadata engine is skipped when flips are immediate */
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (dc->hwss.program_dmdata_engine)
			dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	dc->hwss.update_info_frame(pipe_ctx);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);

	tg->funcs->set_early_control(tg, early_control);
}
1036
/* Toggle the HPO_IO_EN field in HPO_TOP_HW_CONTROL, turning the HPO
 * (128b/132b DP) IO block on or off.
 */
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}
1041
dcn401_can_pipe_disable_cursor(struct pipe_ctx * pipe_ctx)1042 static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
1043 {
1044 struct pipe_ctx *test_pipe, *split_pipe;
1045 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
1046 struct rect r1 = scl_data->recout, r2, r2_half;
1047 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
1048 int cur_layer = pipe_ctx->plane_state->layer_index;
1049
1050 /**
1051 * Disable the cursor if there's another pipe above this with a
1052 * plane that contains this pipe's viewport to prevent double cursor
1053 * and incorrect scaling artifacts.
1054 */
1055 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
1056 test_pipe = test_pipe->top_pipe) {
1057 // Skip invisible layer and pipe-split plane on same layer
1058 if (!test_pipe->plane_state ||
1059 !test_pipe->plane_state->visible ||
1060 test_pipe->plane_state->layer_index == cur_layer)
1061 continue;
1062
1063 r2 = test_pipe->plane_res.scl_data.recout;
1064 r2_r = r2.x + r2.width;
1065 r2_b = r2.y + r2.height;
1066 split_pipe = test_pipe;
1067
1068 /**
1069 * There is another half plane on same layer because of
1070 * pipe-split, merge together per same height.
1071 */
1072 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
1073 split_pipe = split_pipe->top_pipe)
1074 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
1075 r2_half = split_pipe->plane_res.scl_data.recout;
1076 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
1077 r2.width = r2.width + r2_half.width;
1078 r2_r = r2.x + r2.width;
1079 break;
1080 }
1081
1082 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
1083 return true;
1084 }
1085
1086 return false;
1087 }
1088
adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width,struct dc_cursor_position * pos_cpy)1089 void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
1090 {
1091 if (cursor_width <= 128) {
1092 pos_cpy->x_hotspot /= 2;
1093 pos_cpy->x_hotspot += 1;
1094 } else {
1095 pos_cpy->x_hotspot /= 2;
1096 pos_cpy->x_hotspot += 2;
1097 }
1098 }
1099
/*
 * dcn401_set_cursor_position - translate the stream-space cursor
 * position into per-pipe recout space and program HUBP/DPP.
 *
 * DCN4 composes the cursor after the scaler, so HW coordinates are in
 * recout space; this function performs the stream->src to stream->dst
 * scaling, ODM/MPC slice adjustments, and hotspot corrections before
 * handing the final position to the HUBP and DPP cursor hooks.
 */
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.recout = pipe_ctx->plane_res.scl_data.recout,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream
	};
	struct rect odm_slice_src = { 0 };
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);
	int prev_odm_width = 0;
	struct pipe_ctx *prev_odm_pipe = NULL;
	bool mpc_combine_on = false;
	int bottom_pipe_x_pos = 0;

	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;
	int recout_x_pos = 0;
	int recout_y_pos = 0;

	/* MPC combine is inferred from a top/bottom pipe link plus a
	 * viewport that does not cover the whole plane src rect
	 */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			mpc_combine_on = true;
		}
	}

	/* DCN4 moved cursor composition after Scaler, so in HW it is in
	 * recout space and for HW Cursor position programming need to
	 * translate to recout space.
	 *
	 * Cursor X and Y position programmed into HW can't be negative,
	 * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot
	 * position that goes into HW X and Y coordinates while HW Hot spot
	 * X and Y coordinates are length relative to the cursor top left
	 * corner, hotspot must be smaller than the cursor size.
	 *
	 * DMs/DC interface for Cursor position is in stream->src space, and
	 * DMs supposed to transform Cursor coordinates to stream->src space,
	 * then here we need to translate Cursor coordinates to stream->dst
	 * space, as now in HW, Cursor coordinates are in per pipe recout
	 * space, and for the given pipe valid coordinates are only in range
	 * from 0,0 - recout width, recout height space.
	 * If certain pipe combining is in place, need to further adjust per
	 * pipe to make sure each pipe enabling cursor on its part of the
	 * screen.
	 */
	x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
		pipe_ctx->stream->src.width;
	y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
		pipe_ctx->stream->src.height;

	/* If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/* Adjust for ODM Combine
	 * next/prev_odm_offset is to account for scaled modes that have underscan
	 */
	if (odm_combine_on) {
		prev_odm_pipe = pipe_ctx->prev_odm_pipe;

		/* Sum the widths of all ODM slices to the left of this pipe */
		while (prev_odm_pipe != NULL) {
			odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
			prev_odm_width += odm_slice_src.width;
			prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
		}

		x_pos -= (prev_odm_width);
	}

	/* If the position is negative then we need to add to the hotspot
	 * to fix cursor size between ODM slices
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
			adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* If the position on bottom MPC pipe is negative then we need to add to the hotspot and
	 * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices.
	 */
	if (mpc_combine_on &&
		pipe_ctx->top_pipe &&
		(pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {

		bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x;
		if (bottom_pipe_x_pos < 0) {
			x_pos = pipe_ctx->plane_res.scl_data.recout.x;
			pos_cpy.x_hotspot -= bottom_pipe_x_pos;
			if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
				adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		}
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* Drop the cursor when a higher plane fully covers this pipe */
	if (pos_cpy.enable && dcn401_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	x_pos = pos_cpy.x - param.recout.x;
	y_pos = pos_cpy.y - param.recout.y;

	recout_x_pos = x_pos - pos_cpy.x_hotspot;
	recout_y_pos = y_pos - pos_cpy.y_hotspot;

	/* Disable the cursor entirely when it lies outside the recout */
	if (recout_x_pos >= (int)param.recout.width)
		pos_cpy.enable = false;  /* not visible beyond right edge*/

	if (recout_y_pos >= (int)param.recout.height)
		pos_cpy.enable = false;  /* not visible beyond bottom edge*/

	if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
		pos_cpy.enable = false;  /* not visible beyond left edge*/

	if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
		pos_cpy.enable = false;  /* not visible beyond top edge*/

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
1251
dcn401_check_no_memory_request_for_cab(struct dc * dc)1252 static bool dcn401_check_no_memory_request_for_cab(struct dc *dc)
1253 {
1254 int i;
1255
1256 /* First, check no-memory-request case */
1257 for (i = 0; i < dc->current_state->stream_count; i++) {
1258 if ((dc->current_state->stream_status[i].plane_count) &&
1259 (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
1260 /* Fail eligibility on a visible stream */
1261 return false;
1262 }
1263
1264 return true;
1265 }
1266
dcn401_calculate_cab_allocation(struct dc * dc,struct dc_state * ctx)1267 static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
1268 {
1269 int i;
1270 uint8_t num_ways = 0;
1271 uint32_t mall_ss_size_bytes = 0;
1272
1273 mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
1274 // TODO add additional logic for PSR active stream exclusion optimization
1275 // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
1276
1277 // Include cursor size for CAB allocation
1278 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1279 struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
1280
1281 if (!pipe->stream || !pipe->plane_state)
1282 continue;
1283
1284 mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
1285 }
1286
1287 // Convert number of cache lines required to number of ways
1288 if (dc->debug.force_mall_ss_num_ways > 0)
1289 num_ways = dc->debug.force_mall_ss_num_ways;
1290 else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
1291 num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
1292 else
1293 num_ways = 0;
1294
1295 return num_ways;
1296 }
1297
/*
 * dcn401_apply_idle_power_optimizations - enable or disable MALL/CAB
 * idle optimizations via a DMUB command.
 *
 * On enable: pick the CAB sub-command — NO_DCN_REQ when no stream
 * issues memory requests, otherwise FIT/NOT_FIT depending on whether
 * the required number of cache ways fits and no plane uses Stereo3D or
 * a TMZ surface. On disable: send NO_IDLE_OPTIMIZATION.
 *
 * Returns false when DMUB is unavailable or PSR is active on any
 * stream (MALL SS + PSR unsupported), true once the command is sent.
 */
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	union dmub_rb_cmd cmd;
	uint8_t ways, i;
	int j;
	bool mall_ss_unsupported = false;
	struct dc_plane_state *plane = NULL;

	if (!dc->ctx->dmub_srv || !dc->current_state)
		return false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		/* MALL SS messaging is not supported with PSR at this time */
		if (dc->current_state->streams[i] != NULL &&
				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
			DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
			return false;
		}
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
	cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);

	if (enable) {
		if (dcn401_check_no_memory_request_for_cab(dc)) {
			/* 1. Check no memory request case for CAB.
			 * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
			 */
			DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
		} else {
			/* 2. Check if all surfaces can fit in CAB.
			 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
			 * and configure HUBP's to fetch from MALL
			 */
			ways = dcn401_calculate_cab_allocation(dc, dc->current_state);

			/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
			 * or TMZ surface, don't try to enter MALL.
			 */
			for (i = 0; i < dc->current_state->stream_count; i++) {
				for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
					plane = dc->current_state->stream_status[i].plane_states[j];

					if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
							plane->address.tmz_surface) {
						mall_ss_unsupported = true;
						break;
					}
				}
				if (mall_ss_unsupported)
					break;
			}
			if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
				cmd.cab.cab_alloc_ways = ways;
				DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
			} else {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
				DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
			}
		}
	} else {
		/* Disable CAB */
		cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
		DC_LOG_MALL("idle optimization disabled\n");
	}

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
1371
dcn401_wait_for_dcc_meta_propagation(const struct dc * dc,const struct pipe_ctx * top_pipe)1372 void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
1373 const struct pipe_ctx *top_pipe)
1374 {
1375 bool is_wait_needed = false;
1376 const struct pipe_ctx *pipe_ctx = top_pipe;
1377
1378 /* check if any surfaces are updating address while using flip immediate and dcc */
1379 while (pipe_ctx != NULL) {
1380 if (pipe_ctx->plane_state &&
1381 pipe_ctx->plane_state->dcc.enable &&
1382 pipe_ctx->plane_state->flip_immediate &&
1383 pipe_ctx->plane_state->update_flags.bits.addr_update) {
1384 is_wait_needed = true;
1385 break;
1386 }
1387
1388 /* check next pipe */
1389 pipe_ctx = pipe_ctx->bottom_pipe;
1390 }
1391
1392 if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
1393 udelay(dc->debug.dcc_meta_propagation_delay_us);
1394 }
1395 }
1396
/*
 * dcn401_prepare_bandwidth - raise clocks/watermarks ahead of a state
 * commit (the "increase before programming" half of bandwidth updates).
 *
 * P-State support is temporarily forced off during the clock update to
 * avoid hangs, then restored so the later optimize pass gets the real
 * value. Watermarks and compbuf are programmed with the shrink-only
 * direction here; growth happens in dcn401_optimize_bandwidth().
 */
void dcn401_prepare_bandwidth(struct dc *dc,
	struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
	unsigned int compbuf_size = 0;

	/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
	if (p_state_change_support) {
		dc->optimized_required = true;
		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	}

	/* Lift the DC-mode softmax cap if the new state needs more MCLK */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);

	/* Increase clocks */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* program dchubbub watermarks:
	 * For assigning wm_optimized_required, use |= operator since we don't want
	 * to clear the value if the optimize has not happened yet
	 */
	dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			false);

	/* decrease compbuf size */
	if (hubbub->funcs->program_compbuf_segments) {
		compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
		dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);

		hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
	}

	/* FAMS2 config update happens under the global control lock */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, false);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
		/* After disabling P-State, restore the original value to ensure we get the correct P-State
		 * on the next optimize. */
		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
	}
}
1450
/*
 * dcn401_optimize_bandwidth - lower clocks/relax watermarks after a
 * state commit (the "decrease after programming" half, mirroring
 * dcn401_prepare_bandwidth).
 *
 * Re-enables FAMS2, programs watermarks/compbuf in the grow direction,
 * restores the DC-mode MCLK softmax when possible, and finally updates
 * clocks downward. With Z-state allowed, extended blank is programmed
 * on pipes whose DRR range pins v_total above the nominal timing.
 */
void dcn401_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	/* enable fams2 if needed */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, true);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	/* program dchubbub watermarks */
	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	/* Re-apply the softmax cap once the new state fits under it */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);

	/* increase compbuf size */
	if (hubbub->funcs->program_compbuf_segments)
		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);
	if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
					pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
						pipe_ctx->dlg_regs.min_dst_y_next_start);
		}
	}
}
1496
/*
 * Take or release the FAMS2 global HW lock through the DMUB inbox0
 * channel. No-op when DMUB is unavailable or FAMS2 is disabled in
 * debug config.
 */
void dcn401_fams2_global_control_lock(struct dc *dc,
		struct dc_state *context,
		bool lock)
{
	/* use always for now */
	union dmub_inbox0_cmd_lock_hw lock_cmd = { 0 };

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
	lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
	lock_cmd.bits.lock = lock;
	/* release is signalled as the inverse of lock */
	lock_cmd.bits.should_release = !lock;

	dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, lock_cmd);
}
1513
dcn401_fams2_global_control_lock_fast(union block_sequence_params * params)1514 void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
1515 {
1516 struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
1517 bool lock = params->fams2_global_control_lock_fast_params.lock;
1518
1519 if (params->fams2_global_control_lock_fast_params.is_required) {
1520 union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1521
1522 hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1523 hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1524 hw_lock_cmd.bits.lock = lock;
1525 hw_lock_cmd.bits.should_release = !lock;
1526 dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1527 }
1528 }
1529
/*
 * Push the FAMS2 configuration for the given state to DMUB. The
 * feature is only actually enabled when both the caller requests it
 * and the state's global config says FAMS2 is required.
 */
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable)
{
	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	dc_dmub_srv_fams2_update_config(dc, context,
			enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable);
}
1541
/*
 * update_dsc_for_odm_change - reconfigure DSC when the ODM layout of a
 * stream changes.
 *
 * Programs DSC on the new-state OTG master, then disconnects DSC blocks
 * that the old state's OPP heads used but the new state no longer does.
 */
static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	int i;
	struct pipe_ctx *old_pipe;
	struct pipe_ctx *new_pipe;
	struct pipe_ctx *old_opp_heads[MAX_PIPES];
	struct pipe_ctx *old_otg_master;
	int old_opp_head_count = 0;

	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];

	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
				&dc->current_state->res_ctx,
				old_opp_heads);
	} else {
		// DC cannot assume that the current state and the new state
		// share the same OTG pipe since this is not true when called
		// in the context of a commit stream not checked. Hence, set
		// old_otg_master to NULL to skip the DSC configuration.
		old_otg_master = NULL;
	}


	/* Program DSC per the new state's timing flag */
	if (otg_master->stream_res.dsc)
		dcn32_update_dsc_on_stream(otg_master,
				otg_master->stream->timing.flags.DSC);
	/* Disconnect DSC instances dropped by the new layout */
	if (old_otg_master && old_otg_master->stream_res.dsc) {
		for (i = 0; i < old_opp_head_count; i++) {
			old_pipe = old_opp_heads[i];
			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
				old_pipe->stream_res.dsc->funcs->dsc_disconnect(
						old_pipe->stream_res.dsc);
		}
	}
}
1580
/*
 * dcn401_update_odm - apply a new ODM combine layout on an OTG master.
 *
 * Programs ODM combine (or bypass when only one OPP head remains),
 * enables OPP clocks and left-edge extra-pixel handling on each head,
 * updates DSC for the layout change, and re-blanks when the master has
 * no DPP pipe since the OPP-generated blank pattern depends on the OPP
 * count.
 */
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	struct pipe_ctx *opp_heads[MAX_PIPES];
	int opp_inst[MAX_PIPES] = {0};
	int opp_head_count;
	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
	int i;

	opp_head_count = resource_get_opp_heads_for_otg_master(
			otg_master, &context->res_ctx, opp_heads);

	for (i = 0; i < opp_head_count; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
	if (opp_head_count > 1)
		otg_master->stream_res.tg->funcs->set_odm_combine(
				otg_master->stream_res.tg,
				opp_inst, opp_head_count,
				odm_slice_width, last_odm_slice_width);
	else
		otg_master->stream_res.tg->funcs->set_odm_bypass(
				otg_master->stream_res.tg,
				&otg_master->stream->timing);

	for (i = 0; i < opp_head_count; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				opp_heads[i]->stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	update_dsc_for_odm_change(dc, context, otg_master);

	if (!resource_is_pipe_type(otg_master, DPP_PIPE))
		/*
		 * blank pattern is generated by OPP, reprogram blank pattern
		 * due to OPP count change
		 */
		dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
1625
/*
 * dcn401_unblank_stream - unblank a stream on its active encoder path.
 *
 * Builds the unblank parameters (ODM slice count, timing, link rate,
 * pixels-per-cycle), dispatches to the HPO (128b/132b) or DIO (8b/10b
 * DP) stream encoder, and turns the eDP backlight on when the sink is
 * eDP.
 */
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = {0};
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	/* calculate parameters for unblank */
	params.opp_cnt = resource_get_odm_slice_count(pipe_ctx);

	params.timing = pipe_ctx->stream->timing;
	params.link_settings.link_rate = link_settings->link_rate;
	params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;

	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		/* 128b/132b path unblanks through the HPO stream encoder */
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				pipe_ctx->stream_res.tg->inst);
	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
		hws->funcs.edp_backlight_control(link, true);
}
1652
dcn401_hardware_release(struct dc * dc)1653 void dcn401_hardware_release(struct dc *dc)
1654 {
1655 dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
1656
1657 /* If pstate unsupported, or still supported
1658 * by firmware, force it supported by dcn
1659 */
1660 if (dc->current_state) {
1661 if ((!dc->clk_mgr->clks.p_state_change_support ||
1662 dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
1663 dc->res_pool->hubbub->funcs->force_pstate_change_control)
1664 dc->res_pool->hubbub->funcs->force_pstate_change_control(
1665 dc->res_pool->hubbub, true, true);
1666
1667 dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
1668 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
1669 }
1670 }
1671
dcn401_wait_for_det_buffer_update(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master)1672 void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
1673 {
1674 struct pipe_ctx *opp_heads[MAX_PIPES];
1675 struct pipe_ctx *dpp_pipes[MAX_PIPES];
1676 struct hubbub *hubbub = dc->res_pool->hubbub;
1677 int dpp_count = 0;
1678
1679 if (!otg_master->stream)
1680 return;
1681
1682 int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
1683 &context->res_ctx, opp_heads);
1684
1685 for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
1686 if (opp_heads[slice_idx]->plane_state) {
1687 dpp_count = resource_get_dpp_pipes_for_opp_head(
1688 opp_heads[slice_idx],
1689 &context->res_ctx,
1690 dpp_pipes);
1691 for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
1692 struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
1693 if (dpp_pipe && hubbub &&
1694 dpp_pipe->plane_res.hubp &&
1695 hubbub->funcs->wait_for_det_update)
1696 hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
1697 }
1698 }
1699 }
1700 }
1701
dcn401_interdependent_update_lock(struct dc * dc,struct dc_state * context,bool lock)1702 void dcn401_interdependent_update_lock(struct dc *dc,
1703 struct dc_state *context, bool lock)
1704 {
1705 unsigned int i = 0;
1706 struct pipe_ctx *pipe = NULL;
1707 struct timing_generator *tg = NULL;
1708 bool pipe_unlocked[MAX_PIPES] = {0};
1709
1710 if (lock) {
1711 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1712 pipe = &context->res_ctx.pipe_ctx[i];
1713 tg = pipe->stream_res.tg;
1714
1715 if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
1716 !tg->funcs->is_tg_enabled(tg) ||
1717 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
1718 continue;
1719 dc->hwss.pipe_control_lock(dc, pipe, true);
1720 }
1721 } else {
1722 /* Unlock pipes based on the change in DET allocation instead of pipe index
1723 * Prevents over allocation of DET during unlock process
1724 * e.g. 2 pipe config with different streams with a max of 20 DET segments
1725 * Before: After:
1726 * - Pipe0: 10 DET segments - Pipe0: 12 DET segments
1727 * - Pipe1: 10 DET segments - Pipe1: 8 DET segments
1728 * If Pipe0 gets updated first, 22 DET segments will be allocated
1729 */
1730 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1731 pipe = &context->res_ctx.pipe_ctx[i];
1732 tg = pipe->stream_res.tg;
1733 int current_pipe_idx = i;
1734
1735 if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
1736 !tg->funcs->is_tg_enabled(tg) ||
1737 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
1738 pipe_unlocked[i] = true;
1739 continue;
1740 }
1741
1742 // If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared
1743 struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream);
1744
1745 if (old_otg_master)
1746 current_pipe_idx = old_otg_master->pipe_idx;
1747 if (resource_calculate_det_for_stream(context, pipe) <
1748 resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) {
1749 dc->hwss.pipe_control_lock(dc, pipe, false);
1750 pipe_unlocked[i] = true;
1751 dcn401_wait_for_det_buffer_update(dc, context, pipe);
1752 }
1753 }
1754
1755 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1756 if (pipe_unlocked[i])
1757 continue;
1758 pipe = &context->res_ctx.pipe_ctx[i];
1759 dc->hwss.pipe_control_lock(dc, pipe, false);
1760 }
1761 }
1762 }
1763
dcn401_program_outstanding_updates(struct dc * dc,struct dc_state * context)1764 void dcn401_program_outstanding_updates(struct dc *dc,
1765 struct dc_state *context)
1766 {
1767 struct hubbub *hubbub = dc->res_pool->hubbub;
1768
1769 /* update compbuf if required */
1770 if (hubbub->funcs->program_compbuf_segments)
1771 hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1772 }
1773