/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dcn10/dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "reg_helper.h"

#define CTX \
	hubbub1->base.ctx
#define DC_LOGGER \
	hubbub1->base.ctx->logger
#define REG(reg)\
	hubbub1->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name

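/* Read back the watermark values currently programmed in the arbiter
 * registers for all four watermark sets (A-D) into *wm, typically for
 * debug/logging purposes.
 */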
void hubbub1_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	}
	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);

	s = &wm->sets[1];
	s->wm_set = 1;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
	}
	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);

	s = &wm->sets[2];
	s->wm_set = 2;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
	}
	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);

	s = &wm->sets[3];
	s->wm_set = 3;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
	}
	s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}

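/* Allow or disallow stutter (memory self-refresh): allow=false forces
 * self-refresh off via the arbiter force control, allow=true removes the
 * force so the hardware can enter self-refresh on its own.
 */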
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	/*
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
	 */

	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
}

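/* Read back the DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE field programmed
 * by hubbub1_allow_self_refresh_control() above.
 */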
bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t enable = 0;

	REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);

	return enable ? true : false;
}


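/* Poll the HUBBUB debug bus until the arbiter reports that a p-state change
 * is currently allowed.  If the wait times out, p-state allow is forced on
 * to avoid a system hang and false is returned so the caller can flag the
 * problem.
 */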
bool hubbub1_verify_allow_pstate_change_high(
	struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	/* pstate latency is ~20us, so if we wait over 40us and pstate allow
	 * is still not asserted, we are probably stuck and going to hang.
	 *
	 * TODO: Figure out why pstate switching takes ~100us (up to 200us)
	 * on Linux; the cause is currently unknown.
	 */
	const unsigned int pstate_wait_timeout_us = 200;
	const unsigned int pstate_wait_expected_timeout_us = 180;
	static unsigned int max_sampled_pstate_wait_us; /* data collection */
	static bool forced_pstate_allow; /* help with revert wa */

	unsigned int debug_data = 0;
	unsigned int i;

	if (forced_pstate_allow) {
		/* pstate allow was forced on during the previous call to
		 * verify_allow_pstate_change_high() to prevent a hang, so
		 * disable the force here so the real status can be checked
		 */
		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
		forced_pstate_allow = false;
	}

	/* The following table only applies to DCN1 and DCN2,
	 * for newer DCNs, need to consult with HW IP folks to read RTL
	 * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
	 * description
	 * 0:     Pipe0 Plane0 Allow Pstate Change
	 * 1:     Pipe0 Plane1 Allow Pstate Change
	 * 2:     Pipe0 Cursor0 Allow Pstate Change
	 * 3:     Pipe0 Cursor1 Allow Pstate Change
	 * 4:     Pipe1 Plane0 Allow Pstate Change
	 * 5:     Pipe1 Plane1 Allow Pstate Change
	 * 6:     Pipe1 Cursor0 Allow Pstate Change
	 * 7:     Pipe1 Cursor1 Allow Pstate Change
	 * 8:     Pipe2 Plane0 Allow Pstate Change
	 * 9:     Pipe2 Plane1 Allow Pstate Change
	 * 10:    Pipe2 Cursor0 Allow Pstate Change
	 * 11:    Pipe2 Cursor1 Allow Pstate Change
	 * 12:    Pipe3 Plane0 Allow Pstate Change
	 * 13:    Pipe3 Plane1 Allow Pstate Change
	 * 14:    Pipe3 Cursor0 Allow Pstate Change
	 * 15:    Pipe3 Cursor1 Allow Pstate Change
	 * 16:    Pipe4 Plane0 Allow Pstate Change
	 * 17:    Pipe4 Plane1 Allow Pstate Change
	 * 18:    Pipe4 Cursor0 Allow Pstate Change
	 * 19:    Pipe4 Cursor1 Allow Pstate Change
	 * 20:    Pipe5 Plane0 Allow Pstate Change
	 * 21:    Pipe5 Plane1 Allow Pstate Change
	 * 22:    Pipe5 Cursor0 Allow Pstate Change
	 * 23:    Pipe5 Cursor1 Allow Pstate Change
	 * 24:    Pipe6 Plane0 Allow Pstate Change
	 * 25:    Pipe6 Plane1 Allow Pstate Change
	 * 26:    Pipe6 Cursor0 Allow Pstate Change
	 * 27:    Pipe6 Cursor1 Allow Pstate Change
	 * 28:    WB0 Allow Pstate Change
	 * 29:    WB1 Allow Pstate Change
	 * 30:    Arbiter's allow_pstate_change
	 * 31:    SOC pstate change request
	 */

	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);

		if (debug_data & (1 << 30)) {

			if (i > pstate_wait_expected_timeout_us)
				DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
						i);

			return true;
		}
		if (max_sampled_pstate_wait_us < i)
			max_sampled_pstate_wait_us = i;

		udelay(1);
	}

	/* force pstate allow to prevent system hang
	 * and break to debugger to investigate
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
	forced_pstate_allow = true;

	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
			debug_data);

	return false;
}

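/* Convert a watermark expressed in nanoseconds into display refclk cycles
 * (wm_ns * refclk_mhz / 1000) and clamp the result to the register maximum.
 */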
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val = 0;
	ret_val = wm_ns * refclk_mhz;
	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return ret_val;
}


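/* Workaround: pulse the watermark change request (write 0 then 1) so that
 * the arbiter takes in the newly programmed watermark values.
 */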
void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}

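/* Program the data urgency and PTE/meta urgency watermarks for clock states
 * A-D.  A watermark is only lowered when safe_to_lower is set; otherwise the
 * lower value is left pending and reported through the return value.
 */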
bool hubbub1_program_urgent_watermarks(
		struct hubbub *hubbub,
		union dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for watermark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
		hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
		hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
		hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
		hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
		wm_pending = true;

	return wm_pending;
}

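/* Program the self-refresh (stutter) enter+exit and exit watermarks for
 * clock states A-D, with the same safe_to_lower/pending semantics as the
 * urgent watermarks above.
 */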
bool hubbub1_program_stutter_watermarks(
		struct hubbub *hubbub,
		union dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}

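/* Program the DRAM clock change (p-state) watermarks for clock states A-D,
 * again only lowering values when safe_to_lower is set.
 */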
bool hubbub1_program_pstate_watermarks(
		struct hubbub *hubbub,
		union dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}

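/* Top-level watermark programming: program the urgent, stutter and p-state
 * watermark groups, set the arbiter saturation level and minimum outstanding
 * requests, and re-enable stutter unless it is disabled via debug options.
 * Returns true if any watermark could not be lowered yet (wm_pending).
 */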
bool hubbub1_program_watermarks(
		struct hubbub *hubbub,
		union dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	bool wm_pending = false;
	/*
	 * Need to clamp to max of the register values (i.e. no wrap)
	 * for dcn1, all wm registers are 21-bit wide
	 */
	if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

#if 0
	REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
	return wm_pending;
}

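/* Program the SDPIF frame buffer and AGP apertures according to the frame
 * buffer mode requested in dh_data (ZFB only, mixed ZFB/local, or local
 * only).
 */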
void hubbub1_update_dchub(
	struct hubbub *hubbub,
	struct dchub_init_data *dh_data)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
		ASSERT(false);
		/*should not come here*/
		return;
	}
	/* TODO: port code from dal2 */
	switch (dh_data->fb_mode) {
	case FRAME_BUFFER_MODE_ZFB_ONLY:
		/*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
		REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
				SDPIF_FB_TOP, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
				SDPIF_FB_BASE, 0x0FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
		/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_LOCAL_ONLY:
		/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, 0X03FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, 0);
		break;
	default:
		break;
	}

	dh_data->dchub_initialzied = true;
	dh_data->dchub_info_valid = false;
}

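/* Invert the current watermark change request bit so the arbiter sees a new
 * watermark change request.
 */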
void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t watermark_change_req;

	REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);

	if (watermark_change_req)
		watermark_change_req = 0;
	else
		watermark_change_req = 1;

	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}

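/* Assert or de-assert the HUBBUB global soft reset. */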
void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t reset_en = reset ? 1 : 0;

	REG_UPDATE(DCHUBBUB_SOFT_RESET,
			DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
}

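/* Determine whether DCC can be used with the given swizzle mode and bytes
 * per element, and report the resulting horizontal/vertical segment ordering
 * used by the DCC sizing logic below.
 */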
static bool hubbub1_dcc_support_swizzle(
		enum swizzle_mode_values swizzle,
		unsigned int bytes_per_element,
		enum segment_order *segment_order_horz,
		enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (bytes_per_element == 1 && standard_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__na;
		return true;
	}
	if (bytes_per_element == 2 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 4 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && standard_swizzle) {
		*segment_order_horz = segment_order__na;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && display_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

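/* Report whether DCC is supported for the given pixel format and return its
 * bytes per element.
 */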
static bool hubbub1_dcc_support_pixel_format(
		enum surface_pixel_format format,
		unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML; might want to refactor to pull this from DML directly */
	/* DML : get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

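/* Decide whether horizontal/vertical write-combined requests have to drop
 * from full 256-byte to half 128-byte requests, based on whether two swaths
 * fit in the detile buffer.
 */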
static void hubbub1_det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = width * blk256_height * bpe;
	swath_bytes_vert_wc = height * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */
}

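/* Compute the DCC compression capabilities (maximum uncompressed/compressed
 * block sizes and whether independent 64B blocks are required) for the given
 * surface, or return false if DCC cannot be used.
 */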
static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dc *dc = hubbub1->base.ctx->dc;

	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe))
		return false;

	if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	hubbub1_det_request_size(input->surface_size.height,  input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	default:
		ASSERT(false);
		break;
	}

	output->capable = true;
	output->const_color_support = false;

	return true;
}

static const struct hubbub_funcs hubbub1_funcs = {
	.update_dchub = hubbub1_update_dchub,
	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
	.wm_read_state = hubbub1_wm_read_state,
	.program_watermarks = hubbub1_program_watermarks,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
};

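/* Initialize the DCN1.0 hubbub object: hook up the function table, register
 * maps, and the debug bus index used for the p-state allow check (0xB on
 * DCN 1.01, 0x7 otherwise).
 */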
void hubbub1_construct(struct hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	hubbub1->base.ctx = ctx;

	hubbub1->base.funcs = &hubbub1_funcs;

	hubbub1->regs = hubbub_regs;
	hubbub1->shifts = hubbub_shift;
	hubbub1->masks = hubbub_mask;

	hubbub1->debug_test_index_pstate = 0x7;
	if (ctx->dce_version == DCN_VERSION_1_01)
		hubbub1->debug_test_index_pstate = 0xB;
}