// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2022  Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852b.h"
#include "rtw8852b_common.h"
#include "rtw8852b_rfk.h"
#include "rtw8852b_rfk_table.h"
#include "rtw8852b_table.h"

#define RTW8852B_RXDCK_VER 0x1
#define RTW8852B_IQK_VER 0x2a
#define RTW8852B_IQK_SS 2
#define RTW8852B_RXK_GROUP_NR 4
#define RTW8852B_TSSI_PATH_NR 2
#define RTW8852B_RF_REL_VERSION 34
#define RTW8852B_DPK_VER 0x0d
#define RTW8852B_DPK_RF_PATH 2
#define RTW8852B_DPK_KIP_REG_NUM 3

#define _TSSI_DE_MASK GENMASK(21, 12)
#define ADDC_T_AVG 100
#define DPK_TXAGC_LOWER 0x2e
#define DPK_TXAGC_UPPER 0x3f
#define DPK_TXAGC_INVAL 0xff
#define RFREG_MASKRXBB 0x003e0
#define RFREG_MASKMODE 0xf0000

enum rtw8852b_dpk_id {
	LBK_RXIQK	= 0x06,
	SYNC		= 0x10,
	MDPK_IDL	= 0x11,
	MDPK_MPA	= 0x12,
	GAIN_LOSS	= 0x13,
	GAIN_CAL	= 0x14,
	DPK_RXAGC	= 0x15,
	KIP_PRESET	= 0x16,
	KIP_RESTORE	= 0x17,
	DPK_TXAGC	= 0x19,
	D_KIP_PRESET	= 0x28,
	D_TXAGC		= 0x29,
	D_RXAGC		= 0x2a,
	D_SYNC		= 0x2b,
	D_GAIN_LOSS	= 0x2c,
	D_MDPK_IDL	= 0x2d,
	D_GAIN_NORM	= 0x2f,
	D_KIP_THERMAL	= 0x30,
	D_KIP_RESTORE	= 0x31
};

enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,
	DPK_AGC_STEP_GL_LT_CRITERION,
	DPK_AGC_STEP_SET_TX_GAIN,
};

enum rtw8852b_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,
	ID_FLOK_FINE = 0x2,
	ID_TXK = 0x3,
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,
	ID_NBTXK = 0x6,
	ID_NBRXK = 0x7,
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};

static const u32 _tssi_trigger[RTW8852B_TSSI_PATH_NR] = {0x5820, 0x7820};
static const u32 _tssi_cw_rpt_addr[RTW8852B_TSSI_PATH_NR] = {0x1c18, 0x3c18};
static const u32 _tssi_cw_default_addr[RTW8852B_TSSI_PATH_NR][4] = {
	{0x5634, 0x5630, 0x5630, 0x5630},
	{0x7634, 0x7630, 0x7630, 0x7630} };
static const u32 _tssi_cw_default_mask[4] = {
	0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852B] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852B] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852B] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852B] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852B] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852B] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852B] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852B] = {0x5830, 0x7830};
static const u32 _a_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x190, 0x198, 0x350, 0x352};
static const u32 _a_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x0f, 0x0f, 0x3f, 0x7f};
static const u32 _a_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x1, 0x0, 0x0};
static const u32 _g_idxrxgain[RTW8852B_RXK_GROUP_NR] = {0x212, 0x21c, 0x350, 0x360};
static const u32 _g_idxattc2[RTW8852B_RXK_GROUP_NR] = {0x00, 0x00, 0x28, 0x5f};
static const u32 _g_idxattc1[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x2, 0x1};
static const u32 _a_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _a_track_range[RTW8852B_RXK_GROUP_NR] = {0x3, 0x3, 0x6, 0x6};
static const u32 _a_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _a_itqt[RTW8852B_RXK_GROUP_NR] = {0x12, 0x12, 0x12, 0x1b};
static const u32 _g_power_range[RTW8852B_RXK_GROUP_NR] = {0x0, 0x0, 0x0, 0x0};
static const u32 _g_track_range[RTW8852B_RXK_GROUP_NR] = {0x4, 0x4, 0x6, 0x6};
static const u32 _g_gain_bb[RTW8852B_RXK_GROUP_NR] = {0x08, 0x0e, 0x06, 0x0e};
static const u32 _g_itqt[RTW8852B_RXK_GROUP_NR] = {0x09, 0x12, 0x1b, 0x24};

static const u32 rtw8852b_backup_bb_regs[] = {0x2344, 0x5800, 0x7800};
static const u32 rtw8852b_backup_rf_regs[] = {
	0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x1e, 0x0, 0x2, 0x5, 0x10005
};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852b_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852b_backup_rf_regs)

static const struct rtw89_reg3_def rtw8852b_set_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x5864, 0x18000000, 0x3},
	{0x7864, 0x18000000, 0x3},
	{0x12b8, 0x40000000, 0x1},
	{0x32b8, 0x40000000, 0x1},
	{0x030c, 0xff000000, 0x13},
	{0x032c, 0xffff0000, 0x0041},
	{0x12b8, 0x10000000, 0x1},
	{0x58c8, 0x01000000, 0x1},
	{0x78c8, 0x01000000, 0x1},
	{0x5864, 0xc0000000, 0x3},
	{0x7864, 0xc0000000, 0x3},
	{0x2008, 0x01ffffff, 0x1ffffff},
	{0x0c1c, 0x00000004, 0x1},
	{0x0700, 0x08000000, 0x1},
	{0x0c70, 0x000003ff, 0x3ff},
	{0x0c60, 0x00000003, 0x3},
	{0x0c6c, 0x00000001, 0x1},
	{0x58ac, 0x08000000, 0x1},
	{0x78ac, 0x08000000, 0x1},
	{0x0c3c, 0x00000200, 0x1},
	{0x2344, 0x80000000, 0x1},
	{0x4490, 0x80000000, 0x1},
	{0x12a0, 0x00007000, 0x7},
	{0x12a0, 0x00008000, 0x1},
	{0x12a0, 0x00070000, 0x3},
	{0x12a0, 0x00080000, 0x1},
	{0x32a0, 0x00070000, 0x3},
	{0x32a0, 0x00080000, 0x1},
	{0x0700, 0x01000000, 0x1},
	{0x0700, 0x06000000, 0x2},
	{0x20fc, 0xffff0000, 0x3333},
};

static const struct rtw89_reg3_def rtw8852b_restore_nondbcc_path01[] = {
	{0x20fc, 0xffff0000, 0x0303},
	{0x12b8, 0x40000000, 0x0},
	{0x32b8, 0x40000000, 0x0},
	{0x5864, 0xc0000000, 0x0},
	{0x7864, 0xc0000000, 0x0},
	{0x2008, 0x01ffffff, 0x0000000},
	{0x0c1c, 0x00000004, 0x0},
	{0x0700, 0x08000000, 0x0},
	{0x0c70, 0x0000001f, 0x03},
	{0x0c70, 0x000003e0, 0x03},
	{0x12a0, 0x000ff000, 0x00},
	{0x32a0, 0x000ff000, 0x00},
	{0x0700, 0x07000000, 0x0},
	{0x20fc, 0xffff0000, 0x0000},
	{0x58c8, 0x01000000, 0x0},
	{0x78c8, 0x01000000, 0x0},
	{0x0c3c, 0x00000200, 0x0},
	{0x2344, 0x80000000, 0x0},
};

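/* Save/restore helpers for the BB and RF registers listed above, so the
 * calibration sequences can modify them and put the originals back when done.
 */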
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		backup_bb_reg_val[i] =
			rtw89_phy_read32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
					      MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup bb reg : %x, value =%x\n",
			    rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
			       u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		backup_rf_reg_val[i] =
			rtw89_read_rf(rtwdev, rf_path,
				      rtw8852b_backup_rf_regs[i], RFREG_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path,
			    rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}

static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
				const u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, rtw8852b_backup_bb_regs[i],
				       MASKDWORD, backup_bb_reg_val[i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore bb reg : %x, value =%x\n",
			    rtw8852b_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
				const u32 backup_rf_reg_val[], u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		rtw89_write_rf(rtwdev, rf_path, rtw8852b_backup_rf_regs[i],
			       RFREG_MASK, backup_rf_reg_val[i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
			    rtw8852b_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}

static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path path, bool is_bybb)
{
	if (is_bybb)
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
}

static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path path, bool is_bybb)
{
	if (is_bybb)
		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
}

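/* Poll the NCTL report register (0xbff8) for the 0x55 done pattern, then
 * read back the one-shot result flag. Returns true on failure or timeout.
 */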
static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path)
{
	bool fail = true;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");

	udelay(200);

	if (!ret)
		fail = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	val = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8008 = 0x%x\n", path, val);

	return fail;
}

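/* Map a PHY index to the RF path(s) to calibrate: both paths when DBCC is
 * disabled, otherwise only the path owned by the given PHY.
 */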
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	u8 val;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
		    rtwdev->dbcc_en, phy_idx);

	if (!rtwdev->dbcc_en) {
		val = RF_AB;
	} else {
		if (phy_idx == RTW89_PHY_0)
			val = RF_A;
		else
			val = RF_B;
	}
	return val;
}

static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
	mdelay(1);
}

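/* RX DC offset calibration: per path, force RX mode, re-trigger DCK with the
 * fine-tune code cleared, then restore the fine-tune code and RF mode. The
 * TSSI tracking enable bit is toggled around the sequence on TSSI-mode paths.
 */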
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, dck_tune;
	u32 rf_reg5;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
		    RTW8852B_RXDCK_VER, rtwdev->hal.cv);

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);

		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}

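/* RC calibration: trigger RCK through RF 0x1b, poll RR_RCKS for completion,
 * then write the resulting CA code back into RR_RCKC.
 */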
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5;
	u32 rck_val;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
				       false, rtwdev, path, RR_RCKS, BIT(3));

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
		    rck_val, ret);

	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
}

static void _afe_init(struct rtw89_dev *rtwdev)
{
	rtw89_write32(rtwdev, R_AX_PHYREG_SET, 0xf);

	rtw89_rfk_parser(rtwdev, &rtw8852b_afe_init_defs_tbl);
}

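/* D-die RC calibration: kick DRCK, wait for the done flag, then latch the
 * measured code and feed it back as the manual DRCK value.
 */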
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 rck_d;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_DRCK_RS, B_DRCK_RS_DONE);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_KICK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);
	rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RS, B_DRCK_RS_LPS);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_V1, B_DRCK_V1_CV, rck_d);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0cc = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK_V1, MASKDWORD));
}

static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
	dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
	dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);
}

static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_VAL, dack->addck_d[0][1] >> 6);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK0D_VAL2, dack->addck_d[0][1] & 0x3f);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x3);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL, dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK0_VAL, dack->addck_d[1][1] >> 6);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1D, B_ADDCK1D_VAL2, dack->addck_d[1][1] & 0x3f);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x3);
}

static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
	}

	dack->biask_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
	dack->biask_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);

	dack->dadck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
	dack->dadck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
}

static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);
		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
	}

	dack->biask_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
	dack->biask_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);

	dack->dadck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
	dack->dadck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
}

static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	s32 dc_re = 0, dc_im = 0;
	u32 tmp;
	u32 i;

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_addc_defs_a_tbl,
				 &rtw8852b_check_addc_defs_b_tbl);

	for (i = 0; i < ADDC_T_AVG; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
		dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
		dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
	}

	dc_re /= ADDC_T_AVG;
	dc_im /= ADDC_T_AVG;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}

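/* ADC DC offset calibration (ADDCK) for both paths: enter AFE debug mode,
 * log the residual DC before and after the one-shot trigger, and flag a
 * timeout if the done bit never rises.
 */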
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* S0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x30, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);

	/* S1 */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, R_ADDCKR1, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(1), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}

static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_en_defs_a_tbl,
				 &rtw8852b_check_dadc_en_defs_b_tbl);

	_check_addc(rtwdev, path);

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_check_dadc_dis_defs_a_tbl,
				 &rtw8852b_check_dadc_dis_defs_b_tbl);
}

static bool _dack_s0_check_done(struct rtw89_dev *rtwdev, bool part1)
{
	if (part1) {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0)
			return false;
	} else {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
			return false;
	}

	return true;
}

static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_1_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
		dack->msbk_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_2_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s0_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n");
		dack->dadck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s0_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");

	_dack_backup_s0(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}

static bool _dack_s1_check_done(struct rtw89_dev *rtwdev, bool part1)
{
	if (part1) {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 &&
		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0)
			return false;
	} else {
		if (rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK_S1P2_OK) == 0 &&
		    rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK_S1P3_OK) == 0)
			return false;
	}

	return true;
}

static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_1_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, true);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
		dack->msbk_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_2_defs_tbl);

	ret = read_poll_timeout_atomic(_dack_s1_check_done, done, done, 1, 10000,
				       false, rtwdev, false);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
		dack->dadck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dack_s1_3_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");

	_check_dadc(rtwdev, RF_PATH_B);
	_dack_backup_s1(rtwdev);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}

static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}

static void _dack_dump(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;
	u8 t;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[0][0], dack->addck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[1][0], dack->addck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[1][0], dack->dadck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[0][0], dack->biask_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[1][0], dack->biask_d[1][1]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
}

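/* Top-level DAC calibration: AFE init, D-die RCK, then ADDCK with backup and
 * reload of the results, then per-path DACK. Both RF paths are forced to a
 * fixed mode while calibrating and restored at the end.
 */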
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x1\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");

	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
	_afe_init(rtwdev);
	_drck(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
	_addck(rtwdev);
	_addck_backup(rtwdev);
	_addck_reload(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
	_dack(rtwdev);
	_dack_dump(rtwdev);
	dack->dack_done = true;

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}

static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
		tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
		break;
	default:
		break;
	}
}

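/* Kick one NCTL one-shot command for the requested calibration type and wait
 * for it to finish. Returns true if the calibration reported failure.
 */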
static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path, u8 ktype)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 iqk_cmd;
	bool fail;

	switch (ktype) {
	case ID_FLOK_COARSE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_FLOK_FINE:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_FLOK_VBUFFER:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011);
		iqk_cmd = 0x408 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		return false;
	}

	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	udelay(1);
	fail = _iqk_check_cal(rtwdev, path);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return fail;
}

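/* Wideband RX IQK: sweep all RX gain groups, issuing a one-shot RXK per
 * group. Any per-group failure falls back to the default narrow-band RX CFIR.
 */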
static bool _rxk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool fail;
	u8 gp;

	for (gp = 0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _g_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
				       _g_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
				       _g_idxattc1[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
				       _a_idxrxgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
				       _a_idxattc2[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
				       _a_idxattc1[gp]);
			break;
		default:
			break;
		}

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP_V1, gp);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(16 + gp + path * 4), fail);
		kfail |= fail;
	}
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (kfail) {
		iqk_info->nb_rxcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x0);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->nb_rxcfir[path] = 0x40000000;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_RXCFIR, 0x5);
		iqk_info->is_wb_rxiqk[path] = true;
	}

	return kfail;
}

static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		       u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	const u8 gp = 0x3;
	bool kfail = false;
	bool fail;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _g_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G,
			       _g_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G,
			       _g_idxattc1[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM,
			       _a_idxrxgain[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT,
			       _a_idxattc2[gp]);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2,
			       _a_idxattc1[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
	udelay(1);

	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
	kfail |= fail;
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);

	if (!kfail)
		iqk_info->nb_rxcfir[path] =
			 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD) | 0x2;
	else
		iqk_info->nb_rxcfir[path] = 0x40000002;

	return kfail;
}

static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
		udelay(1);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_RXCK_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x0);
	}
}

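/* Wideband TX IQK: same group sweep as the RX side, but programming the TX
 * power/track ranges and BB gain per group before each one-shot TXK.
 */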
static bool _txk_group_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool fail;
	u8 gp;

	for (gp = 0x0; gp < RTW8852B_RXK_GROUP_NR; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _g_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _g_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _g_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, _g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
				       _a_power_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
				       _a_track_range[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
				       _a_gain_bb[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, _a_itqt[gp]);
			break;
		default:
			break;
		}

		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SEL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_SET, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(8 + gp + path * 4), fail);
		kfail |= fail;
	}

	if (kfail) {
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		iqk_info->nb_txcfir[path] = 0x40000000;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x5);
		iqk_info->is_wb_txiqk[path] = true;
	}

	return kfail;
}

static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail;
	u8 gp = 0x2;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _g_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _g_itqt[gp]);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       _a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       _a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       _a_gain_bb[gp]);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, _a_itqt[gp]);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	kfail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);

	if (!kfail)
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
					      MASKDWORD) | 0x2;
	else
		iqk_info->nb_txcfir[path] = 0x40000002;

	return kfail;
}

static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);

	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
	else
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TXVBUF, RR_TXVBUF_DACEN, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x7c = %x\n", path,
		    rtw89_read_rf(rtwdev, path, RR_TXVBUF, RFREG_MASK));
}

static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool is_fail1, is_fail2;
	u32 vbuff_i;
	u32 vbuff_q;
	u32 core_i;
	u32 core_q;
	u32 tmp;
	u8 ch;

	tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
	core_i = FIELD_GET(RR_TXMO_COI, tmp);
	core_q = FIELD_GET(RR_TXMO_COQ, tmp);
	ch = (iqk_info->iqk_times / 2) % RTW89_IQK_CHS_NR;

	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
		is_fail1 = true;
	else
		is_fail1 = false;

	iqk_info->lok_idac[ch][path] = tmp;

	tmp = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
	vbuff_i = FIELD_GET(RR_LOKVB_COI, tmp);
	vbuff_q = FIELD_GET(RR_LOKVB_COQ, tmp);

	if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
		is_fail2 = true;
	else
		is_fail2 = false;

	iqk_info->lok_vbuf[ch][path] = tmp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, lok_idac[%x][%x] = 0x%x\n", path, ch, path,
		    iqk_info->lok_idac[ch][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, lok_vbuf[%x][%x] = 0x%x\n", path, ch, path,
		    iqk_info->lok_vbuf[ch][path]);

	return is_fail1 | is_fail2;
}

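/* LO leakage calibration: coarse FLOK, a VBUFFER pass, fine FLOK, then a
 * final VBUFFER pass; the resulting IDAC/VBUF codes are range-checked by
 * _lok_finetune_check().
 */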
static bool _iqk_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
		break;
	default:
		break;
	}

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
	iqk_info->lok_cor_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x9);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
	iqk_info->lok_fin_fail[0][path] = tmp;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
		break;
	default:
		break;
	}

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, 0x24);
	_iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);

	return _lok_finetune_check(rtwdev, path);
}

static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW2, 0x00);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		udelay(1);
		break;
	default:
		break;
	}
}

static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
}

static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp;
	bool flag;

	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, iqk_info->iqk_times);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
	if (tmp)
		iqk_info->iqk_fail_cnt++;
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
			       iqk_info->iqk_fail_cnt);
}

static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_is_fail = false;
	const int try = 3;
	u8 ibias = 0x1;
	u8 i;

	_iqk_txclk_setting(rtwdev, path);

	/* LOK */
	for (i = 0; i < try; i++) {
		_lok_res_table(rtwdev, path, ibias++);
		_iqk_txk_setting(rtwdev, path);
		lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
		if (!lok_is_fail)
			break;
	}

	if (lok_is_fail)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] LOK (%d) fail\n", path);

	/* TXK */
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);

	/* RX */
	_iqk_rxclk_setting(rtwdev, path);
	_iqk_rxk_setting(rtwdev, path);
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
	else
		iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);

	_iqk_info_iqk(rtwdev, phy_idx, path);
}

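/* Record the current channel into the IQK bookkeeping: reuse an empty MCC
 * slot when one exists, otherwise alternate between the two table entries.
 */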
_iqk_get_ch_info(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,u8 path,enum rtw89_chanctx_idx chanctx_idx)1385 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
1386 			     enum rtw89_chanctx_idx chanctx_idx)
1387 {
1388 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
1389 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1390 	u32 reg_rf18;
1391 	u32 reg_35c;
1392 	u8 idx;
1393 	u8 get_empty_table = false;
1394 
1395 	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1396 		if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
1397 			get_empty_table = true;
1398 			break;
1399 		}
1400 	}
1401 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);
1402 
1403 	if (!get_empty_table) {
1404 		idx = iqk_info->iqk_table_idx[path] + 1;
1405 		if (idx > 1)
1406 			idx = 0;
1407 	}
1408 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);
1409 
1410 	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1411 	reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);
1412 
1413 	iqk_info->iqk_band[path] = chan->band_type;
1414 	iqk_info->iqk_bw[path] = chan->band_width;
1415 	iqk_info->iqk_ch[path] = chan->channel;
1416 	iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
1417 	iqk_info->iqk_table_idx[path] = idx;
1418 
1419 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
1420 		    path, reg_rf18, idx);
1421 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
1422 		    path, reg_rf18);
1423 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
1424 		    iqk_info->iqk_times, idx);
1425 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
1426 		    idx, path, iqk_info->iqk_mcc_ch[idx][path]);
1427 
1428 	if (reg_35c == 0x01)
1429 		iqk_info->syn1to2 = 0x1;
1430 	else
1431 		iqk_info->syn1to2 = 0x0;
1432 
1433 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1434 		    "[IQK]S%x, iqk_info->syn1to2= 0x%x\n", path,
1435 		    iqk_info->syn1to2);
1436 
1437 	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852B_IQK_VER);
1438 	/* 2GHz/5GHz/6GHz = 0/1/2 */
1439 	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
1440 			       iqk_info->iqk_band[path]);
1441 	/* 20/40/80 = 0/1/2 */
1442 	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
1443 			       iqk_info->iqk_bw[path]);
1444 	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
1445 			       iqk_info->iqk_ch[path]);
1446 }
1447 
_iqk_start_iqk(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,u8 path)1448 static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1449 {
1450 	_iqk_by_path(rtwdev, phy_idx, path);
1451 }
1452 
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail;

	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00000e19 + (path << 4));
	fail = _iqk_check_cal(rtwdev, path);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s result = %x\n", __func__, fail);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS, B_IQK_RES_K, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K1, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
}

static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_reg3_def *def;
	int size;
	u8 kpath;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);

	kpath = _kpath(rtwdev, phy_idx);

	switch (kpath) {
	case RF_A:
	case RF_B:
		return;
	default:
		size = ARRAY_SIZE(rtw8852b_restore_nondbcc_path01);
		def = rtw8852b_restore_nondbcc_path01;
		break;
	}

	for (i = 0; i < size; i++, def++)
		rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx;

	idx = iqk_info->iqk_table_idx[path];
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (3)idx = %x\n", idx);

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x54 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_CFIR_LUT + (path << 8), MASKDWORD));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK](1)S%x, 0x8%x04 = 0x%x\n", path, 1 << path,
		    rtw89_phy_read32_mask(rtwdev, R_COEF_SEL + (path << 8), MASKDWORD));
}

static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_reg3_def *def;
	int size;
	u8 kpath;
	int i;

	kpath = _kpath(rtwdev, phy_idx);

	switch (kpath) {
	case RF_A:
	case RF_B:
		return;
	default:
		size = ARRAY_SIZE(rtw8852b_set_nondbcc_path01);
		def = rtw8852b_set_nondbcc_path01;
		break;
	}

	for (i = 0; i < size; i++, def++)
		rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void _iqk_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx, path;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
	if (iqk_info->is_iqk_init)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	iqk_info->is_iqk_init = true;
	iqk_info->is_nbiqk = false;
	iqk_info->iqk_fft_en = false;
	iqk_info->iqk_sram_en = false;
	iqk_info->iqk_cfir_en = false;
	iqk_info->iqk_xym_en = false;
	iqk_info->iqk_times = 0x0;

	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
		iqk_info->iqk_channel[idx] = 0x0;
		for (path = 0; path < RTW8852B_IQK_SS; path++) {
			iqk_info->lok_cor_fail[idx][path] = false;
			iqk_info->lok_fin_fail[idx][path] = false;
			iqk_info->iqk_tx_fail[idx][path] = false;
			iqk_info->iqk_rx_fail[idx][path] = false;
			iqk_info->iqk_mcc_ch[idx][path] = 0x0;
			iqk_info->iqk_table_idx[path] = 0x0;
		}
	}
}

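/* Poll the RF mode field of every path in @kpath until it no longer reads
 * 2 (which appears to be the TX state), waiting up to 5 ms per path.
 * A timeout is only logged; calibration proceeds regardless.
 */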
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u32 rf_mode;
	u8 path;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
					       rf_mode != 2, 2, 5000, false,
					       rtwdev, path, RR_MOD, RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
	}
}

static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
			   bool is_pause)
{
	if (!is_pause)
		return;

	_wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
}

static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852B_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}

static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
		 enum rtw89_chanctx_idx chanctx_idx)
{
	u8 kpath = _kpath(rtwdev, phy_idx);

	switch (kpath) {
	case RF_A:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
		break;
	case RF_B:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
		break;
	case RF_AB:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
		break;
	default:
		break;
	}
}

static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			  u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
		reg_bkup[path][i] =
			rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}

static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
			    const u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
{
	u8 i;

	for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
		rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD,
				       reg_bkup[path][i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
			    reg[i] + (path << 8), reg_bkup[path][i]);
	}
}

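/* Translate the MDPD order field read from R_LDL_NORM (0/1/2) into the
 * encoding used by the DPD enable register: 0x3 >> order yields 3/1/0.
 */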
static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
{
	u8 order;
	u8 val;

	order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
	val = 0x3 >> order;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);

	return val;
}

static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 val, kidx = dpk->cur_idx[path];

	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       MASKBYTE3, _dpk_order_convert(rtwdev) << 1 | val);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
		    kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
}

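/* Issue one DPK sub-command (see enum rtw8852b_dpk_id) to the KIP via
 * R_NCTL_CFG and busy-wait for completion: the command word is
 * (id << 8) | (0x19 + path * 0x10), and completion is detected by 0xbff8
 * reading 0x55 followed by 0x80fc reading 0x8000.
 */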
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, enum rtw8852b_dpk_id id)
{
	u16 dpk_cmd;
	u32 val;
	int ret;

	dpk_cmd = (id << 8) | (0x19 + (path << 4));
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 20000, false,
				       rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");

	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
				       1, 2000, false,
				       rtwdev, 0x80fc, MASKLWORD);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 2ms!!!!\n");

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] one-shot for %s = 0x%x\n",
		    id == 0x06 ? "LBK_RXIQK" :
		    id == 0x10 ? "SYNC" :
		    id == 0x11 ? "MDPK_IDL" :
		    id == 0x12 ? "MDPK_MPA" :
		    id == 0x13 ? "GAIN_LOSS" :
		    id == 0x14 ? "PWR_CAL" :
		    id == 0x15 ? "DPK_RXAGC" :
		    id == 0x16 ? "KIP_PRESET" :
		    id == 0x17 ? "KIP_RESTORE" : "DPK_TXAGC",
		    dpk_cmd);
}

static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
	_set_rx_dck(rtwdev, phy, path);
}

static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}

static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath,
				enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x1);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
}

static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath,
				enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);

	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);

	if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x0);
	}
}

static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}

static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_kip_defs_tbl);

	if (rtwdev->hal.cv > CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), B_DPD_COM_OF, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}

static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	u8 cur_rxbb;
	u32 tmp;

	cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);

	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);

	tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);

	if (cur_rxbb >= 0x11)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
	else if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
	else
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);

	rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);
	udelay(70);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
}

static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);

	udelay(200);

	dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}

static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
		rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ARF 0x0/0x11/0x1a = 0x%x/ 0x%x/ 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
}

static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_bypass)
{
	if (is_bypass) {
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS2, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       B_RXIQC_BYPASS, 0x1);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	} else {
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
		rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
			    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
						  MASKDWORD));
	}
}

static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
		rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
	else
		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
}

static void _dpk_table_select(struct rtw89_dev *rtwdev,
			      enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	u8 val;

	val = 0x80 + kidx * 0x20 + gain * 0x10;
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
		    gain, val);
}

static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u16 dc_i, dc_q;
	u8 corr_val, corr_idx;

	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);

	corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
	corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
		    path, corr_idx, corr_val);

	dpk->corr_idx[path][kidx] = corr_idx;
	dpk->corr_val[path][kidx] = corr_val;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
	dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);

	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q = %d / %d\n",
		    path, dc_i, dc_q);

	dpk->dc_i[path][kidx] = dc_i;
	dpk->dc_q[path][kidx] = dc_q;

	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	    corr_val < DPK_SYNC_TH_CORR)
		return true;
	else
		return false;
}

static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, SYNC);

	return _dpk_sync_check(rtwdev, path, kidx);
}

static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);

	return dgain;
}

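/* Map a raw digital-gain reading onto an RXBB gain offset. bnd[] holds
 * descending dgain thresholds; the ladder below returns +6 down to -8
 * (the 0xff..0xf8 constants are the s8 encodings of -1..-8) depending on
 * which interval dgain falls into.
 */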
static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
{
	static const u16 bnd[15] = {
		0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
		0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
	};
	s8 offset;

	if (dgain >= bnd[0])
		offset = 0x6;
	else if (bnd[0] > dgain && dgain >= bnd[1])
		offset = 0x6;
	else if (bnd[1] > dgain && dgain >= bnd[2])
		offset = 0x5;
	else if (bnd[2] > dgain && dgain >= bnd[3])
		offset = 0x4;
	else if (bnd[3] > dgain && dgain >= bnd[4])
		offset = 0x3;
	else if (bnd[4] > dgain && dgain >= bnd[5])
		offset = 0x2;
	else if (bnd[5] > dgain && dgain >= bnd[6])
		offset = 0x1;
	else if (bnd[6] > dgain && dgain >= bnd[7])
		offset = 0x0;
	else if (bnd[7] > dgain && dgain >= bnd[8])
		offset = 0xff;
	else if (bnd[8] > dgain && dgain >= bnd[9])
		offset = 0xfe;
	else if (bnd[9] > dgain && dgain >= bnd[10])
		offset = 0xfd;
	else if (bnd[10] > dgain && dgain >= bnd[11])
		offset = 0xfc;
	else if (bnd[11] > dgain && dgain >= bnd[12])
		offset = 0xfb;
	else if (bnd[12] > dgain && dgain >= bnd[13])
		offset = 0xfa;
	else if (bnd[13] > dgain && dgain >= bnd[14])
		offset = 0xf9;
	else if (bnd[14] > dgain)
		offset = 0xf8;
	else
		offset = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);

	return offset;
}

static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}

static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 kidx)
{
	_dpk_table_select(rtwdev, path, kidx, 1);
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
}

static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
}

static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
				enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
}

static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 txagc)
{
	rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, txagc);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
}

static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path)
{
	u32 tmp;

	tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, tmp);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL_V1, 0x8);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] set RXBB = 0x%x (RF0x0[9:5] = 0x%x)\n",
		    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB_V1),
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB));
}

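/* Subtract a gain-loss offset from the current TXAGC, clamping the result
 * to [DPK_TXAGC_LOWER, DPK_TXAGC_UPPER] before programming it via the KIP.
 */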
static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, s8 gain_offset)
{
	u8 txagc;

	txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);

	if (txagc - gain_offset < DPK_TXAGC_LOWER)
		txagc = DPK_TXAGC_LOWER;
	else if (txagc - gain_offset > DPK_TXAGC_UPPER)
		txagc = DPK_TXAGC_UPPER;
	else
		txagc = txagc - gain_offset;

	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
		    gain_offset, txagc);

	return txagc;
}

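/* Read back the PA scan buffer. With @is_check set, compare the power of
 * sample 0 against sample 31 and return true when the ratio reaches 8/5,
 * which the AGC state machine treats as an over-range gain-loss result.
 * Without @is_check the function only dumps all 32 samples to the debug
 * log; val1/val2 then stay zero, so it still returns true.
 */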
static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	if (val1_i * val1_i + val1_q * val1_q >=
	    (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;

	return false;
}

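/* DPK AGC state machine. Starting from @init_txagc, iterate SYNC ->
 * (optional RXBB adjustment) -> gain-loss measurement, raising or lowering
 * TXAGC until the gain-loss index is usable, a TXAGC bound is hit, or the
 * 6-round (200-iteration hard limit) budget is exhausted. Returns the
 * final TXAGC, or 0xff when SYNC itself fails.
 */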
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
	u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
	u16 dgain = 0;
	s8 offset;
	int limit = 200;

	tmp_txagc = init_txagc;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			if (_dpk_sync(rtwdev, phy, path, kidx)) {
				tmp_txagc = 0xff;
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			if (loss_only == 1 || limited_rxbb == 1)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;

		case DPK_AGC_STEP_GAIN_ADJ:
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD,
						 RFREG_MASKRXBB);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			if (tmp_rxbb + offset > 0x1f) {
				tmp_rxbb = 0x1f;
				limited_rxbb = 1;
			} else if (tmp_rxbb + offset < 0) {
				tmp_rxbb = 0;
				limited_rxbb = 1;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB,
				       tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);

			if (offset || agc_cnt == 0) {
				if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}

			if (dgain > 1922 || dgain < 342)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);
			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
			    tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			if (tmp_txagc == DPK_TXAGC_LOWER) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0x3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			if (tmp_txagc == DPK_TXAGC_UPPER) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0xfe);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_gl_idx);
			goout = 1;
			agc_cnt++;
			break;

		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && limit-- > 0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
		    tmp_rxbb);

	return tmp_txagc;
}

static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
{
	switch (order) {
	case 0:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
		break;
	case 1:
	case 2:
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
		rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
}

static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
	    dpk->bp[path][kidx].band == RTW89_BAND_5G)
		_dpk_set_mdpd_para(rtwdev, 0x2);
	else
		_dpk_set_mdpd_para(rtwdev, 0x0);

	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}

static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	const u16 pwsf = 0x78;
	u8 gs = dpk->dpk_gs[phy];

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
			       B_COEF_SEL_MDPD, kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
		    pwsf, gs);

	dpk->bp[path][kidx].txagc_dpk = txagc;
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);

	dpk->bp[path][kidx].gs = gs;
	if (dpk->dpk_gs[phy] == 0x7f)
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x007f7f7f);
	else
		rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
				       MASKDWORD, 0x005b5b5b);

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_ORDER_V1, _dpk_order_convert(rtwdev));
	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
}

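/* Scan the DPK backup entries for one matching the current band/channel;
 * when found, point the MDPD coefficient selector at that entry and mark
 * the path as reloaded so a full calibration can be skipped.
 */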
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			      enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	bool is_reload = false;
	u8 idx, cur_band, cur_ch;

	cur_band = chan->band_type;
	cur_ch = chan->channel;

	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
		if (cur_band != dpk->bp[path][idx].band ||
		    cur_ch != dpk->bp[path][idx].ch)
			continue;

		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_MDPD, idx);
		dpk->cur_idx[path] = idx;
		is_reload = true;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] reload S%d[%d] success\n", path, idx);
	}

	return is_reload;
}

static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 gain,
		      enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 txagc = 0x38, kidx = dpk->cur_idx[path];
	bool is_fail = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);

	_rfk_rf_direct_cntrl(rtwdev, path, false);
	_rfk_drf_direct_cntrl(rtwdev, path, false);

	_dpk_kip_pwr_clk_on(rtwdev, path);
	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);
	_dpk_rf_setting(rtwdev, gain, path, kidx);
	_dpk_rx_dck(rtwdev, phy, path);

	_dpk_kip_preset(rtwdev, phy, path, kidx);
	_dpk_kip_set_rxagc(rtwdev, phy, path);
	_dpk_table_select(rtwdev, path, kidx, gain);

	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);

	if (txagc == DPK_TXAGC_INVAL) {
		is_fail = true;
	} else {
		_dpk_get_thermal(rtwdev, kidx, path);

		_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);

		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

		_dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
	}

	if (!is_fail)
		dpk->bp[path][kidx].path_ok = true;
	else
		dpk->bp[path][kidx].path_ok = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
		    is_fail ? "Check" : "Success");

	return is_fail;
}

static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
			    enum rtw89_phy_idx phy, u8 kpath,
			    enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
	u32 kip_bkup[RTW8852B_DPK_RF_PATH][RTW8852B_DPK_KIP_REG_NUM] = {};
	u32 backup_rf_val[RTW8852B_DPK_RF_PATH][BACKUP_RF_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	bool is_fail = true, reloaded[RTW8852B_DPK_RF_PATH] = {};
	u8 path;

	if (dpk->is_dpk_reload_en) {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
			reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
							   chanctx_idx);
			if (!reloaded[path] && dpk->bp[path][0].ch)
				dpk->cur_idx[path] = !dpk->cur_idx[path];
			else
				_dpk_onoff(rtwdev, path, false);
		}
	} else {
		for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
			dpk->cur_idx[path] = 0;
	}

	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		_dpk_information(rtwdev, phy, path, chanctx_idx);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	_dpk_bb_afe_setting(rtwdev, phy, path, kpath, chanctx_idx);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
		_dpk_onoff(rtwdev, path, is_fail);
	}

	_dpk_bb_afe_restore(rtwdev, phy, path, kpath, chanctx_idx);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		_dpk_kip_restore(rtwdev, path);
		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
		_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}

static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			      enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_fem_info *fem = &rtwdev->fem;

	if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
		return true;
	} else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
		return true;
	} else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
		return true;
	}

	return false;
}

static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, kpath;

	kpath = _kpath(rtwdev, phy);

	for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
		if (kpath & BIT(path))
			_dpk_onoff(rtwdev, path, true);
	}
}

static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
		 enum rtw89_chanctx_idx chanctx_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
		    RTW8852B_DPK_VER, rtwdev->hal.cv,
		    RTW8852B_RF_REL_VERSION);

	if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
		_dpk_force_bypass(rtwdev, phy);
	else
		_dpk_cal_select(rtwdev, force, phy, RF_AB, chanctx_idx);
}

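/* Periodic DPK tracking: compare the current averaged thermal value with
 * the one sampled at calibration time, scale the delta per band (x3/2 on
 * 2 GHz, x5/2 otherwise) and fold it, together with the TXAGC offsets when
 * TSSI is active, into the pwsf words of the active DPK entry, unless
 * tracking is disabled through R_DPK_TRK.
 */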
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
	s8 delta_ther[2] = {};
	u8 trk_idx, txagc_rf;
	u8 path, kidx;
	u16 pwsf[2];
	u8 cur_ther;
	u32 tmp;

	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		if (dpk->bp[path][kidx].ch && cur_ther)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						 0x0000003f);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE2);
			txagc_bb_tp =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
						      B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);

			tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						    B_DPD_COM_OF);
			if (tmp == 0x1) {
				txagc_ofst = 0;
				rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
					    "[DPK_TRK] HW txagc offset mode\n");
			}

			if (txagc_rf && cur_ther)
				ini_diff = txagc_ofst + delta_ther[path];

			tmp = rtw89_phy_read32_mask(rtwdev,
						    R_P0_TXDPD + (path << 13),
						    B_P0_TXDPD);
			if (tmp == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
			}
		} else {
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
		if (!tmp && txagc_rf) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_0, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_1, pwsf[1]);
		}
	}
}

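/* Decide where the DPD power backoff lives: when the BB OFDM backoff plus
 * TX scale reaches 44, keep the backoff in the BB (DPK gain scaling
 * gs = 0x7f, i.e. no extra DPD backoff); otherwise use the default
 * gs = 0x5b.
 */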
static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 tx_scale, ofdm_bkof, path, kpath;

	kpath = _kpath(rtwdev, phy);

	ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
	tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);

	if (ofdm_bkof + tx_scale >= 44) {
		/* move dpd backoff to bb, and set dpd backoff to 0 */
		dpk->dpk_gs[phy] = 0x7f;
		for (path = 0; path < RF_PATH_NUM_8852B; path++) {
			if (!(kpath & BIT(path)))
				continue;

			rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
					       B_DPD_CFG, 0x7f7f7f);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[RFK] Set S%d DPD backoff to 0dB\n", path);
		}
	} else {
		dpk->dpk_gs[phy] = 0x5b;
	}
}

static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
	enum rtw89_band band = chan->band_type;

	if (band == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
}

static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);

	if (path == RF_PATH_A)
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_sys_a_defs_2g_tbl,
					 &rtw8852b_tssi_sys_a_defs_5g_tbl);
	else
		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
					 &rtw8852b_tssi_sys_b_defs_2g_tbl,
					 &rtw8852b_tssi_sys_b_defs_5g_tbl);
}

static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_init_txpwr_defs_a_tbl,
				 &rtw8852b_tssi_init_txpwr_defs_b_tbl);
}

static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_init_txpwr_he_tb_defs_a_tbl,
				 &rtw8852b_tssi_init_txpwr_he_tb_defs_b_tbl);
}

static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852b_tssi_dck_defs_a_tbl,
				 &rtw8852b_tssi_dck_defs_b_tbl);
}

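/* Program the 64-entry thermal offset table used for TSSI compensation.
 * The RTW8852B_TSSI_GET_VAL() helper below packs four consecutive s8
 * offsets into one little-endian u32 register write. Entries 0..31 take
 * the negated "thermal down" deltas, entries 63..32 the "thermal up"
 * deltas; a thermal reading of 0xff means no valid sample, so the table is
 * zeroed and the thermal meter is seeded with the mid-scale value 32.
 */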
_tssi_set_tmeter_tbl(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)2788 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2789 				 enum rtw89_rf_path path, const struct rtw89_chan *chan)
2790 {
2791 #define RTW8852B_TSSI_GET_VAL(ptr, idx)			\
2792 ({							\
2793 	s8 *__ptr = (ptr);				\
2794 	u8 __idx = (idx), __i, __v;			\
2795 	u32 __val = 0;					\
2796 	for (__i = 0; __i < 4; __i++) {			\
2797 		__v = (__ptr[__idx + __i]);		\
2798 		__val |= (__v << (8 * __i));		\
2799 	}						\
2800 	__val;						\
2801 })
2802 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
2803 	u8 ch = chan->channel;
2804 	u8 subband = chan->subband_type;
2805 	const s8 *thm_up_a = NULL;
2806 	const s8 *thm_down_a = NULL;
2807 	const s8 *thm_up_b = NULL;
2808 	const s8 *thm_down_b = NULL;
2809 	u8 thermal = 0xff;
2810 	s8 thm_ofst[64] = {0};
2811 	u32 tmp = 0;
2812 	u8 i, j;
2813 
2814 	switch (subband) {
2815 	default:
2816 	case RTW89_CH_2G:
2817 		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_p;
2818 		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_2ga_n;
2819 		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_p;
2820 		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_2gb_n;
2821 		break;
2822 	case RTW89_CH_5G_BAND_1:
2823 		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[0];
2824 		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[0];
2825 		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[0];
2826 		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[0];
2827 		break;
2828 	case RTW89_CH_5G_BAND_3:
2829 		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[1];
2830 		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[1];
2831 		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[1];
2832 		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[1];
2833 		break;
2834 	case RTW89_CH_5G_BAND_4:
2835 		thm_up_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_p[2];
2836 		thm_down_a = rtw89_8852b_trk_cfg.delta_swingidx_5ga_n[2];
2837 		thm_up_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_p[2];
2838 		thm_down_b = rtw89_8852b_trk_cfg.delta_swingidx_5gb_n[2];
2839 		break;
2840 	}
2841 
2842 	if (path == RF_PATH_A) {
2843 		thermal = tssi_info->thermal[RF_PATH_A];
2844 
2845 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2846 			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
2847 
2848 		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
2849 		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
2850 
2851 		if (thermal == 0xff) {
2852 			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
2853 			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
2854 
2855 			for (i = 0; i < 64; i += 4) {
2856 				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
2857 
2858 				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2859 					    "[TSSI] write 0x%x val=0x%08x\n",
2860 					    R_P0_TSSI_BASE + i, 0x0);
2861 			}
2862 
2863 		} else {
2864 			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
2865 			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
2866 					       thermal);
2867 
2868 			i = 0;
2869 			for (j = 0; j < 32; j++)
2870 				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2871 					      -thm_down_a[i++] :
2872 					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
2873 
2874 			i = 1;
2875 			for (j = 63; j >= 32; j--)
2876 				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2877 					      thm_up_a[i++] :
2878 					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];
2879 
2880 			for (i = 0; i < 64; i += 4) {
2881 				tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
2882 				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
2883 
2884 				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2885 					    "[TSSI] write 0x%x val=0x%08x\n",
2886 					    0x5c00 + i, tmp);
2887 			}
2888 		}
2889 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
2890 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
2891 
2892 	} else {
2893 		thermal = tssi_info->thermal[RF_PATH_B];
2894 
2895 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2896 			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
2897 
2898 		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
2899 		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
2900 
2901 		if (thermal == 0xff) {
2902 			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
2903 			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
2904 
2905 			for (i = 0; i < 64; i += 4) {
2906 				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
2907 
2908 				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2909 					    "[TSSI] write 0x%x val=0x%08x\n",
2910 					    0x7c00 + i, 0x0);
2911 			}
2912 
2913 		} else {
2914 			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
2915 			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
2916 					       thermal);
2917 
2918 			i = 0;
2919 			for (j = 0; j < 32; j++)
2920 				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2921 					      -thm_down_b[i++] :
2922 					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
2923 
2924 			i = 1;
2925 			for (j = 63; j >= 32; j--)
2926 				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
2927 					      thm_up_b[i++] :
2928 					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];
2929 
2930 			for (i = 0; i < 64; i += 4) {
2931 				tmp = RTW8852B_TSSI_GET_VAL(thm_ofst, i);
2932 				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
2933 
2934 				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
2935 					    "[TSSI] write 0x%x val=0x%08x\n",
2936 					    0x7c00 + i, tmp);
2937 			}
2938 		}
2939 		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
2940 		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
2941 	}
2942 #undef RTW8852B_TSSI_GET_VAL
2943 }
2944 
2945 static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2946 				   enum rtw89_rf_path path)
2947 {
2948 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2949 				 &rtw8852b_tssi_dac_gain_defs_a_tbl,
2950 				 &rtw8852b_tssi_dac_gain_defs_b_tbl);
2951 }
2952 
2953 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2954 				enum rtw89_rf_path path, const struct rtw89_chan *chan)
2955 {
2956 	enum rtw89_band band = chan->band_type;
2957 
2958 	if (path == RF_PATH_A)
2959 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2960 					 &rtw8852b_tssi_slope_a_defs_2g_tbl,
2961 					 &rtw8852b_tssi_slope_a_defs_5g_tbl);
2962 	else
2963 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2964 					 &rtw8852b_tssi_slope_b_defs_2g_tbl,
2965 					 &rtw8852b_tssi_slope_b_defs_5g_tbl);
2966 }
2967 
2968 static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2969 				    enum rtw89_rf_path path, bool all,
2970 				    const struct rtw89_chan *chan)
2971 {
2972 	enum rtw89_band band = chan->band_type;
2973 	const struct rtw89_rfk_tbl *tbl = NULL;
2974 	u8 ch = chan->channel;
2975 
2976 	if (path == RF_PATH_A) {
2977 		if (band == RTW89_BAND_2G) {
2978 			if (all)
2979 				tbl = &rtw8852b_tssi_align_a_2g_all_defs_tbl;
2980 			else
2981 				tbl = &rtw8852b_tssi_align_a_2g_part_defs_tbl;
2982 		} else if (ch >= 36 && ch <= 64) {
2983 			if (all)
2984 				tbl = &rtw8852b_tssi_align_a_5g1_all_defs_tbl;
2985 			else
2986 				tbl = &rtw8852b_tssi_align_a_5g1_part_defs_tbl;
2987 		} else if (ch >= 100 && ch <= 144) {
2988 			if (all)
2989 				tbl = &rtw8852b_tssi_align_a_5g2_all_defs_tbl;
2990 			else
2991 				tbl = &rtw8852b_tssi_align_a_5g2_part_defs_tbl;
2992 		} else if (ch >= 149 && ch <= 177) {
2993 			if (all)
2994 				tbl = &rtw8852b_tssi_align_a_5g3_all_defs_tbl;
2995 			else
2996 				tbl = &rtw8852b_tssi_align_a_5g3_part_defs_tbl;
2997 		}
2998 	} else {
2999 		if (ch >= 1 && ch <= 14) {
3000 			if (all)
3001 				tbl = &rtw8852b_tssi_align_b_2g_all_defs_tbl;
3002 			else
3003 				tbl = &rtw8852b_tssi_align_b_2g_part_defs_tbl;
3004 		} else if (ch >= 36 && ch <= 64) {
3005 			if (all)
3006 				tbl = &rtw8852b_tssi_align_b_5g1_all_defs_tbl;
3007 			else
3008 				tbl = &rtw8852b_tssi_align_b_5g1_part_defs_tbl;
3009 		} else if (ch >= 100 && ch <= 144) {
3010 			if (all)
3011 				tbl = &rtw8852b_tssi_align_b_5g2_all_defs_tbl;
3012 			else
3013 				tbl = &rtw8852b_tssi_align_b_5g2_part_defs_tbl;
3014 		} else if (ch >= 149 && ch <= 177) {
3015 			if (all)
3016 				tbl = &rtw8852b_tssi_align_b_5g3_all_defs_tbl;
3017 			else
3018 				tbl = &rtw8852b_tssi_align_b_5g3_part_defs_tbl;
3019 		}
3020 	}
3021 
3022 	if (tbl)
3023 		rtw89_rfk_parser(rtwdev, tbl);
3024 }
3025 
3026 static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3027 				 enum rtw89_rf_path path)
3028 {
3029 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3030 				 &rtw8852b_tssi_slope_defs_a_tbl,
3031 				 &rtw8852b_tssi_slope_defs_b_tbl);
3032 }
3033 
3034 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3035 				 enum rtw89_rf_path path)
3036 {
3037 	if (path == RF_PATH_A)
3038 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
3039 	else
3040 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
3041 }
3042 
3043 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
3044 					  enum rtw89_phy_idx phy,
3045 					  enum rtw89_rf_path path)
3046 {
3047 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s   path=%d\n", __func__,
3048 		    path);
3049 
3050 	if (path == RF_PATH_A)
3051 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_MIX, 0x010);
3052 	else
3053 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_RFCTM_DEL, 0x010);
3054 }
3055 
3056 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3057 {
3058 	u8 i;
3059 
3060 	for (i = 0; i < RF_PATH_NUM_8852B; i++) {
3061 		_tssi_set_tssi_track(rtwdev, phy, i);
3062 		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
3063 
3064 		if (i == RF_PATH_A) {
3065 			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
3066 					       B_P0_TSSI_MV_CLR, 0x0);
3067 			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
3068 					       B_P0_TSSI_EN, 0x0);
3069 			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
3070 					       B_P0_TSSI_EN, 0x1);
3071 			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
3072 				       RR_TXGA_V1_TRK_EN, 0x1);
3073 			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
3074 					       B_P0_TSSI_RFC, 0x3);
3075 
3076 			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
3077 					       B_P0_TSSI_OFT, 0xc0);
3078 			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
3079 					       B_P0_TSSI_OFT_EN, 0x0);
3080 			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
3081 					       B_P0_TSSI_OFT_EN, 0x1);
3082 
3083 			rtwdev->is_tssi_mode[RF_PATH_A] = true;
3084 		} else {
3085 			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
3086 					       B_P1_TSSI_MV_CLR, 0x0);
3087 			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
3088 					       B_P1_TSSI_EN, 0x0);
3089 			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
3090 					       B_P1_TSSI_EN, 0x1);
3091 			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
3092 				       RR_TXGA_V1_TRK_EN, 0x1);
3093 			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
3094 					       B_P1_TSSI_RFC, 0x3);
3095 
3096 			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
3097 					       B_P1_TSSI_OFT, 0xc0);
3098 			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
3099 					       B_P1_TSSI_OFT_EN, 0x0);
3100 			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
3101 					       B_P1_TSSI_OFT_EN, 0x1);
3102 
3103 			rtwdev->is_tssi_mode[RF_PATH_B] = true;
3104 		}
3105 	}
3106 }
3107 
3108 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3109 {
3110 	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
3111 	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
3112 	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
3113 	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
3114 	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
3115 	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);
3116 
3117 	rtwdev->is_tssi_mode[RF_PATH_A] = false;
3118 	rtwdev->is_tssi_mode[RF_PATH_B] = false;
3119 }
3120 
3121 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3122 {
3123 	switch (ch) {
3124 	case 1 ... 2:
3125 		return 0;
3126 	case 3 ... 5:
3127 		return 1;
3128 	case 6 ... 8:
3129 		return 2;
3130 	case 9 ... 11:
3131 		return 3;
3132 	case 12 ... 13:
3133 		return 4;
3134 	case 14:
3135 		return 5;
3136 	}
3137 
3138 	return 0;
3139 }
3140 
3141 #define TSSI_EXTRA_GROUP_BIT (BIT(31))
3142 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
3143 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
3144 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
3145 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
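/*
 * Channels sitting on the boundary between two OFDM groups are flagged
 * with bit 31 ("extra group"); callers such as _tssi_get_ofdm_de() then
 * average the DE values of the two neighbouring groups, e.g. for
 * channel 42:
 *
 *	gidx = TSSI_EXTRA_GROUP(5);
 *	de = (tssi_mcs[path][5] + tssi_mcs[path][6]) / 2;
 */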
3146 
3147 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
3148 {
3149 	switch (ch) {
3150 	case 1 ... 2:
3151 		return 0;
3152 	case 3 ... 5:
3153 		return 1;
3154 	case 6 ... 8:
3155 		return 2;
3156 	case 9 ... 11:
3157 		return 3;
3158 	case 12 ... 14:
3159 		return 4;
3160 	case 36 ... 40:
3161 		return 5;
3162 	case 41 ... 43:
3163 		return TSSI_EXTRA_GROUP(5);
3164 	case 44 ... 48:
3165 		return 6;
3166 	case 49 ... 51:
3167 		return TSSI_EXTRA_GROUP(6);
3168 	case 52 ... 56:
3169 		return 7;
3170 	case 57 ... 59:
3171 		return TSSI_EXTRA_GROUP(7);
3172 	case 60 ... 64:
3173 		return 8;
3174 	case 100 ... 104:
3175 		return 9;
3176 	case 105 ... 107:
3177 		return TSSI_EXTRA_GROUP(9);
3178 	case 108 ... 112:
3179 		return 10;
3180 	case 113 ... 115:
3181 		return TSSI_EXTRA_GROUP(10);
3182 	case 116 ... 120:
3183 		return 11;
3184 	case 121 ... 123:
3185 		return TSSI_EXTRA_GROUP(11);
3186 	case 124 ... 128:
3187 		return 12;
3188 	case 129 ... 131:
3189 		return TSSI_EXTRA_GROUP(12);
3190 	case 132 ... 136:
3191 		return 13;
3192 	case 137 ... 139:
3193 		return TSSI_EXTRA_GROUP(13);
3194 	case 140 ... 144:
3195 		return 14;
3196 	case 149 ... 153:
3197 		return 15;
3198 	case 154 ... 156:
3199 		return TSSI_EXTRA_GROUP(15);
3200 	case 157 ... 161:
3201 		return 16;
3202 	case 162 ... 164:
3203 		return TSSI_EXTRA_GROUP(16);
3204 	case 165 ... 169:
3205 		return 17;
3206 	case 170 ... 172:
3207 		return TSSI_EXTRA_GROUP(17);
3208 	case 173 ... 177:
3209 		return 18;
3210 	}
3211 
3212 	return 0;
3213 }
3214 
3215 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3216 {
3217 	switch (ch) {
3218 	case 1 ... 8:
3219 		return 0;
3220 	case 9 ... 14:
3221 		return 1;
3222 	case 36 ... 48:
3223 		return 2;
3224 	case 52 ... 64:
3225 		return 3;
3226 	case 100 ... 112:
3227 		return 4;
3228 	case 116 ... 128:
3229 		return 5;
3230 	case 132 ... 144:
3231 		return 6;
3232 	case 149 ... 177:
3233 		return 7;
3234 	}
3235 
3236 	return 0;
3237 }
3238 
3239 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3240 			    enum rtw89_rf_path path, const struct rtw89_chan *chan)
3241 {
3242 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3243 	u8 ch = chan->channel;
3244 	u32 gidx, gidx_1st, gidx_2nd;
3245 	s8 de_1st;
3246 	s8 de_2nd;
3247 	s8 val;
3248 
3249 	gidx = _tssi_get_ofdm_group(rtwdev, ch);
3250 
3251 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3252 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);
3253 
3254 	if (IS_TSSI_EXTRA_GROUP(gidx)) {
3255 		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3256 		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3257 		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3258 		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3259 		val = (de_1st + de_2nd) / 2;
3260 
3261 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3262 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3263 			    path, val, de_1st, de_2nd);
3264 	} else {
3265 		val = tssi_info->tssi_mcs[path][gidx];
3266 
3267 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3268 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3269 	}
3270 
3271 	return val;
3272 }
3273 
3274 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3275 				 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3276 {
3277 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3278 	u8 ch = chan->channel;
3279 	u32 tgidx, tgidx_1st, tgidx_2nd;
3280 	s8 tde_1st;
3281 	s8 tde_2nd;
3282 	s8 val;
3283 
3284 	tgidx = _tssi_get_trim_group(rtwdev, ch);
3285 
3286 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3287 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3288 		    path, tgidx);
3289 
3290 	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3291 		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3292 		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3293 		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3294 		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3295 		val = (tde_1st + tde_2nd) / 2;
3296 
3297 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3298 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3299 			    path, val, tde_1st, tde_2nd);
3300 	} else {
3301 		val = tssi_info->tssi_trim[path][tgidx];
3302 
3303 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3304 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3305 			    path, val);
3306 	}
3307 
3308 	return val;
3309 }
3310 
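/*
 * Program the TSSI DE (power offset) words for both paths: the CCK DE is
 * the efuse CCK value plus the per-channel trim, and the OFDM DE (shared
 * by the 5M/10M/20M/40M/80M entries) is the efuse MCS value plus the
 * same trim.  All of these live in bits [21:12] (_TSSI_DE_MASK) of their
 * respective registers.
 */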
3311 static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3312 				  const struct rtw89_chan *chan)
3313 {
3314 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3315 	u8 ch = chan->channel;
3316 	u8 gidx;
3317 	s8 ofdm_de;
3318 	s8 trim_de;
3319 	s32 val;
3320 	u32 i;
3321 
3322 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
3323 		    phy, ch);
3324 
3325 	for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
3326 		gidx = _tssi_get_cck_group(rtwdev, ch);
3327 		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
3328 		val = tssi_info->tssi_cck[i][gidx] + trim_de;
3329 
3330 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3331 			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
3332 			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
3333 
3334 		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
3335 		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
3336 
3337 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3338 			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
3339 			    _tssi_de_cck_long[i],
3340 			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
3341 						  _TSSI_DE_MASK));
3342 
3343 		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
3344 		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
3345 		val = ofdm_de + trim_de;
3346 
3347 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3348 			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
3349 			    i, ofdm_de, trim_de);
3350 
3351 		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
3352 		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
3353 		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
3354 		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
3355 		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
3356 		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
3357 
3358 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3359 			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
3360 			    _tssi_de_mcs_20m[i],
3361 			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
3362 						  _TSSI_DE_MASK));
3363 	}
3364 }
3365 
3366 static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
3367 {
3368 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3369 		    "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
3370 		    "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
3371 		    R_TSSI_PA_K1 + (path << 13),
3372 		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD),
3373 		    R_TSSI_PA_K2 + (path << 13),
3374 		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD),
3375 		    R_P0_TSSI_ALIM1 + (path << 13),
3376 		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD),
3377 		    R_P0_TSSI_ALIM3 + (path << 13),
3378 		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD),
3379 		    R_TSSI_PA_K5 + (path << 13),
3380 		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD),
3381 		    R_P0_TSSI_ALIM2 + (path << 13),
3382 		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD),
3383 		    R_P0_TSSI_ALIM4 + (path << 13),
3384 		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD),
3385 		    R_TSSI_PA_K8 + (path << 13),
3386 		    rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD));
3387 }
3388 
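/*
 * If an alignment calibration result has already been captured for this
 * band bucket (2G/5GL/5GM/5GH), replay the four cached ALIM register
 * values instead of recalibrating.  Path B registers sit at a 0x2000
 * offset, hence the (path << 13) arithmetic throughout.
 */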
3389 static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
3390 				enum rtw89_phy_idx phy, enum rtw89_rf_path path,
3391 				const struct rtw89_chan *chan)
3392 {
3393 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3394 	u8 channel = chan->channel;
3395 	u8 band;
3396 
3397 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3398 		    "======>%s   phy=%d   path=%d\n", __func__, phy, path);
3399 
3400 	if (channel >= 1 && channel <= 14)
3401 		band = TSSI_ALIMK_2G;
3402 	else if (channel >= 36 && channel <= 64)
3403 		band = TSSI_ALIMK_5GL;
3404 	else if (channel >= 100 && channel <= 144)
3405 		band = TSSI_ALIMK_5GM;
3406 	else if (channel >= 149 && channel <= 177)
3407 		band = TSSI_ALIMK_5GH;
3408 	else
3409 		band = TSSI_ALIMK_2G;
3410 
3411 	if (tssi_info->alignment_done[path][band]) {
3412 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
3413 				       tssi_info->alignment_value[path][band][0]);
3414 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
3415 				       tssi_info->alignment_value[path][band][1]);
3416 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
3417 				       tssi_info->alignment_value[path][band][2]);
3418 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
3419 				       tssi_info->alignment_value[path][band][3]);
3420 	}
3421 
3422 	_tssi_alimentk_dump_result(rtwdev, path);
3423 }
3424 
3425 static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3426 			enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
3427 			u8 enable, const struct rtw89_chan *chan)
3428 {
3429 	enum rtw89_rf_path_bit rx_path;
3430 
3431 	if (path == RF_PATH_A)
3432 		rx_path = RF_A;
3433 	else if (path == RF_PATH_B)
3434 		rx_path = RF_B;
3435 	else if (path == RF_PATH_AB)
3436 		rx_path = RF_AB;
3437 	else
3438 		rx_path = RF_ABCD; /* don't change path, but still set others */
3439 
3440 	if (enable) {
3441 		rtw8852bx_bb_set_plcp_tx(rtwdev);
3442 		rtw8852bx_bb_cfg_tx_path(rtwdev, path);
3443 		rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
3444 		rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
3445 	}
3446 
3447 	rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
3448 }
3449 
3450 static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
3451 				      enum rtw89_phy_idx phy, const u32 reg[],
3452 				      u32 reg_backup[], u32 reg_num)
3453 {
3454 	u32 i;
3455 
3456 	for (i = 0; i < reg_num; i++) {
3457 		reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD);
3458 
3459 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3460 			    "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i],
3461 			    reg_backup[i]);
3462 	}
3463 }
3464 
3465 static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
3466 				      enum rtw89_phy_idx phy, const u32 reg[],
3467 				      u32 reg_backup[], u32 reg_num)
3469 {
3470 	u32 i;
3471 
3472 	for (i = 0; i < reg_num; i++) {
3473 		rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]);
3474 
3475 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3476 			    "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i],
3477 			    reg_backup[i]);
3478 	}
3479 }
3480 
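/*
 * Flatten the channel number into a compact per-channel backup index:
 * 2G channels 1..14 map to 0..13, then each 5G range follows in steps of
 * two channels (36..64 -> 14..28, 100..144 -> 29..51,
 * 149..177 -> 52..66).  Unknown channels share slot 0.
 */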
3481 static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
3482 {
3483 	u8 channel_index;
3484 
3485 	if (channel >= 1 && channel <= 14)
3486 		channel_index = channel - 1;
3487 	else if (channel >= 36 && channel <= 64)
3488 		channel_index = (channel - 36) / 2 + 14;
3489 	else if (channel >= 100 && channel <= 144)
3490 		channel_index = ((channel - 100) / 2) + 15 + 14;
3491 	else if (channel >= 149 && channel <= 177)
3492 		channel_index = ((channel - 149) / 2) + 38 + 14;
3493 	else
3494 		channel_index = 0;
3495 
3496 	return channel_index;
3497 }
3498 
3499 static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3500 				enum rtw89_rf_path path, const s16 *power,
3501 				u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
3502 {
3503 	u32 tx_counter, tx_counter_tmp;
3504 	const int retry = 100;
3505 	u32 tmp;
3506 	int j, k;
3507 
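	/*
	 * One iteration per TSSI codeword sample: re-arm the TSSI trigger,
	 * fire PMAC packet TX at power[j], then poll the codeword
	 * report-ready bit for up to ~3 ms (100 retries * 30 us) before
	 * reading the report.  A timeout aborts the whole alignment run.
	 */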
3508 	for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
3509 		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
3510 		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);
3511 
3512 		tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
3513 
3514 		tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
3515 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3516 			    "[TSSI PA K] 0x%x = 0x%08x   path=%d\n",
3517 			    _tssi_trigger[path], tmp, path);
3518 
3519 		if (j == 0)
3520 			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true, chan);
3521 		else
3522 			_tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
3523 				    chan);
3524 
3525 		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
3526 		tx_counter_tmp -= tx_counter;
3527 
3528 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3529 			    "[TSSI PA K] First HWTXcounter=%d path=%d\n",
3530 			    tx_counter_tmp, path);
3531 
3532 		for (k = 0; k < retry; k++) {
3533 			tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
3534 						    B_TSSI_CWRPT_RDY);
3535 			if (tmp)
3536 				break;
3537 
3538 			udelay(30);
3539 
3540 			tx_counter_tmp =
3541 				rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
3542 			tx_counter_tmp -= tx_counter;
3543 
3544 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
3545 				    "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
3546 				    k, tx_counter_tmp, path);
3547 		}
3548 
3549 		if (k >= retry) {
3550 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
3551 				    "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
3552 				    k, path);
3553 
3554 			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
3555 			return false;
3556 		}
3557 
3558 		tssi_cw_rpt[j] =
3559 			rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);
3560 
3561 		_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
3562 
3563 		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
3564 		tx_counter_tmp -= tx_counter;
3565 
3566 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3567 			    "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
3568 			    tx_counter_tmp, path);
3569 	}
3570 
3571 	return true;
3572 }
3573 
3574 static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3575 			   enum rtw89_rf_path path, const struct rtw89_chan *chan)
3576 {
3577 	static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
3578 				      0x78e4, 0x49c0, 0x0d18, 0x0d80};
3579 	static const s16 power_2g[4] = {48, 20, 4, 4};
3580 	static const s16 power_5g[4] = {48, 20, 4, 4};
3581 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3582 	s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
3583 	u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
3584 	u8 channel = chan->channel;
3585 	u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
3586 	struct rtw8852bx_bb_tssi_bak tssi_bak;
3587 	s32 aliment_diff, tssi_cw_default;
3588 	u32 start_time, finish_time;
3589 	u32 bb_reg_backup[8] = {0};
3590 	const s16 *power;
3591 	u8 band;
3592 	bool ok;
3593 	u32 tmp;
3594 	u8 j;
3595 
3596 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3597 		    "======> %s   channel=%d   path=%d\n", __func__, channel,
3598 		    path);
3599 
3600 	if (tssi_info->check_backup_aligmk[path][ch_idx]) {
3601 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
3602 				       tssi_info->alignment_backup_by_ch[path][ch_idx][0]);
3603 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
3604 				       tssi_info->alignment_backup_by_ch[path][ch_idx][1]);
3605 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
3606 				       tssi_info->alignment_backup_by_ch[path][ch_idx][2]);
3607 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
3608 				       tssi_info->alignment_backup_by_ch[path][ch_idx][3]);
3609 
3610 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3611 			    "======> %s   Reload TSSI Alignment !!!\n", __func__);
3612 		_tssi_alimentk_dump_result(rtwdev, path);
3613 		return;
3614 	}
3615 
3616 	start_time = ktime_get_ns();
3617 
3618 	if (chan->band_type == RTW89_BAND_2G)
3619 		power = power_2g;
3620 	else
3621 		power = power_5g;
3622 
3623 	if (channel >= 1 && channel <= 14)
3624 		band = TSSI_ALIMK_2G;
3625 	else if (channel >= 36 && channel <= 64)
3626 		band = TSSI_ALIMK_5GL;
3627 	else if (channel >= 100 && channel <= 144)
3628 		band = TSSI_ALIMK_5GM;
3629 	else if (channel >= 149 && channel <= 177)
3630 		band = TSSI_ALIMK_5GH;
3631 	else
3632 		band = TSSI_ALIMK_2G;
3633 
3634 	rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak);
3635 	_tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
3636 
3637 	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
3638 	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
3639 	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
3640 	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
3641 
3642 	ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
3643 	if (!ok)
3644 		goto out;
3645 
3646 	for (j = 0; j < RTW8852B_TSSI_PATH_NR; j++) {
3647 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3648 			    "[TSSI PA K] power[%d]=%d  tssi_cw_rpt[%d]=%d\n", j,
3649 			    power[j], j, tssi_cw_rpt[j]);
3650 	}
3651 
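	/*
	 * Derive the alignment offsets from the two measured codewords.
	 * The slope term "(power[0] - power[1]) * 2" appears to assume two
	 * TSSI codewords per power step, so tssi_alim_offset_1 is the
	 * measured deviation folded back onto the signed 9-bit default
	 * codeword; the same deviation (aliment_diff) is then applied to
	 * the remaining default codewords.
	 */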
3652 	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
3653 				    _tssi_cw_default_mask[1]);
3654 	tssi_cw_default = sign_extend32(tmp, 8);
3655 	tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
3656 			     tssi_cw_rpt[1] + tssi_cw_default;
3657 	aliment_diff = tssi_alim_offset_1 - tssi_cw_default;
3658 
3659 	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
3660 				    _tssi_cw_default_mask[2]);
3661 	tssi_cw_default = sign_extend32(tmp, 8);
3662 	tssi_alim_offset_2 = tssi_cw_default + aliment_diff;
3663 
3664 	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
3665 				    _tssi_cw_default_mask[3]);
3666 	tssi_cw_default = sign_extend32(tmp, 8);
3667 	tssi_alim_offset_3 = tssi_cw_default + aliment_diff;
3668 
3669 	if (path == RF_PATH_A) {
3670 		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
3671 		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
3672 		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
3673 
3674 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
3675 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);
3676 
3677 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3678 			    "[TSSI PA K] tssi_alim_offset = 0x%x   0x%x   0x%x   0x%x\n",
3679 			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
3680 			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
3681 			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
3682 			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
3683 	} else {
3684 		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
3685 		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
3686 		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
3687 
3688 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
3689 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);
3690 
3691 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3692 			    "[TSSI PA K] tssi_alim_offset = 0x%x   0x%x   0x%x   0x%x\n",
3693 			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
3694 			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
3695 			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
3696 			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
3697 	}
3698 
3699 	tssi_info->alignment_done[path][band] = true;
3700 	tssi_info->alignment_value[path][band][0] =
3701 		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
3702 	tssi_info->alignment_value[path][band][1] =
3703 		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
3704 	tssi_info->alignment_value[path][band][2] =
3705 		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
3706 	tssi_info->alignment_value[path][band][3] =
3707 		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
3708 
3709 	tssi_info->check_backup_aligmk[path][ch_idx] = true;
3710 	tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
3711 		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
3712 	tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
3713 		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
3714 	tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
3715 		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
3716 	tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
3717 		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
3718 
3719 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3720 		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
3721 		    path, band, R_P0_TSSI_ALIM1 + (path << 13),
3722 		    tssi_info->alignment_value[path][band][0]);
3723 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3724 		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
3725 		    path, band, R_P0_TSSI_ALIM3 + (path << 13),
3726 		    tssi_info->alignment_value[path][band][1]);
3727 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3728 		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
3729 		    path, band, R_P0_TSSI_ALIM2 + (path << 13),
3730 		    tssi_info->alignment_value[path][band][2]);
3731 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3732 		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
3733 		    path, band, R_P0_TSSI_ALIM4 + (path << 13),
3734 		    tssi_info->alignment_value[path][band][3]);
3735 
3736 out:
3737 	_tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
3738 	rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
3739 	rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
3740 
3741 	finish_time = ktime_get_ns();
3742 	tssi_info->tssi_alimk_time += finish_time - start_time;
3743 
3744 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3745 		    "[TSSI PA K] %s processing time = %d ns\n", __func__,
3746 		    tssi_info->tssi_alimk_time);
3747 }
3748 
3749 void rtw8852b_dpk_init(struct rtw89_dev *rtwdev)
3750 {
3751 	_set_dpd_backoff(rtwdev, RTW89_PHY_0);
3752 }
3753 
3754 void rtw8852b_rck(struct rtw89_dev *rtwdev)
3755 {
3756 	u8 path;
3757 
3758 	for (path = 0; path < RF_PATH_NUM_8852B; path++)
3759 		_rck(rtwdev, path);
3760 }
3761 
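/*
 * Most of the exported calibration entry points below follow one
 * pattern: notify the BT-coexistence core that an RF calibration starts,
 * quiesce scheduled TX and wait for RX to idle where needed, run the
 * calibration, then resume TX and send the matching STOP notification.
 */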
3762 void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
3763 {
3764 	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
3765 
3766 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
3767 	_dac_cal(rtwdev, false);
3768 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
3769 }
3770 
3771 void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
3772 		  enum rtw89_chanctx_idx chanctx_idx)
3773 {
3774 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
3775 	u32 tx_en;
3776 
3777 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
3778 	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3779 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3780 
3781 	_iqk_init(rtwdev);
3782 	_iqk(rtwdev, phy_idx, false, chanctx_idx);
3783 
3784 	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3785 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
3786 }
3787 
3788 void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
3789 		     enum rtw89_chanctx_idx chanctx_idx)
3790 {
3791 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
3792 	u32 tx_en;
3793 
3794 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
3795 	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3796 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3797 
3798 	_rx_dck(rtwdev, phy_idx);
3799 
3800 	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3801 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
3802 }
3803 
3804 void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
3805 		  enum rtw89_chanctx_idx chanctx_idx)
3806 {
3807 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
3808 	u32 tx_en;
3809 
3810 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
3811 	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
3812 	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
3813 
3814 	rtwdev->dpk.is_dpk_enable = true;
3815 	rtwdev->dpk.is_dpk_reload_en = false;
3816 	_dpk(rtwdev, phy_idx, false, chanctx_idx);
3817 
3818 	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
3819 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
3820 }
3821 
3822 void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
3823 {
3824 	_dpk_track(rtwdev);
3825 }
3826 
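/*
 * Full TSSI bring-up: with TSSI disabled, each path is configured in
 * order (RF settings, system/BB power-control init, DCK, thermal table,
 * DAC gain table, slope calibration, alignment defaults, slope).  When
 * hwtx_en is set, the alignment calibration additionally transmits real
 * PMAC packets, so scheduled TX is stopped and the TMAC paused around
 * it.  Finally TSSI is re-enabled and the efuse DE values programmed.
 */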
3827 void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3828 		   bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
3829 {
3830 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
3831 	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
3832 	u32 tx_en;
3833 	u8 i;
3834 
3835 	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
3836 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
3837 
3838 	_tssi_disable(rtwdev, phy);
3839 
3840 	for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
3841 		_tssi_rf_setting(rtwdev, phy, i, chan);
3842 		_tssi_set_sys(rtwdev, phy, i, chan);
3843 		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
3844 		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
3845 		_tssi_set_dck(rtwdev, phy, i);
3846 		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
3847 		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
3848 		_tssi_slope_cal_org(rtwdev, phy, i, chan);
3849 		_tssi_alignment_default(rtwdev, phy, i, true, chan);
3850 		_tssi_set_tssi_slope(rtwdev, phy, i);
3851 
3852 		rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
3853 		_tmac_tx_pause(rtwdev, phy, true);
3854 		if (hwtx_en)
3855 			_tssi_alimentk(rtwdev, phy, i, chan);
3856 		_tmac_tx_pause(rtwdev, phy, false);
3857 		rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
3858 	}
3859 
3860 	_tssi_enable(rtwdev, phy);
3861 	_tssi_set_efuse_to_de(rtwdev, phy, chan);
3862 
3863 	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
3864 }
3865 
3866 void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3867 			const struct rtw89_chan *chan)
3868 {
3869 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3870 	u8 channel = chan->channel;
3871 	u8 band;
3872 	u32 i;
3873 
3874 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3875 		    "======>%s   phy=%d  channel=%d\n", __func__, phy, channel);
3876 
3877 	if (channel >= 1 && channel <= 14)
3878 		band = TSSI_ALIMK_2G;
3879 	else if (channel >= 36 && channel <= 64)
3880 		band = TSSI_ALIMK_5GL;
3881 	else if (channel >= 100 && channel <= 144)
3882 		band = TSSI_ALIMK_5GM;
3883 	else if (channel >= 149 && channel <= 177)
3884 		band = TSSI_ALIMK_5GH;
3885 	else
3886 		band = TSSI_ALIMK_2G;
3887 
3888 	_tssi_disable(rtwdev, phy);
3889 
3890 	for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
3891 		_tssi_rf_setting(rtwdev, phy, i, chan);
3892 		_tssi_set_sys(rtwdev, phy, i, chan);
3893 		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
3894 
3895 		if (tssi_info->alignment_done[i][band])
3896 			_tssi_alimentk_done(rtwdev, phy, i, chan);
3897 		else
3898 			_tssi_alignment_default(rtwdev, phy, i, true, chan);
3899 	}
3900 
3901 	_tssi_enable(rtwdev, phy);
3902 	_tssi_set_efuse_to_de(rtwdev, phy, chan);
3903 }
3904 
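/*
 * Scan hook: on scan start, bring TSSI up (with HW TX) only if neither
 * path is already in TSSI mode.  On scan end, force the default TXAGC
 * offset (0xc0) on both paths, pulse the offset-enable bits to latch it,
 * and replay any cached TSSI alignment results.
 */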
3905 static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
3906 					enum rtw89_phy_idx phy, bool enable,
3907 					enum rtw89_chanctx_idx chanctx_idx)
3908 {
3909 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
3910 	u8 channel = chan->channel;
3911 
3912 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s   ch=%d\n",
3913 		    __func__, channel);
3914 
3915 	if (enable) {
3916 		if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
3917 			rtw8852b_tssi(rtwdev, phy, true, chanctx_idx);
3918 		return;
3919 	}
3920 
3921 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3922 		    "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
3923 		    __func__,
3924 		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
3925 		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
3926 
3927 	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
3928 	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0);
3929 	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
3930 	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
3931 	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
3932 	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
3933 
3934 	_tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
3935 	_tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);
3936 
3937 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3938 		    "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
3939 		    __func__,
3940 		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
3941 		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
3942 
3943 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3944 		    "======> %s   SCAN_END\n", __func__);
3945 }
3946 
3947 void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
3948 			       enum rtw89_phy_idx phy_idx,
3949 			       enum rtw89_chanctx_idx chanctx_idx)
3950 {
3951 	if (scan_start)
3952 		rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
3953 	else
3954 		rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
3955 }
3956 
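/*
 * RF register 0x18 has two copies per path (RR_CFGCH and RR_CFGCH_V1,
 * selected by the "dav" flag), so _ctrl_bw()/_ctrl_ch() program both
 * copies on both paths.
 */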
3957 static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
3958 			enum rtw89_bandwidth bw, bool dav)
3959 {
3960 	u32 rf_reg18;
3961 	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
3962 
3963 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
3964 
3965 	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
3966 	if (rf_reg18 == INV_RF_DATA) {
3967 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3968 			    "[RFK]Invalid RF_0x18 for Path-%d\n", path);
3969 		return;
3970 	}
3971 	rf_reg18 &= ~RR_CFGCH_BW;
3972 
3973 	switch (bw) {
3974 	case RTW89_CHANNEL_WIDTH_5:
3975 	case RTW89_CHANNEL_WIDTH_10:
3976 	case RTW89_CHANNEL_WIDTH_20:
3977 		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
3978 		break;
3979 	case RTW89_CHANNEL_WIDTH_40:
3980 		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
3981 		break;
3982 	case RTW89_CHANNEL_WIDTH_80:
3983 		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
3984 		break;
3985 	default:
3986 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
3987 	}
3988 
3989 	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
3990 		      RR_CFGCH_BW2) & RFREG_MASK;
3991 	rf_reg18 |= RR_CFGCH_BW2;
3992 	rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
3993 
3994 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
3995 		    bw, path, reg18_addr,
3996 		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
3997 }
3998 
3999 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4000 		     enum rtw89_bandwidth bw)
4001 {
4002 	_bw_setting(rtwdev, RF_PATH_A, bw, true);
4003 	_bw_setting(rtwdev, RF_PATH_B, bw, true);
4004 	_bw_setting(rtwdev, RF_PATH_A, bw, false);
4005 	_bw_setting(rtwdev, RF_PATH_B, bw, false);
4006 }
4007 
4008 static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
4009 {
4010 	u32 bak;
4011 	u32 tmp;
4012 	int ret;
4013 
4014 	bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
4015 	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
4016 	rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);
4017 
4018 	ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
4019 				       false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
4020 	if (ret)
4021 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");
4022 
4023 	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);
4024 
4025 	return !!ret;
4026 }
4027 
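/*
 * _set_s0_arfc18() returns true on LCK poll timeout; _set_ch() only runs
 * the escalating lock check when the poll itself succeeded.  The
 * recovery ladder below retries in three stages: reset the SYN MMD,
 * re-write RF 0x18 with the lock trigger raised, and finally power the
 * synthesizer off and on before re-writing 0x18 once more.
 */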
4028 static void _lck_check(struct rtw89_dev *rtwdev)
4029 {
4030 	u32 tmp;
4031 
4032 	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
4033 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");
4034 
4035 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
4036 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
4037 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
4038 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
4039 	}
4040 
4041 	udelay(10);
4042 
4043 	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
4044 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");
4045 
4046 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
4047 		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
4048 		_set_s0_arfc18(rtwdev, tmp);
4049 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
4050 	}
4051 
4052 	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
4053 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");
4054 
4055 		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
4056 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
4057 		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
4058 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);
4059 
4060 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
4061 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
4062 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
4063 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);
4064 
4065 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
4066 		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
4067 		_set_s0_arfc18(rtwdev, tmp);
4068 		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
4069 
4070 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
4071 			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
4072 			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
4073 	}
4074 }
4075 
4076 static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
4077 {
4078 	bool timeout;
4079 
4080 	timeout = _set_s0_arfc18(rtwdev, val);
4081 	if (!timeout)
4082 		_lck_check(rtwdev);
4083 }
4084 
4085 static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
4086 			u8 central_ch, bool dav)
4087 {
4088 	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
4089 	bool is_2g_ch = central_ch <= 14;
4090 	u32 rf_reg18;
4091 
4092 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
4093 
4094 	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
4095 	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
4096 		      RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
4097 	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
4098 
4099 	if (!is_2g_ch)
4100 		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
4101 			    FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
4102 
4103 	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
4104 		      RR_CFGCH_BW2) & RFREG_MASK;
4105 	rf_reg18 |= RR_CFGCH_BW2;
4106 
4107 	if (path == RF_PATH_A && dav)
4108 		_set_ch(rtwdev, rf_reg18);
4109 	else
4110 		rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
4111 
4112 	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
4113 	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);
4114 
4115 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
4116 		    "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
4117 		    central_ch, path, reg18_addr,
4118 		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
4119 }
4120 
4121 static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
4122 {
4123 	_ch_setting(rtwdev, RF_PATH_A, central_ch, true);
4124 	_ch_setting(rtwdev, RF_PATH_B, central_ch, true);
4125 	_ch_setting(rtwdev, RF_PATH_A, central_ch, false);
4126 	_ch_setting(rtwdev, RF_PATH_B, central_ch, false);
4127 }
4128 
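/*
 * The RX baseband filter bandwidth is programmed through the RF LUT:
 * raise the LUT write-enable, select entry 0x12, write the bandwidth
 * code (0x1b/0x13/0xb for 20/40/80 MHz, 0x3 otherwise), then drop the
 * write-enable again.
 */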
4129 static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
4130 			 enum rtw89_rf_path path)
4131 {
4132 	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
4133 	rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
4134 
4135 	if (bw == RTW89_CHANNEL_WIDTH_20)
4136 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
4137 	else if (bw == RTW89_CHANNEL_WIDTH_40)
4138 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
4139 	else if (bw == RTW89_CHANNEL_WIDTH_80)
4140 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
4141 	else
4142 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
4143 
4144 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path,
4145 		    rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
4146 
4147 	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
4148 }
4149 
4150 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4151 		     enum rtw89_bandwidth bw)
4152 {
4153 	u8 kpath, path;
4154 
4155 	kpath = _kpath(rtwdev, phy);
4156 
4157 	for (path = 0; path < RF_PATH_NUM_8852B; path++) {
4158 		if (!(kpath & BIT(path)))
4159 			continue;
4160 
4161 		_set_rxbb_bw(rtwdev, bw, path);
4162 	}
4163 }
4164 
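/*
 * Channel is programmed first, then bandwidth, then the RX BB filter;
 * presumably the band bits in RF 0x18 need to be settled before the BW
 * field is rewritten.
 */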
4165 static void rtw8852b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
4166 				enum rtw89_phy_idx phy, u8 central_ch,
4167 				enum rtw89_band band, enum rtw89_bandwidth bw)
4168 {
4169 	_ctrl_ch(rtwdev, central_ch);
4170 	_ctrl_bw(rtwdev, phy, bw);
4171 	_rxbb_bw(rtwdev, phy, bw);
4172 }
4173 
4174 void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
4175 			     const struct rtw89_chan *chan,
4176 			     enum rtw89_phy_idx phy_idx)
4177 {
4178 	rtw8852b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
4179 			    chan->band_width);
4180 }
4181