// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/iopoll.h>

#include "main.h"
#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
#include "sec.h"
#include "debug.h"
#include "util.h"
#include "wow.h"
#include "ps.h"
#include "phy.h"
#include "mac.h"

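/* Register tables consumed by rtw_fw_dump_dbg_info() below: each entry
 * names a register, or a field within one, so the raw values logged when
 * an H2C handshake fails are readable in the debug output.
 */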
static const struct rtw_hw_reg_desc fw_h2c_regs[] = {
	{REG_FWIMR, MASKDWORD, "FWIMR"},
	{REG_FWIMR, BIT_FS_H2CCMD_INT_EN, "FWIMR enable"},
	{REG_FWISR, MASKDWORD, "FWISR"},
	{REG_FWISR, BIT_FS_H2CCMD_INT, "FWISR enable"},
	{REG_HMETFR, BIT_INT_BOX_ALL, "BoxBitMap"},
	{REG_HMEBOX0, MASKDWORD, "MSG 0"},
	{REG_HMEBOX0_EX, MASKDWORD, "MSG_EX 0"},
	{REG_HMEBOX1, MASKDWORD, "MSG 1"},
	{REG_HMEBOX1_EX, MASKDWORD, "MSG_EX 1"},
	{REG_HMEBOX2, MASKDWORD, "MSG 2"},
	{REG_HMEBOX2_EX, MASKDWORD, "MSG_EX 2"},
	{REG_HMEBOX3, MASKDWORD, "MSG 3"},
	{REG_HMEBOX3_EX, MASKDWORD, "MSG_EX 3"},
	{REG_FT1IMR, MASKDWORD, "FT1IMR"},
	{REG_FT1IMR, BIT_FS_H2C_CMD_OK_INT_EN, "FT1IMR enable"},
	{REG_FT1ISR, MASKDWORD, "FT1ISR"},
	{REG_FT1ISR, BIT_FS_H2C_CMD_OK_INT, "FT1ISR enable"},
};

static const struct rtw_hw_reg_desc fw_c2h_regs[] = {
	{REG_FWIMR, MASKDWORD, "FWIMR"},
	{REG_FWIMR, BIT_FS_H2CCMD_INT_EN, "CPWM"},
	{REG_FWIMR, BIT_FS_HRCV_INT_EN, "HRECV"},
	{REG_FWISR, MASKDWORD, "FWISR"},
	{REG_FWISR, BIT_FS_H2CCMD_INT, "CPWM"},
	{REG_FWISR, BIT_FS_HRCV_INT, "HRECV"},
	{REG_CPWM, MASKDWORD, "REG_CPWM"},
};

static const struct rtw_hw_reg_desc fw_core_regs[] = {
	{REG_ARFR2_V1, MASKDWORD, "EPC"},
	{REG_ARFRH2_V1, MASKDWORD, "BADADDR"},
	{REG_ARFR3_V1, MASKDWORD, "CAUSE"},
	{REG_ARFR3_V1, BIT_EXC_CODE, "ExcCode"},
	{REG_ARFRH3_V1, MASKDWORD, "Status"},
	{REG_ARFR4, MASKDWORD, "SP"},
	{REG_ARFRH4, MASKDWORD, "RA"},
	{REG_FW_DBG6, MASKDWORD, "DBG 6"},
	{REG_FW_DBG7, MASKDWORD, "DBG 7"},
};

static void _rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev,
				  const struct rtw_hw_reg_desc regs[], u32 size)
{
	const struct rtw_hw_reg_desc *reg;
	u32 val;
	int i;

	for (i = 0; i < size; i++) {
		reg = &regs[i];
		val = rtw_read32_mask(rtwdev, reg->addr, reg->mask);

		rtw_dbg(rtwdev, RTW_DBG_FW, "[%s]addr:0x%x mask:0x%x value:0x%x\n",
			reg->desc, reg->addr, reg->mask, val);
	}
}

void rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev)
{
	int i;

	if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_FW))
		return;

	_rtw_fw_dump_dbg_info(rtwdev, fw_h2c_regs, ARRAY_SIZE(fw_h2c_regs));
	_rtw_fw_dump_dbg_info(rtwdev, fw_c2h_regs, ARRAY_SIZE(fw_c2h_regs));
	for (i = 0; i < RTW_DEBUG_DUMP_TIMES; i++) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "Firmware Coredump %dth\n", i + 1);
		_rtw_fw_dump_dbg_info(rtwdev, fw_core_regs, ARRAY_SIZE(fw_core_regs));
	}
}

static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 sub_cmd_id;

	c2h = get_c2h_from_skb(skb);
	sub_cmd_id = c2h->payload[0];

	switch (sub_cmd_id) {
	case C2H_CCX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
		break;
	case C2H_SCAN_STATUS_RPT:
		rtw_hw_scan_status_report(rtwdev, skb);
		break;
	case C2H_CHAN_SWITCH:
		rtw_hw_scan_chan_switch(rtwdev, skb);
		break;
	default:
		break;
	}
}

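/* bit_rate comes from cfg80211_calculate_bitrate() and is therefore in
 * units of 100 kbps, so the thresholds below correspond to 55, 180, 400
 * and 700 Mbps. Returning 1 effectively disables A-MSDU aggregation
 * (no subframe fits in one byte), and 0 leaves the limit unset.
 */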
static u16 get_max_amsdu_len(u32 bit_rate)
{
	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* lower than 20M 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	/* unlimited */
	return 0;
}

struct rtw_fw_iter_ra_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
};

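/* Station iterator for an RA (rate adaptation) report C2H: skip stations
 * whose mac_id does not match the report, then translate the firmware
 * rate descriptor into a struct rate_info so mac80211 sees the current
 * TX rate, and scale the A-MSDU length limit to it.
 */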
static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_fw_iter_ra_data *ra_data = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 mac_id, rate, sgi, bw;
	u8 mcs, nss;
	u32 bit_rate;

	mac_id = GET_RA_REPORT_MACID(ra_data->payload);
	if (si->mac_id != mac_id)
		return;

	si->ra_report.txrate.flags = 0;

	rate = GET_RA_REPORT_RATE(ra_data->payload);
	sgi = GET_RA_REPORT_SGI(ra_data->payload);
	bw = GET_RA_REPORT_BW(ra_data->payload);

	if (rate < DESC_RATEMCS0) {
		si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
		goto legacy;
	}

	rtw_desc_to_mcsrate(rate, &mcs, &nss);
	if (rate >= DESC_RATEVHT1SS_MCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
	else if (rate >= DESC_RATEMCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;

	if (rate >= DESC_RATEMCS0) {
		si->ra_report.txrate.mcs = mcs;
		si->ra_report.txrate.nss = nss;
	}

	if (sgi)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	if (bw == RTW_CHANNEL_WIDTH_80)
		si->ra_report.txrate.bw = RATE_INFO_BW_80;
	else if (bw == RTW_CHANNEL_WIDTH_40)
		si->ra_report.txrate.bw = RATE_INFO_BW_40;
	else
		si->ra_report.txrate.bw = RATE_INFO_BW_20;

legacy:
	bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);

	si->ra_report.desc_rate = rate;
	si->ra_report.bit_rate = bit_rate;

	sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
}

static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
				    u8 length)
{
	struct rtw_fw_iter_ra_data ra_data;

	if (WARN(length < 7, "invalid ra report c2h length\n"))
		return;

	rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
	ra_data.rtwdev = rtwdev;
	ra_data.payload = payload;
	rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
}

struct rtw_beacon_filter_iter_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
};

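/* Forward one beacon-filter C2H event to a vif: signal-change events
 * become CQM RSSI notifications for mac80211, connection-loss events are
 * reported directly, and the (dis)connected events track beacon loss in
 * the driver, leaving LPS when beacons go missing.
 */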
static void rtw_fw_bcn_filter_notify_vif_iter(void *data,
					      struct ieee80211_vif *vif)
{
	struct rtw_beacon_filter_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	u8 *payload = iter_data->payload;
	u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload);
	u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload);
	s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload);

	switch (type) {
	case BCN_FILTER_NOTIFY_SIGNAL_CHANGE:
		event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
			NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
		ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL);
		break;
	case BCN_FILTER_CONNECTION_LOSS:
		ieee80211_connection_loss(vif);
		break;
	case BCN_FILTER_CONNECTED:
		rtwdev->beacon_loss = false;
		break;
	case BCN_FILTER_NOTIFY_BEACON_LOSS:
		rtwdev->beacon_loss = true;
		rtw_leave_lps(rtwdev);
		break;
	}
}

static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload,
				     u8 length)
{
	struct rtw_beacon_filter_iter_data dev_iter_data;

	dev_iter_data.rtwdev = rtwdev;
	dev_iter_data.payload = payload;
	rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter,
			 &dev_iter_data);
}

static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
			       u8 length)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;

	dm_info->scan_density = payload[0];

	rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n",
		dm_info->scan_density);
}

static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload,
				     u8 length)
{
	struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
	struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload;

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
		"Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n",
		result->density, result->igi, result->l2h_th_init, result->l2h,
		result->h2l, result->option);

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n",
		rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
				edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask),
		rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
				edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask));

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n",
		rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ?
		"Set" : "Unset");
}

void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u32 pkt_offset;
	u8 len;

	pkt_offset = *((u32 *)skb->cb);
	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;

	mutex_lock(&rtwdev->mutex);

	if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
		goto unlock;

	switch (c2h->id) {
	case C2H_CCX_TX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
		break;
	case C2H_BT_INFO:
		rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_BT_HID_INFO:
		rtw_coex_bt_hid_info_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_WLAN_INFO:
		rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_BCN_FILTER_NOTIFY:
		rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_HALMAC:
		rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
		break;
	case C2H_RA_RPT:
		rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
		break;
	default:
		rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
		break;
	}

unlock:
	mutex_unlock(&rtwdev->mutex);
}

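/* Unlike rtw_fw_c2h_cmd_handle() above, which runs in process context
 * under rtwdev->mutex, this entry point is called from the RX path in
 * atomic context: the few time-critical C2Hs are completed inline, and
 * everything else is queued to c2h_work for deferred handling.
 */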
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
			       struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 len;

	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;
	*((u32 *)skb->cb) = pkt_offset;

	rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
		c2h->id, c2h->seq, len);

	switch (c2h->id) {
	case C2H_BT_MP_INFO:
		rtw_coex_info_response(rtwdev, skb);
		break;
	case C2H_WLAN_RFON:
		complete(&rtwdev->lps_leave_check);
		dev_kfree_skb_any(skb);
		break;
	case C2H_SCAN_RESULT:
		complete(&rtwdev->fw_scan_density);
		rtw_fw_scan_result(rtwdev, c2h->payload, len);
		dev_kfree_skb_any(skb);
		break;
	case C2H_ADAPTIVITY:
		rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
		dev_kfree_skb_any(skb);
		break;
	default:
		/* pass offset for further operation */
		*((u32 *)skb->cb) = pkt_offset;
		skb_queue_tail(&rtwdev->c2h_queue, skb);
		ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
		break;
	}
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);

void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev)
{
	if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER)
		rtw_fw_recovery(rtwdev);
	else
		rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n");
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);

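/* H2C commands go out through four one-shot mailboxes (REG_HMEBOX0-3)
 * that are used round-robin. A set bit in REG_HMETFR means the firmware
 * has not consumed that box yet, so the sender polls for it to clear
 * first. The extension word is written before the main box word, since
 * the write to the box register is what hands the command to firmware.
 */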
static void rtw_fw_send_h2c_command_register(struct rtw_dev *rtwdev,
					     struct rtw_h2c_register *h2c)
{
	u32 box_reg, box_ex_reg;
	u8 box_state, box;
	int ret;

	rtw_dbg(rtwdev, RTW_DBG_FW, "send H2C content %08x %08x\n", h2c->w0,
		h2c->w1);

	lockdep_assert_held(&rtwdev->mutex);

	box = rtwdev->h2c.last_box_num;
	switch (box) {
	case 0:
		box_reg = REG_HMEBOX0;
		box_ex_reg = REG_HMEBOX0_EX;
		break;
	case 1:
		box_reg = REG_HMEBOX1;
		box_ex_reg = REG_HMEBOX1_EX;
		break;
	case 2:
		box_reg = REG_HMEBOX2;
		box_ex_reg = REG_HMEBOX2_EX;
		break;
	case 3:
		box_reg = REG_HMEBOX3;
		box_ex_reg = REG_HMEBOX3_EX;
		break;
	default:
		WARN(1, "invalid h2c mail box number\n");
		return;
	}

	ret = read_poll_timeout_atomic(rtw_read8, box_state,
				       !((box_state >> box) & 0x1), 100, 3000,
				       false, rtwdev, REG_HMETFR);

	if (ret) {
		rtw_err(rtwdev, "failed to send h2c command\n");
		rtw_fw_dump_dbg_info(rtwdev);
		return;
	}

	rtw_write32(rtwdev, box_ex_reg, h2c->w1);
	rtw_write32(rtwdev, box_reg, h2c->w0);

	if (++rtwdev->h2c.last_box_num >= 4)
		rtwdev->h2c.last_box_num = 0;
}

static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
				    u8 *h2c)
{
	struct rtw_h2c_cmd *h2c_cmd = (struct rtw_h2c_cmd *)h2c;
	u8 box;
	u8 box_state;
	u32 box_reg, box_ex_reg;
	int ret;

	rtw_dbg(rtwdev, RTW_DBG_FW,
		"send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
		h2c[3], h2c[2], h2c[1], h2c[0],
		h2c[7], h2c[6], h2c[5], h2c[4]);

	lockdep_assert_held(&rtwdev->mutex);

	box = rtwdev->h2c.last_box_num;
	switch (box) {
	case 0:
		box_reg = REG_HMEBOX0;
		box_ex_reg = REG_HMEBOX0_EX;
		break;
	case 1:
		box_reg = REG_HMEBOX1;
		box_ex_reg = REG_HMEBOX1_EX;
		break;
	case 2:
		box_reg = REG_HMEBOX2;
		box_ex_reg = REG_HMEBOX2_EX;
		break;
	case 3:
		box_reg = REG_HMEBOX3;
		box_ex_reg = REG_HMEBOX3_EX;
		break;
	default:
		WARN(1, "invalid h2c mail box number\n");
		return;
	}

	ret = read_poll_timeout_atomic(rtw_read8, box_state,
				       !((box_state >> box) & 0x1), 100, 3000,
				       false, rtwdev, REG_HMETFR);

	if (ret) {
		rtw_err(rtwdev, "failed to send h2c command\n");
		return;
	}

	rtw_write32(rtwdev, box_ex_reg, le32_to_cpu(h2c_cmd->msg_ext));
	rtw_write32(rtwdev, box_reg, le32_to_cpu(h2c_cmd->msg));

	if (++rtwdev->h2c.last_box_num >= 4)
		rtwdev->h2c.last_box_num = 0;
}

void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c)
{
	rtw_fw_send_h2c_command(rtwdev, h2c);
}

static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
{
	int ret;

	lockdep_assert_held(&rtwdev->mutex);

	FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
	ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
	if (ret)
		rtw_err(rtwdev, "failed to send h2c packet\n");
	rtwdev->h2c.seq++;
}

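/* Besides the 8-byte mailbox commands above, larger "offload" H2Cs are
 * sent as full packets through the HCI TX path, each tagged with a
 * rolling sequence number.
 */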
void
rtw_fw_send_general_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 4;

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
					fifo->rsvd_fw_txbuf_addr -
					fifo->rsvd_boundary);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void
rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 8;
	u8 fw_rf_type = 0;

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	if (hal->rf_type == RF_1T1R)
		fw_rf_type = FW_RF_1T1R;
	else if (hal->rf_type == RF_2T2R)
		fw_rf_type = FW_RF_2T2R;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
	PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
	PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
	PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
	PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 1;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	IQK_SET_CLEAR(h2c_pkt, para->clear);
	IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
EXPORT_SYMBOL(rtw_fw_do_iqk);

void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION);

	RFK_SET_INFORM_START(h2c_pkt, start);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
EXPORT_SYMBOL(rtw_fw_inform_rfk_status);

void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);

	SET_QUERY_BT_INFO(h2c_pkt, true);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_default_port(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
{
	struct rtw_h2c_register h2c = {};

	if (rtwvif->net_type != RTW_NET_MGD_LINKED)
		return;

	/* Leave LPS before default port H2C so FW timer is correct */
	rtw_leave_lps(rtwdev);

	h2c.w0 = u32_encode_bits(H2C_CMD_DEFAULT_PORT, RTW_H2C_W0_CMDID) |
		 u32_encode_bits(rtwvif->port, RTW_H2C_DEFAULT_PORT_W0_PORTID) |
		 u32_encode_bits(rtwvif->mac_id, RTW_H2C_DEFAULT_PORT_W0_MACID);

	rtw_fw_send_h2c_command_register(rtwdev, &h2c);
}

void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);

	SET_WL_CH_INFO_LINK(h2c_pkt, link);
	SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
	SET_WL_CH_INFO_BW(h2c_pkt, bw);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
			     struct rtw_coex_info_req *req)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);

	SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
	SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
	SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
	SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
	SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 index = 0 - bt_pwr_dec_lvl;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);

	SET_BT_TX_POWER_INDEX(h2c_pkt, index);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);

	SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
			   u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);

	SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
	SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
	SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
	SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
	SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_HID_INFO);

	SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, sub_id);
	SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, data);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);

	SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);

	SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
	SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
	SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
	SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
	SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 rssi = ewma_rssi_read(&si->avg_rssi);
	bool stbc_en = si->stbc_en ? true : false;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);

	SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
	SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
			 bool reset_ra_mask)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	bool disable_pt = true;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);

	SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
	SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
	SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
	SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
	SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
	SET_RA_INFO_NO_UPDATE(h2c_pkt, !reset_ra_mask);
	SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
	SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
	SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
	SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
	SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
	SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);

	si->init_ra_lv = 0;

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
	MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
	MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
{
	struct rtw_traffic_stats *stats = &rtwdev->stats;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO);
	SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput);
	SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput);
	SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate);
	SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate);
	SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
				 struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
	static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
	struct rtw_sta_info *si =
		sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
	s32 thold = RTW_DEFAULT_CQM_THOLD;
	u32 hyst = RTW_DEFAULT_CQM_HYST;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
		return;

	if (bss_conf->cqm_rssi_thold)
		thold = bss_conf->cqm_rssi_thold;
	if (bss_conf->cqm_rssi_hyst)
		hyst = bss_conf->cqm_rssi_hyst;

	if (!connect) {
		SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
		SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
		rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

		return;
	}

	if (!si)
		return;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
	ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

	memset(h2c_pkt, 0, sizeof(h2c_pkt));
	thold = clamp_t(s32, thold + rssi_offset, rssi_min, rssi_max);
	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
	SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
	SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
					       BCN_FILTER_OFFLOAD_MODE_DEFAULT);
	SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, thold);
	SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
	SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
	SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, hyst);
	SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);

	SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
	SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
	SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
	SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
	SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
	SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

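/* A rough sketch of how the WoWLAN setters below are typically combined
 * when suspending (the order and the exact set are simplified here; the
 * real sequence lives in wow.c):
 *
 *	rtw_fw_set_keep_alive_cmd(rtwdev, true);
 *	rtw_fw_set_disconnect_decision_cmd(rtwdev, true);
 *	rtw_fw_set_wowlan_ctrl_cmd(rtwdev, true);
 *	rtw_fw_set_remote_wake_ctrl_cmd(rtwdev, true);
 */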
void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_keep_alive_para mode = {
		.adopt = true,
		.pkt_type = KEEP_ALIVE_NULL_PKT,
		.period = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
	SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
	SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
	SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
	SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_disconnect_para mode = {
		.adopt = true,
		.period = 30,
		.retry_count = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);

	if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
		SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
		SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
		SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);

	SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
	if (rtw_wow_mgd_linked(rtwdev)) {
		if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
			SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
			SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
			SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
		if (rtw_wow->pattern_cnt)
			SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
				     u8 pairwise_key_enc,
				     u8 group_key_enc)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);

	SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
	SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);

	SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);

	if (rtw_wow_no_link(rtwdev))
		SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

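/* Reserved-page lookup: a returned location of 0 means the packet type
 * was never built into the reserved page, and callers treat it as "not
 * found" (see the error path in rtw_nlo_info_get()).
 */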
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
				     enum rtw_rsvd_packet_type type)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (type == rsvd_pkt->type)
			location = rsvd_pkt->page;
	}

	return location;
}

void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_nlo;

	loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);

	SET_NLO_FUN_EN(h2c_pkt, enable);
	if (enable) {
		if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
			SET_NLO_PS_32K(h2c_pkt, enable);
		SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
		SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_recover_bt_device(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RECOVER_BT_DEV);
	SET_RECOVER_BT_DEV_EN(h2c_pkt, 1);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_pg, loc_dpk;

	loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
	loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);

	LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
	LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
	LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
	LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
					       struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    cfg80211_ssid_eq(rsvd_pkt->ssid, ssid))
			location = rsvd_pkt->page;
	}

	return location;
}

static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
					    struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u16 size = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    cfg80211_ssid_eq(rsvd_pkt->ssid, ssid))
			size = rsvd_pkt->probe_req_size;
	}

	return size;
}

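/* Tell the firmware where its offloaded templates live inside the
 * reserved page: bytes 1-4 of this H2C carry the page indices of the
 * probe response, PS-Poll, NULL and QoS-NULL frames respectively.
 */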
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 location = 0;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
	*(h2c_pkt + 1) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
	*(h2c_pkt + 2) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
	*(h2c_pkt + 3) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
	*(h2c_pkt + 4) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct rtw_nlo_info_hdr *nlo_hdr;
	struct cfg80211_ssid *ssid;
	struct sk_buff *skb;
	u8 *pos, loc;
	u32 size;
	int i;

	if (!pno_req->inited || !pno_req->match_set_cnt)
		return NULL;

	size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
		      IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));

	nlo_hdr->nlo_count = pno_req->match_set_cnt;
	nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;

	/* pattern check for firmware */
	memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);

	for (i = 0; i < pno_req->match_set_cnt; i++)
		nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		ssid = &pno_req->match_sets[i].ssid;
		loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
		if (!loc) {
			rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
			kfree_skb(skb);
			return NULL;
		}
		nlo_hdr->location[i] = loc;
	}

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
		memcpy(pos, pno_req->match_sets[i].ssid.ssid,
		       pno_req->match_sets[i].ssid.ssid_len);
	}

	return skb;
}

static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct ieee80211_channel *channels = pno_req->channels;
	struct sk_buff *skb;
	int count = pno_req->channel_cnt;
	u8 *pos;
	int i = 0;

	skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	for (i = 0; i < count; i++) {
		pos = skb_put_zero(skb, 4);

		CHSW_INFO_SET_CH(pos, channels[i].hw_value);

		if (channels[i].flags & IEEE80211_CHAN_RADAR)
			CHSW_INFO_SET_ACTION_ID(pos, 0);
		else
			CHSW_INFO_SET_ACTION_ID(pos, 1);
		CHSW_INFO_SET_TIMEOUT(pos, 1);
		CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
		CHSW_INFO_SET_BW(pos, 0);
	}

	return skb;
}

static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
	struct rtw_lps_pg_dpk_hdr *dpk_hdr;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
	dpk_hdr->dpk_ch = dpk_info->dpk_ch;
	dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
	memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
	memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
	memcpy(dpk_hdr->coef, dpk_info->coef, 160);

	return skb;
}

static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	struct rtw_lps_pg_info_hdr *pg_info_hdr;
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
	pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
	pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
	pg_info_hdr->sec_cam_count =
		rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
	pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;

	conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
	conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;

	return skb;
}

static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
					     struct rtw_rsvd_page *rsvd_pkt)
{
	struct ieee80211_vif *vif;
	struct rtw_vif *rtwvif;
	struct sk_buff *skb_new;
	struct cfg80211_ssid *ssid;
	u16 tim_offset = 0;

	if (rsvd_pkt->type == RSVD_DUMMY) {
		skb_new = alloc_skb(1, GFP_KERNEL);
		if (!skb_new)
			return NULL;

		skb_put(skb_new, 1);
		return skb_new;
	}

	rtwvif = rsvd_pkt->rtwvif;
	if (!rtwvif)
		return NULL;

	vif = rtwvif_to_vif(rtwvif);

	switch (rsvd_pkt->type) {
	case RSVD_BEACON:
		skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL, 0);
		rsvd_pkt->tim_offset = tim_offset;
		break;
	case RSVD_PS_POLL:
		skb_new = ieee80211_pspoll_get(hw, vif);
		break;
	case RSVD_PROBE_RESP:
		skb_new = ieee80211_proberesp_get(hw, vif);
		break;
	case RSVD_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, -1, false);
		break;
	case RSVD_QOS_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, -1, true);
		break;
	case RSVD_LPS_PG_DPK:
		skb_new = rtw_lps_pg_dpk_get(hw);
		break;
	case RSVD_LPS_PG_INFO:
		skb_new = rtw_lps_pg_info_get(hw);
		break;
	case RSVD_PROBE_REQ:
		ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
		if (ssid)
			skb_new = ieee80211_probereq_get(hw, vif->addr,
							 ssid->ssid,
							 ssid->ssid_len, 0);
		else
			skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
		if (skb_new)
			rsvd_pkt->probe_req_size = (u16)skb_new->len;
		break;
	case RSVD_NLO_INFO:
		skb_new = rtw_nlo_info_get(hw);
		break;
	case RSVD_CH_INFO:
		skb_new = rtw_cs_channel_info_get(hw);
		break;
	default:
		return NULL;
	}

	if (!skb_new)
		return NULL;

	return skb_new;
}

static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				    enum rtw_rsvd_packet_type type)
{
	struct rtw_tx_pkt_info pkt_info = {0};
	const struct rtw_chip_info *chip = rtwdev->chip;
	u8 *pkt_desc;

	rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
	rtw_tx_fill_tx_desc(&pkt_info, skb);
}

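/* Example: with a 128-byte page, a 300-byte packet occupies
 * DIV_ROUND_UP(300, 128) = 3 pages.
 */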
static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
{
	return DIV_ROUND_UP(len, page_size);
}

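/* Copy one packet into the shared buffer at its page-aligned slot. The
 * first packet (page 0) sits at the start of the buffer and is one
 * tx_desc short of its on-air size, because the TX path prepends that
 * descriptor when the page is downloaded; that is why every following
 * packet starts at page_margin + page_size * (page - 1) instead of
 * page_size * page.
 */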
static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
				      u8 page_margin, u32 page, u8 *buf,
				      struct rtw_rsvd_page *rsvd_pkt)
{
	struct sk_buff *skb = rsvd_pkt->skb;

	if (page >= 1)
		memcpy(buf + page_margin + page_size * (page - 1),
		       skb->data, skb->len);
	else
		memcpy(buf, skb->data, skb->len);
}

static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
						 enum rtw_rsvd_packet_type type,
						 bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt = NULL;

	rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);

	if (!rsvd_pkt)
		return NULL;

	INIT_LIST_HEAD(&rsvd_pkt->vif_list);
	INIT_LIST_HEAD(&rsvd_pkt->build_list);
	rsvd_pkt->type = type;
	rsvd_pkt->add_txdesc = txdesc;

	return rsvd_pkt;
}

static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
				 struct rtw_vif *rtwvif,
				 struct rtw_rsvd_page *rsvd_pkt)
{
	lockdep_assert_held(&rtwdev->mutex);

	list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list);
}

static void rtw_add_rsvd_page(struct rtw_dev *rtwdev,
			      struct rtw_vif *rtwvif,
			      enum rtw_rsvd_packet_type type,
			      bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type);
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
					struct rtw_vif *rtwvif,
					struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc probe req rsvd page\n");
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rsvd_pkt->ssid = ssid;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
			  struct rtw_vif *rtwvif)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	lockdep_assert_held(&rtwdev->mutex);

	/* remove all of the rsvd pages for vif */
	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list,
				 vif_list) {
		list_del(&rsvd_pkt->vif_list);
		if (!list_empty(&rsvd_pkt->build_list))
			list_del(&rsvd_pkt->build_list);
		kfree(rsvd_pkt);
	}
}

void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_ADHOC &&
	    vif->type != NL80211_IFTYPE_MESH_POINT) {
		rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false);
}

void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
	struct cfg80211_ssid *ssid;
	int i;

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n",
			 vif->type);
		return;
	}

	for (i = 0; i < rtw_pno_req->match_set_cnt; i++) {
		ssid = &rtw_pno_req->match_sets[i].ssid;
		rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid);
	}

	rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true);
}

void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true);
}

int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				u8 *buf, u32 size)
{
	u8 bckp[2];
	u8 val;
	u16 rsvd_pg_head;
	u32 bcn_valid_addr;
	u32 bcn_valid_mask;
	int ret;

	lockdep_assert_held(&rtwdev->mutex);

	if (!size)
		return -EINVAL;

	if (rtw_chip_wcpu_11n(rtwdev)) {
		rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
	} else {
		pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
		pg_addr |= BIT_BCN_VALID_V1;
		rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr);
	}

	val = rtw_read8(rtwdev, REG_CR + 1);
	bckp[0] = val;
	val |= BIT_ENSWBCN >> 8;
	rtw_write8(rtwdev, REG_CR + 1, val);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
		val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
		bckp[1] = val;
		val &= ~(BIT_EN_BCNQ_DL >> 16);
		rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
	}

	ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to write data to rsvd page\n");
		goto restore;
	}

	if (rtw_chip_wcpu_11n(rtwdev)) {
		bcn_valid_addr = REG_DWBCN0_CTRL;
		bcn_valid_mask = BIT_BCN_VALID;
	} else {
		bcn_valid_addr = REG_FIFOPAGE_CTRL_2;
		bcn_valid_mask = BIT_BCN_VALID_V1;
	}

	if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) {
		rtw_err(rtwdev, "error beacon valid\n");
		ret = -EBUSY;
	}

restore:
	rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
		    rsvd_pg_head | BIT_BCN_VALID_V1);
	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
		rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
	rtw_write8(rtwdev, REG_CR + 1, bckp[0]);

	return ret;
}

static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	u32 pg_size;
	u32 pg_num = 0;
	u16 pg_addr = 0;

	pg_size = rtwdev->chip->page_size;
	pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
	if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
		return -ENOMEM;

	pg_addr = rtwdev->fifo.rsvd_drv_addr;

	return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}

static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
				 build_list) {
		list_del_init(&rsvd_pkt->build_list);

		/* Don't free except for the dummy rsvd page,
		 * others will be freed when removing vif
		 */
		if (rsvd_pkt->type == RSVD_DUMMY)
			kfree(rsvd_pkt);
	}
}

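/* Gather each vif's reserved packets into the device-global build list.
 * The beacon is prepended so that it always lands on page 0, which the
 * firmware treats as the beacon content by default.
 */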
rtw_build_rsvd_page_iter(void * data,u8 * mac,struct ieee80211_vif * vif)1540  static void rtw_build_rsvd_page_iter(void *data, u8 *mac,
1541  				     struct ieee80211_vif *vif)
1542  {
1543  	struct rtw_dev *rtwdev = data;
1544  	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
1545  	struct rtw_rsvd_page *rsvd_pkt;
1546  
1547  	/* AP not yet started, don't gather its rsvd pages */
1548  	if (vif->type == NL80211_IFTYPE_AP && !rtwdev->ap_active)
1549  		return;
1550  
1551  	list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) {
1552  		if (rsvd_pkt->type == RSVD_BEACON)
1553  			list_add(&rsvd_pkt->build_list,
1554  				 &rtwdev->rsvd_page_list);
1555  		else
1556  			list_add_tail(&rsvd_pkt->build_list,
1557  				      &rtwdev->rsvd_page_list);
1558  	}
1559  }
1560  
__rtw_build_rsvd_page_from_vifs(struct rtw_dev * rtwdev)1561  static int  __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
1562  {
1563  	struct rtw_rsvd_page *rsvd_pkt;
1564  
1565  	__rtw_build_rsvd_page_reset(rtwdev);
1566  
1567  	/* gather rsvd page from vifs */
1568  	rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev);
1569  
1570  	rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
1571  					    struct rtw_rsvd_page, build_list);
1572  	if (!rsvd_pkt) {
1573  		WARN(1, "Should not have an empty reserved page\n");
1574  		return -EINVAL;
1575  	}
1576  
1577  	/* the first rsvd should be beacon, otherwise add a dummy one */
1578  	if (rsvd_pkt->type != RSVD_BEACON) {
1579  		struct rtw_rsvd_page *dummy_pkt;
1580  
1581  		dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false);
1582  		if (!dummy_pkt) {
1583  			rtw_err(rtwdev, "failed to alloc dummy rsvd page\n");
1584  			return -ENOMEM;
1585  		}
1586  
1587  		list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list);
1588  	}
1589  
1590  	return 0;
1591  }
1592  
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *iter;
	struct rtw_rsvd_page *rsvd_pkt;
	u32 page = 0;
	u8 total_page = 0;
	u8 page_size, page_margin, tx_desc_sz;
	u8 *buf;
	int ret;

	page_size = chip->page_size;
	tx_desc_sz = chip->tx_pkt_desc_sz;
	page_margin = page_size - tx_desc_sz;

	ret = __rtw_build_rsvd_page_from_vifs(rtwdev);
	if (ret) {
		rtw_err(rtwdev,
			"failed to build rsvd page from vifs, ret %d\n", ret);
		return NULL;
	}

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
		if (!iter) {
			rtw_err(rtwdev, "failed to build rsvd packet\n");
			goto release_skb;
		}

		/* Fill the tx_desc for the rsvd pkt that requires one;
		 * this also grows iter->len by tx_desc_sz.
		 */
		if (rsvd_pkt->add_txdesc)
			rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type);

		rsvd_pkt->skb = iter;
		rsvd_pkt->page = total_page;

		/* The reserved page is downloaded via the TX path, which
		 * prepends a tx_desc to describe the length of the buffer.
		 * If the page count did not include the size of that
		 * tx_desc for the first rsvd_pkt (usually a beacon; by
		 * default the firmware treats the first page as the beacon
		 * content), we could end up with a buffer smaller than the
		 * actual size of the whole rsvd_page.
		 */
		if (total_page == 0) {
			if (rsvd_pkt->type != RSVD_BEACON &&
			    rsvd_pkt->type != RSVD_DUMMY) {
				rtw_err(rtwdev, "first page should be a beacon\n");
				goto release_skb;
			}
			total_page += rtw_len_to_page(iter->len + tx_desc_sz,
						      page_size);
		} else {
			total_page += rtw_len_to_page(iter->len, page_size);
		}
	}

	if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
		rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
		goto release_skb;
	}

	*size = (total_page - 1) * page_size + page_margin;
	buf = kzalloc(*size, GFP_KERNEL);
	if (!buf)
		goto release_skb;

	/* Copy the content of each rsvd_pkt into buf, aligned to page
	 * boundaries.
	 *
	 * Note that the first rsvd_pkt is a beacon no matter what
	 * vif->type is, and it does not need a tx_desc here because the
	 * TX path will generate one for it.
	 */
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
					  page, buf, rsvd_pkt);
		if (page == 0)
			page += rtw_len_to_page(rsvd_pkt->skb->len +
						tx_desc_sz, page_size);
		else
			page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);

		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return buf;

release_skb:
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return NULL;
}

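/* Download only the first reserved page (the beacon, or the dummy page
 * standing in for it) so that the TX path regenerates a tx_desc describing
 * just the beacon rather than the whole rsvd page buffer.
 */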
static int rtw_download_beacon(struct rtw_dev *rtwdev)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_rsvd_page *rsvd_pkt;
	struct sk_buff *skb;
	int ret = 0;

	rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
					    struct rtw_rsvd_page, build_list);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to get rsvd page from build list\n");
		return -ENOENT;
	}

	if (rsvd_pkt->type != RSVD_BEACON &&
	    rsvd_pkt->type != RSVD_DUMMY) {
		rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n",
			rsvd_pkt->type);
		return -EINVAL;
	}

	skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
	if (!skb) {
		rtw_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
	if (ret)
		rtw_err(rtwdev, "failed to download drv rsvd page\n");

	dev_kfree_skb(skb);

	return ret;
}

int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev)
{
	u8 *buf;
	u32 size;
	int ret;

	buf = rtw_build_rsvd_page(rtwdev, &size);
	if (!buf) {
		rtw_err(rtwdev, "failed to build rsvd page pkt\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to download drv rsvd page\n");
		goto free;
	}

	/* Finally, download *only* the beacon again, because the previous
	 * tx_desc described the total rsvd page buffer. Downloading the
	 * beacon again replaces that tx_desc header, so we get a correct
	 * tx_desc for the beacon in the rsvd page.
	 */
	ret = rtw_download_beacon(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to download beacon\n");
		goto free;
	}

free:
	kfree(buf);

	return ret;
}

void rtw_fw_update_beacon_work(struct work_struct *work)
{
	struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
					      update_beacon_work);

	mutex_lock(&rtwdev->mutex);
	rtw_fw_download_rsvd_page(rtwdev);
	rtw_send_rsvd_page_h2c(rtwdev);
	mutex_unlock(&rtwdev->mutex);
}

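/* Read FIFO contents through the packet-buffer debug window: writing a page
 * number to REG_PKTBUF_DBG_CTRL maps that FIFO page at FIFO_DUMP_ADDR, and
 * the page is then read out 4 bytes at a time. @residue is the byte offset
 * into the first page. RX clock gating is disabled for the duration of the
 * read, presumably so the buffer stays accessible (a best-effort reading
 * of the code, not a documented requirement).
 */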
static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
				  u32 *buf, u32 residue, u16 start_pg)
{
	u32 i;
	u16 idx = 0;
	u16 ctl;

	ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
	/* disable rx clock gate */
	rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);

	do {
		rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);

		for (i = FIFO_DUMP_ADDR + residue;
		     i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
			buf[idx++] = rtw_read32(rtwdev, i);
			size -= 4;
			if (size == 0)
				goto out;
		}

		residue = 0;
		start_pg++;
	} while (size);

out:
	rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
	/* restore rx clock gate */
	rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
}

static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
			     u32 offset, u32 size, u32 *buf)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 start_pg, residue;

	if (sel >= RTW_FW_FIFO_MAX) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n");
		return;
	}
	if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE)
		offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT;
	residue = offset & (FIFO_PAGE_SIZE - 1);
	start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel];

	rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg);
}

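/* Bounds check for a FIFO dump request. For the TX/RX FIFO selectors the
 * per-chip fw_fifo_addr[] entry appears to double as the upper bound of
 * the dump window, so [start_addr, start_addr + size) must not extend past
 * it; all other selectors are accepted here.
 */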
static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
				   enum rtw_fw_fifo_sel sel,
				   u32 start_addr, u32 size)
{
	switch (sel) {
	case RTW_FW_FIFO_SEL_TX:
	case RTW_FW_FIFO_SEL_RX:
		if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel])
			return false;
		fallthrough;
	default:
		return true;
	}
}

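/* Dump @size bytes (4-byte aligned) starting at @addr of the selected FIFO
 * into @buffer. Illustrative usage (hypothetical caller, not taken from
 * this file):
 *
 *	u32 buf[16];
 *
 *	if (!rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RSVD_PAGE, 0,
 *			      sizeof(buf), buf))
 *		print_hex_dump_bytes("rsvd: ", DUMP_PREFIX_OFFSET,
 *				     buf, sizeof(buf));
 */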
int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
		     u32 *buffer)
{
	if (!rtwdev->chip->fw_fifo_addr[0]) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "chip does not support fw fifo dump\n");
		return -ENOTSUPP;
	}

	if (size == 0 || !buffer)
		return -EINVAL;

	if (size & 0x3) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "size is not 4-byte aligned\n");
		return -EINVAL;
	}

	if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n");
		return -EINVAL;
	}

	rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer);

	return 0;
}

static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
				u8 location)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
	UPDATE_PKT_SET_LOCATION(h2c_pkt, location);

	/* include txdesc size */
	size += chip->tx_pkt_desc_sz;
	UPDATE_PKT_SET_SIZE(h2c_pkt, size);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
				 struct cfg80211_ssid *ssid)
{
	u8 loc;
	u16 size;

	loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
	if (!loc) {
		rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
		return;
	}

	size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
	if (!size) {
		rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
		return;
	}

	__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
}

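/* Program the firmware's periodic channel-switch plan, used by the wowlan
 * net-detect (PNO) path: the channel list itself was placed in the
 * RSVD_CH_INFO reserved page beforehand, and each channel entry is 4 bytes
 * (hence channel_cnt * 4 for the info size).
 */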
void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
	u8 loc_ch_info;
	const struct rtw_ch_switch_option cs_option = {
		.dest_ch_en = 1,
		.dest_ch = 1,
		.periodic_option = 2,
		.normal_period = 5,
		.normal_period_sel = 0,
		.normal_cycle = 10,
		.slow_period = 1,
		.slow_period_sel = 1,
	};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	CH_SWITCH_SET_START(h2c_pkt, enable);
	CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
	CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
	CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
	CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
	CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
	CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
	CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
	CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);

	CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
	CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);

	loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
	CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_edcca_enabled) {
		dm_info->edcca_mode = RTW_EDCCA_NORMAL;
		rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
			"EDCCA disabled by debugfs\n");
	}

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
	SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
	SET_ADAPTIVITY_OPTION(h2c_pkt, 1);
	SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
	SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
	SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
	SET_SCAN_START(h2c_pkt, start);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

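/* For each band the chip supports, clone the base probe request template
 * and append the band-specific IEs plus the common IEs, queueing the result
 * on @list. @bands is incremented once per generated template so the caller
 * can size the download buffer.
 */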
static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
				   struct sk_buff_head *list, u8 *bands,
				   struct rtw_vif *rtwvif)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct sk_buff *new;
	u8 idx;

	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
		if (!(BIT(idx) & chip->band))
			continue;
		new = skb_copy(skb, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		skb_put_data(new, ies->ies[idx], ies->len[idx]);
		skb_put_data(new, ies->common_ies, ies->common_ie_len);
		skb_queue_tail(list, new);
		(*bands)++;
	}

	return 0;
}

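/* Write the probe request templates into the H2C info region of the
 * reserved pages. Page 0 of the region is left as a dummy for the tx
 * descriptor, so templates start at page_offset = 1, each occupying
 * page_cnt pages; the location reported to the firmware via
 * __rtw_fw_update_pkt() is relative to fifo.rsvd_boundary.
 */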
static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
					 struct sk_buff_head *probe_req_list)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb, *tmp;
	u8 page_offset = 1, *buf, page_size = chip->page_size;
	u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
	u16 buf_offset = page_size * page_offset;
	u8 tx_desc_sz = chip->tx_pkt_desc_sz;
	u8 page_cnt, pages;
	unsigned int pkt_len;
	int ret;

	if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
		page_cnt = RTW_OLD_PROBE_PG_CNT;
	else
		page_cnt = RTW_PROBE_PG_CNT;

	pages = page_offset + num_probes * page_cnt;

	buf = kzalloc(page_size * pages, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf_offset -= tx_desc_sz;
	skb_queue_walk_safe(probe_req_list, skb, tmp) {
		skb_unlink(skb, probe_req_list);
		rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
		if (skb->len > page_size * page_cnt) {
			ret = -EINVAL;
			goto out;
		}

		memcpy(buf + buf_offset, skb->data, skb->len);
		pkt_len = skb->len - tx_desc_sz;
		loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
		__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);

		buf_offset += page_cnt * page_size;
		page_offset += page_cnt;
		kfree_skb(skb);
	}

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, buf_offset);
	if (ret) {
		rtw_err(rtwdev, "Download probe request to firmware failed\n");
		goto out;
	}

	rtwdev->scan_info.probe_pg_size = page_offset;
out:
	kfree(buf);
	skb_queue_walk_safe(probe_req_list, skb, tmp)
		kfree_skb(skb);

	return ret;
}

static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
					struct rtw_vif *rtwvif)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct sk_buff_head list;
	struct sk_buff *skb, *tmp;
	u8 num = req->n_ssids, i, bands = 0;
	int ret;

	skb_queue_head_init(&list);
	for (i = 0; i < num; i++) {
		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
					     req->ssids[i].ssid,
					     req->ssids[i].ssid_len,
					     req->ie_len);
		if (!skb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
					      rtwvif);
		if (ret)
			goto out;

		kfree_skb(skb);
	}

	return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);

out:
	skb_queue_walk_safe(&list, skb, tmp)
		kfree_skb(skb);

	return ret;
}

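/* Serialize one channel entry into the channel-list buffer. A plain entry
 * is RTW_CH_INFO_SIZE bytes; channels flagged for radar/no-IR handling get
 * an extra DFS info block of RTW_EX_CH_INFO_SIZE bytes appended.
 */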
static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
			     struct rtw_chan_list *list, u8 *buf)
{
	u8 *chan = &buf[list->size];
	u8 info_size = RTW_CH_INFO_SIZE;

	if (list->size > list->buf_size)
		return -ENOMEM;

	CH_INFO_SET_CH(chan, info->channel);
	CH_INFO_SET_PRI_CH_IDX(chan, info->pri_ch_idx);
	CH_INFO_SET_BW(chan, info->bw);
	CH_INFO_SET_TIMEOUT(chan, info->timeout);
	CH_INFO_SET_ACTION_ID(chan, info->action_id);
	CH_INFO_SET_EXTRA_INFO(chan, info->extra_info);
	if (info->extra_info) {
		EXTRA_CH_INFO_SET_ID(chan, RTW_SCAN_EXTRA_ID_DFS);
		EXTRA_CH_INFO_SET_INFO(chan, RTW_SCAN_EXTRA_ACTION_SCAN);
		EXTRA_CH_INFO_SET_SIZE(chan, RTW_EX_CH_INFO_SIZE -
				       RTW_EX_CH_INFO_HDR_SIZE);
		EXTRA_CH_INFO_SET_DFS_EXT_TIME(chan, RTW_DFS_CHAN_TIME);
		info_size += RTW_EX_CH_INFO_SIZE;
	}
	list->size += info_size;
	list->ch_num++;

	return 0;
}

static int rtw_add_chan_list(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
			     struct rtw_chan_list *list, u8 *buf)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct ieee80211_channel *channel;
	int i, ret = 0;

	for (i = 0; i < req->n_channels; i++) {
		struct rtw_chan_info ch_info = {0};

		channel = req->channels[i];
		ch_info.channel = channel->hw_value;
		ch_info.bw = RTW_SCAN_WIDTH;
		ch_info.pri_ch_idx = RTW_PRI_CH_IDX;
		ch_info.timeout = req->duration_mandatory ?
				  req->duration : RTW_CHANNEL_TIME;

		if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) {
			ch_info.action_id = RTW_CHANNEL_RADAR;
			ch_info.extra_info = 1;
			/* Overwrite duration for passive scans if necessary */
			ch_info.timeout = ch_info.timeout > RTW_PASS_CHAN_TIME ?
					  ch_info.timeout : RTW_PASS_CHAN_TIME;
		} else {
			ch_info.action_id = RTW_CHANNEL_ACTIVE;
		}

		ret = rtw_add_chan_info(rtwdev, &ch_info, list, buf);
		if (ret)
			return ret;
	}

	if (list->size > fifo->rsvd_pg_num << TX_PAGE_SIZE_SHIFT) {
		rtw_err(rtwdev, "List exceeds rsvd page total size\n");
		return -EINVAL;
	}

	list->addr = fifo->rsvd_h2c_info_addr + rtwdev->scan_info.probe_pg_size;
	ret = rtw_fw_write_data_rsvd_page(rtwdev, list->addr, buf, list->size);
	if (ret)
		rtw_err(rtwdev, "Download channel list failed\n");

	return ret;
}

static void rtw_fw_set_scan_offload(struct rtw_dev *rtwdev,
				    struct rtw_ch_switch_option *opt,
				    struct rtw_vif *rtwvif,
				    struct rtw_chan_list *list)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	/* reserve one dummy page at the beginning for tx descriptor */
	u8 pkt_loc = fifo->rsvd_h2c_info_addr - fifo->rsvd_boundary + 1;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_SCAN_OFFLOAD);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, H2C_PKT_CH_SWITCH_LEN);

	SCAN_OFFLOAD_SET_START(h2c_pkt, opt->switch_en);
	SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, opt->back_op_en);
	SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, random_seq);
	SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, req->no_cck);
	SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, list->ch_num);
	SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, list->size);
	SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, list->addr - fifo->rsvd_boundary);
	SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, scan_info->op_chan);
	SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, scan_info->op_pri_ch_idx);
	SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, scan_info->op_bw);
	SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, rtwvif->port);
	SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, req->duration_mandatory ?
				       req->duration : RTW_CHANNEL_TIME);
	SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, RTW_OFF_CHAN_TIME);
	SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, req->n_ssids);
	SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, pkt_loc);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

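/* Entry point for a firmware-offloaded HW scan: stash the request on the
 * vif, quiesce the queues, and pick the (optionally randomized) scan MAC
 * address. BIT_CBSSID_BCN is cleared from the RCR so beacons from foreign
 * BSSIDs are not filtered out while scanning; rtw_hw_scan_complete()
 * restores it.
 */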
void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
		       struct ieee80211_scan_request *scan_req)
{
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = &scan_req->req;
	u8 mac_addr[ETH_ALEN];

	rtwdev->scan_info.scanning_vif = vif;
	rtwvif->scan_ies = &scan_req->ies;
	rtwvif->scan_req = req;

	ieee80211_stop_queues(rtwdev->hw);
	rtw_leave_lps_deep(rtwdev);
	rtw_hci_flush_all_queues(rtwdev, false);
	rtw_mac_flush_all_queues(rtwdev, false);
	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else
		ether_addr_copy(mac_addr, vif->addr);

	rtw_core_scan_start(rtwdev, rtwvif, mac_addr, true);

	rtwdev->hal.rcr &= ~BIT_CBSSID_BCN;
	rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
}

void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			  bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_vif *rtwvif;
	u8 chan = scan_info->op_chan;

	if (!vif)
		return;

	rtwdev->hal.rcr |= BIT_CBSSID_BCN;
	rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);

	rtw_core_scan_complete(rtwdev, vif, true);

	rtwvif = (struct rtw_vif *)vif->drv_priv;
	if (chan)
		rtw_store_op_chan(rtwdev, false);
	rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
	ieee80211_wake_queues(rtwdev->hw);
	ieee80211_scan_completed(rtwdev->hw, &info);

	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	rtwdev->scan_info.scanning_vif = NULL;
}

static int rtw_hw_scan_prehandle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
				 struct rtw_chan_list *list)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	int size = req->n_channels * (RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE);
	u8 *buf;
	int ret;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_hw_scan_update_probe_req(rtwdev, rtwvif);
	if (ret) {
		rtw_err(rtwdev, "Update probe request failed\n");
		goto out;
	}

	list->buf_size = size;
	list->size = 0;
	list->ch_num = 0;
	ret = rtw_add_chan_list(rtwdev, rtwvif, list, buf);
out:
	kfree(buf);

	return ret;
}

int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			bool enable)
{
	struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_ch_switch_option cs_option = {0};
	struct rtw_chan_list chan_list = {0};
	int ret = 0;

	if (!rtwvif)
		return -EINVAL;

	cs_option.switch_en = enable;
	cs_option.back_op_en = scan_info->op_chan != 0;
	if (enable) {
		ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list);
		if (ret)
			goto out;
	}
	rtw_fw_set_scan_offload(rtwdev, &cs_option, rtwvif, &chan_list);
out:
	if (rtwdev->ap_active) {
		ret = rtw_download_beacon(rtwdev);
		if (ret)
			rtw_err(rtwdev, "HW scan download beacon failed\n");
	}

	return ret;
}

void rtw_hw_scan_abort(struct rtw_dev *rtwdev)
{
	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
		return;

	rtw_hw_scan_offload(rtwdev, vif, false);
	rtw_hw_scan_complete(rtwdev, vif, true);
}

void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
	struct rtw_c2h_cmd *c2h;
	bool aborted;
	u8 rc;

	if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
		return;

	c2h = get_c2h_from_skb(skb);
	rc = GET_SCAN_REPORT_RETURN_CODE(c2h->payload);
	aborted = rc != RTW_SCAN_REPORT_SUCCESS;
	rtw_hw_scan_complete(rtwdev, vif, aborted);

	if (aborted)
		rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
}

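/* With @backup set, snapshot the current operating channel configuration
 * before the scan wanders off-channel; with @backup clear, program the
 * snapshotted operating channel back into the hardware.
 */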
void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 band;

	if (backup) {
		scan_info->op_chan = hal->current_channel;
		scan_info->op_bw = hal->current_band_width;
		scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
		scan_info->op_pri_ch = hal->primary_channel;
	} else {
		band = scan_info->op_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
		rtw_update_channel(rtwdev, scan_info->op_chan,
				   scan_info->op_pri_ch,
				   band, scan_info->op_bw);
	}
}

void rtw_clear_op_chan(struct rtw_dev *rtwdev)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;

	scan_info->op_chan = 0;
	scan_info->op_bw = 0;
	scan_info->op_pri_ch_idx = 0;
	scan_info->op_pri_ch = 0;
}

static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;

	return channel == scan_info->op_chan;
}

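/* Handle the firmware's channel-switch notifications during a HW scan:
 * POSTSWITCH updates the driver's channel state and, back on the op
 * channel, restores it and re-enables the queues and beaconing; PRESWITCH
 * notifies coex of the upcoming band and pauses the queues when leaving
 * the op channel.
 */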
void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_c2h_cmd *c2h;
	enum rtw_scan_notify_id id;
	u8 chan, band, status;

	if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
		return;

	c2h = get_c2h_from_skb(skb);
	chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload);
	id = GET_CHAN_SWITCH_ID(c2h->payload);
	status = GET_CHAN_SWITCH_STATUS(c2h->payload);

	if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) {
		band = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
		rtw_update_channel(rtwdev, chan, chan, band,
				   RTW_CHANNEL_WIDTH_20);
		if (rtw_is_op_chan(rtwdev, chan)) {
			rtw_store_op_chan(rtwdev, false);
			ieee80211_wake_queues(rtwdev->hw);
			rtw_core_enable_beacon(rtwdev, true);
		}
	} else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) {
		if (IS_CH_5G_BAND(chan)) {
			rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
		} else if (IS_CH_2G_BAND(chan)) {
			u8 chan_type;

			if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
				chan_type = COEX_SWITCH_TO_24G;
			else
				chan_type = COEX_SWITCH_TO_24G_NOFORSCAN;
			rtw_coex_switchband_notify(rtwdev, chan_type);
		}
		/* The channel in a RTW_SCAN_NOTIFY_ID_PRESWITCH C2H is the
		 * next channel the hardware will switch to. We need to stop
		 * the queues if the next channel is a non-op channel.
		 */
		if (!rtw_is_op_chan(rtwdev, chan) &&
		    rtw_is_op_chan(rtwdev, hal->current_channel)) {
			rtw_core_enable_beacon(rtwdev, false);
			ieee80211_stop_queues(rtwdev->hw);
		}
	}

	rtw_dbg(rtwdev, RTW_DBG_HW_SCAN,
		"Chan switch: %x, id: %x, status: %x\n", chan, id, status);
}