xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/dp_rx_mon.c (revision 6f3a375902d676398fbb5b8710604e6236bff43f)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_peer.h"
24 #include "hal_rx.h"
25 #include "hal_api.h"
26 #include "qdf_trace.h"
27 #include "qdf_nbuf.h"
28 #include "hal_api_mon.h"
29 #include "dp_internal.h"
30 #include "qdf_mem.h"   /* qdf_mem_malloc, qdf_mem_free */
31 #include "dp_htt.h"
32 #include "dp_mon.h"
33 #include "dp_rx_mon.h"
34 
35 #include "htt.h"
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 
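/*
 * 0xe0 is the IEEE 802.11 frame-control FC0 octet for an Action No Ack
 * frame: type management (0x0) in bits 2-3 with subtype 0xe in bits 4-7,
 * which the fallback definition below encodes.
 */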
40 #ifndef IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK
41 #define IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK 0xe0
42 #endif
43 
44 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
45 void
46 dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev,
47 			     struct hal_rx_ppdu_info *ppdu_info,
48 			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
49 {
50 	struct dp_peer *peer;
51 	struct dp_soc *soc = pdev->soc;
52 	struct mon_rx_user_status *rx_user_status;
53 	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
54 	uint32_t num_users;
55 	int user_id;
56 	uint16_t sw_peer_id;
57 
58 	num_users = ppdu_info->com_info.num_users;
59 	for (user_id = 0; user_id < num_users; user_id++) {
60 		if (user_id >= OFDMA_NUM_USERS) {
61 			return;
62 		}
63 
64 		rx_user_status =  &ppdu_info->rx_user_status[user_id];
65 		rx_stats_peruser = &cdp_rx_ppdu->user[user_id];
66 		sw_peer_id = rx_user_status->sw_peer_id;
67 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
68 					     DP_MOD_ID_RX_PPDU_STATS);
69 		if (!peer) {
70 			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
71 			continue;
72 		}
73 
74 		qdf_mem_copy(rx_stats_peruser->mac_addr,
75 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
76 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
77 	}
78 }
79 
80 void
81 dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev,
82 				 struct hal_rx_ppdu_info *ppdu_info,
83 				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
84 {
85 	struct dp_peer *peer;
86 	struct dp_soc *soc = pdev->soc;
87 	int chain;
88 	uint16_t sw_peer_id;
89 	struct mon_rx_user_status *rx_user_status;
90 	uint32_t num_users = ppdu_info->com_info.num_users;
91 
92 	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
93 	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
94 	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
95 
96 	for (chain = 0; chain < MAX_CHAIN; chain++)
97 		cdp_rx_ppdu->per_chain_rssi[chain] =
98 			ppdu_info->rx_status.rssi[chain];
99 
100 	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
101 	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
102 	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
103 
104 	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
105 	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
106 		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
107 	else
108 		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
109 
110 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
111 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
112 	} else if (ppdu_info->rx_status.preamble_type ==
113 			HAL_RX_PKT_TYPE_11AX) {
114 		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
115 				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
116 		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
117 				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
118 	}
119 
120 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
121 	dp_rx_mon_handle_cfr_mu_info(pdev, ppdu_info, cdp_rx_ppdu);
122 	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
123 	sw_peer_id = rx_user_status->sw_peer_id;
124 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id, DP_MOD_ID_RX_PPDU_STATS);
125 	if (!peer) {
126 		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
127 		cdp_rx_ppdu->num_users = 0;
128 		return;
129 	}
130 
131 	cdp_rx_ppdu->peer_id = peer->peer_id;
132 	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
133 	cdp_rx_ppdu->num_users = num_users;
134 }
135 
136 bool
137 dp_cfr_rcc_mode_status(struct dp_pdev *pdev)
138 {
139 	return pdev->cfr_rcc_mode;
140 }
141 
142 void
143 dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev,
144 			    struct hal_rx_ppdu_info *ppdu_info,
145 			    struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
146 {
147 	struct cdp_rx_ppdu_cfr_info *cfr_info;
148 
149 	if (!qdf_unlikely(dp_cfr_rcc_mode_status(pdev)))
150 		return;
151 
152 	cfr_info = &cdp_rx_ppdu->cfr_info;
153 
154 	cfr_info->bb_captured_channel
155 		= ppdu_info->cfr_info.bb_captured_channel;
156 	cfr_info->bb_captured_timeout
157 		= ppdu_info->cfr_info.bb_captured_timeout;
158 	cfr_info->bb_captured_reason
159 		= ppdu_info->cfr_info.bb_captured_reason;
160 	cfr_info->rx_location_info_valid
161 		= ppdu_info->cfr_info.rx_location_info_valid;
162 	cfr_info->chan_capture_status
163 		= ppdu_info->cfr_info.chan_capture_status;
164 	cfr_info->rtt_che_buffer_pointer_high8
165 		= ppdu_info->cfr_info.rtt_che_buffer_pointer_high8;
166 	cfr_info->rtt_che_buffer_pointer_low32
167 		= ppdu_info->cfr_info.rtt_che_buffer_pointer_low32;
168 	cfr_info->rtt_cfo_measurement
169 		= (int16_t)ppdu_info->cfr_info.rtt_cfo_measurement;
170 	cfr_info->agc_gain_info0
171 		= ppdu_info->cfr_info.agc_gain_info0;
172 	cfr_info->agc_gain_info1
173 		= ppdu_info->cfr_info.agc_gain_info1;
174 	cfr_info->agc_gain_info2
175 		= ppdu_info->cfr_info.agc_gain_info2;
176 	cfr_info->agc_gain_info3
177 		= ppdu_info->cfr_info.agc_gain_info3;
178 	cfr_info->rx_start_ts
179 		= ppdu_info->cfr_info.rx_start_ts;
180 	cfr_info->mcs_rate
181 		= ppdu_info->cfr_info.mcs_rate;
182 	cfr_info->gi_type
183 		= ppdu_info->cfr_info.gi_type;
184 }
185 
186 void
187 dp_update_cfr_dbg_stats(struct dp_pdev *pdev,
188 			struct hal_rx_ppdu_info *ppdu_info)
189 {
190 	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;
191 
192 	DP_STATS_INC(pdev,
193 		     rcc.chan_capture_status[cfr->chan_capture_status], 1);
194 	if (cfr->rx_location_info_valid) {
195 		DP_STATS_INC(pdev, rcc.rx_loc_info_valid_cnt, 1);
196 		if (cfr->bb_captured_channel) {
197 			DP_STATS_INC(pdev, rcc.bb_captured_channel_cnt, 1);
198 			DP_STATS_INC(pdev,
199 				     rcc.reason_cnt[cfr->bb_captured_reason],
200 				     1);
201 		} else if (cfr->bb_captured_timeout) {
202 			DP_STATS_INC(pdev, rcc.bb_captured_timeout_cnt, 1);
203 			DP_STATS_INC(pdev,
204 				     rcc.reason_cnt[cfr->bb_captured_reason],
205 				     1);
206 		}
207 	}
208 }
209 
210 void
211 dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev,
212 		 struct hal_rx_ppdu_info *ppdu_info)
213 {
214 	qdf_nbuf_t ppdu_nbuf;
215 	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
216 
217 	dp_update_cfr_dbg_stats(pdev, ppdu_info);
218 	if (!ppdu_info->cfr_info.bb_captured_channel)
219 		return;
220 
221 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
222 				   sizeof(struct cdp_rx_indication_ppdu),
223 				   0,
224 				   0,
225 				   FALSE);
226 	if (ppdu_nbuf) {
227 		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)qdf_nbuf_data(ppdu_nbuf);
228 
229 		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
230 		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
231 		qdf_nbuf_put_tail(ppdu_nbuf,
232 				  sizeof(struct cdp_rx_indication_ppdu));
233 		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
234 				     ppdu_nbuf, HTT_INVALID_PEER,
235 				     WDI_NO_VAL, pdev->pdev_id);
236 	}
237 }
238 
239 void
240 dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev,
241 				 struct hal_rx_ppdu_info *ppdu_info,
242 				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
243 {
244 	if (!dp_cfr_rcc_mode_status(pdev))
245 		return;
246 
247 	if (ppdu_info->cfr_info.bb_captured_channel)
248 		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
249 }
250 
251 /**
252  * dp_bb_captured_chan_status() - Get the bb_captured_channel status
253  * @pdev: pdev ctx
254  * @ppdu_info: structure for rx ppdu ring
255  *
256  * Return: Success/ Failure
257  */
258 static inline QDF_STATUS
259 dp_bb_captured_chan_status(struct dp_pdev *pdev,
260 			   struct hal_rx_ppdu_info *ppdu_info)
261 {
262 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
263 	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;
264 
265 	if (dp_cfr_rcc_mode_status(pdev)) {
266 		if (cfr->bb_captured_channel)
267 			status = QDF_STATUS_SUCCESS;
268 	}
269 
270 	return status;
271 }
272 #else
273 static inline QDF_STATUS
274 dp_bb_captured_chan_status(struct dp_pdev *pdev,
275 			   struct hal_rx_ppdu_info *ppdu_info)
276 {
277 	return QDF_STATUS_E_NOSUPPORT;
278 }
279 #endif /* WLAN_CFR_ENABLE && WLAN_ENH_CFR_ENABLE */
280 
281 #ifdef QCA_ENHANCED_STATS_SUPPORT
282 #ifdef QCA_RSSI_DB2DBM
283 /**
283  * dp_rx_mon_rf_index_conv() - Convert a BB chain index to the RF chain
284  *			index used in the rssi_chain[chain][bw] array
286  *
287  * @chain: BB chain index
288  * @mon_pdev: pdev structure
289  *
290  * Return: return RF chain index
291  *
292  * Computation:
293  *  3 bytes (24 bits) of xbar_config are used for the RF-to-BB mapping,
294  *  3 bits per chain. Sample xbar_config:
295  *
296  * If xbar_config is 0x688FAC(hex):
297  *     RF chains 0-3 are connected to BB chains 4-7
298  *     RF chains 4-7 are connected to BB chains 0-3
299  *     here,
300  *     bits 0 to 2 = 4, maps BB chain 4 for RF chain 0
301  *     bits 3 to 5 = 5, maps BB chain 5 for RF chain 1
302  *     bits 6 to 8 = 6, maps BB chain 6 for RF chain 2
303  *     bits 9 to 11 = 7, maps BB chain 7 for RF chain 3
304  *     bits 12 to 14 = 0, maps BB chain 0 for RF chain 4
305  *     bits 15 to 17 = 1, maps BB chain 1 for RF chain 5
306  *     bits 18 to 20 = 2, maps BB chain 2 for RF chain 6
307  *     bits 21 to 23 = 3, maps BB chain 3 for RF chain 7
308  */
309 static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
310 				       struct dp_mon_pdev *mon_pdev)
311 {
312 	uint32_t xbar_config = mon_pdev->rssi_offsets.xbar_config;
313 
314 	if (mon_pdev->rssi_dbm_conv_support && xbar_config)
315 		return ((xbar_config >> (3 * chain)) & 0x07);
316 	return chain;
317 }
318 #else
319 static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
320 				       struct dp_mon_pdev *mon_pdev)
321 {
322 	return chain;
323 }
324 #endif
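
/*
 * Editor's sketch (compiled out): a hypothetical standalone walk of the
 * xbar_config decode documented above, using the sample value 0x688FAC
 * and the same shift-and-mask arithmetic as dp_rx_mon_rf_index_conv().
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xbar_config = 0x688FAC;
	unsigned int rf_chain;

	for (rf_chain = 0; rf_chain < 8; rf_chain++)
		printf("RF chain %u -> BB chain %u\n", rf_chain,
		       (unsigned int)((xbar_config >> (3 * rf_chain)) & 0x07));

	/* Prints BB chains 4, 5, 6, 7, 0, 1, 2, 3 for RF chains 0..7 */
	return 0;
}
#endif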
325 void
326 dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info,
327 			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu,
328 			     struct dp_pdev *pdev)
329 {
330 	uint8_t chain, bw;
331 	uint8_t rssi;
332 	uint8_t chain_rf;
333 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
334 
335 	for (chain = 0; chain < SS_COUNT; chain++) {
336 		for (bw = 0; bw < MAX_BW; bw++) {
337 			chain_rf = dp_rx_mon_rf_index_conv(chain, mon_pdev);
338 			rssi = ppdu_info->rx_status.rssi_chain[chain_rf][bw];
339 			if (rssi != DP_RSSI_INVAL)
340 				cdp_rx_ppdu->rssi_chain[chain_rf][bw] = rssi;
341 			else
342 				cdp_rx_ppdu->rssi_chain[chain_rf][bw] = 0;
343 		}
344 	}
345 }
346 
347 void
348 dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
349 			      struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
350 {
351 	uint8_t pilot_evm;
352 	uint8_t nss_count;
353 	uint8_t pilot_count;
354 
355 	nss_count = ppdu_info->evm_info.nss_count;
356 	pilot_count = ppdu_info->evm_info.pilot_count;
357 
358 	if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
359 		qdf_err("pilot evm count is more than expected");
360 		return;
361 	}
362 	cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
363 	cdp_rx_ppdu->evm_info.nss_count = nss_count;
364 
365 	/* Populate EVM for all pilots: nss_count * pilot_count entries */
366 	for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
367 		cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
368 			ppdu_info->evm_info.pilot_evm[pilot_evm];
369 	}
370 }
371 
372 /**
373  * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size
374  * @pdev: pdev ctx
375  * @rx_user_status: mon rx user status
376  *
377  * Return: bool
378  */
379 static inline bool
380 dp_rx_inc_rusize_cnt(struct dp_pdev *pdev,
381 		     struct mon_rx_user_status *rx_user_status)
382 {
383 	uint32_t ru_size;
384 	bool is_data;
385 
386 	ru_size = rx_user_status->ofdma_ru_size;
387 
388 	if (dp_is_subtype_data(rx_user_status->frame_control)) {
389 		DP_STATS_INC(pdev,
390 			     ul_ofdma.data_rx_ru_size[ru_size], 1);
391 		is_data = true;
392 	} else {
393 		DP_STATS_INC(pdev,
394 			     ul_ofdma.nondata_rx_ru_size[ru_size], 1);
395 		is_data = false;
396 	}
397 
398 	return is_data;
399 }
400 
401 /**
402  * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication
403  * @pdev: pdev ctx
404  * @ppdu_info: ppdu info structure from ppdu ring
405  * @cdp_rx_ppdu: Rx PPDU indication structure
406  *
407  * Return: none
408  */
409 static void
410 dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
411 					struct hal_rx_ppdu_info *ppdu_info,
412 					struct cdp_rx_indication_ppdu
413 					*cdp_rx_ppdu)
414 {
415 	struct dp_peer *peer;
416 	struct dp_soc *soc = pdev->soc;
417 	int i;
418 	struct mon_rx_user_status *rx_user_status;
419 	struct mon_rx_user_info *rx_user_info;
420 	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
421 	int ru_size;
422 	bool is_data = false;
423 	uint32_t num_users;
424 	struct dp_mon_ops *mon_ops;
425 	uint16_t sw_peer_id;
426 
427 	num_users = ppdu_info->com_info.num_users;
428 	for (i = 0; i < num_users; i++) {
429 		if (i >= OFDMA_NUM_USERS)
430 			return;
431 
432 		rx_user_status =  &ppdu_info->rx_user_status[i];
433 		rx_user_info = &ppdu_info->rx_user_info[i];
434 		rx_stats_peruser = &cdp_rx_ppdu->user[i];
435 
436 		sw_peer_id = rx_user_status->sw_peer_id;
437 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
438 					     DP_MOD_ID_RX_PPDU_STATS);
439 		if (qdf_unlikely(!peer)) {
440 			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
441 			continue;
442 		}
443 		rx_stats_peruser->is_bss_peer = peer->bss_peer;
444 
445 		rx_stats_peruser->first_data_seq_ctrl =
446 			rx_user_status->first_data_seq_ctrl;
447 
448 		rx_stats_peruser->frame_control_info_valid =
449 			rx_user_status->frame_control_info_valid;
450 		rx_stats_peruser->frame_control =
451 			rx_user_status->frame_control;
452 
453 		rx_stats_peruser->qos_control_info_valid =
454 			rx_user_info->qos_control_info_valid;
455 		rx_stats_peruser->qos_control =
456 			rx_user_info->qos_control;
457 		rx_stats_peruser->tcp_msdu_count =
458 			rx_user_status->tcp_msdu_count;
459 		rx_stats_peruser->udp_msdu_count =
460 			rx_user_status->udp_msdu_count;
461 		rx_stats_peruser->other_msdu_count =
462 			rx_user_status->other_msdu_count;
463 
464 		rx_stats_peruser->num_msdu =
465 			rx_stats_peruser->tcp_msdu_count +
466 			rx_stats_peruser->udp_msdu_count +
467 			rx_stats_peruser->other_msdu_count;
468 
469 		rx_stats_peruser->preamble_type =
470 				cdp_rx_ppdu->u.preamble;
471 		rx_stats_peruser->mpdu_cnt_fcs_ok =
472 			rx_user_status->mpdu_cnt_fcs_ok;
473 		rx_stats_peruser->mpdu_cnt_fcs_err =
474 			rx_user_status->mpdu_cnt_fcs_err;
475 		qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap,
476 			     &rx_user_status->mpdu_fcs_ok_bitmap,
477 			     HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
478 			     sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
479 		rx_stats_peruser->mpdu_ok_byte_count =
480 			rx_user_status->mpdu_ok_byte_count;
481 		rx_stats_peruser->mpdu_err_byte_count =
482 			rx_user_status->mpdu_err_byte_count;
483 
484 		cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok;
485 		cdp_rx_ppdu->num_msdu += rx_stats_peruser->num_msdu;
486 		rx_stats_peruser->retries =
487 			CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ?
488 			rx_stats_peruser->mpdu_cnt_fcs_ok : 0;
489 		cdp_rx_ppdu->retries += rx_stats_peruser->retries;
490 
491 		if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1)
492 			rx_stats_peruser->is_ampdu = 1;
493 		else
494 			rx_stats_peruser->is_ampdu = 0;
495 
496 		rx_stats_peruser->tid = ppdu_info->rx_status.tid;
497 
498 		qdf_mem_copy(rx_stats_peruser->mac_addr,
499 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
500 		rx_stats_peruser->peer_id = peer->peer_id;
501 		cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
502 		rx_stats_peruser->vdev_id = peer->vdev->vdev_id;
503 		rx_stats_peruser->mu_ul_info_valid = 0;
504 
505 		mon_ops = dp_mon_ops_get(soc);
506 		if (mon_ops && mon_ops->mon_rx_populate_ppdu_usr_info)
507 			mon_ops->mon_rx_populate_ppdu_usr_info(rx_user_status,
508 							       rx_stats_peruser);
509 
510 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
511 		if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
512 		    cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
513 			if (rx_user_status->mu_ul_info_valid) {
514 				rx_stats_peruser->nss = rx_user_status->nss;
515 				cdp_rx_ppdu->usr_nss_sum += rx_stats_peruser->nss;
516 				rx_stats_peruser->mcs = rx_user_status->mcs;
517 				rx_stats_peruser->mu_ul_info_valid =
518 					rx_user_status->mu_ul_info_valid;
519 				rx_stats_peruser->ofdma_ru_start_index =
520 					rx_user_status->ofdma_ru_start_index;
521 				rx_stats_peruser->ofdma_ru_width =
522 					rx_user_status->ofdma_ru_width;
523 				cdp_rx_ppdu->usr_ru_tones_sum +=
524 					rx_stats_peruser->ofdma_ru_width;
525 				rx_stats_peruser->user_index = i;
526 				ru_size = rx_user_status->ofdma_ru_size;
527 				/*
528 				 * max RU size will be equal to
529 				 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
530 				 */
531 				if (qdf_unlikely(ru_size >= OFDMA_NUM_RU_SIZE)) {
532 					dp_err("invalid ru_size %d\n",
533 					       ru_size);
534 					return;
535 				}
536 				is_data = dp_rx_inc_rusize_cnt(pdev,
537 							       rx_user_status);
538 			}
539 			if (is_data) {
540 				/* counter to get number of MU OFDMA */
541 				pdev->stats.ul_ofdma.data_rx_ppdu++;
542 				pdev->stats.ul_ofdma.data_users[num_users]++;
543 			}
544 		}
545 	}
546 }
547 
548 /**
549  * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
550  * @pdev: pdev ctx
551  * @ppdu_info: ppdu info structure from ppdu ring
552  * @cdp_rx_ppdu: Rx PPDU indication structure
553  *
554  * Return: none
555  */
556 static void
557 dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
558 				   struct hal_rx_ppdu_info *ppdu_info,
559 				   struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
560 {
561 	struct dp_peer *peer;
562 	struct dp_soc *soc = pdev->soc;
563 	uint32_t i;
564 	struct dp_mon_ops *mon_ops;
565 	uint16_t sw_peer_id;
566 	struct mon_rx_user_status *rx_user_status;
567 	uint32_t num_users = ppdu_info->com_info.num_users;
568 
569 	cdp_rx_ppdu->first_data_seq_ctrl =
570 		ppdu_info->rx_status.first_data_seq_ctrl;
571 	cdp_rx_ppdu->frame_ctrl =
572 		ppdu_info->rx_status.frame_control;
573 	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
574 	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
575 	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
576 	/* num mpdu is consolidated and added together in num user loop */
577 	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
578 	/* num msdu is consolidated and added together in num user loop */
579 	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
580 				 cdp_rx_ppdu->udp_msdu_count +
581 				 cdp_rx_ppdu->other_msdu_count);
582 
583 	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
584 		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;
585 
586 	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
587 		cdp_rx_ppdu->is_ampdu = 1;
588 	else
589 		cdp_rx_ppdu->is_ampdu = 0;
590 	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;
591 
592 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
593 	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
594 	sw_peer_id = rx_user_status->sw_peer_id;
595 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
596 				     DP_MOD_ID_RX_PPDU_STATS);
597 	if (qdf_unlikely(!peer)) {
598 		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
599 		cdp_rx_ppdu->num_users = 0;
600 		goto end;
601 	}
602 
603 	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
604 		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
605 	cdp_rx_ppdu->peer_id = peer->peer_id;
606 	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
607 
608 	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
609 	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
610 	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
611 	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
612 	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
613 	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
614 	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
615 		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
616 	else
617 		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
618 	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
619 	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
620 	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
621 				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
622 	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
623 	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
624 	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
625 	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
626 	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
627 	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
628 	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
629 
630 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
631 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
632 	} else if (ppdu_info->rx_status.preamble_type ==
633 			HAL_RX_PKT_TYPE_11AX) {
634 		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
635 				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
636 		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
637 				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
638 	}
639 	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
640 	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
641 	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;
642 
643 	mon_ops = dp_mon_ops_get(pdev->soc);
644 	if (mon_ops && mon_ops->mon_rx_populate_ppdu_info)
645 		mon_ops->mon_rx_populate_ppdu_info(ppdu_info,
646 						   cdp_rx_ppdu);
647 
648 	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
649 	for (i = 0; i < MAX_CHAIN; i++)
650 		cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];
651 
652 	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;
653 
654 	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;
655 
656 	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);
657 
658 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
659 
660 	return;
661 end:
662 	dp_rx_populate_cfr_non_assoc_sta(pdev, ppdu_info, cdp_rx_ppdu);
663 }
664 
665 /**
666  * dp_rx_rate_stats_update() - Update per-peer rate statistics
667  * @peer: Datapath peer handle
668  * @ppdu: PPDU Descriptor
669  * @user: user index
670  *
671  * Return: None
672  */
673 static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
674 					   struct cdp_rx_indication_ppdu *ppdu,
675 					   uint32_t user)
676 {
677 	uint32_t ratekbps = 0;
678 	uint32_t ppdu_rx_rate = 0;
679 	uint32_t nss = 0;
680 	uint8_t mcs = 0;
681 	uint32_t rix;
682 	uint16_t ratecode = 0;
683 	struct cdp_rx_stats_ppdu_user *ppdu_user = NULL;
684 	struct dp_mon_peer *mon_peer = NULL;
685 
686 	if (!peer || !ppdu)
687 		return;
688 
689 	mon_peer = peer->monitor_peer;
690 	ppdu_user = &ppdu->user[user];
691 
692 	if (!mon_peer)
693 		return;
694 
695 	if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU) {
696 		if (ppdu_user->nss == 0)
697 			nss = 0;
698 		else
699 			nss = ppdu_user->nss - 1;
700 		mcs = ppdu_user->mcs;
701 
702 		mon_peer->stats.rx.nss_info = ppdu_user->nss;
703 		mon_peer->stats.rx.mcs_info = ppdu_user->mcs;
704 	} else {
705 		if (ppdu->u.nss == 0)
706 			nss = 0;
707 		else
708 			nss = ppdu->u.nss - 1;
709 		mcs = ppdu->u.mcs;
710 
711 		mon_peer->stats.rx.nss_info = ppdu->u.nss;
712 		mon_peer->stats.rx.mcs_info = ppdu->u.mcs;
713 	}
714 
715 	ratekbps = dp_getrateindex(ppdu->u.gi,
716 				   mcs,
717 				   nss,
718 				   ppdu->u.preamble,
719 				   ppdu->u.bw,
720 				   ppdu->punc_bw,
721 				   &rix,
722 				   &ratecode);
723 
724 	if (!ratekbps) {
725 		ppdu->rix = 0;
726 		ppdu_user->rix = 0;
727 		ppdu->rx_ratekbps = 0;
728 		ppdu->rx_ratecode = 0;
729 		ppdu_user->rx_ratekbps = 0;
730 		return;
731 	}
732 
733 	mon_peer->stats.rx.bw_info = ppdu->u.bw;
734 	mon_peer->stats.rx.gi_info = ppdu->u.gi;
735 	mon_peer->stats.rx.preamble_info = ppdu->u.preamble;
736 
737 	ppdu->rix = rix;
738 	ppdu_user->rix = rix;
739 	DP_STATS_UPD(mon_peer, rx.last_rx_rate, ratekbps);
740 	mon_peer->stats.rx.avg_rx_rate =
741 		dp_ath_rate_lpf(mon_peer->stats.rx.avg_rx_rate, ratekbps);
742 	ppdu_rx_rate = dp_ath_rate_out(mon_peer->stats.rx.avg_rx_rate);
743 	DP_STATS_UPD(mon_peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
744 	ppdu->rx_ratekbps = ratekbps;
745 	ppdu->rx_ratecode = ratecode;
746 	ppdu_user->rx_ratekbps = ratekbps;
747 
748 	if (peer->vdev)
749 		peer->vdev->stats.rx.last_rx_rate = ratekbps;
750 }
751 
752 #ifdef WLAN_FEATURE_11BE
753 static inline uint8_t dp_get_bw_offset_frm_bw(struct dp_soc *soc,
754 					      enum CMN_BW_TYPES bw)
755 {
756 	uint8_t pkt_bw_offset;
757 
758 	switch (bw) {
759 	case CMN_BW_20MHZ:
760 		pkt_bw_offset = PKT_BW_GAIN_20MHZ;
761 		break;
762 	case CMN_BW_40MHZ:
763 		pkt_bw_offset = PKT_BW_GAIN_40MHZ;
764 		break;
765 	case CMN_BW_80MHZ:
766 		pkt_bw_offset = PKT_BW_GAIN_80MHZ;
767 		break;
768 	case CMN_BW_160MHZ:
769 		pkt_bw_offset = PKT_BW_GAIN_160MHZ;
770 		break;
771 	case CMN_BW_320MHZ:
772 		pkt_bw_offset = PKT_BW_GAIN_320MHZ;
773 		break;
774 	default:
775 		pkt_bw_offset = 0;
776 		dp_rx_mon_status_debug("%pK: Invalid BW index = %d",
777 				       soc, bw);
778 	}
779 
780 	return pkt_bw_offset;
781 }
782 #else
783 static inline uint8_t dp_get_bw_offset_frm_bw(struct dp_soc *soc,
784 					      enum CMN_BW_TYPES bw)
785 {
786 	uint8_t pkt_bw_offset;
787 
788 	switch (bw) {
789 	case CMN_BW_20MHZ:
790 		pkt_bw_offset = PKT_BW_GAIN_20MHZ;
791 		break;
792 	case CMN_BW_40MHZ:
793 		pkt_bw_offset = PKT_BW_GAIN_40MHZ;
794 		break;
795 	case CMN_BW_80MHZ:
796 		pkt_bw_offset = PKT_BW_GAIN_80MHZ;
797 		break;
798 	case CMN_BW_160MHZ:
799 		pkt_bw_offset = PKT_BW_GAIN_160MHZ;
800 		break;
801 	default:
802 		pkt_bw_offset = 0;
803 		dp_rx_mon_status_debug("%pK: Invalid BW index = %d",
804 				       soc, bw);
805 	}
806 
807 	return pkt_bw_offset;
808 }
809 #endif
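
/*
 * The per-bandwidth gain offset returned above is added to the combined
 * RSSI (ppdu->rssi) in dp_rx_stats_update() below when deriving rx.snr,
 * presumably to normalize receptions of different bandwidths against
 * their differing per-tone energy.
 */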
810 
811 #ifdef WLAN_CONFIG_TELEMETRY_AGENT
812 static void
813 dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
814 				 struct dp_peer *peer,
815 				 struct cdp_rx_indication_ppdu *ppdu_desc,
816 				 struct cdp_rx_stats_ppdu_user *user)
817 {
818 	uint32_t nss_ru_width_sum = 0;
819 	struct dp_mon_peer *mon_peer = NULL;
820 	uint8_t ac = 0;
821 
822 	if (!pdev || !ppdu_desc || !user || !peer)
823 		return;
824 
825 	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
826 	if (!nss_ru_width_sum)
827 		nss_ru_width_sum = 1;
828 
829 	if (ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
830 	    ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
831 		user->rx_time_us = (ppdu_desc->duration *
832 				    user->nss * user->ofdma_ru_width) /
833 				    nss_ru_width_sum;
834 	} else {
835 		user->rx_time_us = ppdu_desc->duration;
836 	}
837 
838 	mon_peer = peer->monitor_peer;
839 	if (qdf_unlikely(!mon_peer))
840 		return;
841 
842 	ac = TID_TO_WME_AC(user->tid);
843 	DP_STATS_INC(mon_peer, airtime_stats.rx_airtime_consumption[ac].consumption,
844 		     user->rx_time_us);
845 }
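
/*
 * Editor's worked example with hypothetical numbers: for an MU PPDU of
 * duration 1000 us with usr_nss_sum = 3 and usr_ru_tones_sum = 300
 * (so nss_ru_width_sum = 900), a user with nss = 1 and
 * ofdma_ru_width = 100 is apportioned 1000 * 1 * 100 / 900 = 111 us of
 * RX airtime by dp_ppdu_desc_user_rx_time_update() above.
 */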
846 
847 /**
848  * dp_rx_mon_update_user_deter_stats() - Update per-peer deterministic stats
849  * @pdev: Datapath pdev handle
850  * @peer: Datapath peer handle
851  * @ppdu: PPDU Descriptor
852  * @user: Per user RX stats
853  *
854  * Return: None
855  */
856 static inline
857 void dp_rx_mon_update_user_deter_stats(struct dp_pdev *pdev,
858 				       struct dp_peer *peer,
859 				       struct cdp_rx_indication_ppdu *ppdu,
860 				       struct cdp_rx_stats_ppdu_user *user)
861 {
862 	struct dp_mon_peer *mon_peer;
863 	uint8_t tid;
864 
865 	if (!pdev || !ppdu || !user || !peer)
866 		return;
867 
868 	if (!dp_is_subtype_data(ppdu->frame_ctrl))
869 		return;
870 
871 	if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU)
872 		return;
873 
874 	mon_peer = peer->monitor_peer;
875 	if (!mon_peer)
876 		return;
877 
878 	tid = user->tid;
879 	if (tid >= CDP_DATA_TID_MAX)
880 		return;
881 
882 	DP_STATS_INC(mon_peer,
883 		     deter_stats.deter[tid].rx_det.mode_cnt,
884 		     1);
885 	DP_STATS_UPD(mon_peer,
886 		     deter_stats.deter[tid].rx_det.avg_rate,
887 		     mon_peer->stats.rx.avg_rx_rate);
888 }
889 
890 /**
891  * dp_rx_mon_update_pdev_deter_stats() - Update pdev deterministic stats
892  * @pdev: Datapath pdev handle
893  * @ppdu: PPDU Descriptor
894  *
895  * Return: None
896  */
897 static inline
898 void dp_rx_mon_update_pdev_deter_stats(struct dp_pdev *pdev,
899 				       struct cdp_rx_indication_ppdu *ppdu)
900 {
901 	if (!dp_is_subtype_data(ppdu->frame_ctrl))
902 		return;
903 
904 	DP_STATS_INC(pdev,
905 		     deter_stats.rx_su_cnt,
906 		     1);
907 }
908 #else
909 static inline void
910 dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
911 				 struct dp_peer *peer,
912 				 struct cdp_rx_indication_ppdu *ppdu_desc,
913 				 struct cdp_rx_stats_ppdu_user *user)
914 { }
915 
916 static inline
917 void dp_rx_mon_update_user_deter_stats(struct dp_pdev *pdev,
918 				       struct dp_peer *peer,
919 				       struct cdp_rx_indication_ppdu *ppdu,
920 				       struct cdp_rx_stats_ppdu_user *user)
921 { }
922 
923 static inline
924 void dp_rx_mon_update_pdev_deter_stats(struct dp_pdev *pdev,
925 				       struct cdp_rx_indication_ppdu *ppdu)
926 { }
927 #endif
928 
929 static void dp_rx_stats_update(struct dp_pdev *pdev,
930 			       struct cdp_rx_indication_ppdu *ppdu)
931 {
932 	struct dp_soc *soc = NULL;
933 	uint8_t mcs, preamble, ac = 0, nss, ppdu_type;
934 	uint32_t num_msdu;
935 	uint8_t pkt_bw_offset;
936 	struct dp_peer *peer;
937 	struct dp_mon_peer *mon_peer;
938 	struct cdp_rx_stats_ppdu_user *ppdu_user;
939 	uint32_t i;
940 	enum cdp_mu_packet_type mu_pkt_type;
941 	struct dp_mon_ops *mon_ops;
942 	struct dp_mon_pdev *mon_pdev = NULL;
943 	uint64_t byte_count;
944 
945 	if (qdf_likely(pdev))
946 		soc = pdev->soc;
947 	else
948 		return;
949 
950 	if (qdf_unlikely(!soc) || soc->process_rx_status)
951 		return;
952 
953 	mon_pdev = pdev->monitor_pdev;
954 
955 	preamble = ppdu->u.preamble;
956 	ppdu_type = ppdu->u.ppdu_type;
957 
958 	for (i = 0; i < ppdu->num_users && i < CDP_MU_MAX_USERS; i++) {
959 		peer = NULL;
960 		ppdu_user = &ppdu->user[i];
961 		peer = dp_peer_get_ref_by_id(soc, ppdu_user->peer_id,
962 					     DP_MOD_ID_RX_PPDU_STATS);
963 
964 		if (qdf_unlikely(!peer))
965 			mon_peer = mon_pdev->invalid_mon_peer;
966 		else
967 			mon_peer = peer->monitor_peer;
968 
969 		if (qdf_unlikely(!mon_peer)) {
970 			if (peer)
971 				dp_peer_unref_delete(peer,
972 						     DP_MOD_ID_RX_PPDU_STATS);
973 
974 			continue;
975 		}
976 
977 		if ((preamble == DOT11_A) || (preamble == DOT11_B))
978 			ppdu->u.nss = 1;
979 
980 		if (ppdu_type == HAL_RX_TYPE_SU) {
981 			mcs = ppdu->u.mcs;
982 			nss = ppdu->u.nss;
983 		} else {
984 			mcs = ppdu_user->mcs;
985 			nss = ppdu_user->nss;
986 		}
987 
988 		num_msdu = ppdu_user->num_msdu;
989 		byte_count = ppdu_user->mpdu_ok_byte_count +
990 			ppdu_user->mpdu_err_byte_count;
991 
992 		pkt_bw_offset = dp_get_bw_offset_frm_bw(soc, ppdu->u.bw);
993 		DP_STATS_UPD(mon_peer, rx.snr, (ppdu->rssi + pkt_bw_offset));
994 
995 		if (qdf_unlikely(mon_peer->stats.rx.avg_snr == CDP_INVALID_SNR))
996 			mon_peer->stats.rx.avg_snr =
997 				CDP_SNR_IN(mon_peer->stats.rx.snr);
998 		else
999 			CDP_SNR_UPDATE_AVG(mon_peer->stats.rx.avg_snr,
1000 					   mon_peer->stats.rx.snr);
1001 
1002 		if (ppdu_type == HAL_RX_TYPE_SU) {
1003 			if (nss) {
1004 				DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu);
1005 				DP_STATS_INC(mon_peer, rx.ppdu_nss[nss - 1], 1);
1006 			}
1007 
1008 			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_ok,
1009 				     ppdu_user->mpdu_cnt_fcs_ok);
1010 			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_err,
1011 				     ppdu_user->mpdu_cnt_fcs_err);
1012 		}
1013 
1014 		if (ppdu_type >= HAL_RX_TYPE_MU_MIMO &&
1015 		    ppdu_type <= HAL_RX_TYPE_MU_OFDMA) {
1016 			if (ppdu_type == HAL_RX_TYPE_MU_MIMO)
1017 				mu_pkt_type = TXRX_TYPE_MU_MIMO;
1018 			else
1019 				mu_pkt_type = TXRX_TYPE_MU_OFDMA;
1020 
1021 			if (qdf_likely(nss)) {
1022 				DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu);
1023 				DP_STATS_INC(mon_peer,
1024 					rx.rx_mu[mu_pkt_type].ppdu_nss[nss - 1],
1025 					1);
1026 			}
1027 
1028 			DP_STATS_INC(mon_peer,
1029 				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_ok,
1030 				     ppdu_user->mpdu_cnt_fcs_ok);
1031 			DP_STATS_INC(mon_peer,
1032 				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_err,
1033 				     ppdu_user->mpdu_cnt_fcs_err);
1034 		}
1035 
1036 		DP_STATS_INC(mon_peer, rx.sgi_count[ppdu->u.gi], num_msdu);
1037 		DP_STATS_INC(mon_peer, rx.bw[ppdu->u.bw], num_msdu);
1038 		DP_STATS_INC(mon_peer, rx.reception_type[ppdu->u.ppdu_type],
1039 			     num_msdu);
1040 		DP_STATS_INC(mon_peer, rx.ppdu_cnt[ppdu->u.ppdu_type], 1);
1041 		DP_STATS_INCC(mon_peer, rx.ampdu_cnt, num_msdu,
1042 			      ppdu_user->is_ampdu);
1043 		DP_STATS_INCC(mon_peer, rx.non_ampdu_cnt, num_msdu,
1044 			      !(ppdu_user->is_ampdu));
1045 		DP_STATS_UPD(mon_peer, rx.rx_rate, mcs);
1046 		DP_STATS_INCC(mon_peer,
1047 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
1048 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
1049 		DP_STATS_INCC(mon_peer,
1050 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1051 			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
1052 		DP_STATS_INCC(mon_peer,
1053 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
1054 			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
1055 		DP_STATS_INCC(mon_peer,
1056 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1057 			((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
1058 		DP_STATS_INCC(mon_peer,
1059 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
1060 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
1061 		DP_STATS_INCC(mon_peer,
1062 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1063 			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
1064 		DP_STATS_INCC(mon_peer,
1065 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
1066 			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
1067 		DP_STATS_INCC(mon_peer,
1068 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1069 			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
1070 		DP_STATS_INCC(mon_peer,
1071 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
1072 			((mcs >= (MAX_MCS_11AX)) && (preamble == DOT11_AX)));
1073 		DP_STATS_INCC(mon_peer,
1074 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1075 			((mcs < (MAX_MCS_11AX)) && (preamble == DOT11_AX)));
1076 		DP_STATS_INCC(mon_peer,
1077 			rx.su_ax_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
1078 			((mcs >= (MAX_MCS_11AX)) && (preamble == DOT11_AX) &&
1079 			(ppdu_type == HAL_RX_TYPE_SU)));
1080 		DP_STATS_INCC(mon_peer,
1081 			rx.su_ax_ppdu_cnt.mcs_count[mcs], 1,
1082 			((mcs < (MAX_MCS_11AX)) && (preamble == DOT11_AX) &&
1083 			(ppdu_type == HAL_RX_TYPE_SU)));
1084 		DP_STATS_INCC(mon_peer,
1085 			rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[MAX_MCS - 1],
1086 			1, ((mcs >= (MAX_MCS_11AX)) &&
1087 			(preamble == DOT11_AX) &&
1088 			(ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
1089 		DP_STATS_INCC(mon_peer,
1090 			rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[mcs],
1091 			1, ((mcs < (MAX_MCS_11AX)) &&
1092 			(preamble == DOT11_AX) &&
1093 			(ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
1094 		DP_STATS_INCC(mon_peer,
1095 			rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[MAX_MCS - 1],
1096 			1, ((mcs >= (MAX_MCS_11AX)) &&
1097 			(preamble == DOT11_AX) &&
1098 			(ppdu_type == HAL_RX_TYPE_MU_MIMO)));
1099 		DP_STATS_INCC(mon_peer,
1100 			rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[mcs],
1101 			1, ((mcs < (MAX_MCS_11AX)) &&
1102 			(preamble == DOT11_AX) &&
1103 			(ppdu_type == HAL_RX_TYPE_MU_MIMO)));
1104 
1105 		/*
1106 		 * If the TID is invalid, this could be a non-QoS frame, so
1107 		 * do not update any AC counters.
1108 		 */
1109 		ac = TID_TO_WME_AC(ppdu_user->tid);
1110 
1111 		if (qdf_likely(ppdu->tid != HAL_TID_INVALID)) {
1112 			DP_STATS_INC(mon_peer, rx.wme_ac_type[ac], num_msdu);
1113 			DP_STATS_INC(mon_peer, rx.wme_ac_type_bytes[ac],
1114 				     byte_count);
1115 		}
1116 
1117 		DP_STATS_INC(mon_peer, rx.rx_ppdus, 1);
1118 		DP_STATS_INC(mon_peer, rx.rx_mpdus,
1119 			(ppdu_user->mpdu_cnt_fcs_ok + ppdu_user->mpdu_cnt_fcs_err));
1120 
1121 		mon_ops = dp_mon_ops_get(soc);
1122 		if (qdf_likely(mon_ops && mon_ops->mon_rx_stats_update))
1123 			mon_ops->mon_rx_stats_update(mon_peer, ppdu, ppdu_user);
1124 
1125 		if (qdf_unlikely(!peer))
1126 			continue;
1127 
1128 		dp_peer_stats_notify(pdev, peer);
1129 		DP_STATS_UPD(mon_peer, rx.last_snr,
1130 			     (ppdu->rssi + pkt_bw_offset));
1131 
1132 		dp_peer_qos_stats_notify(pdev, ppdu_user);
1133 
1134 		if (dp_is_subtype_data(ppdu->frame_ctrl))
1135 			dp_rx_rate_stats_update(peer, ppdu, i);
1136 
1137 		dp_send_stats_event(pdev, peer, ppdu_user->peer_id);
1138 
1139 		dp_ppdu_desc_user_rx_time_update(pdev, peer, ppdu, ppdu_user);
1140 		dp_rx_mon_update_user_deter_stats(pdev, peer, ppdu, ppdu_user);
1141 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
1142 	}
1143 }
1144 
1145 void
1146 dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
1147 			struct hal_rx_ppdu_info *ppdu_info)
1148 {
1149 	qdf_nbuf_t ppdu_nbuf;
1150 	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
1151 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1152 	uint64_t size = 0;
1153 	uint8_t num_users = 0;
1154 
1155 	/*
1156 	 * Do not allocate if FCS error, AST index invalid, or frame
1157 	 * control invalid.
1158 	 *
1159 	 * In CFR RCC mode, PPDU status TLVs of error packets are also needed.
1160 	 */
1161 	if (qdf_unlikely(ppdu_info->com_info.mpdu_cnt_fcs_ok == 0))
1162 		return;
1163 
1164 	if (qdf_unlikely(mon_pdev->neighbour_peers_added)) {
1165 		if (ppdu_info->nac_info.fc_valid &&
1166 		    ppdu_info->nac_info.to_ds_flag &&
1167 		    ppdu_info->nac_info.mac_addr2_valid) {
1168 			struct dp_neighbour_peer *peer = NULL;
1169 			uint8_t rssi = ppdu_info->rx_status.rssi_comb;
1170 
1171 			qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
1172 			if (mon_pdev->neighbour_peers_added) {
1173 				TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
1174 					      neighbour_peer_list_elem) {
1175 					if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
1176 							 &ppdu_info->nac_info.mac_addr2,
1177 							 QDF_MAC_ADDR_SIZE)) {
1178 						peer->rssi = rssi;
1179 						break;
1180 					}
1181 				}
1182 			}
1183 			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1184 		} else {
1185 			dp_info("Neighbour peers RSSI update failed! fc_valid = %d, to_ds_flag = %d and mac_addr2_valid = %d",
1186 					ppdu_info->nac_info.fc_valid,
1187 					ppdu_info->nac_info.to_ds_flag,
1188 					ppdu_info->nac_info.mac_addr2_valid);
1189 		}
1190 	}
1191 
1192 	/* No need to generate a WDI event when mcopy mode, CFR RCC mode,
1193 	 * and enhanced stats are all disabled.
1194 	 */
1195 	if (qdf_unlikely(!mon_pdev->mcopy_mode &&
1196 			 !mon_pdev->enhanced_stats_en &&
1197 			 !dp_cfr_rcc_mode_status(pdev)))
1198 		return;
1199 
1200 	if (qdf_unlikely(dp_cfr_rcc_mode_status(pdev)))
1201 		dp_update_cfr_dbg_stats(pdev, ppdu_info);
1202 
1203 	if (qdf_unlikely(!ppdu_info->rx_status.frame_control_info_valid ||
1204 			 ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) {
1205 		if (!(mon_pdev->mcopy_mode ||
1206 		      (dp_bb_captured_chan_status(pdev, ppdu_info) ==
1207 		       QDF_STATUS_SUCCESS)))
1208 			return;
1209 	}
1210 	num_users = ppdu_info->com_info.num_users;
1211 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
1212 	size = sizeof(struct cdp_rx_indication_ppdu) +
1213 		num_users * sizeof(struct cdp_rx_stats_ppdu_user);
1214 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
1215 				   size,
1216 				   0, 0, FALSE);
1217 	if (qdf_likely(ppdu_nbuf)) {
1218 		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)qdf_nbuf_data(ppdu_nbuf);
1219 
1220 		qdf_mem_zero(cdp_rx_ppdu, size);
1221 		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
1222 		dp_rx_populate_cdp_indication_ppdu(pdev,
1223 						   ppdu_info, cdp_rx_ppdu);
1224 		if (qdf_unlikely(!qdf_nbuf_put_tail(ppdu_nbuf,
1225 				       sizeof(struct cdp_rx_indication_ppdu)))) {
1226 			qdf_nbuf_free(ppdu_nbuf);
			return;
		}
1227 		if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_SU)
1228 			dp_rx_mon_update_pdev_deter_stats(pdev, cdp_rx_ppdu);
1229 
1230 		dp_rx_stats_update(pdev, cdp_rx_ppdu);
1231 
1232 		if (qdf_unlikely(cdp_rx_ppdu->peer_id != HTT_INVALID_PEER)) {
1233 			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
1234 					     soc, ppdu_nbuf,
1235 					     cdp_rx_ppdu->peer_id,
1236 					     WDI_NO_VAL, pdev->pdev_id);
1237 		} else if (qdf_unlikely(mon_pdev->mcopy_mode || dp_cfr_rcc_mode_status(pdev))) {
1238 			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
1239 					     ppdu_nbuf, HTT_INVALID_PEER,
1240 					     WDI_NO_VAL, pdev->pdev_id);
1241 		} else {
1242 			qdf_nbuf_free(ppdu_nbuf);
1243 		}
1244 	}
1245 }
1246 #endif /* QCA_ENHANCED_STATS_SUPPORT */
1247 
1248 #ifdef QCA_UNDECODED_METADATA_SUPPORT
1249 #define RX_PHYERR_MASK_GET64(_val1, _val2) (((uint64_t)(_val2) << 32) | (_val1))
1250 /**
1251  * dp_rx_populate_cdp_indication_ppdu_undecoded_metadata() - Populate cdp
1252  * rx indication structure
1253  * @pdev: pdev ctx
1254  * @ppdu_info: ppdu info structure from ppdu ring
1255  * @cdp_rx_ppdu: Rx PPDU indication structure
1256  *
1257  * Return: none
1258  */
1259 static void
1260 dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(struct dp_pdev *pdev,
1261 				struct hal_rx_ppdu_info *ppdu_info,
1262 				struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
1263 {
1264 	uint32_t chain;
1265 
1266 	cdp_rx_ppdu->phyrx_abort = ppdu_info->rx_status.phyrx_abort;
1267 	cdp_rx_ppdu->phyrx_abort_reason =
1268 		ppdu_info->rx_status.phyrx_abort_reason;
1269 
1270 	cdp_rx_ppdu->first_data_seq_ctrl =
1271 		ppdu_info->rx_status.first_data_seq_ctrl;
1272 	cdp_rx_ppdu->frame_ctrl =
1273 		ppdu_info->rx_status.frame_control;
1274 	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
1275 	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
1276 	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
1277 	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
1278 	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
1279 	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
1280 				 cdp_rx_ppdu->udp_msdu_count +
1281 				 cdp_rx_ppdu->other_msdu_count);
1282 
1283 	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
1284 		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;
1285 
1286 	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
1287 		cdp_rx_ppdu->is_ampdu = 1;
1288 	else
1289 		cdp_rx_ppdu->is_ampdu = 0;
1290 	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;
1291 
1292 	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
1293 	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
1294 	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
1295 	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
1296 	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
1297 	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
1298 	if (ppdu_info->rx_status.sgi == VHT_SGI_NYSM &&
1299 	    ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC)
1300 		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
1301 	else
1302 		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
1303 
1304 	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
1305 	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
1306 	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
1307 				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
1308 
1309 	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
1310 	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
1311 	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
1312 	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
1313 	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
1314 	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
1315 	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
1316 
1317 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
1318 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
1319 		cdp_rx_ppdu->vht_no_txop_ps =
1320 			ppdu_info->rx_status.vht_no_txop_ps;
1321 		cdp_rx_ppdu->vht_crc = ppdu_info->rx_status.vht_crc;
1322 		cdp_rx_ppdu->group_id = ppdu_info->rx_status.vht_flag_values5;
1323 	} else if (ppdu_info->rx_status.preamble_type ==
1324 			HAL_RX_PKT_TYPE_11AX) {
1325 		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
1326 				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
1327 		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
1328 				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
1329 	} else {
1330 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.ht_stbc;
1331 		cdp_rx_ppdu->ht_length = ppdu_info->rx_status.ht_length;
1332 		cdp_rx_ppdu->ht_smoothing = ppdu_info->rx_status.smoothing;
1333 		cdp_rx_ppdu->ht_not_sounding =
1334 			ppdu_info->rx_status.not_sounding;
1335 		cdp_rx_ppdu->ht_aggregation = ppdu_info->rx_status.aggregation;
1336 		cdp_rx_ppdu->ht_stbc = ppdu_info->rx_status.ht_stbc;
1337 		cdp_rx_ppdu->ht_crc = ppdu_info->rx_status.ht_crc;
1338 	}
1339 
1340 	cdp_rx_ppdu->l_sig_length = ppdu_info->rx_status.l_sig_length;
1341 	cdp_rx_ppdu->l_sig_a_parity = ppdu_info->rx_status.l_sig_a_parity;
1342 	cdp_rx_ppdu->l_sig_a_pkt_type = ppdu_info->rx_status.l_sig_a_pkt_type;
1343 
1344 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AX) {
1345 		cdp_rx_ppdu->he_crc = ppdu_info->rx_status.he_crc;
1346 		cdp_rx_ppdu->bss_color_id =
1347 			ppdu_info->rx_status.he_data3 & 0x3F;
1348 		cdp_rx_ppdu->beam_change = (ppdu_info->rx_status.he_data3 >>
1349 				QDF_MON_STATUS_BEAM_CHANGE_SHIFT) & 0x1;
1350 		cdp_rx_ppdu->dl_ul_flag = (ppdu_info->rx_status.he_data3 >>
1351 		QDF_MON_STATUS_DL_UL_SHIFT) & 0x1;
1352 		cdp_rx_ppdu->ldpc_extra_sym = (ppdu_info->rx_status.he_data3 >>
1353 				QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT) & 0x1;
1354 		cdp_rx_ppdu->special_reuse =
1355 			ppdu_info->rx_status.he_data4 & 0xF;
1356 		cdp_rx_ppdu->ltf_sym = (ppdu_info->rx_status.he_data5 >>
1357 				QDF_MON_STATUS_HE_LTF_SYM_SHIFT) & 0x7;
1358 		cdp_rx_ppdu->txbf = (ppdu_info->rx_status.he_data5 >>
1359 				QDF_MON_STATUS_TXBF_SHIFT) & 0x1;
1360 		cdp_rx_ppdu->pe_disambiguity = (ppdu_info->rx_status.he_data5 >>
1361 				QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT) & 0x1;
1362 		cdp_rx_ppdu->pre_fec_pad = (ppdu_info->rx_status.he_data5 >>
1363 				QDF_MON_STATUS_PRE_FEC_PAD_SHIFT) & 0x3;
1364 		cdp_rx_ppdu->dopplar = (ppdu_info->rx_status.he_data6 >>
1365 				QDF_MON_STATUS_DOPPLER_SHIFT) & 0x1;
1366 		cdp_rx_ppdu->txop_duration = (ppdu_info->rx_status.he_data6 >>
1367 				QDF_MON_STATUS_TXOP_SHIFT) & 0x7F;
1368 		cdp_rx_ppdu->sig_b_mcs = ppdu_info->rx_status.he_flags1 & 0x7;
1369 		cdp_rx_ppdu->sig_b_dcm = (ppdu_info->rx_status.he_flags1 >>
1370 				QDF_MON_STATUS_DCM_FLAG_1_SHIFT) & 0x1;
1371 		cdp_rx_ppdu->sig_b_sym = (ppdu_info->rx_status.he_flags2 >>
1372 				QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT) & 0xF;
1373 		cdp_rx_ppdu->sig_b_comp = (ppdu_info->rx_status.he_flags2 >>
1374 			QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT) & 0x1;
1375 	}
1376 	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
1377 	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
1378 	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;
1379 
1380 	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
1381 	for (chain = 0; chain < MAX_CHAIN; chain++)
1382 		cdp_rx_ppdu->per_chain_rssi[chain] =
1383 			ppdu_info->rx_status.rssi[chain];
1384 
1385 	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;
1386 
1387 	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;
1388 
1389 	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);
1390 }
1391 
1392 /**
1393  * dp_rx_is_valid_undecoded_frame() - Check whether the received undecoded
1394  * frame is valid against the configured error mask
1395  * @err_mask: configured err mask
1396  * @err_code: Received error reason code for phy abort
1397  *
1398  * Return: true / false
1399  */
1400 static inline bool
1401 dp_rx_is_valid_undecoded_frame(uint64_t err_mask, uint8_t err_code)
1402 {
1403 	if (err_code < CDP_PHYRX_ERR_MAX &&
1404 	    (err_mask & (1ULL << err_code)))
1405 		return true;
1406 
1407 	return false;
1408 }
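
/*
 * Editor's sketch (compiled out, hypothetical values): building the
 * 64-bit PHY error mask from its two 32-bit halves and testing abort
 * reasons against it, mirroring how dp_rx_handle_ppdu_undecoded_metadata()
 * below combines mon_pdev->phyrx_error_mask and phyrx_error_mask_cont.
 * Assumes CDP_PHYRX_ERR_MAX > 32.
 */
#if 0
static void dp_rx_phyerr_mask_demo(void)
{
	uint64_t mask64 = RX_PHYERR_MASK_GET64(0x00000010, 0x00000001);

	/* mask64 == 0x0000000100000010ULL: error codes 4 and 32 enabled */
	qdf_assert(dp_rx_is_valid_undecoded_frame(mask64, 4));
	qdf_assert(dp_rx_is_valid_undecoded_frame(mask64, 32));
	qdf_assert(!dp_rx_is_valid_undecoded_frame(mask64, 5));
}
#endif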
1409 
1410 void
1411 dp_rx_handle_ppdu_undecoded_metadata(struct dp_soc *soc, struct dp_pdev *pdev,
1412 				     struct hal_rx_ppdu_info *ppdu_info)
1413 {
1414 	qdf_nbuf_t ppdu_nbuf;
1415 	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
1416 	uint8_t abort_reason = 0;
1417 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1418 	uint64_t mask64;
1419 
1420 	/* Return if RX_ABORT not set */
1421 	if (ppdu_info->rx_status.phyrx_abort == 0)
1422 		return;
1423 
1424 	mask64 = RX_PHYERR_MASK_GET64(mon_pdev->phyrx_error_mask,
1425 				      mon_pdev->phyrx_error_mask_cont);
1426 	abort_reason = ppdu_info->rx_status.phyrx_abort_reason;
1427 
1428 	if (!dp_rx_is_valid_undecoded_frame(mask64, abort_reason))
1429 		return;
1430 
1431 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
1432 				   sizeof(struct cdp_rx_indication_ppdu),
1433 				   0, 0, FALSE);
1434 	if (ppdu_nbuf) {
1435 		cdp_rx_ppdu = ((struct cdp_rx_indication_ppdu *)
1436 				qdf_nbuf_data(ppdu_nbuf));
1437 
1438 		qdf_mem_zero(cdp_rx_ppdu,
1439 			     sizeof(struct cdp_rx_indication_ppdu));
1440 		dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(pdev,
1441 				ppdu_info, cdp_rx_ppdu);
1442 
1443 		if (!qdf_nbuf_put_tail(ppdu_nbuf,
1444 				       sizeof(struct cdp_rx_indication_ppdu))) {
			qdf_nbuf_free(ppdu_nbuf);
1445 			return;
1446 		}
1447 
1448 		mon_pdev->rx_mon_stats.rx_undecoded_count++;
1449 		mon_pdev->rx_mon_stats.rx_undecoded_error[abort_reason] += 1;
1450 
1451 		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC_UNDECODED_METADATA,
1452 				     soc, ppdu_nbuf, HTT_INVALID_PEER,
1453 				     WDI_NO_VAL, pdev->pdev_id);
1454 	}
1455 }
1456 #endif /* QCA_UNDECODED_METADATA_SUPPORT */
1457 
1458 #ifdef QCA_MCOPY_SUPPORT
1459 QDF_STATUS
1460 dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
1461 			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf,
1462 			uint8_t fcs_ok_mpdu_cnt, bool deliver_frame)
1463 {
1464 	uint16_t size = 0;
1465 	struct ieee80211_frame *wh;
1466 	uint32_t *nbuf_data;
1467 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1468 
1469 	if (!ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload)
1470 		return QDF_STATUS_SUCCESS;
1471 
1472 	/* For M_COPY mode, only one MSDU per PPDU is sent to the upper layer */
1473 	if (mon_pdev->mcopy_mode == M_COPY) {
1474 		if (mon_pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
1475 			return QDF_STATUS_SUCCESS;
1476 	}
1477 
1478 	wh = (struct ieee80211_frame *)(ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload + 4);
1479 
1480 	size = (ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload -
1481 				qdf_nbuf_data(nbuf));
1482 
1483 	if (qdf_nbuf_pull_head(nbuf, size) == NULL)
1484 		return QDF_STATUS_SUCCESS;
1485 
1486 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1487 	     IEEE80211_FC0_TYPE_MGT) ||
1488 	     ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1489 	     IEEE80211_FC0_TYPE_CTL)) {
1490 		return QDF_STATUS_SUCCESS;
1491 	}
1492 
1493 	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
1494 	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;
1495 	/* only retain RX MSDU payload in the skb */
1496 	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].payload_len);
1497 	if (deliver_frame) {
1498 		mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
1499 		dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
1500 				     nbuf, HTT_INVALID_PEER,
1501 				     WDI_NO_VAL, pdev->pdev_id);
1502 	}
1503 	return QDF_STATUS_E_ALREADY;
1504 }
1505 
1506 void
1507 dp_rx_mcopy_handle_last_mpdu(struct dp_soc *soc, struct dp_pdev *pdev,
1508 			     struct hal_rx_ppdu_info *ppdu_info,
1509 			     qdf_nbuf_t status_nbuf)
1510 {
1511 	QDF_STATUS mcopy_status;
1512 	qdf_nbuf_t nbuf_clone = NULL;
1513 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1514 
1515 	/* If the MPDU end TLV and RX header are received in different buffers,
1516 	 * process the RX header based on the FCS status.
1517 	 */
1518 	if (mon_pdev->mcopy_status_nbuf) {
1519 		/* For M_COPY mode, only one MSDU per PPDU is sent to the upper layer */
1520 		if (mon_pdev->mcopy_mode == M_COPY) {
1521 			if (mon_pdev->m_copy_id.rx_ppdu_id ==
1522 			    ppdu_info->com_info.ppdu_id)
1523 				goto end1;
1524 		}
1525 
1526 		if (ppdu_info->is_fcs_passed) {
1527 			nbuf_clone = qdf_nbuf_clone(mon_pdev->mcopy_status_nbuf);
1528 			if (!nbuf_clone) {
1529 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1530 					  QDF_TRACE_LEVEL_ERROR,
1531 					  "Failed to clone nbuf");
1532 				goto end1;
1533 			}
1534 
1535 			mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
1536 			dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
1537 					     nbuf_clone,
1538 					     HTT_INVALID_PEER,
1539 					     WDI_NO_VAL, pdev->pdev_id);
1540 			ppdu_info->is_fcs_passed = false;
1541 		}
1542 end1:
1543 		qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
1544 		mon_pdev->mcopy_status_nbuf = NULL;
1545 	}
1546 
1547 	/* If the MPDU end TLV and RX header are received in different buffers,
1548 	 * preserve the RX header, as the FCS status will arrive in the MPDU
1549 	 * end TLV of the next buffer. So, cache the buffer to be processed
1550 	 * in the next iteration.
1551 	 */
1552 	if ((ppdu_info->fcs_ok_cnt + ppdu_info->fcs_err_cnt) !=
1553 	    ppdu_info->com_info.mpdu_cnt) {
1554 		mon_pdev->mcopy_status_nbuf = qdf_nbuf_clone(status_nbuf);
1555 		if (mon_pdev->mcopy_status_nbuf) {
1556 			mcopy_status = dp_rx_handle_mcopy_mode(
1557 							soc, pdev,
1558 							ppdu_info,
1559 							mon_pdev->mcopy_status_nbuf,
1560 							ppdu_info->fcs_ok_cnt,
1561 							false);
1562 			if (mcopy_status == QDF_STATUS_SUCCESS) {
1563 				qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
1564 				mon_pdev->mcopy_status_nbuf = NULL;
1565 			}
1566 		}
1567 	}
1568 }
1569 
1570 void
1571 dp_rx_mcopy_process_ppdu_info(struct dp_pdev *pdev,
1572 			      struct hal_rx_ppdu_info *ppdu_info,
1573 			      uint32_t tlv_status)
1574 {
1575 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1576 
1577 	if (qdf_unlikely(!mon_pdev->mcopy_mode))
1578 		return;
1579 
1580 	/* The FCS status is received in the MPDU end TLV. If the RX header
1581 	 * and its MPDU end TLV are received in different status buffers,
1582 	 * ppdu_info->is_fcs_passed is used to process that header.
1583 	 * If the end TLV arrives in the next status buffer, com_info.mpdu_cnt
1584 	 * will be 0 when the MPDU end TLV is received, and we update the
1585 	 * is_fcs_passed flag based on ppdu_info->fcs_err.
1586 	 */
1587 	if (tlv_status != HAL_TLV_STATUS_MPDU_END)
1588 		return;
1589 
1590 	if (!ppdu_info->fcs_err) {
1591 		if (ppdu_info->fcs_ok_cnt >
1592 		    HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER) {
1593 			dp_err("No. of MPDUs(%d) per status buff exceeded",
1594 					ppdu_info->fcs_ok_cnt);
1595 			return;
1596 		}
1597 		if (ppdu_info->com_info.mpdu_cnt)
1598 			ppdu_info->fcs_ok_cnt++;
1599 		else
1600 			ppdu_info->is_fcs_passed = true;
1601 	} else {
1602 		if (ppdu_info->com_info.mpdu_cnt)
1603 			ppdu_info->fcs_err_cnt++;
1604 		else
1605 			ppdu_info->is_fcs_passed = false;
1606 	}
1607 }
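
/*
 * Editor's trace (hypothetical): status buffer N carries an RX header
 * but no MPDU end TLV (com_info.mpdu_cnt == 0); the end TLV arrives in
 * buffer N+1 with fcs_err == 0, so is_fcs_passed is set above and
 * dp_rx_mcopy_handle_last_mpdu() later delivers the cached header.
 */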
1608 
1609 void
1610 dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
1611 			 struct hal_rx_ppdu_info *ppdu_info,
1612 			 uint32_t tlv_status,
1613 			 qdf_nbuf_t status_nbuf)
1614 {
1615 	QDF_STATUS mcopy_status;
1616 	qdf_nbuf_t nbuf_clone = NULL;
1617 	uint8_t fcs_ok_mpdu_cnt = 0;
1618 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1619 
1620 	dp_rx_mcopy_handle_last_mpdu(soc, pdev, ppdu_info, status_nbuf);
1621 
1622 	if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt))
1623 		goto end;
1624 
1625 	if (qdf_unlikely(!ppdu_info->fcs_ok_cnt))
1626 		goto end;
1627 
1628 	/* For M_COPY mode only one msdu per ppdu is sent to the upper layer */
1629 	if (mon_pdev->mcopy_mode == M_COPY)
1630 		ppdu_info->fcs_ok_cnt = 1;
1631 
1632 	while (fcs_ok_mpdu_cnt < ppdu_info->fcs_ok_cnt) {
1633 		nbuf_clone = qdf_nbuf_clone(status_nbuf);
1634 		if (!nbuf_clone) {
1635 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1636 				  "Failed to clone nbuf");
1637 			goto end;
1638 		}
1639 
1640 		mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
1641 						       ppdu_info,
1642 						       nbuf_clone,
1643 						       fcs_ok_mpdu_cnt,
1644 						       true);
1645 
1646 		if (mcopy_status == QDF_STATUS_SUCCESS)
1647 			qdf_nbuf_free(nbuf_clone);
1648 
1649 		fcs_ok_mpdu_cnt++;
1650 	}
1651 end:
1652 	qdf_nbuf_free(status_nbuf);
1653 	ppdu_info->fcs_ok_cnt = 0;
1654 	ppdu_info->fcs_err_cnt = 0;
1655 	ppdu_info->com_info.mpdu_cnt = 0;
1656 	qdf_mem_zero(&ppdu_info->ppdu_msdu_info,
1657 		     HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER
1658 		     * sizeof(struct hal_rx_msdu_payload_info));
1659 }
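
/*
 * Worked example (editorial): for a PPDU whose status buffer carried three
 * FCS-passing MPDU headers, full mcopy mode clones status_nbuf three times
 * and calls dp_rx_handle_mcopy_mode() with fcs_ok_mpdu_cnt = 0, 1 and 2;
 * in M_COPY mode fcs_ok_cnt is clamped to 1 above, so only the first
 * header is delivered for that ppdu_id.
 */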
1660 #endif /* QCA_MCOPY_SUPPORT */
1661 
1662 int
1663 dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
1664 			      struct hal_rx_ppdu_info *ppdu_info,
1665 			      qdf_nbuf_t nbuf)
1666 {
1667 	uint8_t size = 0;
1668 	struct dp_mon_vdev *mon_vdev;
1669 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1670 
1671 	if (!mon_pdev->mvdev) {
1672 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1673 			  "[%s]:[%d] Monitor vdev is NULL !!",
1674 			  __func__, __LINE__);
1675 		return 1;
1676 	}
1677 
1678 	mon_vdev = mon_pdev->mvdev->monitor_vdev;
1679 
1680 	if (!ppdu_info->msdu_info.first_msdu_payload) {
1681 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1682 			  "[%s]:[%d] First msdu payload not present",
1683 			  __func__, __LINE__);
1684 		return 1;
1685 	}
1686 
1687 	/* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
1688 	size = (ppdu_info->msdu_info.first_msdu_payload -
1689 		qdf_nbuf_data(nbuf)) + 4;
1690 	ppdu_info->msdu_info.first_msdu_payload = NULL;
1691 
1692 	if (!qdf_nbuf_pull_head(nbuf, size)) {
1693 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1694 			  "[%s]:[%d] No header present",
1695 			  __func__, __LINE__);
1696 		return 1;
1697 	}
1698 
1699 	/* Only retain RX MSDU payload in the skb */
1700 	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
1701 			   ppdu_info->msdu_info.payload_len);
1702 	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, nbuf,
1703 				      qdf_nbuf_headroom(nbuf))) {
1704 		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
1705 		return 1;
1706 	}
1707 
1708 	mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
1709 			      nbuf, NULL);
1710 	mon_pdev->ppdu_info.rx_status.monitor_direct_used = 0;
1711 	return 0;
1712 }
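
/*
 * Worked example (editorial): if first_msdu_payload points 52 bytes into
 * the status buffer and payload_len is 64, then size = 52 + 4 = 56 bytes
 * are pulled so the nbuf data starts at the 802.11 frame (just past
 * phy_ppdu_id), the tail is trimmed back to exactly 64 payload bytes, and
 * the radiotap header is then prepended into the remaining headroom.
 */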
1713 
1714 qdf_nbuf_t
1715 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
1716 {
1717 	uint8_t *buf;
1718 	int32_t nbuf_retry_count;
1719 	QDF_STATUS ret;
1720 	qdf_nbuf_t nbuf = NULL;
1721 
1722 	for (nbuf_retry_count = 0; nbuf_retry_count <
1723 		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
1724 			nbuf_retry_count++) {
1725 		/* Allocate a new skb using alloc_skb */
1726 		nbuf = qdf_nbuf_alloc_no_recycler(RX_MON_STATUS_BUF_SIZE,
1727 						  RX_MON_STATUS_BUF_RESERVATION,
1728 						  RX_DATA_BUFFER_ALIGNMENT);
1729 
1730 		if (!nbuf) {
1731 			DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
1732 			continue;
1733 		}
1734 
1735 		buf = qdf_nbuf_data(nbuf);
1736 
1737 		memset(buf, 0, RX_MON_STATUS_BUF_SIZE);
1738 
1739 		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
1740 						 QDF_DMA_FROM_DEVICE,
1741 						 RX_MON_STATUS_BUF_SIZE);
1742 
1743 		/* nbuf map failed */
1744 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
1745 			qdf_nbuf_free(nbuf);
1746 			DP_STATS_INC(pdev, replenish.map_err, 1);
1747 			continue;
1748 		}
1749 		/* qdf_nbuf alloc and map succeeded */
1750 		break;
1751 	}
1752 
1753 	/* nbuf alloc or map failed even after all retries */
1754 	if (qdf_unlikely(nbuf_retry_count >=
1755 			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
1756 		return NULL;
1757 
1758 	return nbuf;
1759 }
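
/*
 * Usage sketch (editorial; the ring-post step is elided): callers treat a
 * NULL return as a transient allocation/map failure and skip the entry:
 *
 *	nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *	if (!nbuf)
 *		break;	// retry on the next replenish cycle
 *	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *	// post paddr to the status ring descriptor
 *
 * qdf_nbuf_get_frag_paddr() is how the mapped DMA address is typically
 * read back in this codebase; treat the snippet as a sketch rather than
 * the actual replenish routine.
 */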
1760 
1761 #ifndef DISABLE_MON_CONFIG
1762 uint32_t
1763 dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
1764 	       uint32_t mac_id, uint32_t quota)
1765 {
1766 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1767 
1768 	if (mon_soc && mon_soc->mon_rx_process)
1769 		return mon_soc->mon_rx_process(soc, int_ctx,
1770 					       mac_id, quota);
1771 	return 0;
1772 }
1773 #else
1774 uint32_t
1775 dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
1776 	       uint32_t mac_id, uint32_t quota)
1777 {
1778 	return 0;
1779 }
1780 #endif
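
/*
 * Hedged sketch (editorial): mon_soc->mon_rx_process is an architecture
 * specific hook installed during monitor attach, along the lines of:
 *
 *	mon_soc->mon_rx_process = dp_rx_mon_status_process;
 *
 * dp_rx_mon_status_process is the legacy monitor status-ring handler;
 * other targets install their own callback, so this binding is
 * illustrative only.
 */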
1781 
1782 /**
1783  * dp_send_mgmt_packet_to_stack(): send indication to upper layers
1784  *
1785  * @soc: soc handle
1786  * @nbuf: Mgmt packet
1787  * @pdev: pdev handle
1788  *
1789  * Return: QDF_STATUS_SUCCESS on success
1790  *         QDF_STATUS_E_INVAL on error
1791  */
1792 #ifdef QCA_MCOPY_SUPPORT
1793 static inline QDF_STATUS
1794 dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
1795 			     qdf_nbuf_t nbuf,
1796 			     struct dp_pdev *pdev)
1797 {
1798 	uint32_t *nbuf_data;
1799 	struct ieee80211_frame *wh;
1800 	qdf_frag_t addr;
1801 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1802 
1803 	if (!nbuf)
1804 		return QDF_STATUS_E_INVAL;
1805 
1806 	/* Get addr pointing to the 802.11 header */
1807 	addr = dp_rx_mon_get_nbuf_80211_hdr(nbuf);
1808 	if (qdf_unlikely(!addr)) {
1809 		qdf_nbuf_free(nbuf);
1810 		return QDF_STATUS_E_INVAL;
1811 	}
1812 
1813 	/* Free and bail out if this is neither a mgmt nor a ctrl packet */
1814 	wh = (struct ieee80211_frame *)addr;
1815 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
1816 	     IEEE80211_FC0_TYPE_MGT) &&
1817 	     ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
1818 	     IEEE80211_FC0_TYPE_CTL)) {
1819 		qdf_nbuf_free(nbuf);
1820 		return QDF_STATUS_E_INVAL;
1821 	}
1822 	nbuf_data = (uint32_t *)qdf_nbuf_push_head(nbuf, 4);
1823 	if (!nbuf_data) {
1824 		QDF_TRACE(QDF_MODULE_ID_DP,
1825 			  QDF_TRACE_LEVEL_ERROR,
1826 			  FL("No headroom"));
1827 		qdf_nbuf_free(nbuf);
1828 		return QDF_STATUS_E_INVAL;
1829 	}
1830 	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;
1831 
1832 	dp_wdi_event_handler(WDI_EVENT_RX_MGMT_CTRL, soc, nbuf,
1833 			     HTT_INVALID_PEER,
1834 			     WDI_NO_VAL, pdev->pdev_id);
1835 	return QDF_STATUS_SUCCESS;
1836 }
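
/*
 * Layout sketch (editorial): after the 4-byte push above, the frame handed
 * to WDI_EVENT_RX_MGMT_CTRL subscribers is laid out as
 *
 *	| ppdu_id (4 bytes, host order) | 802.11 mgmt/ctrl frame ... |
 *
 * so consumers peel the leading uint32_t to correlate the frame with its
 * PPDU status before parsing the 802.11 header.
 */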
1837 #else
1838 static inline QDF_STATUS
1839 dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
1840 			     qdf_nbuf_t nbuf,
1841 			     struct dp_pdev *pdev)
1842 {
1843 	return QDF_STATUS_SUCCESS;
1844 }
1845 #endif /* QCA_MCOPY_SUPPORT */
1846 
1847 QDF_STATUS dp_rx_mon_process_dest_pktlog(struct dp_soc *soc,
1848 					 uint32_t mac_id,
1849 					 qdf_nbuf_t mpdu)
1850 {
1851 	uint32_t event, msdu_timestamp = 0;
1852 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1853 	void *data;
1854 	struct ieee80211_frame *wh;
1855 	uint8_t type, subtype;
1856 	struct dp_mon_pdev *mon_pdev;
1857 
1858 	if (!pdev)
1859 		return QDF_STATUS_E_INVAL;
1860 
1861 	mon_pdev = pdev->monitor_pdev;
1862 
1863 	if (mon_pdev->rx_pktlog_cbf) {
1864 		if (qdf_nbuf_get_nr_frags(mpdu))
1865 			data = qdf_nbuf_get_frag_addr(mpdu, 0);
1866 		else
1867 			data = qdf_nbuf_data(mpdu);
1868 
1869 		/* CBF logging is required irrespective of full mode or
1870 		 * lite mode.
1871 		 * Need to look for an mpdu with:
1872 		 * TYPE = MGT, SUBTYPE = ACTION NO ACK in the header
1873 		 */
1874 		event = WDI_EVENT_RX_CBF;
1875 
1876 		wh = (struct ieee80211_frame *)data;
1877 		type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1878 		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1879 		if (type == IEEE80211_FC0_TYPE_MGT &&
1880 		    subtype == IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK) {
1881 			msdu_timestamp = mon_pdev->ppdu_info.rx_status.tsft;
1882 			dp_rx_populate_cbf_hdr(soc,
1883 					       mac_id, event,
1884 					       mpdu,
1885 					       msdu_timestamp);
1886 		}
1887 	}
1888 	return QDF_STATUS_SUCCESS;
1889 }
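
/*
 * Worked example (editorial): an Action No Ack frame carries 0xe0 in the
 * first frame-control byte: the type bits (mask 0x0c) decode to 0x00
 * (IEEE80211_FC0_TYPE_MGT) and the subtype bits (mask 0xf0) decode to
 * 0xe0 (IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK), which is exactly the match
 * that triggers dp_rx_populate_cbf_hdr() above.
 */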
1890 
1891 QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
1892 			     qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
1893 {
1894 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1895 	struct cdp_mon_status *rs;
1896 	qdf_nbuf_t mon_skb, skb_next;
1897 	qdf_nbuf_t mon_mpdu = NULL;
1898 	struct dp_mon_vdev *mon_vdev;
1899 	struct dp_mon_pdev *mon_pdev;
1900 
1901 	if (!pdev)
1902 		goto mon_deliver_fail;
1903 
1904 	mon_pdev = pdev->monitor_pdev;
1905 	rs = &mon_pdev->rx_mon_recv_status;
1906 
1907 	if (!mon_pdev->mvdev && !mon_pdev->mcopy_mode &&
1908 	    !mon_pdev->rx_pktlog_cbf)
1909 		goto mon_deliver_fail;
1910 
1911 	/* restitch mon MPDU for delivery via monitor interface */
1912 	mon_mpdu = dp_rx_mon_restitch_mpdu(soc, mac_id, head_msdu,
1913 					   tail_msdu, rs);
1914 
1915 	/* If MPDU restitch fails, free the buffers */
1916 	if (!mon_mpdu) {
1917 		dp_info("MPDU restitch failed, free buffers");
1918 		goto mon_deliver_fail;
1919 	}
1920 
1921 	dp_rx_mon_process_dest_pktlog(soc, mac_id, mon_mpdu);
1922 
1923 	/* A monitor vap cannot be present when mcopy is enabled,
1924 	 * hence the same skb can be consumed
1925 	 */
1926 	if (mon_pdev->mcopy_mode)
1927 		return dp_send_mgmt_packet_to_stack(soc, mon_mpdu, pdev);
1928 
1929 	if (mon_pdev->mvdev &&
1930 	    mon_pdev->mvdev->osif_vdev &&
1931 	    mon_pdev->mvdev->monitor_vdev &&
1932 	    mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
1933 		mon_vdev = mon_pdev->mvdev->monitor_vdev;
1934 
1935 		mon_pdev->ppdu_info.rx_status.ppdu_id =
1936 			mon_pdev->ppdu_info.com_info.ppdu_id;
1937 		mon_pdev->ppdu_info.rx_status.device_id = soc->device_id;
1938 		mon_pdev->ppdu_info.rx_status.chan_noise_floor =
1939 			pdev->chan_noise_floor;
1940 		dp_handle_tx_capture(soc, pdev, mon_mpdu);
1941 
1942 		if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status,
1943 					      mon_mpdu,
1944 					      qdf_nbuf_headroom(mon_mpdu))) {
1945 			DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
1946 			goto mon_deliver_fail;
1947 		}
1948 
1949 		dp_rx_mon_update_pf_tag_to_buf_headroom(soc, mon_mpdu);
1950 		mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
1951 				      mon_mpdu,
1952 				      &mon_pdev->ppdu_info.rx_status);
1953 	} else {
1954 		dp_rx_mon_dest_debug("%pK: mon_mpdu=%pK monitor_vdev %pK osif_vdev %pK"
1955 				     , soc, mon_mpdu, mon_pdev->mvdev,
1956 				     (mon_pdev->mvdev ? mon_pdev->mvdev->osif_vdev
1957 				     : NULL));
1958 		goto mon_deliver_fail;
1959 	}
1960 
1961 	return QDF_STATUS_SUCCESS;
1962 
1963 mon_deliver_fail:
1964 	mon_skb = head_msdu;
1965 	while (mon_skb) {
1966 		skb_next = qdf_nbuf_next(mon_skb);
1967 
1968 		dp_rx_mon_dest_debug("%pK: [%s][%d] mon_skb=%pK len %u",
1969 				     soc, __func__, __LINE__, mon_skb, mon_skb->len);
1970 
1971 		qdf_nbuf_free(mon_skb);
1972 		mon_skb = skb_next;
1973 	}
1974 	return QDF_STATUS_E_INVAL;
1975 }
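
/*
 * Ownership note (editorial): on the mon_deliver_fail path above, the
 * head_msdu chain is walked and every buffer in it is freed, so callers
 * must not release the chain again after an error return:
 *
 *	if (dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu) !=
 *	    QDF_STATUS_SUCCESS)
 *		; // buffers already consumed or released internally
 */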
1976 
1977 QDF_STATUS dp_rx_mon_deliver_non_std(struct dp_soc *soc,
1978 				     uint32_t mac_id)
1979 {
1980 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1981 	ol_txrx_rx_mon_fp osif_rx_mon;
1982 	qdf_nbuf_t dummy_msdu;
1983 	struct dp_mon_pdev *mon_pdev;
1984 	struct dp_mon_vdev *mon_vdev;
1985 
1986 	/* Sanity checking */
1987 	if (!pdev || !pdev->monitor_pdev)
1988 		goto mon_deliver_non_std_fail;
1989 
1990 	mon_pdev = pdev->monitor_pdev;
1991 
1992 	if (!mon_pdev->mvdev ||
1993 	    !mon_pdev->mvdev->monitor_vdev ||
1994 	    !mon_pdev->mvdev->monitor_vdev->osif_rx_mon)
1995 		goto mon_deliver_non_std_fail;
1996 
1997 	mon_vdev = mon_pdev->mvdev->monitor_vdev;
1998 	/* Generate a dummy sk_buff */
1999 	osif_rx_mon = mon_vdev->osif_rx_mon;
2000 	dummy_msdu = qdf_nbuf_alloc(soc->osdev, MAX_MONITOR_HEADER,
2001 				    MAX_MONITOR_HEADER, 4, FALSE);
2002 	if (!dummy_msdu)
2003 		goto allocate_dummy_msdu_fail;
2004 
2005 	qdf_nbuf_set_pktlen(dummy_msdu, 0);
2006 	qdf_nbuf_set_next(dummy_msdu, NULL);
2007 
2008 	mon_pdev->ppdu_info.rx_status.ppdu_id =
2009 		mon_pdev->ppdu_info.com_info.ppdu_id;
2010 
2011 	/* Apply the radiotap header to this dummy skb */
2012 	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, dummy_msdu,
2013 				      qdf_nbuf_headroom(dummy_msdu))) {
2014 		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
2015 		qdf_nbuf_free(dummy_msdu);
2016 		goto mon_deliver_non_std_fail;
2017 	}
2018 
2019 	/* deliver to the user layer application */
2020 	osif_rx_mon(mon_pdev->mvdev->osif_vdev,
2021 		    dummy_msdu, NULL);
2022 
2023 	/* Clear rx_status */
2024 	qdf_mem_zero(&mon_pdev->ppdu_info.rx_status,
2025 		     sizeof(mon_pdev->ppdu_info.rx_status));
2026 	mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
2027 
2028 	return QDF_STATUS_SUCCESS;
2029 
2030 allocate_dummy_msdu_fail:
2031 	dp_rx_mon_dest_debug("%pK: mon_skb=%pK", soc, dummy_msdu);
2033 
2034 mon_deliver_non_std_fail:
2035 	return QDF_STATUS_E_INVAL;
2036 }
2037 
2038 /**
2039  * dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
2040  *                                     filtering enabled
2041  * @soc: core txrx main context
2042  * @ppdu_info: Structure for rx ppdu info
2043  * @status_nbuf: Qdf nbuf abstraction for linux skb
2044  * @pdev_id: mac_id/pdev_id correspondingly for MCL and WIN
2045  *
2046  * Return: none
2047  */
2048 void
2049 dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
2050 				struct hal_rx_ppdu_info *ppdu_info,
2051 				qdf_nbuf_t status_nbuf, uint32_t pdev_id)
2052 {
2053 	struct dp_peer *peer;
2054 	struct mon_rx_user_status *rx_user_status;
2055 	uint32_t num_users = ppdu_info->com_info.num_users;
2056 	uint16_t sw_peer_id;
2057 
2058 	/* Sanity check for num_users */
2059 	if (!num_users)
2060 		return;
2061 
2062 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
2063 	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
2064 
2065 	sw_peer_id = rx_user_status->sw_peer_id;
2066 
2067 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
2068 				     DP_MOD_ID_RX_PPDU_STATS);
2069 
2070 	if (!peer)
2071 		return;
2072 
2073 	if ((peer->peer_id != HTT_INVALID_PEER) && (peer->monitor_peer) &&
2074 	    (peer->monitor_peer->peer_based_pktlog_filter)) {
2075 		dp_wdi_event_handler(
2076 				     WDI_EVENT_RX_DESC, soc,
2077 				     status_nbuf,
2078 				     peer->peer_id,
2079 				     WDI_NO_VAL, pdev_id);
2080 	}
2081 	dp_peer_unref_delete(peer,
2082 			     DP_MOD_ID_RX_PPDU_STATS);
2083 }
2084 
2085 uint32_t
2086 dp_mon_rx_add_tlv(uint8_t id, uint16_t len, void *value, qdf_nbuf_t mpdu_nbuf)
2087 {
2088 	uint8_t *dest = NULL;
2089 	uint32_t num_bytes_pushed = 0;
2090 
2091 	/* Add tlv id field */
2092 	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint8_t));
2093 	if (qdf_likely(dest)) {
2094 		*((uint8_t *)dest) = id;
2095 		num_bytes_pushed += sizeof(uint8_t);
2096 	}
2097 
2098 	/* Add tlv len field */
2099 	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint16_t));
2100 	if (qdf_likely(dest)) {
2101 		*((uint16_t *)dest) = len;
2102 		num_bytes_pushed += sizeof(uint16_t);
2103 	}
2104 
2105 	/* Add tlv value field */
2106 	dest = qdf_nbuf_push_head(mpdu_nbuf, len);
2107 	if (qdf_likely(dest)) {
2108 		qdf_mem_copy(dest, value, len);
2109 		num_bytes_pushed += len;
2110 	}
2111 
2112 	return num_bytes_pushed;
2113 }
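
/*
 * Worked example (editorial): each qdf_nbuf_push_head() prepends, so the
 * field pushed last lands at the head. Pushing id, then len, then value
 * leaves the buffer laid out as
 *
 *	| value (len bytes) | len (2 bytes) | id (1 byte) | original data |
 *
 * For a hypothetical id 0x10 with a 2-byte value the return is 5 when all
 * three pushes find headroom (1 + 2 + 2); if headroom runs out midway,
 * only the bytes actually prepended are counted.
 */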
2114 
2115 void
2116 dp_mon_rx_stats_update_rssi_dbm_params(struct dp_mon_pdev *mon_pdev,
2117 				       struct hal_rx_ppdu_info *ppdu_info)
2118 {
2119 	ppdu_info->rx_status.rssi_offset = mon_pdev->rssi_offsets.rssi_offset;
2120 	ppdu_info->rx_status.rssi_dbm_conv_support =
2121 				mon_pdev->rssi_dbm_conv_support;
2122 	ppdu_info->rx_status.chan_noise_floor =
2123 		mon_pdev->rssi_offsets.rssi_offset;
2124 }
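
/*
 * Hedged conversion sketch (editorial): with rssi_dbm_conv_support set,
 * rx_status consumers typically derive an absolute level roughly as
 *
 *	rssi_dbm = rssi_comb + rssi_offset;
 *
 * where rssi_offset folds in the target's noise-floor calibration. The
 * authoritative formula lives with the radiotap/rx_status consumers, so
 * treat this as an approximation, not the exact conversion.
 */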
2125 
2126 #ifdef WLAN_SUPPORT_CTRL_FRAME_STATS
2127 void dp_rx_mon_update_user_ctrl_frame_stats(struct dp_pdev *pdev,
2128 					    struct hal_rx_ppdu_info *ppdu_info)
2129 {
2130 	struct dp_peer *peer;
2131 	struct dp_mon_peer *mon_peer;
2132 	struct dp_soc *soc = pdev->soc;
2133 	uint16_t fc, sw_peer_id;
2134 	uint8_t i;
2135 
2136 	if (qdf_unlikely(!ppdu_info))
2137 		return;
2138 
2139 	fc = ppdu_info->nac_info.frame_control;
2140 	if (qdf_likely((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) !=
2141 	    QDF_IEEE80211_FC0_TYPE_CTL))
2142 		return;
2143 
2144 	for (i = 0; i < ppdu_info->com_info.num_users; i++) {
2145 		sw_peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
2146 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
2147 					     DP_MOD_ID_RX_PPDU_STATS);
2148 		if (qdf_unlikely(!peer))
2149 			continue;
2150 		mon_peer = peer->monitor_peer;
2151 		if (qdf_unlikely(!mon_peer)) {
2152 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
2153 			continue;
2154 		}
2155 		DP_STATS_INCC(mon_peer, rx.ndpa_cnt, 1,
2156 			      ppdu_info->ctrl_frm_info[i].ndpa);
2157 		DP_STATS_INCC(mon_peer, rx.bar_cnt, 1,
2158 			      ppdu_info->ctrl_frm_info[i].bar);
2159 
2160 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
2161 	}
2162 }
2163 #endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */
2164