xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/dp_rx_mon.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_peer.h"
24 #include "hal_rx.h"
25 #include "hal_api.h"
26 #include "qdf_trace.h"
27 #include "qdf_nbuf.h"
28 #include "hal_api_mon.h"
29 #include "dp_internal.h"
30 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
31 #include "dp_htt.h"
32 #include "dp_mon.h"
33 #include "dp_rx_mon.h"
34 
35 #include "htt.h"
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 
40 #ifndef IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK
41 #define IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK 0xe0
42 #endif
43 
44 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
45 void
46 dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev,
47 			     struct hal_rx_ppdu_info *ppdu_info,
48 			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
49 {
50 	struct dp_peer *peer;
51 	struct dp_soc *soc = pdev->soc;
52 	struct mon_rx_user_status *rx_user_status;
53 	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
54 	uint32_t num_users;
55 	int user_id;
56 	uint16_t sw_peer_id;
57 
58 	num_users = ppdu_info->com_info.num_users;
59 	for (user_id = 0; user_id < num_users; user_id++) {
60 		if (user_id > OFDMA_NUM_USERS) {
61 			return;
62 		}
63 
64 		rx_user_status =  &ppdu_info->rx_user_status[user_id];
65 		rx_stats_peruser = &cdp_rx_ppdu->user[user_id];
66 		sw_peer_id = rx_user_status->sw_peer_id;
67 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
68 					     DP_MOD_ID_RX_PPDU_STATS);
69 		if (!peer) {
70 			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
71 			continue;
72 		}
73 
74 		qdf_mem_copy(rx_stats_peruser->mac_addr,
75 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
76 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
77 	}
78 }
79 
80 void
81 dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev,
82 				 struct hal_rx_ppdu_info *ppdu_info,
83 				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
84 {
85 	struct dp_peer *peer;
86 	struct dp_soc *soc = pdev->soc;
87 	int chain;
88 	uint16_t sw_peer_id;
89 	struct mon_rx_user_status *rx_user_status;
90 	uint32_t num_users = ppdu_info->com_info.num_users;
91 
92 	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
93 	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
94 	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
95 
96 	for (chain = 0; chain < MAX_CHAIN; chain++)
97 		cdp_rx_ppdu->per_chain_rssi[chain] =
98 			ppdu_info->rx_status.rssi[chain];
99 
100 	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
101 	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
102 	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
103 
104 	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
105 	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
106 		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
107 	else
108 		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
109 
110 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
111 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
112 	} else if (ppdu_info->rx_status.preamble_type ==
113 			HAL_RX_PKT_TYPE_11AX) {
114 		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
115 				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
116 		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
117 				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
118 	}
119 
120 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
121 	dp_rx_mon_handle_cfr_mu_info(pdev, ppdu_info, cdp_rx_ppdu);
122 	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
123 	sw_peer_id = rx_user_status->sw_peer_id;
124 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id, DP_MOD_ID_RX_PPDU_STATS);
125 	if (!peer) {
126 		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
127 		cdp_rx_ppdu->num_users = 0;
128 		return;
129 	}
130 
131 	cdp_rx_ppdu->peer_id = peer->peer_id;
132 	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
133 	cdp_rx_ppdu->num_users = num_users;
134 }
135 
136 bool
137 dp_cfr_rcc_mode_status(struct dp_pdev *pdev)
138 {
139 	return pdev->cfr_rcc_mode;
140 }
141 
142 void
143 dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev,
144 			    struct hal_rx_ppdu_info *ppdu_info,
145 			    struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
146 {
147 	struct cdp_rx_ppdu_cfr_info *cfr_info;
148 
149 	if (!qdf_unlikely(dp_cfr_rcc_mode_status(pdev)))
150 		return;
151 
152 	cfr_info = &cdp_rx_ppdu->cfr_info;
153 
154 	cfr_info->bb_captured_channel
155 		= ppdu_info->cfr_info.bb_captured_channel;
156 	cfr_info->bb_captured_timeout
157 		= ppdu_info->cfr_info.bb_captured_timeout;
158 	cfr_info->bb_captured_reason
159 		= ppdu_info->cfr_info.bb_captured_reason;
160 	cfr_info->rx_location_info_valid
161 		= ppdu_info->cfr_info.rx_location_info_valid;
162 	cfr_info->chan_capture_status
163 		= ppdu_info->cfr_info.chan_capture_status;
164 	cfr_info->rtt_che_buffer_pointer_high8
165 		= ppdu_info->cfr_info.rtt_che_buffer_pointer_high8;
166 	cfr_info->rtt_che_buffer_pointer_low32
167 		= ppdu_info->cfr_info.rtt_che_buffer_pointer_low32;
168 	cfr_info->rtt_cfo_measurement
169 		= (int16_t)ppdu_info->cfr_info.rtt_cfo_measurement;
170 	cfr_info->agc_gain_info0
171 		= ppdu_info->cfr_info.agc_gain_info0;
172 	cfr_info->agc_gain_info1
173 		= ppdu_info->cfr_info.agc_gain_info1;
174 	cfr_info->agc_gain_info2
175 		= ppdu_info->cfr_info.agc_gain_info2;
176 	cfr_info->agc_gain_info3
177 		= ppdu_info->cfr_info.agc_gain_info3;
178 	cfr_info->rx_start_ts
179 		= ppdu_info->cfr_info.rx_start_ts;
180 	cfr_info->mcs_rate
181 		= ppdu_info->cfr_info.mcs_rate;
182 	cfr_info->gi_type
183 		= ppdu_info->cfr_info.gi_type;
184 }
185 
186 void
187 dp_update_cfr_dbg_stats(struct dp_pdev *pdev,
188 			struct hal_rx_ppdu_info *ppdu_info)
189 {
190 	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;
191 
192 	DP_STATS_INC(pdev,
193 		     rcc.chan_capture_status[cfr->chan_capture_status], 1);
194 	if (cfr->rx_location_info_valid) {
195 		DP_STATS_INC(pdev, rcc.rx_loc_info_valid_cnt, 1);
196 		if (cfr->bb_captured_channel) {
197 			DP_STATS_INC(pdev, rcc.bb_captured_channel_cnt, 1);
198 			DP_STATS_INC(pdev,
199 				     rcc.reason_cnt[cfr->bb_captured_reason],
200 				     1);
201 		} else if (cfr->bb_captured_timeout) {
202 			DP_STATS_INC(pdev, rcc.bb_captured_timeout_cnt, 1);
203 			DP_STATS_INC(pdev,
204 				     rcc.reason_cnt[cfr->bb_captured_reason],
205 				     1);
206 		}
207 	}
208 }
209 
210 void
211 dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev,
212 		 struct hal_rx_ppdu_info *ppdu_info)
213 {
214 	qdf_nbuf_t ppdu_nbuf;
215 	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
216 
217 	dp_update_cfr_dbg_stats(pdev, ppdu_info);
218 	if (!ppdu_info->cfr_info.bb_captured_channel)
219 		return;
220 
221 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
222 				   sizeof(struct cdp_rx_indication_ppdu),
223 				   0,
224 				   0,
225 				   FALSE);
226 	if (ppdu_nbuf) {
227 		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;
228 
229 		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
230 		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
231 		qdf_nbuf_put_tail(ppdu_nbuf,
232 				  sizeof(struct cdp_rx_indication_ppdu));
233 		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
234 				     ppdu_nbuf, HTT_INVALID_PEER,
235 				     WDI_NO_VAL, pdev->pdev_id);
236 	}
237 }
238 
239 void
240 dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev,
241 				 struct hal_rx_ppdu_info *ppdu_info,
242 				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
243 {
244 	if (!dp_cfr_rcc_mode_status(pdev))
245 		return;
246 
247 	if (ppdu_info->cfr_info.bb_captured_channel)
248 		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
249 }
250 
251 /**
252  * dp_bb_captured_chan_status() - Get the bb_captured_channel status
253  * @ppdu_info: structure for rx ppdu ring
254  *
255  * Return: Success/ Failure
256  */
257 static inline QDF_STATUS
258 dp_bb_captured_chan_status(struct dp_pdev *pdev,
259 			   struct hal_rx_ppdu_info *ppdu_info)
260 {
261 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
262 	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;
263 
264 	if (dp_cfr_rcc_mode_status(pdev)) {
265 		if (cfr->bb_captured_channel)
266 			status = QDF_STATUS_SUCCESS;
267 	}
268 
269 	return status;
270 }
271 #else
/* Stub used when CFR support is compiled out: always unsupported */
static inline QDF_STATUS
dp_bb_captured_chan_status(struct dp_pdev *pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_E_NOSUPPORT;
}
278 #endif /* WLAN_CFR_ENABLE */
279 
280 #ifdef QCA_ENHANCED_STATS_SUPPORT
281 #ifdef QCA_RSSI_DB2DBM
282 /**
283  * dp_rx_mon_rf_index_conv() - this function will convert BB index to RF
284  *			index in the rssi_chain[chain][bw] array
285  *
286  * @chain: BB chain index
287  * @pdev: pdev structure
288  *
289  * Return: return RF chain index
290  *
291  * Computation:
292  *  3 Bytes of xbar_config are used for RF to BB mapping
293  *  Samples of xbar_config,
294  *
295  * If xbar_config is 0x688FAC(hex):
296  *     RF chains 0-3 are connected to BB chains 4-7
297  *     RF chains 4-7 are connected to BB chains 0-3
298  *     here,
299  *     bits 0 to 2 = 4, maps BB chain 4 for RF chain 0
300  *     bits 3 to 5 = 5, maps BB chain 5 for RF chain 1
301  *     bits 6 to 8 = 6, maps BB chain 6 for RF chain 2
302  *     bits 9 to 11 = 7, maps BB chain 7 for RF chain 3
303  *     bits 12 to 14 = 0, maps BB chain 0 for RF chain 4
304  *     bits 15 to 17 = 1, maps BB chain 1 for RF chain 5
305  *     bits 18 to 20 = 2, maps BB chain 2 for RF chain 6
306  *     bits 21 to 23 = 3, maps BB chain 3 for RF chain 7
307  */
308 static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
309 				       struct dp_mon_pdev *mon_pdev)
310 {
311 	uint32_t xbar_config = mon_pdev->rssi_offsets.xbar_config;
312 
313 	if (mon_pdev->rssi_dbm_conv_support && xbar_config)
314 		return ((xbar_config >> (3 * chain)) & 0x07);
315 	return chain;
316 }
317 #else
/* Stub when QCA_RSSI_DB2DBM is off: BB index equals RF index */
static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
				       struct dp_mon_pdev *mon_pdev)
{
	return chain;
}
323 #endif
345 
346 void
347 dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
348 			      struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
349 {
350 	uint8_t pilot_evm;
351 	uint8_t nss_count;
352 	uint8_t pilot_count;
353 
354 	nss_count = ppdu_info->evm_info.nss_count;
355 	pilot_count = ppdu_info->evm_info.pilot_count;
356 
357 	if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
358 		qdf_err("pilot evm count is more than expected");
359 		return;
360 	}
361 	cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
362 	cdp_rx_ppdu->evm_info.nss_count = nss_count;
363 
364 	/* Populate evm for pilot_evm  = nss_count*pilot_count */
365 	for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
366 		cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
367 			ppdu_info->evm_info.pilot_evm[pilot_evm];
368 	}
369 }
370 
371 /**
372  * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size
373  * @pdev: pdev ctx
374  * @rx_user_status: mon rx user status
375  *
376  * Return: bool
377  */
378 static inline bool
379 dp_rx_inc_rusize_cnt(struct dp_pdev *pdev,
380 		     struct mon_rx_user_status *rx_user_status)
381 {
382 	uint32_t ru_size;
383 	bool is_data;
384 
385 	ru_size = rx_user_status->ofdma_ru_size;
386 
387 	if (dp_is_subtype_data(rx_user_status->frame_control)) {
388 		DP_STATS_INC(pdev,
389 			     ul_ofdma.data_rx_ru_size[ru_size], 1);
390 		is_data = true;
391 	} else {
392 		DP_STATS_INC(pdev,
393 			     ul_ofdma.nondata_rx_ru_size[ru_size], 1);
394 		is_data = false;
395 	}
396 
397 	return is_data;
398 }
399 
400 /**
401  * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication
402  * @pdev: pdev ctx
403  * @ppdu_info: ppdu info structure from ppdu ring
404  * @cdp_rx_ppdu: Rx PPDU indication structure
405  *
406  * Return: none
407  */
408 static void
409 dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
410 					struct hal_rx_ppdu_info *ppdu_info,
411 					struct cdp_rx_indication_ppdu
412 					*cdp_rx_ppdu)
413 {
414 	struct dp_peer *peer;
415 	struct dp_soc *soc = pdev->soc;
416 	int i;
417 	struct mon_rx_user_status *rx_user_status;
418 	struct mon_rx_user_info *rx_user_info;
419 	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
420 	int ru_size;
421 	bool is_data = false;
422 	uint32_t num_users;
423 	struct dp_mon_ops *mon_ops;
424 	uint16_t sw_peer_id;
425 
426 	num_users = ppdu_info->com_info.num_users;
427 	for (i = 0; i < num_users; i++) {
428 		if (i > OFDMA_NUM_USERS)
429 			return;
430 
431 		rx_user_status =  &ppdu_info->rx_user_status[i];
432 		rx_user_info = &ppdu_info->rx_user_info[i];
433 		rx_stats_peruser = &cdp_rx_ppdu->user[i];
434 
435 		sw_peer_id = rx_user_status->sw_peer_id;
436 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
437 					     DP_MOD_ID_RX_PPDU_STATS);
438 		if (qdf_unlikely(!peer)) {
439 			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
440 			continue;
441 		}
442 		rx_stats_peruser->is_bss_peer = peer->bss_peer;
443 
444 		rx_stats_peruser->first_data_seq_ctrl =
445 			rx_user_status->first_data_seq_ctrl;
446 
447 		rx_stats_peruser->frame_control_info_valid =
448 			rx_user_status->frame_control_info_valid;
449 		rx_stats_peruser->frame_control =
450 			rx_user_status->frame_control;
451 
452 		rx_stats_peruser->qos_control_info_valid =
453 			rx_user_info->qos_control_info_valid;
454 		rx_stats_peruser->qos_control =
455 			rx_user_info->qos_control;
456 		rx_stats_peruser->tcp_msdu_count =
457 			rx_user_status->tcp_msdu_count;
458 		rx_stats_peruser->udp_msdu_count =
459 			rx_user_status->udp_msdu_count;
460 		rx_stats_peruser->other_msdu_count =
461 			rx_user_status->other_msdu_count;
462 
463 		rx_stats_peruser->num_msdu =
464 			rx_stats_peruser->tcp_msdu_count +
465 			rx_stats_peruser->udp_msdu_count +
466 			rx_stats_peruser->other_msdu_count;
467 
468 		rx_stats_peruser->preamble_type =
469 				cdp_rx_ppdu->u.preamble;
470 		rx_stats_peruser->mpdu_cnt_fcs_ok =
471 			rx_user_status->mpdu_cnt_fcs_ok;
472 		rx_stats_peruser->mpdu_cnt_fcs_err =
473 			rx_user_status->mpdu_cnt_fcs_err;
474 		qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap,
475 			     &rx_user_status->mpdu_fcs_ok_bitmap,
476 			     HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
477 			     sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
478 		rx_stats_peruser->mpdu_ok_byte_count =
479 			rx_user_status->mpdu_ok_byte_count;
480 		rx_stats_peruser->mpdu_err_byte_count =
481 			rx_user_status->mpdu_err_byte_count;
482 
483 		cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok;
484 		cdp_rx_ppdu->num_msdu += rx_stats_peruser->num_msdu;
485 		rx_stats_peruser->retries =
486 			CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ?
487 			rx_stats_peruser->mpdu_cnt_fcs_ok : 0;
488 		cdp_rx_ppdu->retries += rx_stats_peruser->retries;
489 
490 		if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1)
491 			rx_stats_peruser->is_ampdu = 1;
492 		else
493 			rx_stats_peruser->is_ampdu = 0;
494 
495 		rx_stats_peruser->tid = ppdu_info->rx_status.tid;
496 
497 		qdf_mem_copy(rx_stats_peruser->mac_addr,
498 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
499 		rx_stats_peruser->peer_id = peer->peer_id;
500 		cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
501 		rx_stats_peruser->vdev_id = peer->vdev->vdev_id;
502 		rx_stats_peruser->mu_ul_info_valid = 0;
503 
504 		mon_ops = dp_mon_ops_get(soc);
505 		if (mon_ops && mon_ops->mon_rx_populate_ppdu_usr_info)
506 			mon_ops->mon_rx_populate_ppdu_usr_info(rx_user_status,
507 							       rx_stats_peruser);
508 
509 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
510 		if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
511 		    cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
512 			if (rx_user_status->mu_ul_info_valid) {
513 				rx_stats_peruser->nss = rx_user_status->nss;
514 				cdp_rx_ppdu->usr_nss_sum += rx_stats_peruser->nss;
515 				rx_stats_peruser->mcs = rx_user_status->mcs;
516 				rx_stats_peruser->mu_ul_info_valid =
517 					rx_user_status->mu_ul_info_valid;
518 				rx_stats_peruser->ofdma_ru_start_index =
519 					rx_user_status->ofdma_ru_start_index;
520 				rx_stats_peruser->ofdma_ru_width =
521 					rx_user_status->ofdma_ru_width;
522 				cdp_rx_ppdu->usr_ru_tones_sum +=
523 					rx_stats_peruser->ofdma_ru_width;
524 				rx_stats_peruser->user_index = i;
525 				ru_size = rx_user_status->ofdma_ru_size;
526 				/*
527 				 * max RU size will be equal to
528 				 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
529 				 */
530 				if (qdf_unlikely(ru_size >= OFDMA_NUM_RU_SIZE)) {
531 					dp_err("invalid ru_size %d\n",
532 					       ru_size);
533 					return;
534 				}
535 				is_data = dp_rx_inc_rusize_cnt(pdev,
536 							       rx_user_status);
537 			}
538 			if (is_data) {
539 				/* counter to get number of MU OFDMA */
540 				pdev->stats.ul_ofdma.data_rx_ppdu++;
541 				pdev->stats.ul_ofdma.data_users[num_users]++;
542 			}
543 		}
544 	}
545 }
546 
547 /**
548  * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
549  * @pdev: pdev ctx
550  * @ppdu_info: ppdu info structure from ppdu ring
551  * @cdp_rx_ppdu: Rx PPDU indication structure
552  *
553  * Return: none
554  */
555 static void
556 dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
557 				   struct hal_rx_ppdu_info *ppdu_info,
558 				   struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
559 {
560 	struct dp_peer *peer;
561 	struct dp_soc *soc = pdev->soc;
562 	uint32_t i;
563 	struct dp_mon_ops *mon_ops;
564 	uint16_t sw_peer_id;
565 	struct mon_rx_user_status *rx_user_status;
566 	uint32_t num_users = ppdu_info->com_info.num_users;
567 
568 	cdp_rx_ppdu->first_data_seq_ctrl =
569 		ppdu_info->rx_status.first_data_seq_ctrl;
570 	cdp_rx_ppdu->frame_ctrl =
571 		ppdu_info->rx_status.frame_control;
572 	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
573 	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
574 	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
575 	/* num mpdu is consolidated and added together in num user loop */
576 	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
577 	/* num msdu is consolidated and added together in num user loop */
578 	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
579 				 cdp_rx_ppdu->udp_msdu_count +
580 				 cdp_rx_ppdu->other_msdu_count);
581 
582 	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
583 		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;
584 
585 	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
586 		cdp_rx_ppdu->is_ampdu = 1;
587 	else
588 		cdp_rx_ppdu->is_ampdu = 0;
589 	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;
590 
591 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
592 	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
593 	sw_peer_id = rx_user_status->sw_peer_id;
594 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
595 				     DP_MOD_ID_RX_PPDU_STATS);
596 	if (qdf_unlikely(!peer)) {
597 		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
598 		cdp_rx_ppdu->num_users = 0;
599 		goto end;
600 	}
601 
602 	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
603 		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
604 	cdp_rx_ppdu->peer_id = peer->peer_id;
605 	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
606 
607 	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
608 	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
609 	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
610 	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
611 	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
612 	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
613 	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
614 		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
615 	else
616 		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
617 	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
618 	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
619 	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
620 				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
621 	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
622 	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
623 	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
624 	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
625 	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
626 	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
627 	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
628 
629 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
630 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
631 	} else if (ppdu_info->rx_status.preamble_type ==
632 			HAL_RX_PKT_TYPE_11AX) {
633 		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
634 				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
635 		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
636 				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
637 	}
638 	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
639 	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
640 	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;
641 
642 	mon_ops = dp_mon_ops_get(pdev->soc);
643 	if (mon_ops && mon_ops->mon_rx_populate_ppdu_info)
644 		mon_ops->mon_rx_populate_ppdu_info(ppdu_info,
645 						   cdp_rx_ppdu);
646 
647 	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
648 	for (i = 0; i < MAX_CHAIN; i++)
649 		cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];
650 
651 	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;
652 
653 	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;
654 
655 	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);
656 
657 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
658 
659 	return;
660 end:
661 	dp_rx_populate_cfr_non_assoc_sta(pdev, ppdu_info, cdp_rx_ppdu);
662 }
663 
664 /**
665  * dp_rx_stats_update() - Update per-peer statistics
666  * @soc: Datapath SOC handle
667  * @peer: Datapath peer handle
668  * @ppdu: PPDU Descriptor
669  *
670  * Return: None
671  */
672 static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
673 					   struct cdp_rx_indication_ppdu *ppdu,
674 					   uint32_t user)
675 {
676 	uint32_t ratekbps = 0;
677 	uint32_t ppdu_rx_rate = 0;
678 	uint32_t nss = 0;
679 	uint8_t mcs = 0;
680 	uint32_t rix;
681 	uint16_t ratecode = 0;
682 	struct cdp_rx_stats_ppdu_user *ppdu_user = NULL;
683 	struct dp_mon_peer *mon_peer = NULL;
684 
685 	if (!peer || !ppdu)
686 		return;
687 
688 	mon_peer = peer->monitor_peer;
689 	ppdu_user = &ppdu->user[user];
690 
691 	if (!mon_peer)
692 		return;
693 
694 	if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU) {
695 		if (ppdu_user->nss == 0)
696 			nss = 0;
697 		else
698 			nss = ppdu_user->nss - 1;
699 		mcs = ppdu_user->mcs;
700 
701 		mon_peer->stats.rx.nss_info = ppdu_user->nss;
702 		mon_peer->stats.rx.mcs_info = ppdu_user->mcs;
703 	} else {
704 		if (ppdu->u.nss == 0)
705 			nss = 0;
706 		else
707 			nss = ppdu->u.nss - 1;
708 		mcs = ppdu->u.mcs;
709 
710 		mon_peer->stats.rx.nss_info = ppdu->u.nss;
711 		mon_peer->stats.rx.mcs_info = ppdu->u.mcs;
712 	}
713 
714 	ratekbps = dp_getrateindex(ppdu->u.gi,
715 				   mcs,
716 				   nss,
717 				   ppdu->u.preamble,
718 				   ppdu->u.bw,
719 				   ppdu->punc_bw,
720 				   &rix,
721 				   &ratecode);
722 
723 	if (!ratekbps) {
724 		ppdu->rix = 0;
725 		ppdu_user->rix = 0;
726 		ppdu->rx_ratekbps = 0;
727 		ppdu->rx_ratecode = 0;
728 		ppdu_user->rx_ratekbps = 0;
729 		return;
730 	}
731 
732 	mon_peer->stats.rx.bw_info = ppdu->u.bw;
733 	mon_peer->stats.rx.gi_info = ppdu->u.gi;
734 	mon_peer->stats.rx.preamble_info = ppdu->u.preamble;
735 
736 	ppdu->rix = rix;
737 	ppdu_user->rix = rix;
738 	DP_STATS_UPD(mon_peer, rx.last_rx_rate, ratekbps);
739 	mon_peer->stats.rx.avg_rx_rate =
740 		dp_ath_rate_lpf(mon_peer->stats.rx.avg_rx_rate, ratekbps);
741 	ppdu_rx_rate = dp_ath_rate_out(mon_peer->stats.rx.avg_rx_rate);
742 	DP_STATS_UPD(mon_peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
743 	ppdu->rx_ratekbps = ratekbps;
744 	ppdu->rx_ratecode = ratecode;
745 	ppdu_user->rx_ratekbps = ratekbps;
746 
747 	if (peer->vdev)
748 		peer->vdev->stats.rx.last_rx_rate = ratekbps;
749 }
750 
751 #ifdef WLAN_FEATURE_11BE
752 static inline uint8_t dp_get_bw_offset_frm_bw(struct dp_soc *soc,
753 					      enum CMN_BW_TYPES bw)
754 {
755 	uint8_t pkt_bw_offset;
756 
757 	switch (bw) {
758 	case CMN_BW_20MHZ:
759 		pkt_bw_offset = PKT_BW_GAIN_20MHZ;
760 		break;
761 	case CMN_BW_40MHZ:
762 		pkt_bw_offset = PKT_BW_GAIN_40MHZ;
763 		break;
764 	case CMN_BW_80MHZ:
765 		pkt_bw_offset = PKT_BW_GAIN_80MHZ;
766 		break;
767 	case CMN_BW_160MHZ:
768 		pkt_bw_offset = PKT_BW_GAIN_160MHZ;
769 		break;
770 	case CMN_BW_320MHZ:
771 		pkt_bw_offset = PKT_BW_GAIN_320MHZ;
772 		break;
773 	default:
774 		pkt_bw_offset = 0;
775 		dp_rx_mon_status_debug("%pK: Invalid BW index = %d",
776 				       soc, bw);
777 	}
778 
779 	return pkt_bw_offset;
780 }
781 #else
782 static inline uint8_t dp_get_bw_offset_frm_bw(struct dp_soc *soc,
783 					      enum CMN_BW_TYPES bw)
784 {
785 	uint8_t pkt_bw_offset;
786 
787 	switch (bw) {
788 	case CMN_BW_20MHZ:
789 		pkt_bw_offset = PKT_BW_GAIN_20MHZ;
790 		break;
791 	case CMN_BW_40MHZ:
792 		pkt_bw_offset = PKT_BW_GAIN_40MHZ;
793 		break;
794 	case CMN_BW_80MHZ:
795 		pkt_bw_offset = PKT_BW_GAIN_80MHZ;
796 		break;
797 	case CMN_BW_160MHZ:
798 		pkt_bw_offset = PKT_BW_GAIN_160MHZ;
799 		break;
800 	default:
801 		pkt_bw_offset = 0;
802 		dp_rx_mon_status_debug("%pK: Invalid BW index = %d",
803 				       soc, bw);
804 	}
805 
806 	return pkt_bw_offset;
807 }
808 #endif
809 
810 #ifdef WLAN_TELEMETRY_STATS_SUPPORT
811 static void
812 dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
813 				 struct dp_peer *peer,
814 				 struct cdp_rx_indication_ppdu *ppdu_desc,
815 				 struct cdp_rx_stats_ppdu_user *user)
816 {
817 	uint32_t nss_ru_width_sum = 0;
818 	struct dp_mon_peer *mon_peer = NULL;
819 	uint8_t ac = 0;
820 
821 	if (!pdev || !ppdu_desc || !user || !peer)
822 		return;
823 
824 	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
825 	if (!nss_ru_width_sum)
826 		nss_ru_width_sum = 1;
827 
828 	if (ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
829 	    ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
830 		user->rx_time_us = (ppdu_desc->duration *
831 				    user->nss * user->ofdma_ru_width) /
832 				    nss_ru_width_sum;
833 	} else {
834 		user->rx_time_us = ppdu_desc->duration;
835 	}
836 
837 	mon_peer = peer->monitor_peer;
838 	if (qdf_unlikely(!mon_peer))
839 		return;
840 
841 	ac = TID_TO_WME_AC(user->tid);
842 	DP_STATS_INC(mon_peer, airtime_consumption[ac].consumption,
843 		     user->rx_time_us);
844 }
845 #else
/* Stub when WLAN_TELEMETRY_STATS_SUPPORT is off: no airtime accounting */
static inline void
dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
				 struct dp_peer *peer,
				 struct cdp_rx_indication_ppdu *ppdu_desc,
				 struct cdp_rx_stats_ppdu_user *user)
{ }
852 #endif
853 
854 static void dp_rx_stats_update(struct dp_pdev *pdev,
855 			       struct cdp_rx_indication_ppdu *ppdu)
856 {
857 	struct dp_soc *soc = NULL;
858 	uint8_t mcs, preamble, ac = 0, nss, ppdu_type;
859 	uint32_t num_msdu;
860 	uint8_t pkt_bw_offset;
861 	struct dp_peer *peer;
862 	struct dp_mon_peer *mon_peer;
863 	struct cdp_rx_stats_ppdu_user *ppdu_user;
864 	uint32_t i;
865 	enum cdp_mu_packet_type mu_pkt_type;
866 	struct dp_mon_ops *mon_ops;
867 	struct dp_mon_pdev *mon_pdev = NULL;
868 
869 	if (qdf_likely(pdev))
870 		soc = pdev->soc;
871 	else
872 		return;
873 
874 	if (qdf_likely(!soc) || soc->process_rx_status)
875 		return;
876 
877 	mon_pdev = pdev->monitor_pdev;
878 
879 	preamble = ppdu->u.preamble;
880 	ppdu_type = ppdu->u.ppdu_type;
881 
882 	for (i = 0; i < ppdu->num_users && i < CDP_MU_MAX_USERS; i++) {
883 		peer = NULL;
884 		ppdu_user = &ppdu->user[i];
885 		peer = dp_peer_get_ref_by_id(soc, ppdu_user->peer_id,
886 					     DP_MOD_ID_RX_PPDU_STATS);
887 
888 		if (qdf_unlikely(!peer))
889 			mon_peer = mon_pdev->invalid_mon_peer;
890 		else
891 			mon_peer = peer->monitor_peer;
892 
893 		if (qdf_unlikely(!mon_peer)) {
894 			if (peer)
895 				dp_peer_unref_delete(peer,
896 						     DP_MOD_ID_RX_PPDU_STATS);
897 
898 			continue;
899 		}
900 
901 		if ((preamble == DOT11_A) || (preamble == DOT11_B))
902 			ppdu->u.nss = 1;
903 
904 		if (ppdu_type == HAL_RX_TYPE_SU) {
905 			mcs = ppdu->u.mcs;
906 			nss = ppdu->u.nss;
907 		} else {
908 			mcs = ppdu_user->mcs;
909 			nss = ppdu_user->nss;
910 		}
911 
912 		num_msdu = ppdu_user->num_msdu;
913 
914 		pkt_bw_offset = dp_get_bw_offset_frm_bw(soc, ppdu->u.bw);
915 		DP_STATS_UPD(mon_peer, rx.snr, (ppdu->rssi + pkt_bw_offset));
916 
917 		if (qdf_unlikely(mon_peer->stats.rx.avg_snr == CDP_INVALID_SNR))
918 			mon_peer->stats.rx.avg_snr =
919 				CDP_SNR_IN(mon_peer->stats.rx.snr);
920 		else
921 			CDP_SNR_UPDATE_AVG(mon_peer->stats.rx.avg_snr,
922 					   mon_peer->stats.rx.snr);
923 
924 		if (ppdu_type == HAL_RX_TYPE_SU) {
925 			if (nss) {
926 				DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu);
927 				DP_STATS_INC(mon_peer, rx.ppdu_nss[nss - 1], 1);
928 			}
929 
930 			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_ok,
931 				     ppdu_user->mpdu_cnt_fcs_ok);
932 			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_err,
933 				     ppdu_user->mpdu_cnt_fcs_err);
934 		}
935 
936 		if (ppdu_type >= HAL_RX_TYPE_MU_MIMO &&
937 		    ppdu_type <= HAL_RX_TYPE_MU_OFDMA) {
938 			if (ppdu_type == HAL_RX_TYPE_MU_MIMO)
939 				mu_pkt_type = TXRX_TYPE_MU_MIMO;
940 			else
941 				mu_pkt_type = TXRX_TYPE_MU_OFDMA;
942 
943 			if (qdf_likely(nss)) {
944 				DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu);
945 				DP_STATS_INC(mon_peer,
946 					rx.rx_mu[mu_pkt_type].ppdu_nss[nss - 1],
947 					1);
948 			}
949 
950 			DP_STATS_INC(mon_peer,
951 				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_ok,
952 				     ppdu_user->mpdu_cnt_fcs_ok);
953 			DP_STATS_INC(mon_peer,
954 				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_err,
955 				     ppdu_user->mpdu_cnt_fcs_err);
956 		}
957 
958 		DP_STATS_INC(mon_peer, rx.sgi_count[ppdu->u.gi], num_msdu);
959 		DP_STATS_INC(mon_peer, rx.bw[ppdu->u.bw], num_msdu);
960 		DP_STATS_INC(mon_peer, rx.reception_type[ppdu->u.ppdu_type],
961 			     num_msdu);
962 		DP_STATS_INC(mon_peer, rx.ppdu_cnt[ppdu->u.ppdu_type], 1);
963 		DP_STATS_INCC(mon_peer, rx.ampdu_cnt, num_msdu,
964 			      ppdu_user->is_ampdu);
965 		DP_STATS_INCC(mon_peer, rx.non_ampdu_cnt, num_msdu,
966 			      !(ppdu_user->is_ampdu));
967 		DP_STATS_UPD(mon_peer, rx.rx_rate, mcs);
968 		DP_STATS_INCC(mon_peer,
969 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
970 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
971 		DP_STATS_INCC(mon_peer,
972 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
973 			((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
974 		DP_STATS_INCC(mon_peer,
975 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
976 			((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
977 		DP_STATS_INCC(mon_peer,
978 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
979 			((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
980 		DP_STATS_INCC(mon_peer,
981 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
982 			((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
983 		DP_STATS_INCC(mon_peer,
984 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
985 			((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
986 		DP_STATS_INCC(mon_peer,
987 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
988 			((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
989 		DP_STATS_INCC(mon_peer,
990 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
991 			((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
992 		DP_STATS_INCC(mon_peer,
993 			rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
994 			((mcs >= (MAX_MCS_11AX)) && (preamble == DOT11_AX)));
995 		DP_STATS_INCC(mon_peer,
996 			rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
997 			((mcs < (MAX_MCS_11AX)) && (preamble == DOT11_AX)));
998 		DP_STATS_INCC(mon_peer,
999 			rx.su_ax_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
1000 			((mcs >= (MAX_MCS_11AX)) && (preamble == DOT11_AX) &&
1001 			(ppdu_type == HAL_RX_TYPE_SU)));
1002 		DP_STATS_INCC(mon_peer,
1003 			rx.su_ax_ppdu_cnt.mcs_count[mcs], 1,
1004 			((mcs < (MAX_MCS_11AX)) && (preamble == DOT11_AX) &&
1005 			(ppdu_type == HAL_RX_TYPE_SU)));
1006 		DP_STATS_INCC(mon_peer,
1007 			rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[MAX_MCS - 1],
1008 			1, ((mcs >= (MAX_MCS_11AX)) &&
1009 			(preamble == DOT11_AX) &&
1010 			(ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
1011 		DP_STATS_INCC(mon_peer,
1012 			rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[mcs],
1013 			1, ((mcs < (MAX_MCS_11AX)) &&
1014 			(preamble == DOT11_AX) &&
1015 			(ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
1016 		DP_STATS_INCC(mon_peer,
1017 			rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[MAX_MCS - 1],
1018 			1, ((mcs >= (MAX_MCS_11AX)) &&
1019 			(preamble == DOT11_AX) &&
1020 			(ppdu_type == HAL_RX_TYPE_MU_MIMO)));
1021 		DP_STATS_INCC(mon_peer,
1022 			rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[mcs],
1023 			1, ((mcs < (MAX_MCS_11AX)) &&
1024 			(preamble == DOT11_AX) &&
1025 			(ppdu_type == HAL_RX_TYPE_MU_MIMO)));
1026 
1027 		/*
1028 		 * If invalid TID, it could be a non-qos frame, hence do not
1029 		 * update any AC counters
1030 		 */
1031 		ac = TID_TO_WME_AC(ppdu_user->tid);
1032 
1033 		if (qdf_likely(ppdu->tid != HAL_TID_INVALID))
1034 			DP_STATS_INC(mon_peer, rx.wme_ac_type[ac], num_msdu);
1035 
1036 		DP_STATS_INC(mon_peer, rx.rx_ppdus, 1);
1037 		DP_STATS_INC(mon_peer, rx.rx_mpdus,
1038 			(ppdu_user->mpdu_cnt_fcs_ok + ppdu_user->mpdu_cnt_fcs_err));
1039 
1040 		mon_ops = dp_mon_ops_get(soc);
1041 		if (qdf_likely(mon_ops && mon_ops->mon_rx_stats_update))
1042 			mon_ops->mon_rx_stats_update(mon_peer, ppdu, ppdu_user);
1043 
1044 		if (qdf_unlikely(!peer))
1045 			continue;
1046 
1047 		dp_peer_stats_notify(pdev, peer);
1048 		DP_STATS_UPD(mon_peer, rx.last_snr,
1049 			     (ppdu->rssi + pkt_bw_offset));
1050 
1051 		dp_peer_qos_stats_notify(pdev, ppdu_user);
1052 
1053 		if (dp_is_subtype_data(ppdu->frame_ctrl))
1054 			dp_rx_rate_stats_update(peer, ppdu, i);
1055 
1056 		dp_send_stats_event(pdev, peer, ppdu_user->peer_id);
1057 
1058 		dp_ppdu_desc_user_rx_time_update(pdev, peer, ppdu, ppdu_user);
1059 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
1060 	}
1061 }
1062 
1063 void
1064 dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
1065 			struct hal_rx_ppdu_info *ppdu_info)
1066 {
1067 	qdf_nbuf_t ppdu_nbuf;
1068 	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
1069 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1070 	uint64_t size = 0;
1071 	uint8_t num_users = 0;
1072 
1073 	/*
1074 	 * Do not allocate if fcs error,
1075 	 * ast idx invalid / fctl invalid
1076 	 *
1077 	 * In CFR RCC mode - PPDU status TLVs of error pkts are also needed
1078 	 */
1079 	if (qdf_unlikely(ppdu_info->com_info.mpdu_cnt_fcs_ok == 0))
1080 		return;
1081 
1082 	if (qdf_unlikely(mon_pdev->neighbour_peers_added)) {
1083 		if (ppdu_info->nac_info.fc_valid &&
1084 		    ppdu_info->nac_info.to_ds_flag &&
1085 		    ppdu_info->nac_info.mac_addr2_valid) {
1086 			struct dp_neighbour_peer *peer = NULL;
1087 			uint8_t rssi = ppdu_info->rx_status.rssi_comb;
1088 
1089 			qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
1090 			if (mon_pdev->neighbour_peers_added) {
1091 				TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
1092 					      neighbour_peer_list_elem) {
1093 					if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
1094 							 &ppdu_info->nac_info.mac_addr2,
1095 							 QDF_MAC_ADDR_SIZE)) {
1096 						peer->rssi = rssi;
1097 						break;
1098 					}
1099 				}
1100 			}
1101 			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1102 		} else {
1103 			dp_info("Neighbour peers RSSI update failed! fc_valid = %d, to_ds_flag = %d and mac_addr2_valid = %d",
1104 					ppdu_info->nac_info.fc_valid,
1105 					ppdu_info->nac_info.to_ds_flag,
1106 					ppdu_info->nac_info.mac_addr2_valid);
1107 		}
1108 	}
1109 
1110 	/* need not generate wdi event when mcopy, cfr rcc mode and
1111 	 * enhanced stats are not enabled
1112 	 */
1113 	if (qdf_unlikely(!mon_pdev->mcopy_mode &&
1114 			 !mon_pdev->enhanced_stats_en &&
1115 			 !dp_cfr_rcc_mode_status(pdev)))
1116 		return;
1117 
1118 	if (qdf_unlikely(dp_cfr_rcc_mode_status(pdev)))
1119 		dp_update_cfr_dbg_stats(pdev, ppdu_info);
1120 
1121 	if (qdf_unlikely(!ppdu_info->rx_status.frame_control_info_valid ||
1122 			 ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) {
1123 		if (!(mon_pdev->mcopy_mode ||
1124 		      (dp_bb_captured_chan_status(pdev, ppdu_info) ==
1125 		       QDF_STATUS_SUCCESS)))
1126 			return;
1127 	}
1128 	num_users = ppdu_info->com_info.num_users;
1129 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
1130 	size = sizeof(struct cdp_rx_indication_ppdu) +
1131 		num_users * sizeof(struct cdp_rx_stats_ppdu_user);
1132 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
1133 				   size,
1134 				   0, 0, FALSE);
1135 	if (qdf_likely(ppdu_nbuf)) {
1136 		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)qdf_nbuf_data(ppdu_nbuf);
1137 
1138 		qdf_mem_zero(cdp_rx_ppdu, size);
1139 		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
1140 		dp_rx_populate_cdp_indication_ppdu(pdev,
1141 						   ppdu_info, cdp_rx_ppdu);
1142 		if (!qdf_unlikely(qdf_nbuf_put_tail(ppdu_nbuf,
1143 				       sizeof(struct cdp_rx_indication_ppdu))))
1144 			return;
1145 
1146 		dp_rx_stats_update(pdev, cdp_rx_ppdu);
1147 
1148 		if (qdf_unlikely(cdp_rx_ppdu->peer_id != HTT_INVALID_PEER)) {
1149 			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
1150 					     soc, ppdu_nbuf,
1151 					     cdp_rx_ppdu->peer_id,
1152 					     WDI_NO_VAL, pdev->pdev_id);
1153 		} else if (qdf_unlikely(mon_pdev->mcopy_mode || dp_cfr_rcc_mode_status(pdev))) {
1154 			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
1155 					     ppdu_nbuf, HTT_INVALID_PEER,
1156 					     WDI_NO_VAL, pdev->pdev_id);
1157 		} else {
1158 			qdf_nbuf_free(ppdu_nbuf);
1159 		}
1160 	}
1161 }
1162 #endif/* QCA_ENHANCED_STATS_SUPPORT */
1163 
1164 #ifdef QCA_UNDECODED_METADATA_SUPPORT
1165 #define RX_PHYERR_MASK_GET64(_val1, _val2) (((uint64_t)(_val2) << 32) | (_val1))
1166 /**
1167  * dp_rx_populate_cdp_indication_ppdu_undecoded_metadata() - Populate cdp
1168  * rx indication structure
1169  * @pdev: pdev ctx
1170  * @ppdu_info: ppdu info structure from ppdu ring
1171  * @cdp_rx_ppdu: Rx PPDU indication structure
1172  *
1173  * Return: none
1174  */
1175 static void
1176 dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(struct dp_pdev *pdev,
1177 				struct hal_rx_ppdu_info *ppdu_info,
1178 				struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
1179 {
1180 	uint32_t chain;
1181 
1182 	cdp_rx_ppdu->phyrx_abort = ppdu_info->rx_status.phyrx_abort;
1183 	cdp_rx_ppdu->phyrx_abort_reason =
1184 		ppdu_info->rx_status.phyrx_abort_reason;
1185 
1186 	cdp_rx_ppdu->first_data_seq_ctrl =
1187 		ppdu_info->rx_status.first_data_seq_ctrl;
1188 	cdp_rx_ppdu->frame_ctrl =
1189 		ppdu_info->rx_status.frame_control;
1190 	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
1191 	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
1192 	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
1193 	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
1194 	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
1195 	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
1196 				 cdp_rx_ppdu->udp_msdu_count +
1197 				 cdp_rx_ppdu->other_msdu_count);
1198 
1199 	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
1200 		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;
1201 
1202 	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
1203 		cdp_rx_ppdu->is_ampdu = 1;
1204 	else
1205 		cdp_rx_ppdu->is_ampdu = 0;
1206 	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;
1207 
1208 	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
1209 	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
1210 	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
1211 	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
1212 	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
1213 	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
1214 	if (ppdu_info->rx_status.sgi == VHT_SGI_NYSM &&
1215 	    ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC)
1216 		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
1217 	else
1218 		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
1219 
1220 	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
1221 	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
1222 	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
1223 				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
1224 
1225 	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
1226 	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
1227 	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
1228 	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
1229 	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
1230 	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
1231 	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
1232 
1233 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
1234 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
1235 		cdp_rx_ppdu->vht_no_txop_ps =
1236 			ppdu_info->rx_status.vht_no_txop_ps;
1237 		cdp_rx_ppdu->vht_crc = ppdu_info->rx_status.vht_crc;
1238 		cdp_rx_ppdu->group_id = ppdu_info->rx_status.vht_flag_values5;
1239 	} else if (ppdu_info->rx_status.preamble_type ==
1240 			HAL_RX_PKT_TYPE_11AX) {
1241 		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
1242 				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
1243 		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
1244 				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
1245 	} else {
1246 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.ht_stbc;
1247 		cdp_rx_ppdu->ht_length = ppdu_info->rx_status.ht_length;
1248 		cdp_rx_ppdu->ht_smoothing = ppdu_info->rx_status.smoothing;
1249 		cdp_rx_ppdu->ht_not_sounding =
1250 			ppdu_info->rx_status.not_sounding;
1251 		cdp_rx_ppdu->ht_aggregation = ppdu_info->rx_status.aggregation;
1252 		cdp_rx_ppdu->ht_stbc = ppdu_info->rx_status.ht_stbc;
1253 		cdp_rx_ppdu->ht_crc = ppdu_info->rx_status.ht_crc;
1254 	}
1255 
1256 	cdp_rx_ppdu->l_sig_length = ppdu_info->rx_status.l_sig_length;
1257 	cdp_rx_ppdu->l_sig_a_parity = ppdu_info->rx_status.l_sig_a_parity;
1258 	cdp_rx_ppdu->l_sig_a_pkt_type = ppdu_info->rx_status.l_sig_a_pkt_type;
1259 
1260 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AX) {
1261 		cdp_rx_ppdu->he_crc = ppdu_info->rx_status.he_crc;
1262 		cdp_rx_ppdu->bss_color_id =
1263 			ppdu_info->rx_status.he_data3 & 0x3F;
1264 		cdp_rx_ppdu->beam_change = (ppdu_info->rx_status.he_data3 >>
1265 				QDF_MON_STATUS_BEAM_CHANGE_SHIFT) & 0x1;
1266 		cdp_rx_ppdu->dl_ul_flag = (ppdu_info->rx_status.he_data3 >>
1267 		QDF_MON_STATUS_DL_UL_SHIFT) & 0x1;
1268 		cdp_rx_ppdu->ldpc_extra_sym = (ppdu_info->rx_status.he_data3 >>
1269 				QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT) & 0x1;
1270 		cdp_rx_ppdu->special_reuse =
1271 			ppdu_info->rx_status.he_data4 & 0xF;
1272 		cdp_rx_ppdu->ltf_sym = (ppdu_info->rx_status.he_data5 >>
1273 				QDF_MON_STATUS_HE_LTF_SYM_SHIFT) & 0x7;
1274 		cdp_rx_ppdu->txbf = (ppdu_info->rx_status.he_data5 >>
1275 				QDF_MON_STATUS_TXBF_SHIFT) & 0x1;
1276 		cdp_rx_ppdu->pe_disambiguity = (ppdu_info->rx_status.he_data5 >>
1277 				QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT) & 0x1;
1278 		cdp_rx_ppdu->pre_fec_pad = (ppdu_info->rx_status.he_data5 >>
1279 				QDF_MON_STATUS_PRE_FEC_PAD_SHIFT) & 0x3;
1280 		cdp_rx_ppdu->dopplar = (ppdu_info->rx_status.he_data6 >>
1281 				QDF_MON_STATUS_DOPPLER_SHIFT) & 0x1;
1282 		cdp_rx_ppdu->txop_duration = (ppdu_info->rx_status.he_data6 >>
1283 				QDF_MON_STATUS_TXOP_SHIFT) & 0x7F;
1284 		cdp_rx_ppdu->sig_b_mcs = ppdu_info->rx_status.he_flags1 & 0x7;
1285 		cdp_rx_ppdu->sig_b_dcm = (ppdu_info->rx_status.he_flags1 >>
1286 				QDF_MON_STATUS_DCM_FLAG_1_SHIFT) & 0x1;
1287 		cdp_rx_ppdu->sig_b_sym = (ppdu_info->rx_status.he_flags2 >>
1288 				QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT) & 0xF;
1289 		cdp_rx_ppdu->sig_b_comp = (ppdu_info->rx_status.he_flags2 >>
1290 			QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT) & 0x1;
1291 	}
1292 	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
1293 	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
1294 	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;
1295 
1296 	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
1297 	for (chain = 0; chain < MAX_CHAIN; chain++)
1298 		cdp_rx_ppdu->per_chain_rssi[chain] =
1299 			ppdu_info->rx_status.rssi[chain];
1300 
1301 	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;
1302 
1303 	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;
1304 
1305 	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);
1306 }
1307 
1308 /**
1309  * dp_rx_is_valid_undecoded_frame() - Check unencoded frame received valid
1310  * or not against configured error mask
1311  * @err_mask: configured err mask
1312  * @err_code: Received error reason code for phy abort
1313  *
1314  * Return: true / false
1315  */
1316 static inline bool
1317 dp_rx_is_valid_undecoded_frame(uint64_t err_mask, uint8_t err_code)
1318 {
1319 	if (err_code < CDP_PHYRX_ERR_MAX &&
1320 	    (err_mask & (1L << err_code)))
1321 		return true;
1322 
1323 	return false;
1324 }
1325 
1326 void
1327 dp_rx_handle_ppdu_undecoded_metadata(struct dp_soc *soc, struct dp_pdev *pdev,
1328 				     struct hal_rx_ppdu_info *ppdu_info)
1329 {
1330 	qdf_nbuf_t ppdu_nbuf;
1331 	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
1332 	uint8_t abort_reason = 0;
1333 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1334 	uint64_t mask64;
1335 
1336 	 /* Return if RX_ABORT not set */
1337 	if (ppdu_info->rx_status.phyrx_abort == 0)
1338 		return;
1339 
1340 	mask64 = RX_PHYERR_MASK_GET64(mon_pdev->phyrx_error_mask,
1341 				      mon_pdev->phyrx_error_mask_cont);
1342 	abort_reason = ppdu_info->rx_status.phyrx_abort_reason;
1343 
1344 	if (!dp_rx_is_valid_undecoded_frame(mask64, abort_reason))
1345 		return;
1346 
1347 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
1348 				   sizeof(struct cdp_rx_indication_ppdu),
1349 				   0, 0, FALSE);
1350 	if (ppdu_nbuf) {
1351 		cdp_rx_ppdu = ((struct cdp_rx_indication_ppdu *)
1352 				qdf_nbuf_data(ppdu_nbuf));
1353 
1354 		qdf_mem_zero(cdp_rx_ppdu,
1355 			     sizeof(struct cdp_rx_indication_ppdu));
1356 		dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(pdev,
1357 				ppdu_info, cdp_rx_ppdu);
1358 
1359 		if (!qdf_nbuf_put_tail(ppdu_nbuf,
1360 				       sizeof(struct cdp_rx_indication_ppdu))) {
1361 			return;
1362 		}
1363 
1364 		mon_pdev->rx_mon_stats.rx_undecoded_count++;
1365 		mon_pdev->rx_mon_stats.rx_undecoded_error[abort_reason] += 1;
1366 
1367 		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC_UNDECODED_METADATA,
1368 				     soc, ppdu_nbuf, HTT_INVALID_PEER,
1369 				     WDI_NO_VAL, pdev->pdev_id);
1370 	}
1371 }
1372 #endif/* QCA_UNDECODED_METADATA_SUPPORT */
1373 
1374 #ifdef QCA_MCOPY_SUPPORT
QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf,
			uint8_t fcs_ok_mpdu_cnt, bool deliver_frame)
{
	uint16_t size = 0;
	struct ieee80211_frame *wh;
	uint32_t *nbuf_data;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/* Nothing cached for this MPDU index: caller keeps ownership of
	 * nbuf (QDF_STATUS_SUCCESS means "not consumed, caller may free").
	 */
	if (!ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload)
		return QDF_STATUS_SUCCESS;

	/* For M_COPY mode only one msdu per ppdu is sent to upper layer*/
	if (mon_pdev->mcopy_mode == M_COPY) {
		if (mon_pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
			return QDF_STATUS_SUCCESS;
	}

	/* 802.11 header starts 4 bytes past the payload pointer (after the
	 * phy_ppdu_id word) — read it before the head pull below moves
	 * qdf_nbuf_data().
	 */
	wh = (struct ieee80211_frame *)(ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload + 4);

	/* Bytes preceding this MSDU's payload inside the status buffer */
	size = (ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload -
				qdf_nbuf_data(nbuf));

	if (qdf_nbuf_pull_head(nbuf, size) == NULL)
		return QDF_STATUS_SUCCESS;

	/* Management/control frames are not delivered on this path */
	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_MGT) ||
	     ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_CTL)) {
		return QDF_STATUS_SUCCESS;
	}

	/* Stamp the PPDU id over the first word so the consumer can
	 * correlate the MSDU with its PPDU.
	 */
	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;
	/* only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].payload_len);
	if (deliver_frame) {
		mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
		dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}
	/* E_ALREADY signals the nbuf was consumed (or intentionally kept
	 * for deferred delivery) — caller must not free it.
	 */
	return QDF_STATUS_E_ALREADY;
}
1421 
void
dp_rx_mcopy_handle_last_mpdu(struct dp_soc *soc, struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t status_nbuf)
{
	QDF_STATUS mcopy_status;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/* If the MPDU end tlv and RX header are received in different buffers,
	 * process the RX header based on fcs status.
	 */
	if (mon_pdev->mcopy_status_nbuf) {
		/* For M_COPY mode only one msdu per ppdu is sent to upper layer*/
		if (mon_pdev->mcopy_mode == M_COPY) {
			if (mon_pdev->m_copy_id.rx_ppdu_id ==
			    ppdu_info->com_info.ppdu_id)
				goto end1;
		}

		/* FCS passed for the cached header: deliver a clone so the
		 * cached original can be freed unconditionally at end1.
		 */
		if (ppdu_info->is_fcs_passed) {
			nbuf_clone = qdf_nbuf_clone(mon_pdev->mcopy_status_nbuf);
			if (!nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Failed to clone nbuf");
				goto end1;
			}

			mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
			dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
					     nbuf_clone,
					     HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
			/* Consume the flag so the header is delivered once */
			ppdu_info->is_fcs_passed = false;
		}
end1:
		/* Cached status buffer is processed (or skipped): drop it */
		qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
		mon_pdev->mcopy_status_nbuf = NULL;
	}

	/* If the MPDU end tlv and RX header are received in different buffers,
	 * preserve the RX header as the fcs status will be received in MPDU
	 * end tlv in next buffer. So, cache the buffer to be processed in next
	 * iteration
	 */
	if ((ppdu_info->fcs_ok_cnt + ppdu_info->fcs_err_cnt) !=
	    ppdu_info->com_info.mpdu_cnt) {
		mon_pdev->mcopy_status_nbuf = qdf_nbuf_clone(status_nbuf);
		if (mon_pdev->mcopy_status_nbuf) {
			/* deliver_frame=false: prepare the clone but defer
			 * delivery until the FCS verdict arrives. E_ALREADY
			 * keeps the clone cached; SUCCESS means there was
			 * nothing to preserve, so free it immediately.
			 */
			mcopy_status = dp_rx_handle_mcopy_mode(
							soc, pdev,
							ppdu_info,
							mon_pdev->mcopy_status_nbuf,
							ppdu_info->fcs_ok_cnt,
							false);
			if (mcopy_status == QDF_STATUS_SUCCESS) {
				qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
				mon_pdev->mcopy_status_nbuf = NULL;
			}
		}
	}
}
1485 
1486 void
1487 dp_rx_mcopy_process_ppdu_info(struct dp_pdev *pdev,
1488 			      struct hal_rx_ppdu_info *ppdu_info,
1489 			      uint32_t tlv_status)
1490 {
1491 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1492 
1493 	if (qdf_unlikely(!mon_pdev->mcopy_mode))
1494 		return;
1495 
1496 	/* The fcs status is received in MPDU end tlv. If the RX header
1497 	 * and its MPDU end tlv are received in different status buffer then
1498 	 * to process that header ppdu_info->is_fcs_passed is used.
1499 	 * If end tlv is received in next status buffer then com_info.mpdu_cnt
1500 	 * will be 0 at the time of receiving MPDU end tlv and we update the
1501 	 * is_fcs_passed flag based on ppdu_info->fcs_err.
1502 	 */
1503 	if (tlv_status != HAL_TLV_STATUS_MPDU_END)
1504 		return;
1505 
1506 	if (!ppdu_info->fcs_err) {
1507 		if (ppdu_info->fcs_ok_cnt >
1508 		    HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER) {
1509 			dp_err("No. of MPDUs(%d) per status buff exceeded",
1510 					ppdu_info->fcs_ok_cnt);
1511 			return;
1512 		}
1513 		if (ppdu_info->com_info.mpdu_cnt)
1514 			ppdu_info->fcs_ok_cnt++;
1515 		else
1516 			ppdu_info->is_fcs_passed = true;
1517 	} else {
1518 		if (ppdu_info->com_info.mpdu_cnt)
1519 			ppdu_info->fcs_err_cnt++;
1520 		else
1521 			ppdu_info->is_fcs_passed = false;
1522 	}
1523 }
1524 
1525 void
1526 dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
1527 			 struct hal_rx_ppdu_info *ppdu_info,
1528 			 uint32_t tlv_status,
1529 			 qdf_nbuf_t status_nbuf)
1530 {
1531 	QDF_STATUS mcopy_status;
1532 	qdf_nbuf_t nbuf_clone = NULL;
1533 	uint8_t fcs_ok_mpdu_cnt = 0;
1534 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1535 
1536 	dp_rx_mcopy_handle_last_mpdu(soc, pdev, ppdu_info, status_nbuf);
1537 
1538 	if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt))
1539 		goto end;
1540 
1541 	if (qdf_unlikely(!ppdu_info->fcs_ok_cnt))
1542 		goto end;
1543 
1544 	/* For M_COPY mode only one msdu per ppdu is sent to upper layer*/
1545 	if (mon_pdev->mcopy_mode == M_COPY)
1546 		ppdu_info->fcs_ok_cnt = 1;
1547 
1548 	while (fcs_ok_mpdu_cnt < ppdu_info->fcs_ok_cnt) {
1549 		nbuf_clone = qdf_nbuf_clone(status_nbuf);
1550 		if (!nbuf_clone) {
1551 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1552 				  "Failed to clone nbuf");
1553 			goto end;
1554 		}
1555 
1556 		mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
1557 						       ppdu_info,
1558 						       nbuf_clone,
1559 						       fcs_ok_mpdu_cnt,
1560 						       true);
1561 
1562 		if (mcopy_status == QDF_STATUS_SUCCESS)
1563 			qdf_nbuf_free(nbuf_clone);
1564 
1565 		fcs_ok_mpdu_cnt++;
1566 	}
1567 end:
1568 	qdf_nbuf_free(status_nbuf);
1569 	ppdu_info->fcs_ok_cnt = 0;
1570 	ppdu_info->fcs_err_cnt = 0;
1571 	ppdu_info->com_info.mpdu_cnt = 0;
1572 	qdf_mem_zero(&ppdu_info->ppdu_msdu_info,
1573 		     HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER
1574 		     * sizeof(struct hal_rx_msdu_payload_info));
1575 }
1576 #endif /* QCA_MCOPY_SUPPORT */
1577 
int
dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info,
			      qdf_nbuf_t nbuf)
{
	uint8_t size = 0;
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/* Cannot deliver without a monitor vdev; non-zero return means the
	 * caller retains ownership of nbuf.
	 */
	if (!mon_pdev->mvdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] Monitor vdev is NULL !!",
			  __func__, __LINE__);
		return 1;
	}

	mon_vdev = mon_pdev->mvdev->monitor_vdev;

	if (!ppdu_info->msdu_info.first_msdu_payload) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] First msdu payload not present",
			  __func__, __LINE__);
		return 1;
	}

	/* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
	size = (ppdu_info->msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf)) + 4;
	/* Clear the cached pointer so the payload is consumed only once */
	ppdu_info->msdu_info.first_msdu_payload = NULL;

	if (!qdf_nbuf_pull_head(nbuf, size)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] No header present",
			  __func__, __LINE__);
		return 1;
	}

	/* Only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->msdu_info.payload_len);
	/* Prepend the radiotap header before handing the frame to the OS */
	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, nbuf,
				      qdf_nbuf_headroom(nbuf))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		return 1;
	}

	/* Deliver to the monitor interface; nbuf ownership transfers here */
	mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
			      nbuf, NULL);
	mon_pdev->ppdu_info.rx_status.monitor_direct_used = 0;
	return 0;
}
1629 
1630 qdf_nbuf_t
1631 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
1632 {
1633 	uint8_t *buf;
1634 	int32_t nbuf_retry_count;
1635 	QDF_STATUS ret;
1636 	qdf_nbuf_t nbuf = NULL;
1637 
1638 	for (nbuf_retry_count = 0; nbuf_retry_count <
1639 		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
1640 			nbuf_retry_count++) {
1641 		/* Allocate a new skb using alloc_skb */
1642 		nbuf = qdf_nbuf_alloc_no_recycler(RX_MON_STATUS_BUF_SIZE,
1643 						  RX_MON_STATUS_BUF_RESERVATION,
1644 						  RX_DATA_BUFFER_ALIGNMENT);
1645 
1646 		if (!nbuf) {
1647 			DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
1648 			continue;
1649 		}
1650 
1651 		buf = qdf_nbuf_data(nbuf);
1652 
1653 		memset(buf, 0, RX_MON_STATUS_BUF_SIZE);
1654 
1655 		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
1656 						 QDF_DMA_FROM_DEVICE,
1657 						 RX_MON_STATUS_BUF_SIZE);
1658 
1659 		/* nbuf map failed */
1660 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
1661 			qdf_nbuf_free(nbuf);
1662 			DP_STATS_INC(pdev, replenish.map_err, 1);
1663 			continue;
1664 		}
1665 		/* qdf_nbuf alloc and map succeeded */
1666 		break;
1667 	}
1668 
1669 	/* qdf_nbuf still alloc or map failed */
1670 	if (qdf_unlikely(nbuf_retry_count >=
1671 			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
1672 		return NULL;
1673 
1674 	return nbuf;
1675 }
1676 
1677 #ifndef DISABLE_MON_CONFIG
1678 uint32_t
1679 dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
1680 	       uint32_t mac_id, uint32_t quota)
1681 {
1682 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1683 
1684 	if (mon_soc && mon_soc->mon_rx_process)
1685 		return mon_soc->mon_rx_process(soc, int_ctx,
1686 					       mac_id, quota);
1687 	return 0;
1688 }
1689 #else
uint32_t
dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
	       uint32_t mac_id, uint32_t quota)
{
	/* Monitor processing is compiled out (DISABLE_MON_CONFIG):
	 * report zero work done so the caller's budget is untouched.
	 */
	return 0;
}
1696 #endif
1697 
1698 /**
 * dp_send_mgmt_packet_to_stack(): send indication to upper layers
1700  *
1701  * @soc: soc handle
1702  * @nbuf: Mgmt packet
1703  * @pdev: pdev handle
1704  *
1705  * Return: QDF_STATUS_SUCCESS on success
1706  *         QDF_STATUS_E_INVAL in error
1707  */
1708 #ifdef QCA_MCOPY_SUPPORT
1709 static inline QDF_STATUS
1710 dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
1711 			     qdf_nbuf_t nbuf,
1712 			     struct dp_pdev *pdev)
1713 {
1714 	uint32_t *nbuf_data;
1715 	struct ieee80211_frame *wh;
1716 	qdf_frag_t addr;
1717 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1718 
1719 	if (!nbuf)
1720 		return QDF_STATUS_E_INVAL;
1721 
1722 	/* Get addr pointing to80211 header */
1723 	addr = dp_rx_mon_get_nbuf_80211_hdr(nbuf);
1724 	if (qdf_unlikely(!addr)) {
1725 		qdf_nbuf_free(nbuf);
1726 		return QDF_STATUS_E_INVAL;
1727 	}
1728 
1729 	/*check if this is not a mgmt packet*/
1730 	wh = (struct ieee80211_frame *)addr;
1731 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
1732 	     IEEE80211_FC0_TYPE_MGT) &&
1733 	     ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
1734 	     IEEE80211_FC0_TYPE_CTL)) {
1735 		qdf_nbuf_free(nbuf);
1736 		return QDF_STATUS_E_INVAL;
1737 	}
1738 	nbuf_data = (uint32_t *)qdf_nbuf_push_head(nbuf, 4);
1739 	if (!nbuf_data) {
1740 		QDF_TRACE(QDF_MODULE_ID_DP,
1741 			  QDF_TRACE_LEVEL_ERROR,
1742 			  FL("No headroom"));
1743 		qdf_nbuf_free(nbuf);
1744 		return QDF_STATUS_E_INVAL;
1745 	}
1746 	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;
1747 
1748 	dp_wdi_event_handler(WDI_EVENT_RX_MGMT_CTRL, soc, nbuf,
1749 			     HTT_INVALID_PEER,
1750 			     WDI_NO_VAL, pdev->pdev_id);
1751 	return QDF_STATUS_SUCCESS;
1752 }
1753 #else
static inline QDF_STATUS
dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     struct dp_pdev *pdev)
{
	/* M-copy support compiled out: nothing to deliver, report success */
	return QDF_STATUS_SUCCESS;
}
1761 #endif /* QCA_MCOPY_SUPPORT */
1762 
1763 QDF_STATUS dp_rx_mon_process_dest_pktlog(struct dp_soc *soc,
1764 					 uint32_t mac_id,
1765 					 qdf_nbuf_t mpdu)
1766 {
1767 	uint32_t event, msdu_timestamp = 0;
1768 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1769 	void *data;
1770 	struct ieee80211_frame *wh;
1771 	uint8_t type, subtype;
1772 	struct dp_mon_pdev *mon_pdev;
1773 
1774 	if (!pdev)
1775 		return QDF_STATUS_E_INVAL;
1776 
1777 	mon_pdev = pdev->monitor_pdev;
1778 
1779 	if (mon_pdev->rx_pktlog_cbf) {
1780 		if (qdf_nbuf_get_nr_frags(mpdu))
1781 			data = qdf_nbuf_get_frag_addr(mpdu, 0);
1782 		else
1783 			data = qdf_nbuf_data(mpdu);
1784 
1785 		/* CBF logging required, doesn't matter if it is a full mode
1786 		 * or lite mode.
1787 		 * Need to look for mpdu with:
1788 		 * TYPE = ACTION, SUBTYPE = NO ACK in the header
1789 		 */
1790 		event = WDI_EVENT_RX_CBF;
1791 
1792 		wh = (struct ieee80211_frame *)data;
1793 		type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1794 		subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1795 		if (type == IEEE80211_FC0_TYPE_MGT &&
1796 		    subtype == IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK) {
1797 			msdu_timestamp = mon_pdev->ppdu_info.rx_status.tsft;
1798 			dp_rx_populate_cbf_hdr(soc,
1799 					       mac_id, event,
1800 					       mpdu,
1801 					       msdu_timestamp);
1802 		}
1803 	}
1804 	return QDF_STATUS_SUCCESS;
1805 }
1806 
QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
			     qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct cdp_mon_status *rs;
	qdf_nbuf_t mon_skb, skb_next;
	qdf_nbuf_t mon_mpdu = NULL;
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		goto mon_deliver_fail;

	mon_pdev = pdev->monitor_pdev;
	rs = &mon_pdev->rx_mon_recv_status;

	/* No consumer: neither a monitor vdev, mcopy, nor CBF pktlog */
	if (!mon_pdev->mvdev && !mon_pdev->mcopy_mode &&
	    !mon_pdev->rx_pktlog_cbf)
		goto mon_deliver_fail;

	/* restitch mon MPDU for delivery via monitor interface */
	mon_mpdu = dp_rx_mon_restitch_mpdu(soc, mac_id, head_msdu,
					   tail_msdu, rs);

	/* If MPDU restitch fails, free buffers*/
	if (!mon_mpdu) {
		dp_info("MPDU restitch failed, free buffers");
		goto mon_deliver_fail;
	}

	/* CBF pktlog gets a look at the restitched MPDU first */
	dp_rx_mon_process_dest_pktlog(soc, mac_id, mon_mpdu);

	/* monitor vap cannot be present when mcopy is enabled
	 * hence same skb can be consumed
	 */
	if (mon_pdev->mcopy_mode)
		return dp_send_mgmt_packet_to_stack(soc, mon_mpdu, pdev);

	if (mon_mpdu && mon_pdev->mvdev &&
	    mon_pdev->mvdev->osif_vdev &&
	    mon_pdev->mvdev->monitor_vdev &&
	    mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
		mon_vdev = mon_pdev->mvdev->monitor_vdev;

		/* Fill per-PPDU metadata consumed by radiotap encoding */
		mon_pdev->ppdu_info.rx_status.ppdu_id =
			mon_pdev->ppdu_info.com_info.ppdu_id;
		mon_pdev->ppdu_info.rx_status.device_id = soc->device_id;
		mon_pdev->ppdu_info.rx_status.chan_noise_floor =
			pdev->chan_noise_floor;
		dp_handle_tx_capture(soc, pdev, mon_mpdu);

		/* Radiotap header must fit in the headroom; on failure the
		 * MPDU (and any remaining chain) is freed below.
		 */
		if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status,
					      mon_mpdu,
					      qdf_nbuf_headroom(mon_mpdu))) {
			DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
			goto mon_deliver_fail;
		}

		dp_rx_mon_update_pf_tag_to_buf_headroom(soc, mon_mpdu);
		/* Deliver to the OS monitor interface; ownership transfers */
		mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
				      mon_mpdu,
				      &mon_pdev->ppdu_info.rx_status);
	} else {
		dp_rx_mon_dest_debug("%pK: mon_mpdu=%pK monitor_vdev %pK osif_vdev %pK"
				     , soc, mon_mpdu, mon_pdev->mvdev,
				     (mon_pdev->mvdev ? mon_pdev->mvdev->osif_vdev
				     : NULL));
		goto mon_deliver_fail;
	}

	return QDF_STATUS_SUCCESS;

mon_deliver_fail:
	/* Failure path: walk and free the original MSDU chain */
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = qdf_nbuf_next(mon_skb);

		 dp_rx_mon_dest_debug("%pK: [%s][%d] mon_skb=%pK len %u",
				      soc,  __func__, __LINE__, mon_skb, mon_skb->len);

		qdf_nbuf_free(mon_skb);
		mon_skb = skb_next;
	}
	return QDF_STATUS_E_INVAL;
}
1892 
1893 QDF_STATUS dp_rx_mon_deliver_non_std(struct dp_soc *soc,
1894 				     uint32_t mac_id)
1895 {
1896 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1897 	ol_txrx_rx_mon_fp osif_rx_mon;
1898 	qdf_nbuf_t dummy_msdu;
1899 	struct dp_mon_pdev *mon_pdev;
1900 	struct dp_mon_vdev *mon_vdev;
1901 
1902 	/* Sanity checking */
1903 	if (!pdev || !pdev->monitor_pdev)
1904 		goto mon_deliver_non_std_fail;
1905 
1906 	mon_pdev = pdev->monitor_pdev;
1907 
1908 	if (!mon_pdev->mvdev || !mon_pdev->mvdev ||
1909 	    !mon_pdev->mvdev->monitor_vdev ||
1910 	    !mon_pdev->mvdev->monitor_vdev->osif_rx_mon)
1911 		goto mon_deliver_non_std_fail;
1912 
1913 	mon_vdev = mon_pdev->mvdev->monitor_vdev;
1914 	/* Generate a dummy skb_buff */
1915 	osif_rx_mon = mon_vdev->osif_rx_mon;
1916 	dummy_msdu = qdf_nbuf_alloc(soc->osdev, MAX_MONITOR_HEADER,
1917 				    MAX_MONITOR_HEADER, 4, FALSE);
1918 	if (!dummy_msdu)
1919 		goto allocate_dummy_msdu_fail;
1920 
1921 	qdf_nbuf_set_pktlen(dummy_msdu, 0);
1922 	qdf_nbuf_set_next(dummy_msdu, NULL);
1923 
1924 	mon_pdev->ppdu_info.rx_status.ppdu_id =
1925 		mon_pdev->ppdu_info.com_info.ppdu_id;
1926 
1927 	/* Apply the radio header to this dummy skb */
1928 	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, dummy_msdu,
1929 				      qdf_nbuf_headroom(dummy_msdu))) {
1930 		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
1931 		qdf_nbuf_free(dummy_msdu);
1932 		goto mon_deliver_non_std_fail;
1933 	}
1934 
1935 	/* deliver to the user layer application */
1936 	osif_rx_mon(mon_pdev->mvdev->osif_vdev,
1937 		    dummy_msdu, NULL);
1938 
1939 	/* Clear rx_status*/
1940 	qdf_mem_zero(&mon_pdev->ppdu_info.rx_status,
1941 		     sizeof(mon_pdev->ppdu_info.rx_status));
1942 	mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
1943 
1944 	return QDF_STATUS_SUCCESS;
1945 
1946 allocate_dummy_msdu_fail:
1947 		 dp_rx_mon_dest_debug("%pK: mon_skb=%pK ",
1948 				      soc, dummy_msdu);
1949 
1950 mon_deliver_non_std_fail:
1951 	return QDF_STATUS_E_INVAL;
1952 }
1953 
1954 /**
1955  * dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
1956  *                                     filtering enabled
1957  * @soc: core txrx main context
1958  * @ppdu_info: Structure for rx ppdu info
1959  * @status_nbuf: Qdf nbuf abstraction for linux skb
1960  * @pdev_id: mac_id/pdev_id correspondinggly for MCL and WIN
1961  *
1962  * Return: none
1963  */
1964 void
1965 dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
1966 				struct hal_rx_ppdu_info *ppdu_info,
1967 				qdf_nbuf_t status_nbuf, uint32_t pdev_id)
1968 {
1969 	struct dp_peer *peer;
1970 	struct mon_rx_user_status *rx_user_status;
1971 	uint32_t num_users = ppdu_info->com_info.num_users;
1972 	uint16_t sw_peer_id;
1973 
1974 	/* Sanity check for num_users */
1975 	if (!num_users)
1976 		return;
1977 
1978 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
1979 	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
1980 
1981 	sw_peer_id = rx_user_status->sw_peer_id;
1982 
1983 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
1984 				     DP_MOD_ID_RX_PPDU_STATS);
1985 
1986 	if (!peer)
1987 		return;
1988 
1989 	if ((peer->peer_id != HTT_INVALID_PEER) && (peer->monitor_peer) &&
1990 	    (peer->monitor_peer->peer_based_pktlog_filter)) {
1991 		dp_wdi_event_handler(
1992 				     WDI_EVENT_RX_DESC, soc,
1993 				     status_nbuf,
1994 				     peer->peer_id,
1995 				     WDI_NO_VAL, pdev_id);
1996 	}
1997 	dp_peer_unref_delete(peer,
1998 			     DP_MOD_ID_RX_PPDU_STATS);
1999 }
2000 
2001 uint32_t
2002 dp_mon_rx_add_tlv(uint8_t id, uint16_t len, void *value, qdf_nbuf_t mpdu_nbuf)
2003 {
2004 	uint8_t *dest = NULL;
2005 	uint32_t num_bytes_pushed = 0;
2006 
2007 	/* Add tlv id field */
2008 	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint8_t));
2009 	if (qdf_likely(dest)) {
2010 		*((uint8_t *)dest) = id;
2011 		num_bytes_pushed += sizeof(uint8_t);
2012 	}
2013 
2014 	/* Add tlv len field */
2015 	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint16_t));
2016 	if (qdf_likely(dest)) {
2017 		*((uint16_t *)dest) = len;
2018 		num_bytes_pushed += sizeof(uint16_t);
2019 	}
2020 
2021 	/* Add tlv value field */
2022 	dest = qdf_nbuf_push_head(mpdu_nbuf, len);
2023 	if (qdf_likely(dest)) {
2024 		qdf_mem_copy(dest, value, len);
2025 		num_bytes_pushed += len;
2026 	}
2027 
2028 	return num_bytes_pushed;
2029 }
2030 
2031 void
2032 dp_mon_rx_stats_update_rssi_dbm_params(struct dp_mon_pdev *mon_pdev,
2033 				       struct hal_rx_ppdu_info *ppdu_info)
2034 {
2035 	ppdu_info->rx_status.rssi_offset = mon_pdev->rssi_offsets.rssi_offset;
2036 	ppdu_info->rx_status.rssi_dbm_conv_support =
2037 				mon_pdev->rssi_dbm_conv_support;
2038 }
2039 
2040 #ifdef WLAN_SUPPORT_CTRL_FRAME_STATS
2041 void dp_rx_mon_update_user_ctrl_frame_stats(struct dp_pdev *pdev,
2042 					    struct hal_rx_ppdu_info *ppdu_info)
2043 {
2044 	struct dp_peer *peer;
2045 	struct dp_mon_peer *mon_peer;
2046 	struct dp_soc *soc = pdev->soc;
2047 	uint16_t fc, sw_peer_id;
2048 	uint8_t i;
2049 
2050 	if (qdf_unlikely(!ppdu_info))
2051 		return;
2052 
2053 	fc = ppdu_info->nac_info.frame_control;
2054 	if (qdf_likely((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) !=
2055 	    QDF_IEEE80211_FC0_TYPE_CTL))
2056 		return;
2057 
2058 	for (i = 0; i < ppdu_info->com_info.num_users; i++) {
2059 		sw_peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
2060 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
2061 					     DP_MOD_ID_RX_PPDU_STATS);
2062 		if (qdf_unlikely(!peer))
2063 			continue;
2064 		mon_peer = peer->monitor_peer;
2065 		if (qdf_unlikely(!mon_peer)) {
2066 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
2067 			continue;
2068 		}
2069 		DP_STATS_INCC(mon_peer, rx.ndpa_cnt, 1,
2070 			      ppdu_info->ctrl_frm_info[i].ndpa);
2071 		DP_STATS_INCC(mon_peer, rx.bar_cnt, 1,
2072 			      ppdu_info->ctrl_frm_info[i].bar);
2073 
2074 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
2075 	}
2076 }
2077 #endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */
2078