xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/1.0/dp_rx_mon_status_1.0.c (revision 737b028eeab9d1c8c0971fb81ffcb33313bb90f0)
1 /*
2  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #include "hal_hw_headers.h"
18 #include "dp_types.h"
19 #include "dp_rx.h"
20 #include "dp_peer.h"
21 #include "hal_rx.h"
22 #include "hal_api.h"
23 #include "qdf_trace.h"
24 #include "qdf_nbuf.h"
25 #include "hal_api_mon.h"
26 #include "dp_internal.h"
27 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
28 #include "dp_htt.h"
29 #include "dp_mon.h"
30 #include "dp_rx_mon.h"
31 #include "htt.h"
32 #include <dp_mon_1.0.h>
33 #include <dp_rx_mon_1.0.h>
34 
35 #ifdef FEATURE_PERPKT_INFO
36 #include "dp_ratetable.h"
37 #endif
38 
39 static inline
40 QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
41 					      uint32_t mac_id,
42 					      struct dp_srng *dp_rxdma_srng,
43 					      struct rx_desc_pool *rx_desc_pool,
44 					      uint32_t num_req_buffers,
45 					      union dp_rx_desc_list_elem_t **desc_list,
46 					      union dp_rx_desc_list_elem_t **tail,
47 					      uint8_t owner);
48 
49 /**
50  * dp_rx_mon_handle_status_buf_done() - Handle status buf DMA not done
51  *
52  * @pdev: DP pdev handle
53  * @mon_status_srng: Monitor status SRNG
54  *
55  * As per MAC team's suggestion, if the HP + 2 entry's DMA done is set,
56  * skip the HP + 1 entry and start processing it in the next interrupt.
57  * If the HP + 2 entry's DMA done is not set, poll on the HP + 1 entry
58  * until its DMA done TLV is set.
59  *
60  * Return: enum dp_mon_reap_status
61  */
62 enum dp_mon_reap_status
63 dp_rx_mon_handle_status_buf_done(struct dp_pdev *pdev,
64 				 void *mon_status_srng)
65 {
66 	struct dp_soc *soc = pdev->soc;
67 	hal_soc_handle_t hal_soc;
68 	void *ring_entry;
69 	struct hal_buf_info hbi;
70 	qdf_nbuf_t status_nbuf;
71 	struct dp_rx_desc *rx_desc;
72 	void *rx_tlv;
73 	QDF_STATUS buf_status;
74 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
75 
76 	hal_soc = soc->hal_soc;
77 
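	/*
	 * Peek at the HP + 2 entry without moving the SRNG head pointer;
	 * its DMA-done state decides whether the HP + 1 entry is skipped
	 * (replenished) or polled again on the next pass.
	 */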
78 	ring_entry = hal_srng_src_peek_n_get_next_next(hal_soc,
79 						       mon_status_srng);
80 	if (!ring_entry) {
81 		dp_rx_mon_status_debug("%pK: Monitor status ring entry is NULL for SRNG: %pK",
82 				       soc, mon_status_srng);
83 		return DP_MON_STATUS_NO_DMA;
84 	}
85 
86 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_entry,
87 				  &hbi);
88 	rx_desc = dp_rx_cookie_2_va_mon_status(soc, hbi.sw_cookie);
89 
90 	qdf_assert_always(rx_desc);
91 
92 	status_nbuf = rx_desc->nbuf;
93 
94 	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
95 			      QDF_DMA_FROM_DEVICE);
96 
97 	rx_tlv = qdf_nbuf_data(status_nbuf);
98 	buf_status = hal_get_rx_status_done(rx_tlv);
99 
100 	/* If status buffer DMA is not done,
101 	 * 1. As per MAC team's suggestion, if HP + 2 entry's DMA done is set,
102 	 * replenish HP + 1 entry and start processing in the next interrupt.
103 	 * 2. If HP + 2 entry's DMA done is not set,
104 	 * hold on to the mon destination ring.
105 	 */
106 	if (buf_status != QDF_STATUS_SUCCESS) {
107 		dp_err_rl("Monitor status ring: DMA is not done "
108 			     "for nbuf: %pK", status_nbuf);
109 		mon_pdev->rx_mon_stats.tlv_tag_status_err++;
110 		return DP_MON_STATUS_NO_DMA;
111 	}
112 
113 	mon_pdev->rx_mon_stats.status_buf_done_war++;
114 
115 	return DP_MON_STATUS_REPLENISH;
116 }
117 
118 #ifdef WLAN_RX_PKT_CAPTURE_ENH
119 #include "dp_rx_mon_feature.h"
120 #else
121 static QDF_STATUS
122 dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
123 			 struct hal_rx_ppdu_info *ppdu_info)
124 {
125 	return QDF_STATUS_SUCCESS;
126 }
127 
128 static void
129 dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
130 			      qdf_nbuf_t status_nbuf,
131 			      struct hal_rx_ppdu_info *ppdu_info,
132 			      bool *nbuf_used)
133 {
134 }
135 #endif
136 
137 #ifdef WLAN_TX_PKT_CAPTURE_ENH
138 #include "dp_rx_mon_feature.h"
139 #else
140 static QDF_STATUS
141 dp_send_ack_frame_to_stack(struct dp_soc *soc,
142 			   struct dp_pdev *pdev,
143 			   struct hal_rx_ppdu_info *ppdu_info)
144 {
145 	return QDF_STATUS_SUCCESS;
146 }
147 #endif
148 
149 #if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M)
150 static inline void
151 dp_rx_ul_ofdma_ru_size_to_width(
152 	uint32_t ru_size,
153 	uint32_t *ru_width)
154 {
155 	uint32_t width;
156 
157 	width = 0;
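	/*
	 * Translate the HTT UL OFDMA RU size enum into a width expressed in
	 * units of 26-tone RUs (e.g. RU_106 -> 4, RU_996 -> 37); unknown
	 * sizes leave the width at 0.
	 */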
158 	switch (ru_size) {
159 	case HTT_UL_OFDMA_V0_RU_SIZE_RU_26:
160 		width = 1;
161 		break;
162 	case HTT_UL_OFDMA_V0_RU_SIZE_RU_52:
163 		width = 2;
164 		break;
165 	case HTT_UL_OFDMA_V0_RU_SIZE_RU_106:
166 		width = 4;
167 		break;
168 	case HTT_UL_OFDMA_V0_RU_SIZE_RU_242:
169 		width = 9;
170 		break;
171 	case HTT_UL_OFDMA_V0_RU_SIZE_RU_484:
172 		width = 18;
173 		break;
174 	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996:
175 		width = 37;
176 		break;
177 	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2:
178 		width = 74;
179 		break;
180 	default:
181 		dp_rx_mon_status_err("RU size to width convert err");
182 		break;
183 	}
184 	*ru_width = width;
185 }
186 
187 static inline void
188 dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
189 {
190 	struct mon_rx_user_status *mon_rx_user_status;
191 	uint32_t num_users;
192 	uint32_t i;
193 	uint32_t mu_ul_user_v0_word0;
194 	uint32_t mu_ul_user_v0_word1;
195 	uint32_t ru_width;
196 	uint32_t ru_size;
197 
198 	if (!(ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_OFDMA ||
199 	      ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_MIMO))
200 		return;
201 
202 	num_users = ppdu_info->com_info.num_users;
203 	if (num_users > HAL_MAX_UL_MU_USERS)
204 		num_users = HAL_MAX_UL_MU_USERS;
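	/*
	 * Per-user UL MU info is taken from the HTT UL OFDMA user info words
	 * only when the VALID bit is set and the version field is 0; in that
	 * case MCS, NSS and the RU allocation are copied into the user status.
	 */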
205 	for (i = 0; i < num_users; i++) {
206 		mon_rx_user_status = &ppdu_info->rx_user_status[i];
207 		mu_ul_user_v0_word0 =
208 			mon_rx_user_status->mu_ul_user_v0_word0;
209 		mu_ul_user_v0_word1 =
210 			mon_rx_user_status->mu_ul_user_v0_word1;
211 
212 		if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET(
213 			mu_ul_user_v0_word0) &&
214 			!HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET(
215 			mu_ul_user_v0_word0)) {
216 			mon_rx_user_status->mcs =
217 				HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET(
218 				mu_ul_user_v0_word1);
219 			mon_rx_user_status->nss =
220 				HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET(
221 				mu_ul_user_v0_word1) + 1;
222 
223 			mon_rx_user_status->mu_ul_info_valid = 1;
224 			mon_rx_user_status->ofdma_ru_start_index =
225 				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET(
226 				mu_ul_user_v0_word1);
227 
228 			ru_size =
229 				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET(
230 				mu_ul_user_v0_word1);
231 			dp_rx_ul_ofdma_ru_size_to_width(ru_size, &ru_width);
232 			mon_rx_user_status->ofdma_ru_width = ru_width;
233 			mon_rx_user_status->ofdma_ru_size = ru_size;
234 		}
235 	}
236 }
237 #else
238 static inline void
239 dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
240 {
241 }
242 #endif
243 
244 #ifdef QCA_UNDECODED_METADATA_SUPPORT
245 static inline bool
246 dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
247 			    struct hal_rx_ppdu_info *ppdu_info)
248 {
249 	return (pdev->monitor_pdev->undecoded_metadata_capture &&
250 			ppdu_info->rx_status.phyrx_abort);
251 }
252 
253 static inline void
254 dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
255 					 struct dp_pdev *pdev,
256 					 struct hal_rx_ppdu_info *ppdu_info)
257 {
258 	if (pdev->monitor_pdev->undecoded_metadata_capture)
259 		dp_rx_handle_ppdu_undecoded_metadata(soc, pdev, ppdu_info);
260 
261 	pdev->monitor_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
262 }
263 #else
264 static inline bool
265 dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
266 			    struct hal_rx_ppdu_info *ppdu_info)
267 {
268 	return false;
269 }
270 
271 static inline void
272 dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
273 					 struct dp_pdev *pdev,
274 					 struct hal_rx_ppdu_info *ppdu_info)
275 {
276 }
277 #endif
278 
279 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
280 /**
281  * dp_rx_mon_update_scan_spcl_vap_stats() - Update special vap stats
282  * @pdev: dp pdev context
283  * @ppdu_info: ppdu info structure from ppdu ring
284  *
285  * Return: none
286  */
287 static inline void
288 dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
289 				     struct hal_rx_ppdu_info *ppdu_info)
290 {
291 	struct mon_rx_user_status *rx_user_status = NULL;
292 	struct dp_mon_pdev *mon_pdev = NULL;
293 	struct dp_mon_vdev *mon_vdev = NULL;
294 	uint32_t num_users = 0;
295 	uint32_t user = 0;
296 
297 	mon_pdev = pdev->monitor_pdev;
298 	if (!mon_pdev || !mon_pdev->mvdev)
299 		return;
300 
301 	mon_vdev = mon_pdev->mvdev->monitor_vdev;
302 	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
303 		return;
304 
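	/*
	 * Accumulate per-user FCS ok/error MPDU and byte counts, plus the
	 * per-PPDU mgmt/ctrl/data frame counts, into the special vap stats.
	 */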
305 	num_users = ppdu_info->com_info.num_users;
306 	for (user = 0; user < num_users; user++) {
307 		rx_user_status =  &ppdu_info->rx_user_status[user];
308 		mon_vdev->scan_spcl_vap_stats->rx_ok_pkts +=
309 				rx_user_status->mpdu_cnt_fcs_ok;
310 		mon_vdev->scan_spcl_vap_stats->rx_ok_bytes +=
311 				rx_user_status->mpdu_ok_byte_count;
312 		mon_vdev->scan_spcl_vap_stats->rx_err_pkts +=
313 				rx_user_status->mpdu_cnt_fcs_err;
314 		mon_vdev->scan_spcl_vap_stats->rx_err_bytes +=
315 				rx_user_status->mpdu_err_byte_count;
316 	}
317 	mon_vdev->scan_spcl_vap_stats->rx_mgmt_pkts +=
318 				ppdu_info->frm_type_info.rx_mgmt_cnt;
319 	mon_vdev->scan_spcl_vap_stats->rx_ctrl_pkts +=
320 				ppdu_info->frm_type_info.rx_ctrl_cnt;
321 	mon_vdev->scan_spcl_vap_stats->rx_data_pkts +=
322 				ppdu_info->frm_type_info.rx_data_cnt;
323 }
324 #else
325 static inline void
326 dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
327 				     struct hal_rx_ppdu_info *ppdu_info)
328 {
329 }
330 #endif
331 
332 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
333 /**
334  * dp_rx_mon_status_ring_record_entry() - Record one entry of a particular
335  *					  event type into the monitor status
336  *					  buffer tracking history.
337  * @soc: DP soc handle
338  * @event: event type
339  * @ring_desc: Monitor status ring descriptor
340  * @rx_desc: RX descriptor
341  * @nbuf: status buffer.
342  *
343  * Return: None
344  */
345 static void
346 dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
347 				   enum dp_mon_status_process_event event,
348 				   hal_ring_desc_t ring_desc,
349 				   struct dp_rx_desc *rx_desc,
350 				   qdf_nbuf_t nbuf)
351 {
352 	struct dp_mon_stat_info_record *record;
353 	struct hal_buf_info hbi;
354 	uint32_t idx;
355 
356 	if (qdf_unlikely(!soc->mon_status_ring_history))
357 		return;
358 
359 	idx = dp_history_get_next_index(&soc->mon_status_ring_history->index,
360 					DP_MON_STATUS_HIST_MAX);
361 
362 	/* No NULL check needed for record since it's an array */
363 	record = &soc->mon_status_ring_history->entry[idx];
364 
365 	record->timestamp = qdf_get_log_timestamp();
366 	record->event = event;
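	/*
	 * For reap events the ring descriptor is still valid, so also record
	 * the buffer address info, the rx descriptor and its nbuf; enqueue
	 * and dequeue events only track the status nbuf itself.
	 */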
367 	if (event == DP_MON_STATUS_BUF_REAP) {
368 		hal_rx_buffer_addr_info_get_paddr(ring_desc, &hbi);
369 
370 		/* buffer_addr_info is the first element of ring_desc */
371 		hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
372 					  &hbi);
373 
374 		record->hbi.paddr = hbi.paddr;
375 		record->hbi.sw_cookie = hbi.sw_cookie;
376 		record->hbi.rbm = hbi.rbm;
377 		record->rx_desc = rx_desc;
378 		if (rx_desc) {
379 			record->nbuf = rx_desc->nbuf;
380 			record->rx_desc_nbuf_data = qdf_nbuf_data(rx_desc->nbuf);
381 		} else {
382 			record->nbuf = NULL;
383 			record->rx_desc_nbuf_data = NULL;
384 		}
385 	}
386 
387 	if (event == DP_MON_STATUS_BUF_ENQUEUE) {
388 		record->nbuf = nbuf;
389 		record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
390 	}
391 
392 	if (event == DP_MON_STATUS_BUF_DEQUEUE) {
393 		record->nbuf = nbuf;
394 		if (nbuf)
395 			record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
396 		else
397 			record->rx_desc_nbuf_data = NULL;
398 	}
399 }
400 #else
401 static void
402 dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
403 				   enum dp_mon_status_process_event event,
404 				   hal_ring_desc_t ring_desc,
405 				   struct dp_rx_desc *rx_desc,
406 				   qdf_nbuf_t nbuf)
407 {
408 }
409 #endif
410 
411 /**
412  * dp_rx_mon_status_process_tlv() - Process status TLV in status
413  * buffer on Rx status Queue posted by status SRNG processing.
414  * @soc: core txrx main context
415  * @int_ctx: interrupt context
416  * @mac_id: mac_id which is one of 3 mac_ids
417  * @quota: amount of work which can be done
418  *
419  * Return: none
420  */
421 static inline void
422 dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
423 			     uint32_t mac_id, uint32_t quota)
424 {
425 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
426 	struct hal_rx_ppdu_info *ppdu_info;
427 	qdf_nbuf_t status_nbuf;
428 	uint8_t *rx_tlv;
429 	uint8_t *rx_tlv_start;
430 	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
431 	struct cdp_pdev_mon_stats *rx_mon_stats;
432 	int smart_mesh_status;
433 	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
434 	bool nbuf_used;
435 	uint32_t rx_enh_capture_mode;
436 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
437 	struct dp_mon_pdev *mon_pdev;
438 
439 	if (qdf_unlikely(!pdev)) {
440 		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d", soc,
441 				       mac_id);
442 		return;
443 	}
444 
445 	mon_pdev = pdev->monitor_pdev;
446 	ppdu_info = &mon_pdev->ppdu_info;
447 	rx_mon_stats = &mon_pdev->rx_mon_stats;
448 
449 	if (qdf_unlikely(mon_pdev->mon_ppdu_status != DP_PPDU_STATUS_START))
450 		return;
451 
452 	rx_enh_capture_mode = mon_pdev->rx_enh_capture_mode;
453 
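	/*
	 * Drain every status buffer that dp_rx_mon_status_srng_process()
	 * queued on rx_status_q and walk its TLVs until the PPDU status
	 * for this buffer is resolved.
	 */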
454 	while (!qdf_nbuf_is_queue_empty(&mon_pdev->rx_status_q)) {
455 
456 		status_nbuf = qdf_nbuf_queue_remove(&mon_pdev->rx_status_q);
457 		dp_rx_mon_status_ring_record_entry(soc,
458 						   DP_MON_STATUS_BUF_DEQUEUE,
459 						   NULL, NULL, status_nbuf);
460 
461 		if (qdf_unlikely(!status_nbuf))
462 			return;
463 
464 		rx_tlv = qdf_nbuf_data(status_nbuf);
465 		rx_tlv_start = rx_tlv;
466 		nbuf_used = false;
467 
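		/*
		 * Parse the status TLVs only when some consumer needs them:
		 * a monitor vdev, enhanced stats, m_copy, CFR, undecoded
		 * metadata capture or enhanced RX capture.
		 */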
468 		if ((mon_pdev->mvdev) || (mon_pdev->enhanced_stats_en) ||
469 		    (mon_pdev->mcopy_mode) || (dp_cfr_rcc_mode_status(pdev)) ||
470 		    (mon_pdev->undecoded_metadata_capture) ||
471 		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
472 			do {
473 				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
474 						ppdu_info, pdev->soc->hal_soc,
475 						status_nbuf);
476 
477 				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
478 								rx_mon_stats);
479 
480 				dp_rx_mon_enh_capture_process(pdev, tlv_status,
481 					status_nbuf, ppdu_info,
482 					&nbuf_used);
483 
484 				dp_rx_mcopy_process_ppdu_info(pdev,
485 							      ppdu_info,
486 							      tlv_status);
487 
488 				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv,
489 						mon_pdev->is_tlv_hdr_64_bit);
490 
491 				if (qdf_unlikely(((rx_tlv - rx_tlv_start) >=
492 						RX_MON_STATUS_BUF_SIZE) ||
493 						(RX_MON_STATUS_BUF_SIZE -
494 						(rx_tlv - rx_tlv_start) <
495 						mon_pdev->tlv_hdr_size)))
496 					break;
497 
498 			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
499 				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
500 				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
501 				 (tlv_status == HAL_TLV_STATUS_MPDU_START) ||
502 				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
503 		}
504 		dp_mon_rx_stats_update_rssi_dbm_params(mon_pdev, ppdu_info);
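		/*
		 * Hand the status buffer to pktlog: peer based pktlog takes
		 * precedence, otherwise post a full or lite RX WDI event when
		 * the corresponding pktlog mode is enabled.
		 */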
505 		if (qdf_unlikely(mon_pdev->dp_peer_based_pktlog)) {
506 			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
507 							status_nbuf,
508 							pdev->pdev_id);
509 		} else {
510 			if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL))
511 				pktlog_mode = WDI_EVENT_RX_DESC;
512 			else if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE))
513 				pktlog_mode = WDI_EVENT_LITE_RX;
514 
515 			if (qdf_unlikely(pktlog_mode != WDI_NO_VAL))
516 				dp_wdi_event_handler(pktlog_mode, soc,
517 						     status_nbuf,
518 						     HTT_INVALID_PEER,
519 						     WDI_NO_VAL, pdev->pdev_id);
520 		}
521 
522 		/* smart monitor vap and m_copy cannot co-exist */
523 		if (qdf_unlikely(ppdu_info->rx_status.monitor_direct_used &&
524 				 mon_pdev->neighbour_peers_added &&
525 				 mon_pdev->mvdev)) {
526 			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
527 						pdev, ppdu_info, status_nbuf);
528 			if (smart_mesh_status)
529 				qdf_nbuf_free(status_nbuf);
530 		} else if (qdf_unlikely(IS_LOCAL_PKT_CAPTURE_RUNNING(mon_pdev,
531 				is_local_pkt_capture_running))) {
532 			int ret;
533 
534 			ret = dp_rx_handle_local_pkt_capture(pdev, ppdu_info,
535 							     status_nbuf);
536 			if (ret)
537 				qdf_nbuf_free(status_nbuf);
538 		} else if (qdf_unlikely(mon_pdev->mcopy_mode)) {
539 			dp_rx_process_mcopy_mode(soc, pdev,
540 						 ppdu_info, tlv_status,
541 						 status_nbuf);
542 		} else if (qdf_unlikely(rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
543 			if (!nbuf_used)
544 				qdf_nbuf_free(status_nbuf);
545 
546 			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
547 				dp_rx_handle_enh_capture(soc,
548 							 pdev, ppdu_info);
549 		} else {
550 			qdf_nbuf_free(status_nbuf);
551 		}
552 
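		/*
		 * A non-standard PPDU is delivered and the status reset here;
		 * a completed standard PPDU (with no PHY RX abort) updates
		 * stats/CFR and, in non full-mon mode, reaps the monitor
		 * destination ring before resetting the PPDU status.
		 */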
553 		if (qdf_unlikely(tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE)) {
554 			dp_rx_mon_deliver_non_std(soc, mac_id);
555 			dp_mon_rx_ppdu_status_reset(mon_pdev);
556 		} else if ((qdf_likely(tlv_status == HAL_TLV_STATUS_PPDU_DONE)) &&
557 				(qdf_likely(!dp_rx_mon_check_phyrx_abort(pdev, ppdu_info)))) {
558 			rx_mon_stats->status_ppdu_done++;
559 			dp_rx_mon_handle_mu_ul_info(ppdu_info);
560 
561 			if (qdf_unlikely(mon_pdev->tx_capture_enabled
562 			    != CDP_TX_ENH_CAPTURE_DISABLED))
563 				dp_send_ack_frame_to_stack(soc, pdev,
564 							   ppdu_info);
565 
566 			if (qdf_likely(mon_pdev->enhanced_stats_en ||
567 				       mon_pdev->mcopy_mode ||
568 				       mon_pdev->neighbour_peers_added))
569 				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
570 			else if (dp_cfr_rcc_mode_status(pdev))
571 				dp_rx_handle_cfr(soc, pdev, ppdu_info);
572 
573 			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
574 
575 			/* Collect spcl vap stats if configured */
576 			if (qdf_unlikely(mon_pdev->scan_spcl_vap_configured))
577 				dp_rx_mon_update_scan_spcl_vap_stats(pdev,
578 								     ppdu_info);
579 
580 			dp_rx_mon_update_user_ctrl_frame_stats(pdev, ppdu_info);
581 
582 			/*
583 			 * If chan_num is not fetched correctly from the PPDU RX TLV,
584 			 * get it from the value saved in the pdev.
585 			 */
586 			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_num == 0))
587 				mon_pdev->ppdu_info.rx_status.chan_num =
588 							mon_pdev->mon_chan_num;
589 			/*
590 			 * If chan_freq is not fetched correctly from the PPDU RX TLV,
591 			 * get it from the value saved in the pdev.
592 			 */
593 			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_freq == 0)) {
594 				mon_pdev->ppdu_info.rx_status.chan_freq =
595 					mon_pdev->mon_chan_freq;
596 			}
597 
598 			if (!mon_soc->full_mon_mode)
599 				dp_rx_mon_dest_process(soc, int_ctx, mac_id,
600 						       quota);
601 
602 			dp_mon_rx_ppdu_status_reset(mon_pdev);
603 		} else {
604 			dp_rx_mon_handle_ppdu_undecoded_metadata(soc, pdev,
605 								 ppdu_info);
606 		}
607 	}
608 	return;
609 }
610 
611 /**
612  * dp_rx_mon_status_srng_process() - Process monitor status ring
613  *	post the status ring buffer to Rx status Queue for later
614  *	processing when status ring is filled with status TLV.
615  *	Allocate a new buffer to status ring if the filled buffer
616  *	is posted.
617  * @soc: core txrx main context
618  * @int_ctx: interrupt context
619  * @mac_id: mac_id which is one of 3 mac_ids
620  * @quota: No. of ring entries that can be serviced in one shot.
621  *
622  * Return: uint32_t: No. of ring entries that are processed.
623  */
624 static inline uint32_t
625 dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
626 			      uint32_t mac_id, uint32_t quota)
627 {
628 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
629 	hal_soc_handle_t hal_soc;
630 	void *mon_status_srng;
631 	void *rxdma_mon_status_ring_entry;
632 	QDF_STATUS status;
633 	enum dp_mon_reap_status reap_status;
634 	uint32_t work_done = 0;
635 	struct dp_mon_pdev *mon_pdev;
636 
637 	if (qdf_unlikely(!pdev)) {
638 		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
639 				       soc, mac_id);
640 		return work_done;
641 	}
642 
643 	mon_pdev = pdev->monitor_pdev;
644 
645 	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;
646 
647 	qdf_assert(mon_status_srng);
648 	if (qdf_unlikely(!mon_status_srng ||
649 			 !hal_srng_initialized(mon_status_srng))) {
650 
651 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
652 			"%s %d : HAL Monitor Status Ring Init Failed -- %pK",
653 			__func__, __LINE__, mon_status_srng);
654 		return work_done;
655 	}
656 
657 	hal_soc = soc->hal_soc;
658 
659 	qdf_assert(hal_soc);
660 
661 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_status_srng)))
662 		goto done;
663 
664 	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
665 	 * BUFFER_ADDR_INFO STRUCT
666 	 */
667 	while (qdf_likely((rxdma_mon_status_ring_entry =
668 		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng))
669 			&& quota--)) {
670 		struct hal_buf_info hbi;
671 		qdf_nbuf_t status_nbuf;
672 		struct dp_rx_desc *rx_desc;
673 		uint8_t *status_buf;
674 		qdf_dma_addr_t paddr;
675 		uint64_t buf_addr;
676 		struct rx_desc_pool *rx_desc_pool;
677 
678 		rx_desc_pool = &soc->rx_desc_status[mac_id];
679 		buf_addr =
680 			(HAL_RX_BUFFER_ADDR_31_0_GET(
681 				rxdma_mon_status_ring_entry) |
682 			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
683 				rxdma_mon_status_ring_entry)) << 32));
684 
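		/*
		 * A non-zero buffer address means this entry holds a buffer
		 * posted earlier; a zero address means a previous replenish
		 * failed here, so a descriptor is pulled from the free list
		 * and a fresh buffer is attached below.
		 */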
685 		if (qdf_likely(buf_addr)) {
686 
687 			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
688 					(uint32_t *)rxdma_mon_status_ring_entry,
689 					&hbi);
690 			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
691 						hbi.sw_cookie);
692 			dp_rx_mon_status_ring_record_entry(soc, DP_MON_STATUS_BUF_REAP,
693 						rxdma_mon_status_ring_entry,
694 						rx_desc, NULL);
695 
696 			qdf_assert_always(rx_desc);
697 
698 			if (qdf_unlikely(!dp_rx_desc_paddr_sanity_check(rx_desc,
699 								buf_addr))) {
700 				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
701 				hal_srng_src_get_next(hal_soc, mon_status_srng);
702 				continue;
703 			}
704 
705 			status_nbuf = rx_desc->nbuf;
706 
707 			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
708 				QDF_DMA_FROM_DEVICE);
709 
710 			status_buf = qdf_nbuf_data(status_nbuf);
711 
712 			status = hal_get_rx_status_done(status_buf);
713 
714 			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
715 				uint32_t hp, tp;
716 				hal_get_sw_hptp(hal_soc, mon_status_srng,
717 						&tp, &hp);
718 				dp_info_rl("tlv tag status error hp:%u, tp:%u",
719 					   hp, tp);
720 
721 				/* RxDMA status done bit might not be set even
722 				 * though tp is moved by HW.
723 				 */
724 
725 				/* If done status is missing:
726 				 * 1. As per MAC team's suggestion,
727 				 *    when HP + 1 entry is peeked and if DMA
728 				 *    is not done and if HP + 2 entry's DMA done
729 				 *    is set, skip HP + 1 entry and
730 				 *    start processing in the next interrupt.
731 				 * 2. If HP + 2 entry's DMA done is not set,
732 				 *    poll on HP + 1 entry for its DMA done to be set.
733 				 *    Check status for same buffer for next time
734 				 *    dp_rx_mon_status_srng_process
735 				 */
736 				reap_status = dp_rx_mon_handle_status_buf_done(pdev,
737 									mon_status_srng);
738 				if (qdf_unlikely(reap_status == DP_MON_STATUS_NO_DMA))
739 					continue;
740 				else if (qdf_unlikely(reap_status == DP_MON_STATUS_REPLENISH)) {
741 					if (!rx_desc->unmapped) {
742 						qdf_nbuf_unmap_nbytes_single(
743 							soc->osdev, status_nbuf,
744 							QDF_DMA_FROM_DEVICE,
745 							rx_desc_pool->buf_size);
746 						rx_desc->unmapped = 1;
747 					}
748 					qdf_nbuf_free(status_nbuf);
749 					goto buf_replenish;
750 				}
751 			}
752 			qdf_nbuf_set_pktlen(status_nbuf,
753 					    RX_MON_STATUS_BUF_SIZE);
754 
755 			if (qdf_likely(!rx_desc->unmapped)) {
756 				qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
757 							     QDF_DMA_FROM_DEVICE,
758 							     rx_desc_pool->buf_size);
759 				rx_desc->unmapped = 1;
760 			}
761 
762 			/* Put the status_nbuf to queue */
763 			qdf_nbuf_queue_add(&mon_pdev->rx_status_q, status_nbuf);
764 			dp_rx_mon_status_ring_record_entry(soc, DP_MON_STATUS_BUF_ENQUEUE,
765 						rxdma_mon_status_ring_entry,
766 						rx_desc, status_nbuf);
767 
768 		} else {
769 			union dp_rx_desc_list_elem_t *desc_list = NULL;
770 			union dp_rx_desc_list_elem_t *tail = NULL;
771 			uint32_t num_alloc_desc;
772 
773 			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
774 							rx_desc_pool,
775 							1,
776 							&desc_list,
777 							&tail);
778 			/*
779 			 * No free descriptors available
780 			 */
781 			if (qdf_unlikely(num_alloc_desc == 0)) {
782 				work_done++;
783 				break;
784 			}
785 
786 			rx_desc = &desc_list->rx_desc;
787 		}
788 
789 buf_replenish:
790 		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);
791 
792 		/*
793 		 * qdf_nbuf alloc or map failed,
794 		 * free the dp rx desc to free list,
795 		 * fill in NULL dma address at current HP entry,
796 		 * keep HP in mon_status_ring unchanged,
797 		 * wait next time dp_rx_mon_status_srng_process
798 		 * to fill in buffer at current HP.
799 		 */
800 		if (qdf_unlikely(!status_nbuf)) {
801 			union dp_rx_desc_list_elem_t *desc_list = NULL;
802 			union dp_rx_desc_list_elem_t *tail = NULL;
803 			struct rx_desc_pool *rx_desc_pool;
804 
805 			rx_desc_pool = &soc->rx_desc_status[mac_id];
806 
807 			dp_info_rl("fail to allocate or map qdf_nbuf");
808 			dp_rx_add_to_free_desc_list(&desc_list,
809 						&tail, rx_desc);
810 			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
811 						&tail, mac_id, rx_desc_pool);
812 
813 			hal_rxdma_buff_addr_info_set(
814 				hal_soc, rxdma_mon_status_ring_entry,
815 				0, 0,
816 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
817 			work_done++;
818 			break;
819 		}
820 
821 		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);
822 
823 		rx_desc->nbuf = status_nbuf;
824 		rx_desc->in_use = 1;
825 		rx_desc->unmapped = 0;
826 
827 		hal_rxdma_buff_addr_info_set(hal_soc,
828 					     rxdma_mon_status_ring_entry,
829 					     paddr, rx_desc->cookie,
830 					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
831 
832 		hal_srng_src_get_next(hal_soc, mon_status_srng);
833 		work_done++;
834 	}
835 done:
836 
837 	dp_srng_access_end(int_ctx, soc, mon_status_srng);
838 
839 	return work_done;
840 
841 }
842 
843 uint32_t
844 dp_rx_mon_status_process(struct dp_soc *soc, struct dp_intr *int_ctx,
845 			 uint32_t mac_id, uint32_t quota)
846 {
847 	uint32_t work_done;
848 
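	/*
	 * Reap the status SRNG first, then spend the remaining quota
	 * parsing the TLVs of the status buffers that were queued.
	 */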
849 	work_done = dp_rx_mon_status_srng_process(soc, int_ctx, mac_id, quota);
850 	quota -= work_done;
851 	dp_rx_mon_status_process_tlv(soc, int_ctx, mac_id, quota);
852 
853 	return work_done;
854 }
855 
856 QDF_STATUS
857 dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id)
858 {
859 	uint8_t pdev_id = pdev->pdev_id;
860 	struct dp_soc *soc = pdev->soc;
861 	struct dp_srng *mon_status_ring;
862 	uint32_t num_entries;
863 	struct rx_desc_pool *rx_desc_pool;
864 	union dp_rx_desc_list_elem_t *desc_list = NULL;
865 	union dp_rx_desc_list_elem_t *tail = NULL;
866 
867 	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];
868 
869 	num_entries = mon_status_ring->num_entries;
870 
871 	rx_desc_pool = &soc->rx_desc_status[mac_id];
872 
873 	dp_debug("Mon RX Desc Pool[%d] entries=%u",
874 		 pdev_id, num_entries);
875 
876 	return dp_rx_mon_status_buffers_replenish(soc, mac_id, mon_status_ring,
877 						  rx_desc_pool, num_entries,
878 						  &desc_list, &tail,
879 						  HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
880 }
881 
882 QDF_STATUS
883 dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
884 {
885 	uint8_t pdev_id = pdev->pdev_id;
886 	struct dp_soc *soc = pdev->soc;
887 	struct dp_srng *mon_status_ring;
888 	uint32_t num_entries;
889 	struct rx_desc_pool *rx_desc_pool;
890 
891 	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];
892 
893 	num_entries = mon_status_ring->num_entries;
894 
895 	rx_desc_pool = &soc->rx_desc_status[mac_id];
896 
897 	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);
898 
899 	rx_desc_pool->desc_type = QDF_DP_RX_DESC_STATUS_TYPE;
900 	return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool);
901 }
902 
903 void
904 dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
905 {
906 	uint32_t i;
907 	uint8_t pdev_id = pdev->pdev_id;
908 	struct dp_soc *soc = pdev->soc;
909 	struct dp_srng *mon_status_ring;
910 	uint32_t num_entries;
911 	struct rx_desc_pool *rx_desc_pool;
912 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
913 
914 	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];
915 
916 	num_entries = mon_status_ring->num_entries;
917 
918 	rx_desc_pool = &soc->rx_desc_status[mac_id];
919 
920 	dp_debug("Mon RX Desc status Pool[%d] init entries=%u",
921 		 pdev_id, num_entries);
922 
923 	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
924 	rx_desc_pool->buf_size = RX_MON_STATUS_BUF_SIZE;
925 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
926 	/* Disable frag processing flag */
927 	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
928 
929 	dp_rx_desc_pool_init(soc, mac_id, num_entries + 1, rx_desc_pool);
930 
931 	qdf_nbuf_queue_init(&mon_pdev->rx_status_q);
932 
933 	mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
934 
935 	qdf_mem_zero(&mon_pdev->ppdu_info, sizeof(mon_pdev->ppdu_info));
936 
937 	/*
938 	 * Set last_ppdu_id to HAL_INVALID_PPDU_ID in order to avoid ppdu_id
939 	 * match with '0' ppdu_id from monitor status ring
940 	 */
941 	mon_pdev->ppdu_info.com_info.last_ppdu_id = HAL_INVALID_PPDU_ID;
942 
943 	qdf_mem_zero(&mon_pdev->rx_mon_stats, sizeof(mon_pdev->rx_mon_stats));
944 
945 	dp_rx_mon_init_dbg_ppdu_stats(&mon_pdev->ppdu_info,
946 				      &mon_pdev->rx_mon_stats);
947 
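	/*
	 * Initialize the per-MU-user MPDU queues and mark that an MPDU
	 * header is expected first for every user.
	 */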
948 	for (i = 0; i < MAX_MU_USERS; i++) {
949 		qdf_nbuf_queue_init(&mon_pdev->mpdu_q[i]);
950 		mon_pdev->is_mpdu_hdr[i] = true;
951 	}
952 
953 	qdf_mem_zero(mon_pdev->msdu_list,
954 		     sizeof(mon_pdev->msdu_list[MAX_MU_USERS]));
955 
956 	mon_pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;
957 }
958 
959 void
960 dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id) {
961 	uint8_t pdev_id = pdev->pdev_id;
962 	struct dp_soc *soc = pdev->soc;
963 	struct rx_desc_pool *rx_desc_pool;
964 
965 	rx_desc_pool = &soc->rx_desc_status[mac_id];
966 
967 	dp_debug("Mon RX Desc status Pool[%d] deinit", pdev_id);
968 
969 	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
970 }
971 
972 void
973 dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id) {
974 	uint8_t pdev_id = pdev->pdev_id;
975 	struct dp_soc *soc = pdev->soc;
976 	struct rx_desc_pool *rx_desc_pool;
977 
978 	rx_desc_pool = &soc->rx_desc_status[mac_id];
979 
980 	dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);
981 
982 	dp_rx_desc_pool_free(soc, rx_desc_pool);
983 }
984 
985 void
986 dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
987 {
988 	uint8_t pdev_id = pdev->pdev_id;
989 	struct dp_soc *soc = pdev->soc;
990 	struct rx_desc_pool *rx_desc_pool;
991 
992 	rx_desc_pool = &soc->rx_desc_status[mac_id];
993 
994 	dp_debug("Mon RX Status Buffers Free pdev[%d]", pdev_id);
995 
996 	dp_rx_desc_nbuf_free(soc, rx_desc_pool, true);
997 }
998 
999 /**
1000  * dp_rx_mon_status_buffers_replenish() - replenish monitor status ring with
1001  *				rx nbufs called during dp rx
1002  *				monitor status ring initialization
1003  *
1004  * @dp_soc: core txrx main context
1005  * @mac_id: mac_id which is one of 3 mac_ids
1006  * @dp_rxdma_srng: dp monitor status circular ring
1007  * @rx_desc_pool: Pointer to Rx descriptor pool
1008  * @num_req_buffers: number of buffers to be replenished
1009  * @desc_list:	list of descs if called from dp rx monitor status
1010  *		process or NULL during dp rx initialization or
1011  *		out of buffer interrupt
1012  * @tail: tail of the descs list
1013  * @owner: who owns the nbuf (host, NSS etc...)
1014  * Return: QDF_STATUS_SUCCESS on success, error status otherwise
1015  */
1016 static inline
1017 QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
1018 	uint32_t mac_id,
1019 	struct dp_srng *dp_rxdma_srng,
1020 	struct rx_desc_pool *rx_desc_pool,
1021 	uint32_t num_req_buffers,
1022 	union dp_rx_desc_list_elem_t **desc_list,
1023 	union dp_rx_desc_list_elem_t **tail,
1024 	uint8_t owner)
1025 {
1026 	uint32_t num_alloc_desc;
1027 	uint16_t num_desc_to_free = 0;
1028 	uint32_t num_entries_avail;
1029 	uint32_t count = 0;
1030 	int sync_hw_ptr = 1;
1031 	qdf_dma_addr_t paddr;
1032 	qdf_nbuf_t rx_netbuf;
1033 	void *rxdma_ring_entry;
1034 	union dp_rx_desc_list_elem_t *next;
1035 	void *rxdma_srng;
1036 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
1037 	uint32_t hp, tp;
1038 
1039 	if (!dp_pdev) {
1040 		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
1041 				       dp_soc, mac_id);
1042 		return QDF_STATUS_E_FAILURE;
1043 	}
1044 
1045 	rxdma_srng = dp_rxdma_srng->hal_srng;
1046 
1047 	qdf_assert(rxdma_srng);
1048 
1049 	dp_rx_mon_status_debug("%pK: requested %d buffers for replenish",
1050 			       dp_soc, num_req_buffers);
1051 
1052 	/*
1053 	 * if desc_list is NULL, allocate the descs from freelist
1054 	 */
1055 	if (!(*desc_list)) {
1056 
1057 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
1058 							  rx_desc_pool,
1059 							  num_req_buffers,
1060 							  desc_list,
1061 							  tail);
1062 
1063 		if (!num_alloc_desc) {
1064 			dp_rx_mon_status_err("%pK: no free rx_descs in freelist",
1065 					     dp_soc);
1066 			return QDF_STATUS_E_NOMEM;
1067 		}
1068 
1069 		dp_rx_mon_status_debug("%pK: %d rx desc allocated", dp_soc,
1070 				       num_alloc_desc);
1071 
1072 		num_req_buffers = num_alloc_desc;
1073 	}
1074 
1075 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
1076 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
1077 				rxdma_srng, sync_hw_ptr);
1078 
1079 	dp_rx_mon_status_debug("%pK: no of available entries in rxdma ring: %d",
1080 			       dp_soc, num_entries_avail);
1081 
1082 	if (num_entries_avail < num_req_buffers) {
1083 		num_desc_to_free = num_req_buffers - num_entries_avail;
1084 		num_req_buffers = num_entries_avail;
1085 	}
1086 
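	/*
	 * For each requested buffer: prepare and map an nbuf, program its
	 * DMA address and descriptor cookie into the ring entry, and move
	 * the SW head pointer forward.
	 */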
1087 	while (count <= num_req_buffers) {
1088 		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);
1089 
1090 		/*
1091 		 * qdf_nbuf alloc or map failed,
1092 		 * keep HP in mon_status_ring unchanged,
1093 		 * wait dp_rx_mon_status_srng_process
1094 		 * to fill in buffer at current HP.
1095 		 */
1096 		if (qdf_unlikely(!rx_netbuf)) {
1097 			hal_get_sw_hptp(dp_soc->hal_soc, rxdma_srng, &tp, &hp);
1098 			dp_err("%pK: qdf_nbuf allocate or map fail, count %d hp:%u tp:%u",
1099 			       dp_soc, count, hp, tp);
1100 			/*
1101 			 * If buffer allocation fails on current HP, then
1102 			 * decrement HP so it will be set to previous index
1103 			 * where proper buffer is attached.
1104 			 */
1105 			hal_srng_src_dec_hp(dp_soc->hal_soc,
1106 					    rxdma_srng);
1107 
1108 			hal_get_sw_hptp(dp_soc->hal_soc, rxdma_srng, &tp, &hp);
1109 			dp_err("HP adjusted to proper buffer index, hp:%u tp:%u", hp, tp);
1110 			break;
1111 		}
1112 
1113 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
1114 
1115 		next = (*desc_list)->next;
1116 		rxdma_ring_entry = hal_srng_src_get_cur_hp_n_move_next(
1117 						dp_soc->hal_soc,
1118 						rxdma_srng);
1119 
1120 		if (qdf_unlikely(!rxdma_ring_entry)) {
1121 			dp_rx_mon_status_err("%pK: rxdma_ring_entry is NULL, count - %d",
1122 					     dp_soc, count);
1123 			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, rx_netbuf,
1124 						     QDF_DMA_FROM_DEVICE,
1125 						     rx_desc_pool->buf_size);
1126 			qdf_nbuf_free(rx_netbuf);
1127 			break;
1128 		}
1129 
1130 		(*desc_list)->rx_desc.nbuf = rx_netbuf;
1131 		(*desc_list)->rx_desc.in_use = 1;
1132 		(*desc_list)->rx_desc.unmapped = 0;
1133 		count++;
1134 
1135 		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
1136 					     rxdma_ring_entry, paddr,
1137 					     (*desc_list)->rx_desc.cookie,
1138 					     owner);
1139 
1140 		dp_rx_mon_status_debug("%pK: rx_desc=%pK, cookie=%d, nbuf=%pK, paddr=%pK",
1141 				       dp_soc, &(*desc_list)->rx_desc,
1142 				       (*desc_list)->rx_desc.cookie, rx_netbuf,
1143 				       (void *)paddr);
1144 
1145 		*desc_list = next;
1146 	}
1147 
1148 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
1149 
1150 	dp_rx_mon_status_debug("%pK: successfully replenished %d buffers",
1151 			       dp_soc, num_req_buffers);
1152 
1153 	dp_rx_mon_status_debug("%pK: %d rx desc added back to free list",
1154 			       dp_soc, num_desc_to_free);
1155 
1156 	/*
1157 	 * add any available free desc back to the free list
1158 	 */
1159 	if (*desc_list) {
1160 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
1161 			mac_id, rx_desc_pool);
1162 	}
1163 
1164 	return QDF_STATUS_SUCCESS;
1165 }
1166 
1167 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
1168 /**
1169  * dp_mon_status_srng_drop_for_mac() - Drop the mon status ring packets for
1170  *  a given mac
1171  * @pdev: DP pdev
1172  * @mac_id: mac id
1173  * @quota: maximum number of ring entries that can be processed
1174  *
1175  * Return: Number of ring entries reaped
1176  */
1177 static uint32_t
1178 dp_mon_status_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
1179 				uint32_t quota)
1180 {
1181 	struct dp_soc *soc = pdev->soc;
1182 	void *mon_status_srng;
1183 	hal_soc_handle_t hal_soc;
1184 	void *ring_desc;
1185 	uint32_t reap_cnt = 0;
1186 
1187 	if (qdf_unlikely(!soc || !soc->hal_soc))
1188 		return reap_cnt;
1189 
1190 	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;
1191 
1192 	if (qdf_unlikely(!mon_status_srng ||
1193 			 !hal_srng_initialized(mon_status_srng)))
1194 		return reap_cnt;
1195 
1196 	hal_soc = soc->hal_soc;
1197 
1198 	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
1199 		return reap_cnt;
1200 
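	/*
	 * Reap filled status entries for this MAC and drop their buffers,
	 * re-attaching a fresh buffer to each reaped entry so the ring
	 * stays fully populated.
	 */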
1201 	while ((ring_desc =
1202 		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng)) &&
1203 		reap_cnt < MON_DROP_REAP_LIMIT && quota--) {
1204 		uint64_t buf_addr;
1205 		struct hal_buf_info hbi;
1206 		struct dp_rx_desc *rx_desc;
1207 		qdf_nbuf_t status_nbuf;
1208 		uint8_t *status_buf;
1209 		enum dp_mon_reap_status reap_status;
1210 		qdf_dma_addr_t iova;
1211 		struct rx_desc_pool *rx_desc_pool;
1212 
1213 		rx_desc_pool = &soc->rx_desc_status[mac_id];
1214 
1215 		buf_addr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_desc) |
1216 		   ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_desc)) << 32));
1217 
1218 		if (qdf_likely(buf_addr)) {
1219 			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1220 						  (uint32_t *)ring_desc,
1221 						  &hbi);
1222 			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
1223 							       hbi.sw_cookie);
1224 
1225 			qdf_assert_always(rx_desc);
1226 
1227 			status_nbuf = rx_desc->nbuf;
1228 
1229 			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
1230 					      QDF_DMA_FROM_DEVICE);
1231 
1232 			status_buf = qdf_nbuf_data(status_nbuf);
1233 
1234 			if (hal_get_rx_status_done(status_buf) !=
1235 			    QDF_STATUS_SUCCESS) {
1236 				/* If done status is missing:
1237 				 * 1. As per MAC team's suggestion,
1238 				 *    when HP + 1 entry is peeked and if DMA
1239 				 *    is not done and if HP + 2 entry's DMA done
1240 				 *    is set, skip HP + 1 entry and
1241 				 *    start processing in the next interrupt.
1242 				 * 2. If HP + 2 entry's DMA done is not set,
1243 				 *    poll on HP + 1 entry for its DMA done to be set.
1244 				 *    Check status for same buffer for next time
1245 				 *    dp_rx_mon_status_srng_process
1246 				 */
1247 				reap_status =
1248 					dp_rx_mon_handle_status_buf_done(pdev,
1249 							       mon_status_srng);
1250 				if (reap_status == DP_MON_STATUS_NO_DMA)
1251 					break;
1252 			}
1253 			qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
1254 						     QDF_DMA_FROM_DEVICE,
1255 						     rx_desc_pool->buf_size);
1256 			qdf_nbuf_free(status_nbuf);
1257 		} else {
1258 			union dp_rx_desc_list_elem_t *rx_desc_elem;
1259 
1260 			qdf_spin_lock_bh(&rx_desc_pool->lock);
1261 
1262 			if (!rx_desc_pool->freelist) {
1263 				qdf_spin_unlock_bh(&rx_desc_pool->lock);
1264 				break;
1265 			}
1266 			rx_desc_elem = rx_desc_pool->freelist;
1267 			rx_desc_pool->freelist = rx_desc_pool->freelist->next;
1268 			qdf_spin_unlock_bh(&rx_desc_pool->lock);
1269 
1270 			rx_desc = &rx_desc_elem->rx_desc;
1271 		}
1272 
1273 		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);
1274 
1275 		if (qdf_unlikely(!status_nbuf)) {
1276 			union dp_rx_desc_list_elem_t *desc_list = NULL;
1277 			union dp_rx_desc_list_elem_t *tail = NULL;
1278 
1279 			dp_info_rl("fail to allocate or map nbuf");
1280 			dp_rx_add_to_free_desc_list(&desc_list, &tail,
1281 						    rx_desc);
1282 			dp_rx_add_desc_list_to_free_list(soc,
1283 							 &desc_list,
1284 							 &tail, mac_id,
1285 							 rx_desc_pool);
1286 
1287 			hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, 0, 0,
1288 						     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
1289 			break;
1290 		}
1291 
1292 		iova = qdf_nbuf_get_frag_paddr(status_nbuf, 0);
1293 
1294 		rx_desc->nbuf = status_nbuf;
1295 		rx_desc->in_use = 1;
1296 
1297 		hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, iova,
1298 					     rx_desc->cookie,
1299 					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
1300 
1301 		reap_cnt++;
1302 		hal_srng_src_get_next(hal_soc, mon_status_srng);
1303 	}
1304 
1305 	hal_srng_access_end(hal_soc, mon_status_srng);
1306 
1307 	return reap_cnt;
1308 }
1309 
1310 uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
1311 				     uint32_t quota, bool force_flush)
1312 {
1313 	uint32_t work_done;
1314 
1315 	work_done = dp_mon_status_srng_drop_for_mac(pdev, mac_id, quota);
1316 	dp_mon_dest_srng_drop_for_mac(pdev, mac_id, force_flush);
1317 
1318 	return work_done;
1319 }
1320 #else
1321 uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
1322 				     uint32_t quota, bool force_flush)
1323 {
1324 	return 0;
1325 }
1326 #endif
1327