xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision 3149adf58a329e17232a4c0e58d460d025edd55a)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "dp_types.h"
20 #include "dp_rx.h"
21 #include "dp_peer.h"
22 #include "hal_rx.h"
23 #include "hal_api.h"
24 #include "qdf_nbuf.h"
25 #ifdef MESH_MODE_SUPPORT
26 #include "if_meta_hdr.h"
27 #endif
28 #include "dp_internal.h"
29 #include "dp_rx_mon.h"
30 
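/*
 * dp_rx_desc_prep() - prepare an Rx descriptor by attaching the newly
 * allocated nbuf. When RX_DESC_DEBUG_CHECK is enabled, the descriptor is
 * also stamped with DP_RX_DESC_MAGIC so later sanity checks can detect
 * stale or corrupted descriptors.
 */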
31 #ifdef RX_DESC_DEBUG_CHECK
32 static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
33 {
34 	rx_desc->magic = DP_RX_DESC_MAGIC;
35 	rx_desc->nbuf = nbuf;
36 }
37 #else
38 static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
39 {
40 	rx_desc->nbuf = nbuf;
41 }
42 #endif
43 
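/*
 * dp_rx_check_ap_bridge() - check whether intra-BSS (AP bridge) forwarding
 * should be attempted on this vdev. On WIN builds this is governed by the
 * per-vdev ap_bridge_enabled flag; otherwise forwarding is attempted for
 * all non-STA vdevs.
 */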
44 #ifdef CONFIG_WIN
45 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
46 {
47 	return vdev->ap_bridge_enabled;
48 }
49 #else
50 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
51 {
52 	if (vdev->opmode != wlan_op_mode_sta)
53 		return true;
54 	else
55 		return false;
56 }
57 #endif
58 /*
59  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
60  *			       called during dp rx initialization
61  *			       and at the end of dp_rx_process.
62  *
63  * @soc: core txrx main context
64  * @mac_id: mac_id which is one of 3 mac_ids
65  * @dp_rxdma_srng: dp rxdma circular ring
66  * @rx_desc_pool: Pointer to free Rx descriptor pool
67  * @num_req_buffers: number of buffers to be replenished
68  * @desc_list: list of descs if called from dp_rx_process
69  *	       or NULL during dp rx initialization or out of buffer
70  *	       interrupt.
71  * @tail: tail of descs list
72  * @owner: who owns the nbuf (host, NSS etc...)
73  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
74  */
75 QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
76 				struct dp_srng *dp_rxdma_srng,
77 				struct rx_desc_pool *rx_desc_pool,
78 				uint32_t num_req_buffers,
79 				union dp_rx_desc_list_elem_t **desc_list,
80 				union dp_rx_desc_list_elem_t **tail,
81 				uint8_t owner)
82 {
83 	uint32_t num_alloc_desc;
84 	uint16_t num_desc_to_free = 0;
85 	struct dp_pdev *dp_pdev = dp_soc->pdev_list[mac_id];
86 	uint32_t num_entries_avail;
87 	uint32_t count;
88 	int sync_hw_ptr = 1;
89 	qdf_dma_addr_t paddr;
90 	qdf_nbuf_t rx_netbuf;
91 	void *rxdma_ring_entry;
92 	union dp_rx_desc_list_elem_t *next;
93 	QDF_STATUS ret;
94 
95 	void *rxdma_srng;
96 
97 	rxdma_srng = dp_rxdma_srng->hal_srng;
98 
99 	if (!rxdma_srng) {
100 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
101 				  "rxdma srng not initialized");
102 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
103 		return QDF_STATUS_E_FAILURE;
104 	}
105 
106 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
107 		"requested %d buffers for replenish", num_req_buffers);
108 
109 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
110 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
111 						   rxdma_srng,
112 						   sync_hw_ptr);
113 
114 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
115 		"no. of available entries in rxdma ring: %d",
116 		num_entries_avail);
117 
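	/*
	 * If the caller did not pass a descriptor list and more than 3/4 of
	 * the ring is empty, top up the entire available space. Otherwise,
	 * cap the request at the available ring entries and remember how
	 * many descriptors must be returned to the free list.
	 */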
118 	if (!(*desc_list) && (num_entries_avail >
119 		((dp_rxdma_srng->num_entries * 3) / 4))) {
120 		num_req_buffers = num_entries_avail;
121 	} else if (num_entries_avail < num_req_buffers) {
122 		num_desc_to_free = num_req_buffers - num_entries_avail;
123 		num_req_buffers = num_entries_avail;
124 	}
125 
126 	if (qdf_unlikely(!num_req_buffers)) {
127 		num_desc_to_free = num_req_buffers;
128 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
129 		goto free_descs;
130 	}
131 
132 	/*
133 	 * if desc_list is NULL, allocate the descs from freelist
134 	 */
135 	if (!(*desc_list)) {
136 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
137 							  rx_desc_pool,
138 							  num_req_buffers,
139 							  desc_list,
140 							  tail);
141 
142 		if (!num_alloc_desc) {
143 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
144 				"no free rx_descs in freelist");
145 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
146 					num_req_buffers);
147 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
148 			return QDF_STATUS_E_NOMEM;
149 		}
150 
151 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
152 			"%d rx desc allocated", num_alloc_desc);
153 		num_req_buffers = num_alloc_desc;
154 	}
155 
156 
157 	count = 0;
158 
159 	while (count < num_req_buffers) {
160 		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
161 					RX_BUFFER_SIZE,
162 					RX_BUFFER_RESERVATION,
163 					RX_BUFFER_ALIGNMENT,
164 					FALSE);
165 
166 		if (rx_netbuf == NULL) {
167 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
168 			continue;
169 		}
170 
171 		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
172 				    QDF_DMA_BIDIRECTIONAL);
173 		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
174 			qdf_nbuf_free(rx_netbuf);
175 			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
176 			continue;
177 		}
178 
179 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
180 
181 		/*
182 		 * Check if the physical address of nbuf->data is
183 		 * less than 0x50000000; if so, free the nbuf and try
184 		 * allocating a new nbuf. We can retry up to 100 times.
185 		 * This is a temporary WAR till we fix it properly.
186 		 */
187 		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
188 		if (ret == QDF_STATUS_E_FAILURE) {
189 			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
190 			break;
191 		}
192 
193 		count++;
194 
195 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
196 								rxdma_srng);
197 
198 		next = (*desc_list)->next;
199 
200 		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
201 		(*desc_list)->rx_desc.in_use = 1;
202 
203 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
204 				"rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
205 			rx_netbuf, qdf_nbuf_data(rx_netbuf),
206 			(unsigned long long)paddr, (*desc_list)->rx_desc.cookie);
207 
208 		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
209 						(*desc_list)->rx_desc.cookie,
210 						owner);
211 
212 		*desc_list = next;
213 	}
214 
215 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
216 
217 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
218 		"successfully replenished %d buffers", num_req_buffers);
219 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
220 		"%d rx desc added back to free list", num_desc_to_free);
221 
222 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
223 			(RX_BUFFER_SIZE * num_req_buffers));
224 
225 free_descs:
226 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
227 	/*
228 	 * add any available free desc back to the free list
229 	 */
230 	if (*desc_list)
231 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
232 			mac_id, rx_desc_pool);
233 
234 	return QDF_STATUS_SUCCESS;
235 }
236 
237 /*
238  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
239  *				pkts to RAW mode simulation to
240  *				decapsulate the pkt.
241  *
242  * @vdev: vdev on which RAW mode is enabled
243  * @nbuf_list: list of RAW pkts to process
244  * @peer: peer object from which the pkt is received
245  *
246  * Return: void
247  */
248 void
249 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
250 					struct dp_peer *peer)
251 {
252 	qdf_nbuf_t deliver_list_head = NULL;
253 	qdf_nbuf_t deliver_list_tail = NULL;
254 	qdf_nbuf_t nbuf;
255 
256 	nbuf = nbuf_list;
257 	while (nbuf) {
258 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
259 
260 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
261 
262 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
263 		/*
264 		 * reset the chfrag_start and chfrag_end bits in nbuf cb
265 		 * as this is a non-amsdu pkt and RAW mode simulation expects
266 		 * these bits to be 0 for a non-amsdu pkt.
267 		 */
268 		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
269 			 qdf_nbuf_is_rx_chfrag_end(nbuf)) {
270 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
271 			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
272 		}
273 
274 		nbuf = next;
275 	}
276 
277 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
278 				 &deliver_list_tail, (struct cdp_peer*) peer);
279 
280 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
281 }
282 
283 
284 #ifdef DP_LFR
285 /*
286  * In case of LFR, data of a new peer might be sent up
287  * even before peer is added.
288  */
289 static inline struct dp_vdev *
290 dp_get_vdev_from_peer(struct dp_soc *soc,
291 			uint16_t peer_id,
292 			struct dp_peer *peer,
293 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
294 {
295 	struct dp_vdev *vdev;
296 	uint8_t vdev_id;
297 
298 	if (unlikely(!peer)) {
299 		if (peer_id != HTT_INVALID_PEER) {
300 			vdev_id = DP_PEER_METADATA_ID_GET(
301 					mpdu_desc_info.peer_meta_data);
302 			QDF_TRACE(QDF_MODULE_ID_DP,
303 				QDF_TRACE_LEVEL_DEBUG,
304 				FL("PeerID %d not found use vdevID %d"),
305 				peer_id, vdev_id);
306 			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
307 							vdev_id);
308 		} else {
309 			QDF_TRACE(QDF_MODULE_ID_DP,
310 				QDF_TRACE_LEVEL_DEBUG,
311 				FL("Invalid PeerID %d"),
312 				peer_id);
313 			return NULL;
314 		}
315 	} else {
316 		vdev = peer->vdev;
317 	}
318 	return vdev;
319 }
320 /*
321  * In case of LFR, this is an empty inline function
322  */
323 static inline void dp_rx_peer_validity_check(struct dp_peer *peer)
324 {
325 }
326 #else
327 static inline struct dp_vdev *
328 dp_get_vdev_from_peer(struct dp_soc *soc,
329 			uint16_t peer_id,
330 			struct dp_peer *peer,
331 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
332 {
333 	if (unlikely(!peer)) {
334 		QDF_TRACE(QDF_MODULE_ID_DP,
335 			QDF_TRACE_LEVEL_DEBUG,
336 			FL("Peer not found for peerID %d"),
337 			peer_id);
338 		return NULL;
339 	} else {
340 		return peer->vdev;
341 	}
342 }
343 
344 /*
345  * Assert if PEER is NULL
346  */
347 static inline void dp_rx_peer_validity_check(struct dp_peer *peer)
348 {
349 	qdf_assert_always(peer);
350 }
351 #endif
352 
353 /**
354  * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
355  *
356  * @soc: core txrx main context
357  * @sa_peer	: source peer entry
358  * @rx_tlv_hdr	: start address of rx tlvs
359  * @nbuf	: nbuf that has to be intrabss forwarded
360  *
361  * Return: bool: true if it is forwarded else false
362  */
363 static bool
364 dp_rx_intrabss_fwd(struct dp_soc *soc,
365 			struct dp_peer *sa_peer,
366 			uint8_t *rx_tlv_hdr,
367 			qdf_nbuf_t nbuf)
368 {
369 	uint16_t da_idx;
370 	uint16_t len;
371 	struct dp_peer *da_peer;
372 	struct dp_ast_entry *ast_entry;
373 	qdf_nbuf_t nbuf_copy;
374 
375 	/* check if the destination peer is available in peer table
376 	 * and also check if the source peer and destination peer
377 	 * belong to the same vap and destination peer is not bss peer.
378 	 */
379 
380 	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
381 	   !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
382 		da_idx = hal_rx_msdu_end_da_idx_get(rx_tlv_hdr);
383 
384 		ast_entry = soc->ast_table[da_idx];
385 		if (!ast_entry)
386 			return false;
387 
388 		da_peer = ast_entry->peer;
389 
390 		if (!da_peer)
391 			return false;
392 
393 		if (da_peer->vdev == sa_peer->vdev && !da_peer->bss_peer) {
394 			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
395 			len = qdf_nbuf_len(nbuf);
396 
397 			/* linearize the nbuf just before we send to
398 			 * dp_tx_send()
399 			 */
400 			if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
401 				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
402 					return false;
403 
404 				nbuf = qdf_nbuf_unshare(nbuf);
405 			}
406 
407 			if (!dp_tx_send(sa_peer->vdev, nbuf)) {
408 				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts,
409 						1, len);
410 				return true;
411 			} else {
412 				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1,
413 						len);
414 				return false;
415 			}
416 		}
417 	}
418 	/* if it is a broadcast pkt (e.g. ARP) and it is not from its own
419 	 * source, then clone the pkt and send the cloned pkt for
420 	 * intra BSS forwarding and the original pkt up the network stack.
421 	 * Note: how do we handle multicast pkts? Do we forward
422 	 * all multicast pkts as-is, or let a higher layer module
423 	 * like igmpsnoop decide whether to forward or not with
424 	 * Mcast enhancement?
425 	 */
426 	else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
427 		!sa_peer->bss_peer))) {
428 		nbuf_copy = qdf_nbuf_copy(nbuf);
429 		if (!nbuf_copy)
430 			return false;
431 		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
432 		len = qdf_nbuf_len(nbuf_copy);
433 
434 		if (dp_tx_send(sa_peer->vdev, nbuf_copy)) {
435 			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1, len);
436 			qdf_nbuf_free(nbuf_copy);
437 		} else
438 			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 1, len);
439 	}
440 	/* return false as we still have to send the original pkt
441 	 * up the stack
442 	 */
443 	return false;
444 }
445 
446 #ifdef MESH_MODE_SUPPORT
447 
448 /**
449  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
450  *
451  * @vdev: DP Virtual device handle
452  * @nbuf: Buffer pointer
453  * @rx_tlv_hdr: start of rx tlv header
454  * @peer: pointer to peer
455  *
456  * This function allocates memory for mesh receive stats and fills the
457  * required stats. It stores the memory address in the skb cb.
458  *
459  * Return: void
460  */
461 
462 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
463 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
464 {
465 	struct mesh_recv_hdr_s *rx_info = NULL;
466 	uint32_t pkt_type;
467 	uint32_t nss;
468 	uint32_t rate_mcs;
469 	uint32_t bw;
470 
471 	/* fill recv mesh stats */
472 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
473 
474 	/* upper layers are responsible for freeing this memory */
475 
476 	if (rx_info == NULL) {
477 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
478 			"Memory allocation failed for mesh rx stats");
479 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
480 		return;
481 	}
482 
483 	rx_info->rs_flags = MESH_RXHDR_VER1;
484 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
485 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
486 
487 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
488 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
489 
490 	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
491 		rx_info->rs_flags |= MESH_RX_DECRYPTED;
492 		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
493 		if (vdev->osif_get_key)
494 			vdev->osif_get_key(vdev->osif_vdev,
495 					&rx_info->rs_decryptkey[0],
496 					&peer->mac_addr.raw[0],
497 					rx_info->rs_keyix);
498 	}
499 
500 	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
501 	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
502 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
503 	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
504 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
505 	nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);
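	/*
	 * Pack the PHY rate info into rs_ratephy1:
	 * bits [7:0] mcs, [15:8] nss, [23:16] pkt_type, [31:24] bw.
	 */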
506 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
507 				(bw << 24);
508 
509 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
510 
511 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
512 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
513 						rx_info->rs_flags,
514 						rx_info->rs_rssi,
515 						rx_info->rs_channel,
516 						rx_info->rs_ratephy1,
517 						rx_info->rs_keyix);
518 
519 }
520 
521 /**
522  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
523  *
524  * @vdev: DP Virtual device handle
525  * @nbuf: Buffer pointer
526  * @rx_tlv_hdr: start of rx tlv header
527  *
528  * This checks if the received packet matches any filter-out
529  * category and drops the packet if it matches.
530  *
531  * Return: QDF_STATUS_SUCCESS indicates drop, QDF_STATUS_E_FAILURE indicates no drop
532  */
533 
534 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
535 					uint8_t *rx_tlv_hdr)
536 {
537 	union dp_align_mac_addr mac_addr;
538 
539 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
540 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
541 			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
542 				return  QDF_STATUS_SUCCESS;
543 
544 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
545 			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
546 				return  QDF_STATUS_SUCCESS;
547 
548 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
549 			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
550 				&& !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
551 				return  QDF_STATUS_SUCCESS;
552 
553 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
554 			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
555 					&mac_addr.raw[0]))
556 				return QDF_STATUS_E_FAILURE;
557 
558 			if (!qdf_mem_cmp(&mac_addr.raw[0],
559 					&vdev->mac_addr.raw[0],
560 					DP_MAC_ADDR_LEN))
561 				return  QDF_STATUS_SUCCESS;
562 		}
563 
564 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
565 			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
566 					&mac_addr.raw[0]))
567 				return QDF_STATUS_E_FAILURE;
568 
569 			if (!qdf_mem_cmp(&mac_addr.raw[0],
570 					&vdev->mac_addr.raw[0],
571 					DP_MAC_ADDR_LEN))
572 				return  QDF_STATUS_SUCCESS;
573 		}
574 	}
575 
576 	return QDF_STATUS_E_FAILURE;
577 }
578 
579 #else
580 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
581 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
582 {
583 }
584 
585 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
586 					uint8_t *rx_tlv_hdr)
587 {
588 	return QDF_STATUS_E_FAILURE;
589 }
590 
591 #endif
592 
593 #ifdef CONFIG_WIN
594 /**
595  * dp_rx_nac_filter(): Function to perform filtering of non-associated
596  * clients
597  * @pdev: DP pdev handle
598  * @rx_pkt_hdr: Rx packet Header
599  *
600  * return: dp_vdev*
601  */
602 static
603 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
604 		uint8_t *rx_pkt_hdr)
605 {
606 	struct ieee80211_frame *wh;
607 	struct dp_neighbour_peer *peer = NULL;
608 
609 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
610 
611 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
612 		return NULL;
613 
614 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
615 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
616 				neighbour_peer_list_elem) {
617 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
618 				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
619 			QDF_TRACE(
620 				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
621 				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
622 				peer->neighbour_peers_macaddr.raw[0],
623 				peer->neighbour_peers_macaddr.raw[1],
624 				peer->neighbour_peers_macaddr.raw[2],
625 				peer->neighbour_peers_macaddr.raw[3],
626 				peer->neighbour_peers_macaddr.raw[4],
627 				peer->neighbour_peers_macaddr.raw[5]);
628 
629 				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
630 
631 			return pdev->monitor_vdev;
632 		}
633 	}
634 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
635 
636 	return NULL;
637 }
638 
639 /**
640  * dp_rx_process_nac_rssi_frames(): Store RSSI for configured NAC
641  * @pdev: DP pdev handle
642  * @rx_tlv_hdr: tlv hdr buf
643  *
644  * return: None
645  */
646 #ifdef ATH_SUPPORT_NAC_RSSI
647 static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
648 {
649 	struct dp_vdev *vdev = NULL;
650 	struct dp_soc *soc  = pdev->soc;
651 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
652 	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
653 
654 	if (pdev->nac_rssi_filtering) {
655 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
656 			if (vdev->cdp_nac_rssi_enabled &&
657 				(qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
658 					wh->i_addr1, DP_MAC_ADDR_LEN) == 0)) {
659 				QDF_TRACE(QDF_MODULE_ID_DP,
660 					QDF_TRACE_LEVEL_DEBUG, "RSSI updated");
661 				vdev->cdp_nac_rssi.vdev_id = vdev->vdev_id;
662 				vdev->cdp_nac_rssi.client_rssi =
663 					hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
664 				dp_wdi_event_handler(WDI_EVENT_NAC_RSSI, soc,
665 					(void *)&vdev->cdp_nac_rssi,
666 					HTT_INVALID_PEER, WDI_NO_VAL,
667 					pdev->pdev_id);
668 			}
669 		}
670 	}
671 }
672 #else
673 static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
674 {
675 }
676 #endif
677 
678 /**
679  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
680  * @soc: DP SOC handle
681  * @mpdu: mpdu for which peer is invalid
682  *
683  * return: integer type
684  */
685 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
686 {
687 	struct dp_invalid_peer_msg msg;
688 	struct dp_vdev *vdev = NULL;
689 	struct dp_pdev *pdev = NULL;
690 	struct ieee80211_frame *wh;
691 	uint8_t i;
692 	qdf_nbuf_t curr_nbuf, next_nbuf;
693 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
694 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
695 
696 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
697 
698 	if (!DP_FRAME_IS_DATA(wh)) {
699 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
700 				"NAWDS valid only for data frames");
701 		goto free;
702 	}
703 
704 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
705 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
706 				"Invalid nbuf length");
707 		goto free;
708 	}
709 
710 
711 	for (i = 0; i < MAX_PDEV_CNT; i++) {
712 		pdev = soc->pdev_list[i];
713 		if (!pdev) {
714 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
715 					"PDEV not found");
716 			continue;
717 		}
718 
719 		if (pdev->filter_neighbour_peers) {
720 			/* Next Hop scenario not yet handled */
721 			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
722 			if (vdev) {
723 				dp_rx_mon_deliver(soc, i,
724 						pdev->invalid_peer_head_msdu,
725 						pdev->invalid_peer_tail_msdu);
726 				return 0;
727 			}
728 		}
729 
730 
731 		dp_rx_process_nac_rssi_frames(pdev, rx_tlv_hdr);
732 
733 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
734 
735 			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
736 						DP_MAC_ADDR_LEN) == 0) {
737 				goto out;
738 			}
739 		}
740 	}
741 
742 	if (!vdev) {
743 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
744 				"VDEV not found");
745 		goto free;
746 	}
747 
748 out:
749 	msg.wh = wh;
750 	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
751 	msg.nbuf = mpdu;
752 	msg.vdev_id = vdev->vdev_id;
753 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
754 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->osif_pdev, &msg);
755 
756 free:
757 	/* Drop and free packet */
758 	curr_nbuf = mpdu;
759 	while (curr_nbuf) {
760 		next_nbuf = qdf_nbuf_next(curr_nbuf);
761 		qdf_nbuf_free(curr_nbuf);
762 		curr_nbuf = next_nbuf;
763 	}
764 
765 	return 0;
766 }
767 
768 /**
769  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
770  * @soc: DP SOC handle
771  * @mpdu: mpdu for which peer is invalid
772  * @mpdu_done: if an mpdu is completed
773  *
774  * return: void
775  */
776 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
777 					qdf_nbuf_t mpdu, bool mpdu_done)
778 {
779 	/* Only trigger the process when mpdu is completed */
780 	if (mpdu_done)
781 		dp_rx_process_invalid_peer(soc, mpdu);
782 }
783 #else
784 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
785 {
786 	qdf_nbuf_t curr_nbuf, next_nbuf;
787 	struct dp_pdev *pdev;
788 	uint8_t i;
789 
790 	curr_nbuf = mpdu;
791 	while (curr_nbuf) {
792 		next_nbuf = qdf_nbuf_next(curr_nbuf);
793 		/* Drop and free packet */
794 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
795 				qdf_nbuf_len(curr_nbuf));
796 		qdf_nbuf_free(curr_nbuf);
797 		curr_nbuf = next_nbuf;
798 	}
799 
800 	/* reset the head and tail pointers */
801 	for (i = 0; i < MAX_PDEV_CNT; i++) {
802 		pdev = soc->pdev_list[i];
803 		if (!pdev) {
804 			QDF_TRACE(QDF_MODULE_ID_DP,
805 				QDF_TRACE_LEVEL_ERROR,
806 				"PDEV not found");
807 			continue;
808 		}
809 
810 		pdev->invalid_peer_head_msdu = NULL;
811 		pdev->invalid_peer_tail_msdu = NULL;
812 	}
813 	return 0;
814 }
815 
816 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
817 					qdf_nbuf_t mpdu, bool mpdu_done)
818 {
819 	/* To avoid compiler warning */
820 	mpdu_done = mpdu_done;
821 
822 	/* Process the nbuf */
823 	dp_rx_process_invalid_peer(soc, mpdu);
824 }
825 #endif
826 
827 #if defined(FEATURE_LRO)
828 static void dp_rx_print_lro_info(uint8_t *rx_tlv)
829 {
830 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
831 	FL("----------------------RX DESC LRO----------------------\n"));
832 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
833 		FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
834 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
835 		FL("pure_ack 0x%x"), HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
836 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
837 		FL("chksum 0x%x"), HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
838 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
839 		FL("TCP seq num 0x%x"), HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
840 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
841 		FL("TCP ack num 0x%x"), HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
842 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
843 		FL("TCP window 0x%x"), HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
844 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
845 		FL("TCP protocol 0x%x"), HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
846 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
847 		FL("TCP offset 0x%x"), HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
848 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
849 		FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
850 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
851 	FL("---------------------------------------------------------\n"));
852 }
853 
854 /**
855  * dp_rx_lro() - LRO related processing
856  * @rx_tlv: TLV data extracted from the rx packet
857  * @peer: destination peer of the msdu
858  * @msdu: network buffer
859  * @ctx: LRO context
860  *
861  * This function performs the LRO related processing of the msdu
862  *
863  * Return: void
864  */
865 static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
866 	 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
867 {
868 	if (!peer || !peer->vdev || !peer->vdev->lro_enable) {
869 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
870 			 FL("no peer, no vdev or LRO disabled"));
871 		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
872 		return;
873 	}
874 	qdf_assert(rx_tlv);
875 	dp_rx_print_lro_info(rx_tlv);
876 
877 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
878 		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
879 
880 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
881 			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
882 
883 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
884 			 HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
885 	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
886 			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
887 	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
888 			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
889 	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
890 			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
891 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
892 			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
893 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
894 			 HAL_RX_TLV_GET_IPV6(rx_tlv);
895 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
896 			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
897 	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
898 			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
899 	QDF_NBUF_CB_RX_LRO_CTX(msdu) = (unsigned char *)ctx;
900 
901 }
902 #else
903 static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
904 	 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
905 {
906 }
907 #endif
908 
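/**
 * dp_rx_adjust_nbuf_len() - set the pktlen of an nbuf that carries part of
 *			     a multi-buffer MSDU
 * @nbuf: current nbuf
 * @mpdu_len: remaining msdu length, decremented by the payload capacity
 *	      of one Rx buffer
 *
 * If more data remains than one buffer can hold, the nbuf is marked as
 * completely full; otherwise its length is set to the remaining msdu
 * length plus the Rx TLV header.
 */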
909 static inline void dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
910 {
911 	if (*mpdu_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN))
912 		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
913 	else
914 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
915 
916 	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);
917 }
918 
919 /**
920  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
921  *		     multiple nbufs.
922  * @nbuf: nbuf which may be part of a frag_list.
923  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
924  * @mpdu_len: mpdu length.
925  * @is_first_frag: is this the first nbuf in the fragmented MSDU.
926  * @frag_list_len: length of all the fragments combined.
927  * @head_frag_nbuf: parent nbuf
928  * @frag_list_head: pointer to the first nbuf in the frag_list.
929  * @frag_list_tail: pointer to the last nbuf in the frag_list.
930  *
931  * This function implements the creation of RX frag_list for cases
932  * where an MSDU is spread across multiple nbufs.
933  *
934  */
935 void dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
936 		uint16_t *mpdu_len, bool *is_first_frag,
937 			uint16_t *frag_list_len, qdf_nbuf_t *head_frag_nbuf,
938 			qdf_nbuf_t *frag_list_head, qdf_nbuf_t *frag_list_tail)
939 {
940 	if (qdf_unlikely(qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
941 		if (!(*is_first_frag)) {
942 			*is_first_frag = 1;
943 			qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
944 			*mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
945 
946 			dp_rx_adjust_nbuf_len(nbuf, mpdu_len);
947 			*head_frag_nbuf = nbuf;
948 		} else {
949 			dp_rx_adjust_nbuf_len(nbuf, mpdu_len);
950 			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
951 			*frag_list_len += qdf_nbuf_len(nbuf);
952 
953 			DP_RX_LIST_APPEND(*frag_list_head,
954 						*frag_list_tail,
955 						nbuf);
956 		}
957 	} else {
958 		if (qdf_unlikely(*is_first_frag)) {
959 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
960 			dp_rx_adjust_nbuf_len(nbuf, mpdu_len);
961 			qdf_nbuf_pull_head(nbuf,
962 					RX_PKT_TLVS_LEN);
963 			*frag_list_len += qdf_nbuf_len(nbuf);
964 
965 			DP_RX_LIST_APPEND(*frag_list_head,
966 						*frag_list_tail,
967 						nbuf);
968 
969 			qdf_nbuf_append_ext_list(*head_frag_nbuf,
970 						*frag_list_head,
971 						*frag_list_len);
972 
973 			*is_first_frag = 0;
974 			return;
975 		}
976 		*head_frag_nbuf = nbuf;
977 	}
978 }
979 
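/**
 * dp_rx_deliver_to_stack() - deliver a chain of nbufs to the OS shim
 * @vdev: vdev on which the pkts were received
 * @peer: peer from which the pkts were received
 * @nbuf_list: chain of nbufs to deliver
 *
 * Frees the nbufs if no rx callback has been registered; routes raw and
 * native-wifi decap frames through dp_rx_deliver_raw(), and everything
 * else through the registered osif_rx callback.
 */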
980 static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
981 						struct dp_peer *peer,
982 						qdf_nbuf_t nbuf_list)
983 {
984 	/*
985 	 * highly unlikely to have a vdev without a registered rx
986 	 * callback function. If so, let us free the nbuf_list.
987 	 */
988 	if (qdf_unlikely(!vdev->osif_rx)) {
989 		qdf_nbuf_t nbuf;
990 		do {
991 			nbuf = nbuf_list;
992 			nbuf_list = nbuf_list->next;
993 			qdf_nbuf_free(nbuf);
994 		} while (nbuf_list);
995 
996 		return;
997 	}
998 
999 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
1000 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi))
1001 		dp_rx_deliver_raw(vdev, nbuf_list, peer);
1002 	else
1003 		vdev->osif_rx(vdev->osif_vdev, nbuf_list);
1004 
1005 }
1006 
1007 #ifdef WDS_VENDOR_EXTENSION
1008 int dp_wds_rx_policy_check(
1009 		uint8_t *rx_tlv_hdr,
1010 		struct dp_vdev *vdev,
1011 		struct dp_peer *peer,
1012 		int rx_mcast
1013 		)
1014 {
1015 	struct dp_peer *bss_peer;
1016 	int fr_ds, to_ds, rx_3addr, rx_4addr;
1017 	int rx_policy_ucast, rx_policy_mcast;
1018 
1019 	if (vdev->opmode == wlan_op_mode_ap) {
1020 		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
1021 			if (bss_peer->bss_peer) {
1022 				/* if wds policy check is not enabled on this vdev, accept all frames */
1023 				if (!bss_peer->wds_ecm.wds_rx_filter) {
1024 					return 1;
1025 				}
1026 				break;
1027 			}
1028 		}
1029 		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
1030 		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
1031 	} else {             /* sta mode */
1032 		if (!peer->wds_ecm.wds_rx_filter) {
1033 			return 1;
1034 		}
1035 		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
1036 		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
1037 	}
1038 
1039 	/* ------------------------------------------------
1040 	 *                       self
1041 	 * peer-             rx  rx-
1042 	 * wds  ucast mcast dir policy accept note
1043 	 * ------------------------------------------------
1044 	 * 1     1     0     11  x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
1045 	 * 1     1     0     01  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
1046 	 * 1     1     0     10  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
1047 	 * 1     1     0     00  x1     0      bad frame, won't see it
1048 	 * 1     0     1     11  1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
1049 	 * 1     0     1     01  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
1050 	 * 1     0     1     10  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
1051 	 * 1     0     1     00  1x     0      bad frame, won't see it
1052 	 * 1     1     0     11  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
1053 	 * 1     1     0     01  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
1054 	 * 1     1     0     10  x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
1055 	 * 1     1     0     00  x0     0      bad frame, won't see it
1056 	 * 1     0     1     11  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
1057 	 * 1     0     1     01  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
1058 	 * 1     0     1     10  0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
1059 	 * 1     0     1     00  0x     0      bad frame, won't see it
1060 	 *
1061 	 * 0     x     x     11  xx     0      we only accept to-ds Rx frames from non-wds peers in this mode.
1062 	 * 0     x     x     01  xx     1
1063 	 * 0     x     x     10  xx     0
1064 	 * 0     x     x     00  xx     0      bad frame, won't see it
1065 	 * ------------------------------------------------
1066 	 */
1067 
1068 	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
1069 	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
1070 	rx_3addr = fr_ds ^ to_ds;
1071 	rx_4addr = fr_ds & to_ds;
1072 
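	/*
	 * rx_3addr is set for 3-address frames (exactly one of FromDS/ToDS
	 * set); rx_4addr is set for 4-address (WDS) frames where both
	 * FromDS and ToDS are set.
	 */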
1073 	if (vdev->opmode == wlan_op_mode_ap) {
1074 		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
1075 				(peer->wds_enabled && !rx_mcast && (rx_4addr == rx_policy_ucast)) ||
1076 				(peer->wds_enabled && rx_mcast && (rx_4addr == rx_policy_mcast))) {
1077 			return 1;
1078 		}
1079 	} else {           /* sta mode */
1080 		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
1081 				(rx_mcast && (rx_4addr == rx_policy_mcast))) {
1082 			return 1;
1083 		}
1084 	}
1085 	return 0;
1086 }
1087 #else
1088 int dp_wds_rx_policy_check(
1089 		uint8_t *rx_tlv_hdr,
1090 		struct dp_vdev *vdev,
1091 		struct dp_peer *peer,
1092 		int rx_mcast
1093 		)
1094 {
1095 	return 1;
1096 }
1097 #endif
1098 
1099 /**
1100  * dp_rx_process() - Brain of the Rx processing functionality
1101  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
1102  * @soc: core txrx main context
1103  * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
1104  * @quota: No. of units (packets) that can be serviced in one shot.
1105  *
1106  * This function implements the core of Rx functionality. This is
1107  * expected to handle only non-error frames.
1108  *
1109  * Return: uint32_t: No. of elements processed
1110  */
1111 uint32_t
1112 dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
1113 {
1114 	void *hal_soc;
1115 	void *ring_desc;
1116 	struct dp_rx_desc *rx_desc = NULL;
1117 	qdf_nbuf_t nbuf, next;
1118 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
1119 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
1120 	uint32_t rx_bufs_used = 0, rx_buf_cookie, l2_hdr_offset;
1121 	uint16_t msdu_len;
1122 	uint16_t peer_id;
1123 	struct dp_peer *peer = NULL;
1124 	struct dp_vdev *vdev = NULL;
1125 	uint32_t pkt_len;
1126 	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
1127 	struct hal_rx_msdu_desc_info msdu_desc_info = { 0 };
1128 	enum hal_reo_error_status error;
1129 	uint32_t peer_mdata;
1130 	uint8_t *rx_tlv_hdr;
1131 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1132 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1133 	uint8_t mac_id = 0;
1134 	uint32_t ampdu_flag, amsdu_flag;
1135 	struct dp_pdev *pdev;
1136 	struct dp_srng *dp_rxdma_srng;
1137 	struct rx_desc_pool *rx_desc_pool;
1138 	struct ether_header *eh;
1139 	struct dp_soc *soc = int_ctx->soc;
1140 	uint8_t ring_id = 0;
1141 	uint8_t core_id = 0;
1142 	bool is_first_frag = 0;
1143 	bool isBroadcast = 0;
1144 	uint16_t mpdu_len = 0;
1145 	qdf_nbuf_t head_frag_nbuf = NULL;
1146 	qdf_nbuf_t frag_list_head = NULL;
1147 	qdf_nbuf_t frag_list_tail = NULL;
1148 	uint16_t frag_list_len = 0;
1149 	qdf_nbuf_t nbuf_head = NULL;
1150 	qdf_nbuf_t nbuf_tail = NULL;
1151 	qdf_nbuf_t deliver_list_head = NULL;
1152 	qdf_nbuf_t deliver_list_tail = NULL;
1153 
1154 	DP_HIST_INIT();
1155 	/* Debug -- Remove later */
1156 	qdf_assert(soc && hal_ring);
1157 
1158 	hal_soc = soc->hal_soc;
1159 
1160 	/* Debug -- Remove later */
1161 	qdf_assert(hal_soc);
1162 
1163 	hif_pm_runtime_mark_last_busy(soc->osdev->dev);
1164 	sgi = mcs = tid = nss = bw = reception_type = pkt_type = 0;
1165 
1166 	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
1167 
1168 		/*
1169 		 * Need API to convert from hal_ring pointer to
1170 		 * Ring Type / Ring Id combo
1171 		 */
1172 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1173 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1174 			FL("HAL RING Access Failed -- %pK"), hal_ring);
1175 		hal_srng_access_end(hal_soc, hal_ring);
1176 		goto done;
1177 	}
1178 
1179 	/*
1180 	 * start reaping the buffers from reo ring and queue
1181 	 * them in per vdev queue.
1182 	 * Process the received pkts in a different per vdev loop.
1183 	 */
1184 	while (qdf_likely(quota && (ring_desc =
1185 				hal_srng_dst_get_next(hal_soc, hal_ring)))) {
1186 
1187 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
1188 		ring_id = hal_srng_ring_id_get(hal_ring);
1189 
1190 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
1191 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1192 			FL("HAL RING 0x%pK:error %d"), hal_ring, error);
1193 			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
1194 			/* Don't know how to deal with this -- assert */
1195 			qdf_assert(0);
1196 		}
1197 
1198 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1199 
1200 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
1201 
1202 
1203 		qdf_assert(rx_desc);
1204 		rx_bufs_reaped[rx_desc->pool_id]++;
1205 
1206 		/* TODO */
1207 		/*
1208 		 * Need a separate API for unmapping based on
1209 		 * physical address
1210 		 */
1211 		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
1212 					QDF_DMA_BIDIRECTIONAL);
1213 
1214 		core_id = smp_processor_id();
1215 		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
1216 
1217 		/* Get MPDU DESC info */
1218 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
1219 		peer_id = DP_PEER_METADATA_PEER_ID_GET(
1220 				mpdu_desc_info.peer_meta_data);
1221 
1222 		hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf),
1223 						mpdu_desc_info.peer_meta_data);
1224 
1225 		peer = dp_peer_find_by_id(soc, peer_id);
1226 
1227 		vdev = dp_get_vdev_from_peer(soc, peer_id, peer,
1228 						mpdu_desc_info);
1229 
1230 		if (!vdev) {
1231 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1232 				FL("vdev is NULL"));
1233 			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1234 			qdf_nbuf_free(rx_desc->nbuf);
1235 			goto fail;
1236 
1237 		}
1238 
1239 		/* Get MSDU DESC info */
1240 		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
1241 
1242 		/*
1243 		 * save msdu flags first, last and continuation msdu in
1244 		 * nbuf->cb
1245 		 */
1246 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
1247 			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
1248 
1249 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
1250 			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
1251 
1252 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
1253 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
1254 
1255 		DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1,
1256 				qdf_nbuf_len(rx_desc->nbuf));
1257 
1258 		if (soc->process_rx_status) {
1259 			ampdu_flag = (mpdu_desc_info.mpdu_flags &
1260 					HAL_MPDU_F_AMPDU_FLAG);
1261 
1262 			DP_STATS_INCC(peer, rx.ampdu_cnt, 1, ampdu_flag);
1263 			DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(ampdu_flag));
1264 		}
1265 
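		/*
		 * amsdu_flag is true when this MSDU is both the first and
		 * the last MSDU of the MPDU, i.e. a non-AMSDU frame.
		 */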
1266 		amsdu_flag = ((msdu_desc_info.msdu_flags &
1267 					HAL_MSDU_F_FIRST_MSDU_IN_MPDU) &&
1268 				(msdu_desc_info.msdu_flags &
1269 					HAL_MSDU_F_LAST_MSDU_IN_MPDU));
1270 
1271 		DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1,
1272 				amsdu_flag);
1273 		DP_STATS_INCC(peer, rx.amsdu_cnt, 1,
1274 				!(amsdu_flag));
1275 
1276 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
1277 		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
1278 fail:
1279 		/*
1280 		 * if continuation bit is set then we have MSDU spread
1281 		 * across multiple buffers, let us not decrement quota
1282 		 * till we reap all buffers of that MSDU.
1283 		 */
1284 		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
1285 			quota -= 1;
1286 
1287 
1288 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
1289 						&tail[rx_desc->pool_id],
1290 						rx_desc);
1291 	}
1292 done:
1293 	hal_srng_access_end(hal_soc, hal_ring);
1294 
1295 	/* Update histogram statistics by looping through pdev's */
1296 	DP_RX_HIST_STATS_PER_PDEV();
1297 
1298 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1299 		/*
1300 		 * continue with next mac_id if no pkts were reaped
1301 		 * from that pool
1302 		 */
1303 		if (!rx_bufs_reaped[mac_id])
1304 			continue;
1305 
1306 		pdev = soc->pdev_list[mac_id];
1307 		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
1308 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
1309 
1310 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1311 					rx_desc_pool, rx_bufs_reaped[mac_id],
1312 					&head[mac_id], &tail[mac_id],
1313 					HAL_RX_BUF_RBM_SW3_BM);
1314 	}
1315 
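	/*
	 * Second pass: walk the list of reaped nbufs, validate the Rx TLVs,
	 * update per-peer stats and deliver the frames to the stack, grouped
	 * per vdev.
	 */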
1316 	/* Peer can be NULL in case of LFR */
1317 	if (qdf_likely(peer != NULL))
1318 		vdev = NULL;
1319 
1320 	nbuf = nbuf_head;
1321 	while (nbuf) {
1322 		next = nbuf->next;
1323 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
1324 
1325 		peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
1326 		peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
1327 		peer = dp_peer_find_by_id(soc, peer_id);
1328 
1329 		if (deliver_list_head && peer && (vdev != peer->vdev)) {
1330 			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head);
1331 			deliver_list_head = NULL;
1332 			deliver_list_tail = NULL;
1333 		}
1334 
1335 		if (qdf_likely(peer != NULL))
1336 			vdev = peer->vdev;
1337 
1338 		/*
1339 		 * Check if DMA completed -- msdu_done is the last bit
1340 		 * to be written
1341 		 */
1342 		if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
1343 
1344 			QDF_TRACE(QDF_MODULE_ID_DP,
1345 					QDF_TRACE_LEVEL_ERROR,
1346 					FL("MSDU DONE failure"));
1347 			DP_STATS_INC(vdev->pdev, dropped.msdu_not_done,
1348 					1);
1349 			hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
1350 			qdf_assert(0);
1351 		}
1352 
1353 		/*
1354 		 * The below condition happens when an MSDU is spread
1355 		 * across multiple buffers. This can happen in two cases
1356 		 * 1. The nbuf size is smaller than the received msdu.
1357 		 *    ex: we have set the nbuf size to 2048 during
1358 		 *        nbuf_alloc. but we received an msdu which is
1359 		 *        2304 bytes in size then this msdu is spread
1360 		 *        across 2 nbufs.
1361 		 *
1362 		 * 2. AMSDUs when RAW mode is enabled.
1363 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
1364 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
1365 		 *        spread across 2nd nbuf and 3rd nbuf.
1366 		 *
1367 		 * for these scenarios let us create a skb frag_list and
1368 		 * append these buffers till the last MSDU of the AMSDU
1369 		 */
1370 		if (qdf_unlikely(vdev->rx_decap_type ==
1371 				htt_cmn_pkt_type_raw)) {
1372 
1373 			dp_rx_sg_create(nbuf, rx_tlv_hdr, &mpdu_len,
1374 					&is_first_frag, &frag_list_len,
1375 					&head_frag_nbuf,
1376 					&frag_list_head,
1377 					&frag_list_tail);
1378 
1379 			if (is_first_frag) {
1380 				nbuf = next;
1381 				continue;
1382 			} else {
1383 				frag_list_head = NULL;
1384 				frag_list_tail = NULL;
1385 				nbuf = head_frag_nbuf;
1386 				rx_tlv_hdr = qdf_nbuf_data(nbuf);
1387 			}
1388 		}
1389 
1390 		/*
1391 		 * This is a redundant sanity check; ideally peer
1392 		 * should never be NULL here. If for any reason it
1393 		 * is NULL, we will assert.
1394 		 * Do nothing for LFR case.
1395 		 */
1396 		dp_rx_peer_validity_check(peer);
1397 
1398 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
1399 					hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
1400 			QDF_TRACE(QDF_MODULE_ID_DP,
1401 					QDF_TRACE_LEVEL_ERROR,
1402 					FL("Policy Check Drop pkt"));
1403 			/* Drop & free packet */
1404 			qdf_nbuf_free(nbuf);
1405 			/* Statistics */
1406 			nbuf = next;
1407 			continue;
1408 		}
1409 
1410 		if (qdf_unlikely(peer && peer->bss_peer)) {
1411 			QDF_TRACE(QDF_MODULE_ID_DP,
1412 				QDF_TRACE_LEVEL_ERROR,
1413 				FL("received pkt with same src MAC"));
1414 			DP_STATS_INC(vdev->pdev, dropped.mec, 1);
1415 
1416 			/* Drop & free packet */
1417 			qdf_nbuf_free(nbuf);
1418 			/* Statistics */
1419 			nbuf = next;
1420 			continue;
1421 		}
1422 
1423 		pdev = vdev->pdev;
1424 
1425 		if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
1426 			(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
1427 			(hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
1428 			DP_STATS_INC_PKT(peer, rx.nawds_mcast_drop, 1,
1429 				qdf_nbuf_len(nbuf));
1430 			qdf_nbuf_free(nbuf);
1431 			nbuf = next;
1432 			continue;
1433 		}
1434 
1435 		if (qdf_likely(
1436 			!hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr)
1437 			&&
1438 			!hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr))) {
1439 			qdf_nbuf_rx_cksum_t cksum = {0};
1440 
1441 			cksum.l4_result =
1442 				QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1443 
1444 			qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1445 		}
1446 
1447 		if (soc->process_rx_status) {
1448 			sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1449 			mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
1450 			tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
1451 
1452 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1453 				"%s: %d, SGI: %d, tid: %d",
1454 				__func__, __LINE__, sgi, tid);
1455 
1456 			bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
1457 			reception_type = hal_rx_msdu_start_reception_type_get(
1458 					rx_tlv_hdr);
1459 			nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);
1460 			pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1461 
1462 			DP_STATS_INC(peer, rx.nss[nss], 1);
1463 
1464 			DP_STATS_INCC(peer, rx.err.mic_err, 1,
1465 				hal_rx_mpdu_end_mic_err_get(
1466 					rx_tlv_hdr));
1467 
1468 			DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1469 				hal_rx_mpdu_end_decrypt_err_get(
1470 					rx_tlv_hdr));
1471 
1472 			DP_STATS_INC(peer, rx.reception_type[reception_type],
1473 					1);
1474 			DP_STATS_INC(peer, rx.bw[bw], 1);
1475 			DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1476 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1477 					mcs_count[MAX_MCS - 1], 1,
1478 					((mcs >= MAX_MCS_11A) &&
1479 					 (pkt_type == DOT11_A)));
1480 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1481 					mcs_count[mcs], 1,
1482 					((mcs < MAX_MCS_11A) &&
1483 					 (pkt_type == DOT11_A)));
1484 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1485 					mcs_count[MAX_MCS - 1], 1,
1486 					((mcs >= MAX_MCS_11B) &&
1487 					 (pkt_type == DOT11_B)));
1488 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1489 					mcs_count[mcs], 1,
1490 					((mcs < MAX_MCS_11B) &&
1491 					 (pkt_type == DOT11_B)));
1492 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1493 					mcs_count[MAX_MCS - 1], 1,
1494 					((mcs >= MAX_MCS_11A) &&
1495 					 (pkt_type == DOT11_N)));
1496 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1497 					mcs_count[mcs], 1,
1498 					((mcs < MAX_MCS_11A) &&
1499 					 (pkt_type == DOT11_N)));
1500 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1501 					mcs_count[MAX_MCS - 1], 1,
1502 					((mcs >= MAX_MCS_11AC) &&
1503 					 (pkt_type == DOT11_AC)));
1504 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1505 					mcs_count[mcs], 1,
1506 					((mcs < MAX_MCS_11AC) &&
1507 					 (pkt_type == DOT11_AC)));
1508 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1509 					mcs_count[MAX_MCS - 1], 1,
1510 					((mcs >= (MAX_MCS - 1)) &&
1511 					 (pkt_type == DOT11_AX)));
1512 			DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
1513 					mcs_count[mcs], 1,
1514 					((mcs < (MAX_MCS - 1)) &&
1515 					 (pkt_type == DOT11_AX)));
1516 
1517 			DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)],
1518 					1);
1519 		}
1520 
1521 		/*
1522 		 * HW structures call this L3 header padding --
1523 		 * even though this is actually the offset from
1524 		 * the buffer beginning where the L2 header
1525 		 * begins.
1526 		 */
1527 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1528 			FL("rxhash: flow id toeplitz: 0x%x\n"),
1529 			hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));
1530 
1531 		l2_hdr_offset =
1532 			hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
1533 
1534 		msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1535 		pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1536 
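		/*
		 * For a scattered msdu the pktlen was already set while
		 * building the frag_list in dp_rx_sg_create(), so only the
		 * Rx TLV header is pulled; otherwise set the pktlen and pull
		 * both the Rx TLV header and the L2 header padding.
		 */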
1537 		if (unlikely(qdf_nbuf_get_ext_list(nbuf)))
1538 			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1539 		else {
1540 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
1541 			qdf_nbuf_pull_head(nbuf,
1542 					RX_PKT_TLVS_LEN +
1543 					l2_hdr_offset);
1544 		}
1545 
1546 		if (qdf_unlikely(vdev->mesh_vdev)) {
1547 			if (dp_rx_filter_mesh_packets(vdev, nbuf,
1548 							rx_tlv_hdr)
1549 					== QDF_STATUS_SUCCESS) {
1550 				QDF_TRACE(QDF_MODULE_ID_DP,
1551 					QDF_TRACE_LEVEL_INFO_MED,
1552 					FL("mesh pkt filtered"));
1553 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
1554 					1);
1555 
1556 				qdf_nbuf_free(nbuf);
1557 				nbuf = next;
1558 				continue;
1559 			}
1560 			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
1561 		}
1562 
1563 #ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
1564 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1565 			"p_id %d msdu_len %d hdr_off %d",
1566 			peer_id, msdu_len, l2_hdr_offset);
1567 
1568 		print_hex_dump(KERN_ERR,
1569 			       "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
1570 				qdf_nbuf_data(nbuf), 128, false);
1571 #endif /* NAPIER_EMULATION */
1572 
1573 		if (qdf_likely(vdev->rx_decap_type ==
1574 					htt_cmn_pkt_type_ethernet) &&
1575 				(qdf_likely(!vdev->mesh_vdev))) {
1576 			/* WDS Source Port Learning */
1577 			dp_rx_wds_srcport_learn(soc,
1578 						rx_tlv_hdr,
1579 						peer,
1580 						nbuf);
1581 
1582 			/* Intrabss-fwd */
1583 			if (dp_rx_check_ap_bridge(vdev))
1584 				if (dp_rx_intrabss_fwd(soc,
1585 							peer,
1586 							rx_tlv_hdr,
1587 							nbuf)) {
1588 					nbuf = next;
1589 					continue; /* Get next desc */
1590 				}
1591 		}
1592 
1593 		rx_bufs_used++;
1594 
1595 		dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx);
1596 
1597 		DP_RX_LIST_APPEND(deliver_list_head,
1598 					deliver_list_tail,
1599 					nbuf);
1600 
1601 		DP_STATS_INCC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf),
1602 				hal_rx_msdu_end_da_is_mcbc_get(
1603 					rx_tlv_hdr));
1604 
1605 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
1606 				qdf_nbuf_len(nbuf));
1607 
1608 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
1609 					(vdev->rx_decap_type ==
1610 					 htt_cmn_pkt_type_ethernet))) {
1611 			eh = (struct ether_header *)qdf_nbuf_data(nbuf);
1612 			isBroadcast = (IEEE80211_IS_BROADCAST
1613 					(eh->ether_dhost)) ? 1 : 0 ;
1614 			if (isBroadcast) {
1615 				DP_STATS_INC_PKT(peer, rx.bcast, 1,
1616 						qdf_nbuf_len(nbuf));
1617 			}
1618 		}
1619 
1620 		if ((soc->process_rx_status) && likely(peer) &&
1621 			hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
1622 			if (soc->cdp_soc.ol_ops->update_dp_stats) {
1623 				soc->cdp_soc.ol_ops->update_dp_stats(
1624 						vdev->pdev->osif_pdev,
1625 						&peer->stats,
1626 						peer_id,
1627 						UPDATE_PEER_STATS);
1628 			}
1629 		}
1630 		nbuf = next;
1631 	}
1632 
1633 	if (deliver_list_head)
1634 		dp_rx_deliver_to_stack(vdev, peer, deliver_list_head);
1635 
1636 	return rx_bufs_used; /* Assume no scale factor for now */
1637 }
1638 
1639 /**
1640  * dp_rx_pdev_detach() - detach dp rx
1641  * @pdev: core txrx pdev context
1642  *
1643  * This function will detach DP RX from the main device context
1644  * and will free DP Rx resources.
1645  *
1646  * Return: void
1647  */
1648 void
1649 dp_rx_pdev_detach(struct dp_pdev *pdev)
1650 {
1651 	uint8_t pdev_id = pdev->pdev_id;
1652 	struct dp_soc *soc = pdev->soc;
1653 	struct rx_desc_pool *rx_desc_pool;
1654 
1655 	rx_desc_pool = &soc->rx_desc_buf[pdev_id];
1656 
1657 	if (rx_desc_pool->pool_size != 0) {
1658 		dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
1659 		qdf_spinlock_destroy(&soc->rx_desc_mutex[pdev_id]);
1660 	}
1661 
1662 	return;
1663 }
1664 
1665 /**
1666  * dp_rx_pdev_attach() - attach DP RX
1667  * @pdev: core txrx pdev context
1668  *
1669  * This function will attach a DP RX instance into the main
1670  * device (SOC) context. It will allocate dp rx resources and
1671  * initialize them.
1672  *
1673  * Return: QDF_STATUS_SUCCESS: success
1674  *         QDF_STATUS_E_RESOURCES: Error return
1675  */
1676 QDF_STATUS
1677 dp_rx_pdev_attach(struct dp_pdev *pdev)
1678 {
1679 	uint8_t pdev_id = pdev->pdev_id;
1680 	struct dp_soc *soc = pdev->soc;
1681 	struct dp_srng rxdma_srng;
1682 	uint32_t rxdma_entries;
1683 	union dp_rx_desc_list_elem_t *desc_list = NULL;
1684 	union dp_rx_desc_list_elem_t *tail = NULL;
1685 	struct dp_srng *dp_rxdma_srng;
1686 	struct rx_desc_pool *rx_desc_pool;
1687 
1688 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
1689 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1690 			"nss-wifi<4> skip Rx refill %d", pdev_id);
1691 		return QDF_STATUS_SUCCESS;
1692 	}
1693 
1694 	qdf_spinlock_create(&soc->rx_desc_mutex[pdev_id]);
1695 	pdev = soc->pdev_list[pdev_id];
1696 	rxdma_srng = pdev->rx_refill_buf_ring;
1697 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
1698 	rxdma_entries = rxdma_srng.alloc_size/hal_srng_get_entrysize(
1699 						     soc->hal_soc, RXDMA_BUF);
1700 
1701 	rx_desc_pool = &soc->rx_desc_buf[pdev_id];
1702 
1703 	dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries*3, rx_desc_pool);
1704 	/* For Rx buffers, WBM release ring is SW RING 3, for all pdevs */
1705 	dp_rxdma_srng = &pdev->rx_refill_buf_ring;
1706 	dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
1707 		0, &desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
1708 
1709 	return QDF_STATUS_SUCCESS;
1710 }
1711