xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c (revision cf269aa28dd6d2246de6aa0dfeab69dedcc01bb8)
1 /*
2  * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #ifndef RX_DEFRAG_DO_NOT_REINJECT
22 #ifndef DP_BE_WAR
23 #include "li/hal_li_rx.h"
24 #endif
25 #endif
26 #include "dp_types.h"
27 #include "dp_rx.h"
28 #include "dp_peer.h"
29 #include "hal_api.h"
30 #include "qdf_trace.h"
31 #include "qdf_nbuf.h"
32 #include "dp_internal.h"
33 #include "dp_rx_defrag.h"
34 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
36 #include "dp_ipa.h"
37 #include "dp_rx_buffer_pool.h"
38 
39 const struct dp_rx_defrag_cipher dp_f_ccmp = {
40 	"AES-CCM",
41 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
42 	IEEE80211_WEP_MICLEN,
43 	0,
44 };
45 
46 const struct dp_rx_defrag_cipher dp_f_tkip = {
47 	"TKIP",
48 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
49 	IEEE80211_WEP_CRCLEN,
50 	IEEE80211_WEP_MICLEN,
51 };
52 
53 const struct dp_rx_defrag_cipher dp_f_wep = {
54 	"WEP",
55 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
56 	IEEE80211_WEP_CRCLEN,
57 	0,
58 };
59 
60 /*
61  * The header and MIC lengths are the same for both
62  * GCMP-128 and GCMP-256.
63  */
64 const struct dp_rx_defrag_cipher dp_f_gcmp = {
65 	"AES-GCMP",
66 	WLAN_IEEE80211_GCMP_HEADERLEN,
67 	WLAN_IEEE80211_GCMP_MICLEN,
68 	WLAN_IEEE80211_GCMP_MICLEN,
69 };
70 
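/*
 * Editor's example (not part of the driver): a minimal sketch of how the
 * cipher table above is consumed, assuming the conventional net80211
 * values (IV 3, key ID 1, ExtIV 4, ICV 4, Michael MIC 8). ic_header
 * bytes of IV sit right after the 802.11 header, ic_trailer bytes are
 * trimmed from each fragment's tail, and ic_miclen bytes of Michael MIC
 * (TKIP only) are verified over the reassembled frame.
 */
#if 0	/* illustrative only, not compiled */
static uint16_t dp_rx_defrag_example_tkip_overhead(void)
{
	/* 8 byte IV/ExtIV header + 4 byte ICV trailer + 8 byte MIC */
	return dp_f_tkip.ic_header + dp_f_tkip.ic_trailer +
	       dp_f_tkip.ic_miclen;	/* = 20 bytes per MPDU */
}
#endif
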
71 /**
72  * dp_rx_defrag_frames_free() - Free fragment chain
73  * @frames: Fragment chain
74  *
75  * Iterates through the fragment chain and frees them
76  * Return: None
77  */
78 static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
79 {
80 	qdf_nbuf_t next, frag = frames;
81 
82 	while (frag) {
83 		next = qdf_nbuf_next(frag);
84 		dp_rx_nbuf_free(frag);
85 		frag = next;
86 	}
87 }
88 
89 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
90 /**
91  * dp_rx_clear_saved_desc_info() - Clears descriptor info
92  * @txrx_peer: Pointer to the peer data structure
93  * @tid: Traffic Identifier (TID)
94  *
95  * Frees the cached MPDU destination ring descriptor and clears the
96  * saved MSDU link (head fragment) pointer. The cache is per peer, per TID
97  *
98  * Return: None
99  */
100 static void dp_rx_clear_saved_desc_info(struct dp_txrx_peer *txrx_peer,
101 					unsigned int tid)
102 {
103 	if (txrx_peer->rx_tid[tid].dst_ring_desc)
104 		qdf_mem_free(txrx_peer->rx_tid[tid].dst_ring_desc);
105 
106 	txrx_peer->rx_tid[tid].dst_ring_desc = NULL;
107 	txrx_peer->rx_tid[tid].head_frag_desc = NULL;
108 }
109 
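/**
 * dp_rx_return_head_frag_desc() - Return saved head fragment descriptors
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Returns the cached head fragment RX descriptor to its buffer pool via
 * replenish and the saved destination ring link descriptor to the idle
 * list.
 *
 * Return: None
 */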
110 static void dp_rx_return_head_frag_desc(struct dp_txrx_peer *txrx_peer,
111 					unsigned int tid)
112 {
113 	struct dp_soc *soc;
114 	struct dp_pdev *pdev;
115 	struct dp_srng *dp_rxdma_srng;
116 	struct rx_desc_pool *rx_desc_pool;
117 	union dp_rx_desc_list_elem_t *head = NULL;
118 	union dp_rx_desc_list_elem_t *tail = NULL;
119 	uint8_t pool_id;
120 
121 	pdev = txrx_peer->vdev->pdev;
122 	soc = pdev->soc;
123 
124 	if (txrx_peer->rx_tid[tid].head_frag_desc) {
125 		pool_id = txrx_peer->rx_tid[tid].head_frag_desc->pool_id;
126 		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
127 		rx_desc_pool = &soc->rx_desc_buf[pool_id];
128 
129 		dp_rx_add_to_free_desc_list(&head, &tail,
130 					    txrx_peer->rx_tid[tid].head_frag_desc);
131 		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
132 					1, &head, &tail, false);
133 	}
134 
135 	if (txrx_peer->rx_tid[tid].dst_ring_desc) {
136 		if (dp_rx_link_desc_return(soc,
137 					   txrx_peer->rx_tid[tid].dst_ring_desc,
138 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
139 		    QDF_STATUS_SUCCESS)
140 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
141 				  "%s: Failed to return link desc", __func__);
142 	}
143 }
144 #else
145 
146 static void dp_rx_clear_saved_desc_info(struct dp_txrx_peer *txrx_peer,
147 					unsigned int tid)
148 {
149 }
150 
151 static void dp_rx_return_head_frag_desc(struct dp_txrx_peer *txrx_peer,
152 					unsigned int tid)
153 {
154 }
155 #endif /* WLAN_SOFTUMAC_SUPPORT */
156 
157 void dp_rx_reorder_flush_frag(struct dp_txrx_peer *txrx_peer,
158 			      unsigned int tid)
159 {
160 	dp_info_rl("Flushing TID %d", tid);
161 
162 	if (!txrx_peer) {
163 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
164 					"%s: NULL peer", __func__);
165 		return;
166 	}
167 
168 	dp_rx_return_head_frag_desc(txrx_peer, tid);
169 	dp_rx_defrag_cleanup(txrx_peer, tid);
170 }
171 
172 void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
173 {
174 	struct dp_rx_tid_defrag *waitlist_elem = NULL;
175 	struct dp_rx_tid_defrag *tmp;
176 	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
177 	TAILQ_HEAD(, dp_rx_tid_defrag) temp_list;
178 	dp_txrx_ref_handle txrx_ref_handle = NULL;
179 
180 	TAILQ_INIT(&temp_list);
181 
182 	dp_debug("Current time  %u", now_ms);
183 
184 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
185 	TAILQ_FOREACH_SAFE(waitlist_elem, &soc->rx.defrag.waitlist,
186 			   defrag_waitlist_elem, tmp) {
187 		uint32_t tid;
188 
189 		if (waitlist_elem->defrag_timeout_ms > now_ms)
190 			break;
191 
192 		tid = waitlist_elem->tid;
193 		if (tid >= DP_MAX_TIDS) {
194 			qdf_assert(0);
195 			continue;
196 		}
197 
198 		TAILQ_REMOVE(&soc->rx.defrag.waitlist, waitlist_elem,
199 			     defrag_waitlist_elem);
200 		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
201 
202 		/* Move to temp list and clean-up later */
203 		TAILQ_INSERT_TAIL(&temp_list, waitlist_elem,
204 				  defrag_waitlist_elem);
205 	}
206 	if (waitlist_elem) {
207 		soc->rx.defrag.next_flush_ms =
208 			waitlist_elem->defrag_timeout_ms;
209 	} else {
210 		soc->rx.defrag.next_flush_ms =
211 			now_ms + soc->rx.defrag.timeout_ms;
212 	}
213 
214 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
215 
216 	TAILQ_FOREACH_SAFE(waitlist_elem, &temp_list,
217 			   defrag_waitlist_elem, tmp) {
218 		struct dp_txrx_peer *txrx_peer, *temp_peer = NULL;
219 
220 		qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
221 		TAILQ_REMOVE(&temp_list, waitlist_elem,
222 			     defrag_waitlist_elem);
223 		/* get address of current peer */
224 		txrx_peer = waitlist_elem->defrag_peer;
225 		qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);
226 
227 		temp_peer = dp_txrx_peer_get_ref_by_id(soc, txrx_peer->peer_id,
228 						       &txrx_ref_handle,
229 						       DP_MOD_ID_RX_ERR);
230 		if (temp_peer == txrx_peer) {
231 			qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
232 			dp_rx_reorder_flush_frag(txrx_peer, waitlist_elem->tid);
233 			qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);
234 		}
235 
236 		if (temp_peer)
237 			dp_txrx_peer_unref_delete(txrx_ref_handle,
238 						  DP_MOD_ID_RX_ERR);
239 
240 	}
241 }
242 
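/*
 * Editor's example (not part of the driver): a minimal sketch of how a
 * caller could use next_flush_ms, which the flush routine above keeps
 * up to date, to decide when to invoke it again. The polling function
 * name is hypothetical; the real call site lives elsewhere in the
 * datapath.
 */
#if 0	/* illustrative only, not compiled */
static void dp_rx_defrag_example_poll(struct dp_soc *soc)
{
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

	if (soc->rx.flags.defrag_timeout_check &&
	    now_ms >= soc->rx.defrag.next_flush_ms)
		dp_rx_defrag_waitlist_flush(soc);
}
#endif
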
243 void dp_rx_defrag_waitlist_add(struct dp_txrx_peer *txrx_peer,
244 			       unsigned int tid)
245 {
246 	struct dp_soc *psoc = txrx_peer->vdev->pdev->soc;
247 	struct dp_rx_tid_defrag *waitlist_elem = &txrx_peer->rx_tid[tid];
248 
249 	dp_debug("Adding TID %u to waitlist for peer %pK with peer_id = %d ",
250 		 tid, txrx_peer, txrx_peer->peer_id);
251 
252 	/* TODO: use LIST macros instead of TAIL macros */
253 	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
254 	if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist))
255 		psoc->rx.defrag.next_flush_ms =
256 			waitlist_elem->defrag_timeout_ms;
257 
258 	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, waitlist_elem,
259 			  defrag_waitlist_elem);
260 	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
261 	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
262 }
263 
264 void dp_rx_defrag_waitlist_remove(struct dp_txrx_peer *txrx_peer,
265 				  unsigned int tid)
266 {
267 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
268 	struct dp_soc *soc = pdev->soc;
269 	struct dp_rx_tid_defrag *waitlist_elm;
270 	struct dp_rx_tid_defrag *tmp;
271 
272 	dp_debug("Removing TID %u to waitlist for peer %pK peer_id = %d ",
273 		 tid, txrx_peer, txrx_peer->peer_id);
274 
275 	if (tid >= DP_MAX_TIDS) {
276 		dp_err("TID out of bounds: %d", tid);
277 		qdf_assert_always(0);
278 	}
279 
280 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
281 	TAILQ_FOREACH_SAFE(waitlist_elm, &soc->rx.defrag.waitlist,
282 			   defrag_waitlist_elem, tmp) {
283 		struct dp_txrx_peer *peer_on_waitlist;
284 
285 		/* get address of current peer */
286 		peer_on_waitlist = waitlist_elm->defrag_peer;
287 
288 		/* Ensure it is TID for same peer */
289 		if (peer_on_waitlist == txrx_peer && waitlist_elm->tid == tid) {
290 			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
291 				     waitlist_elm, defrag_waitlist_elem);
292 			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
293 		}
294 	}
295 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
296 }
297 
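/**
 * dp_rx_defrag_fraglist_insert() - Insert a fragment into the per-TID list
 * @txrx_peer: Pointer to the peer
 * @tid: Traffic Identifier (TID)
 * @head_addr: Pointer to the head of the fragment list
 * @tail_addr: Pointer to the tail of the fragment list
 * @frag: Incoming fragment
 * @all_frag_present: Set to 1 once the fragment sequence is complete
 *
 * Inserts the fragment in fragment-number order and drops duplicates.
 * Once the final fragment (more-frag bit clear) has arrived and the
 * sequence has no gaps, *all_frag_present is set.
 *
 * Return: QDF_STATUS
 */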
298 QDF_STATUS
299 dp_rx_defrag_fraglist_insert(struct dp_txrx_peer *txrx_peer, unsigned int tid,
300 			     qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr,
301 			     qdf_nbuf_t frag, uint8_t *all_frag_present)
302 {
303 	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
304 	qdf_nbuf_t next;
305 	qdf_nbuf_t prev = NULL;
306 	qdf_nbuf_t cur;
307 	uint16_t head_fragno, cur_fragno, next_fragno;
308 	uint8_t last_morefrag = 1, count = 0;
309 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
310 	uint8_t *rx_desc_info;
311 
312 	qdf_assert(frag);
313 	qdf_assert(head_addr);
314 	qdf_assert(tail_addr);
315 
316 	*all_frag_present = 0;
317 	rx_desc_info = qdf_nbuf_data(frag);
318 	cur_fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc_info);
319 
320 	dp_debug("cur_fragno %d", cur_fragno);
321 	/* If this is the first fragment */
322 	if (!(*head_addr)) {
323 		*head_addr = *tail_addr = frag;
324 		qdf_nbuf_set_next(*tail_addr, NULL);
325 		rx_tid->curr_frag_num = cur_fragno;
326 
327 		goto insert_done;
328 	}
329 
330 	/* In sequence fragment */
331 	if (cur_fragno > rx_tid->curr_frag_num) {
332 		qdf_nbuf_set_next(*tail_addr, frag);
333 		*tail_addr = frag;
334 		qdf_nbuf_set_next(*tail_addr, NULL);
335 		rx_tid->curr_frag_num = cur_fragno;
336 	} else {
337 		/* Out of sequence fragment */
338 		cur = *head_addr;
339 		rx_desc_info = qdf_nbuf_data(cur);
340 		head_fragno = dp_rx_frag_get_mpdu_frag_number(soc,
341 							      rx_desc_info);
342 
343 		if (cur_fragno == head_fragno) {
344 			dp_rx_nbuf_free(frag);
345 			goto insert_fail;
346 		} else if (head_fragno > cur_fragno) {
347 			qdf_nbuf_set_next(frag, cur);
348 			cur = frag;
349 			*head_addr = frag; /* head pointer to be updated */
350 		} else {
351 			while ((cur_fragno > head_fragno) && cur) {
352 				prev = cur;
353 				cur = qdf_nbuf_next(cur);
354 				if (cur) {
355 					rx_desc_info = qdf_nbuf_data(cur);
356 					head_fragno =
357 						dp_rx_frag_get_mpdu_frag_number(
358 								soc,
359 								rx_desc_info);
360 				}
361 			}
362 
363 			if (cur_fragno == head_fragno) {
364 				dp_rx_nbuf_free(frag);
365 				goto insert_fail;
366 			}
367 
368 			qdf_nbuf_set_next(prev, frag);
369 			qdf_nbuf_set_next(frag, cur);
370 		}
371 	}
372 
373 	next = qdf_nbuf_next(*head_addr);
374 
375 	rx_desc_info = qdf_nbuf_data(*tail_addr);
376 	last_morefrag = dp_rx_frag_get_more_frag_bit(soc, rx_desc_info);
377 
378 	/* TODO: optimize the loop */
379 	if (!last_morefrag) {
380 		/* Check if all fragments are present */
381 		do {
382 			rx_desc_info = qdf_nbuf_data(next);
383 			next_fragno =
384 				dp_rx_frag_get_mpdu_frag_number(soc,
385 								rx_desc_info);
386 			count++;
387 
388 			if (next_fragno != count)
389 				break;
390 
391 			next = qdf_nbuf_next(next);
392 		} while (next);
393 
394 		if (!next) {
395 			*all_frag_present = 1;
396 			return QDF_STATUS_SUCCESS;
397 		} else {
398 			/* revisit */
399 		}
400 	}
401 
402 insert_done:
403 	return QDF_STATUS_SUCCESS;
404 
405 insert_fail:
406 	return QDF_STATUS_E_FAILURE;
407 }
408 
409 
410 /**
411  * dp_rx_defrag_tkip_decap() - decap tkip encrypted fragment
412  * @soc: DP SOC
413  * @msdu: Pointer to the fragment
414  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
415  *
416  * decap tkip encrypted fragment
417  *
418  * Return: QDF_STATUS
419  */
420 static QDF_STATUS
421 dp_rx_defrag_tkip_decap(struct dp_soc *soc,
422 			qdf_nbuf_t msdu, uint16_t hdrlen)
423 {
424 	uint8_t *ivp, *orig_hdr;
425 	int rx_desc_len = soc->rx_pkt_tlv_size;
426 
427 	/* start of 802.11 header info */
428 	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
429 
430 	/* TKIP header is located post 802.11 header */
431 	ivp = orig_hdr + hdrlen;
432 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
433 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
434 			"IEEE80211_WEP_EXTIV is missing in TKIP fragment");
435 		return QDF_STATUS_E_DEFRAG_ERROR;
436 	}
437 
438 	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);
439 
440 	return QDF_STATUS_SUCCESS;
441 }
442 
443 /**
444  * dp_rx_defrag_ccmp_demic() - Remove MIC information from CCMP fragment
445  * @soc: DP SOC
446  * @nbuf: Pointer to the fragment buffer
447  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
448  *
449  * Remove MIC information from CCMP fragment
450  *
451  * Return: QDF_STATUS
452  */
453 static QDF_STATUS
454 dp_rx_defrag_ccmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
455 {
456 	uint8_t *ivp, *orig_hdr;
457 	int rx_desc_len = soc->rx_pkt_tlv_size;
458 
459 	/* start of the 802.11 header */
460 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
461 
462 	/* CCMP header is located after 802.11 header */
463 	ivp = orig_hdr + hdrlen;
464 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
465 		return QDF_STATUS_E_DEFRAG_ERROR;
466 
467 	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);
468 
469 	return QDF_STATUS_SUCCESS;
470 }
471 
472 /**
473  * dp_rx_defrag_ccmp_decap() - decap CCMP encrypted fragment
474  * @soc: DP SOC
475  * @nbuf: Pointer to the fragment
476  * @hdrlen: length of the header information
477  *
478  * decap CCMP encrypted fragment
479  *
480  * Return: QDF_STATUS
481  */
482 static QDF_STATUS
483 dp_rx_defrag_ccmp_decap(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
484 {
485 	uint8_t *ivp, *orig_hdr;
486 	int rx_desc_len = soc->rx_pkt_tlv_size;
487 
488 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
489 	ivp = orig_hdr + hdrlen;
490 
491 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
492 		return QDF_STATUS_E_DEFRAG_ERROR;
493 
494 	return QDF_STATUS_SUCCESS;
495 }
496 
497 /**
498  * dp_rx_defrag_wep_decap() - decap WEP encrypted fragment
499  * @soc: DP SOC
500  * @msdu: Pointer to the fragment
501  * @hdrlen: length of the header information
502  *
503  * decap WEP encrypted fragment
504  *
505  * Return: QDF_STATUS
506  */
507 static QDF_STATUS
508 dp_rx_defrag_wep_decap(struct dp_soc *soc, qdf_nbuf_t msdu, uint16_t hdrlen)
509 {
510 	uint8_t *orig_hdr;
511 	int rx_desc_len = soc->rx_pkt_tlv_size;
512 
513 	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
514 	qdf_mem_move(orig_hdr + dp_f_wep.ic_header, orig_hdr, hdrlen);
515 
516 	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);
517 
518 	return QDF_STATUS_SUCCESS;
519 }
520 
521 /**
522  * dp_rx_defrag_hdrsize() - Calculate the header size of the received fragment
523  * @soc: soc handle
524  * @nbuf: Pointer to the fragment
525  *
526  * Calculate the header size of the received fragment
527  *
528  * Return: header size (uint16_t)
529  */
530 static uint16_t dp_rx_defrag_hdrsize(struct dp_soc *soc, qdf_nbuf_t nbuf)
531 {
532 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
533 	uint16_t size = sizeof(struct ieee80211_frame);
534 	uint16_t fc = 0;
535 	uint32_t to_ds, fr_ds;
536 	uint8_t frm_ctrl_valid;
537 	uint16_t frm_ctrl_field;
538 
539 	to_ds = hal_rx_mpdu_get_to_ds(soc->hal_soc, rx_tlv_hdr);
540 	fr_ds = hal_rx_mpdu_get_fr_ds(soc->hal_soc, rx_tlv_hdr);
541 	frm_ctrl_valid =
542 		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
543 						    rx_tlv_hdr);
544 	frm_ctrl_field = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_tlv_hdr);
545 
546 	if (to_ds && fr_ds)
547 		size += QDF_MAC_ADDR_SIZE;
548 
549 	if (frm_ctrl_valid) {
550 		fc = frm_ctrl_field;
551 
552 		/* use the first byte for validation */
553 		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
554 			size += sizeof(uint16_t);
555 			/* use the second byte for validation */
556 			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
557 				size += sizeof(struct ieee80211_htc);
558 		}
559 	}
560 
561 	return size;
562 }
563 
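/*
 * Editor's example (not part of the driver): the sizes the function
 * above can produce. The base 802.11 header is 24 bytes; a 4-address
 * (ToDS and FromDS) frame adds a 6-byte fourth address, a QoS data
 * frame adds the 2-byte QoS control field, and a QoS frame with the
 * order bit set adds the 4-byte HT control field.
 */
#if 0	/* illustrative only, not compiled */
static void dp_rx_defrag_example_hdrsize(void)
{
	uint16_t size = sizeof(struct ieee80211_frame);	/* 24 */

	size += QDF_MAC_ADDR_SIZE;		/* 30: ToDS and FromDS set */
	size += sizeof(uint16_t);		/* 32: QoS control present */
	size += sizeof(struct ieee80211_htc);	/* 36: HT control present */
	(void)size;
}
#endif
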
564 /**
565  * dp_rx_defrag_michdr() - Calculate a pseudo MIC header
566  * @wh0: Pointer to the wireless header of the fragment
567  * @hdr: Array to hold the pseudo header
568  *
569  * Calculate a pseudo MIC header
570  *
571  * Return: None
572  */
573 static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
574 				uint8_t hdr[])
575 {
576 	const struct ieee80211_frame_addr4 *wh =
577 		(const struct ieee80211_frame_addr4 *)wh0;
578 
579 	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
580 	case IEEE80211_FC1_DIR_NODS:
581 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
582 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
583 					   wh->i_addr2);
584 		break;
585 	case IEEE80211_FC1_DIR_TODS:
586 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
587 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
588 					   wh->i_addr2);
589 		break;
590 	case IEEE80211_FC1_DIR_FROMDS:
591 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
592 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
593 					   wh->i_addr3);
594 		break;
595 	case IEEE80211_FC1_DIR_DSTODS:
596 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
597 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
598 					   wh->i_addr4);
599 		break;
600 	}
601 
602 	/*
603 	 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for data frame, but
604 	 * it could also be set for deauth, disassoc, action, etc. for
605 	 * a mgt type frame. It comes into picture for MFP.
606 	 */
607 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
608 		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
609 				IEEE80211_FC1_DIR_DSTODS) {
610 			const struct ieee80211_qosframe_addr4 *qwh =
611 				(const struct ieee80211_qosframe_addr4 *)wh;
612 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
613 		} else {
614 			const struct ieee80211_qosframe *qwh =
615 				(const struct ieee80211_qosframe *)wh;
616 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
617 		}
618 	} else {
619 		hdr[12] = 0;
620 	}
621 
622 	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
623 }
624 
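/*
 * Editor's note: the 16-byte Michael pseudo header built above is laid
 * out as DA (bytes 0-5), SA (bytes 6-11), the QoS TID or 0 (byte 12)
 * and three reserved zero bytes (13-15), matching the TKIP Michael MIC
 * input defined by IEEE 802.11.
 */
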
625 /**
626  * dp_rx_defrag_mic() - Calculate the Michael MIC
627  * @soc: DP SOC
628  * @key: Pointer to the key
629  * @wbuf: fragment buffer
630  * @off: Offset
631  * @data_len: Data length
632  * @mic: Array to hold MIC
633  *
634  * Compute the Michael MIC over the fragment data
635  *
636  * Return: QDF_STATUS
637  */
638 static QDF_STATUS dp_rx_defrag_mic(struct dp_soc *soc, const uint8_t *key,
639 				   qdf_nbuf_t wbuf, uint16_t off,
640 				   uint16_t data_len, uint8_t mic[])
641 {
642 	uint8_t hdr[16] = { 0, };
643 	uint32_t l, r;
644 	const uint8_t *data;
645 	uint32_t space;
646 	int rx_desc_len = soc->rx_pkt_tlv_size;
647 
648 	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
649 		+ rx_desc_len), hdr);
650 
651 	l = dp_rx_get_le32(key);
652 	r = dp_rx_get_le32(key + 4);
653 
654 	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
655 	l ^= dp_rx_get_le32(hdr);
656 	dp_rx_michael_block(l, r);
657 	l ^= dp_rx_get_le32(&hdr[4]);
658 	dp_rx_michael_block(l, r);
659 	l ^= dp_rx_get_le32(&hdr[8]);
660 	dp_rx_michael_block(l, r);
661 	l ^= dp_rx_get_le32(&hdr[12]);
662 	dp_rx_michael_block(l, r);
663 
664 	/* first buffer has special handling */
665 	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
666 	space = qdf_nbuf_len(wbuf) - off;
667 
668 	for (;; ) {
669 		if (space > data_len)
670 			space = data_len;
671 
672 		/* collect 32-bit blocks from current buffer */
673 		while (space >= sizeof(uint32_t)) {
674 			l ^= dp_rx_get_le32(data);
675 			dp_rx_michael_block(l, r);
676 			data += sizeof(uint32_t);
677 			space -= sizeof(uint32_t);
678 			data_len -= sizeof(uint32_t);
679 		}
680 		if (data_len < sizeof(uint32_t))
681 			break;
682 
683 		wbuf = qdf_nbuf_next(wbuf);
684 		if (!wbuf)
685 			return QDF_STATUS_E_DEFRAG_ERROR;
686 
687 		if (space != 0) {
688 			const uint8_t *data_next;
689 			/*
690 			 * Block straddles buffers, split references.
691 			 */
692 			data_next =
693 				(uint8_t *)qdf_nbuf_data(wbuf) + off;
694 			if ((qdf_nbuf_len(wbuf)) <
695 				sizeof(uint32_t) - space) {
696 				return QDF_STATUS_E_DEFRAG_ERROR;
697 			}
698 			switch (space) {
699 			case 1:
700 				l ^= dp_rx_get_le32_split(data[0],
701 					data_next[0], data_next[1],
702 					data_next[2]);
703 				data = data_next + 3;
704 				space = (qdf_nbuf_len(wbuf) - off) - 3;
705 				break;
706 			case 2:
707 				l ^= dp_rx_get_le32_split(data[0], data[1],
708 						    data_next[0], data_next[1]);
709 				data = data_next + 2;
710 				space = (qdf_nbuf_len(wbuf) - off) - 2;
711 				break;
712 			case 3:
713 				l ^= dp_rx_get_le32_split(data[0], data[1],
714 					data[2], data_next[0]);
715 				data = data_next + 1;
716 				space = (qdf_nbuf_len(wbuf) - off) - 1;
717 				break;
718 			}
719 			dp_rx_michael_block(l, r);
720 			data_len -= sizeof(uint32_t);
721 		} else {
722 			/*
723 			 * Setup for next buffer.
724 			 */
725 			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
726 			space = qdf_nbuf_len(wbuf) - off;
727 		}
728 	}
729 	/* Last block and padding (0x5a, 4..7 x 0) */
730 	switch (data_len) {
731 	case 0:
732 		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
733 		break;
734 	case 1:
735 		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
736 		break;
737 	case 2:
738 		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
739 		break;
740 	case 3:
741 		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
742 		break;
743 	}
744 	dp_rx_michael_block(l, r);
745 	dp_rx_michael_block(l, r);
746 	dp_rx_put_le32(mic, l);
747 	dp_rx_put_le32(mic + 4, r);
748 
749 	return QDF_STATUS_SUCCESS;
750 }
751 
752 /**
753  * dp_rx_defrag_tkip_demic() - Verify and remove the MIC from the TKIP frame
754  * @soc: DP SOC
755  * @key: Pointer to the key
756  * @msdu: fragment buffer
757  * @hdrlen: Length of the header information
758  *
759  * Remove MIC information from the TKIP frame
760  *
761  * Return: QDF_STATUS
762  */
763 static QDF_STATUS dp_rx_defrag_tkip_demic(struct dp_soc *soc,
764 					  const uint8_t *key,
765 					  qdf_nbuf_t msdu, uint16_t hdrlen)
766 {
767 	QDF_STATUS status;
768 	uint32_t pktlen = 0, prev_data_len;
769 	uint8_t mic[IEEE80211_WEP_MICLEN];
770 	uint8_t mic0[IEEE80211_WEP_MICLEN];
771 	qdf_nbuf_t prev = NULL, prev0, next;
772 	uint8_t len0 = 0;
773 
774 	next = msdu;
775 	prev0 = msdu;
776 	while (next) {
777 		pktlen += (qdf_nbuf_len(next) - hdrlen);
778 		prev = next;
779 		dp_debug("pktlen %u",
780 			 (uint32_t)(qdf_nbuf_len(next) - hdrlen));
781 		next = qdf_nbuf_next(next);
782 		if (next && !qdf_nbuf_next(next))
783 			prev0 = prev;
784 	}
785 
786 	if (!prev) {
787 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
788 			  "%s Defrag chaining failed !\n", __func__);
789 		return QDF_STATUS_E_DEFRAG_ERROR;
790 	}
791 
792 	prev_data_len = qdf_nbuf_len(prev) - hdrlen;
793 	if (prev_data_len < dp_f_tkip.ic_miclen) {
794 		if (prev0 == prev) {
795 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
796 				  "%s Fragments don't have MIC header !\n", __func__);
797 			return QDF_STATUS_E_DEFRAG_ERROR;
798 		}
799 		len0 = dp_f_tkip.ic_miclen - (uint8_t)prev_data_len;
800 		qdf_nbuf_copy_bits(prev0, qdf_nbuf_len(prev0) - len0, len0,
801 				   (caddr_t)mic0);
802 		qdf_nbuf_trim_tail(prev0, len0);
803 	}
804 
805 	qdf_nbuf_copy_bits(prev, (qdf_nbuf_len(prev) -
806 			   (dp_f_tkip.ic_miclen - len0)),
807 			   (dp_f_tkip.ic_miclen - len0),
808 			   (caddr_t)(&mic0[len0]));
809 	qdf_nbuf_trim_tail(prev, (dp_f_tkip.ic_miclen - len0));
810 	pktlen -= dp_f_tkip.ic_miclen;
811 
812 	if (((qdf_nbuf_len(prev) - hdrlen) == 0) && prev != msdu) {
813 		dp_rx_nbuf_free(prev);
814 		qdf_nbuf_set_next(prev0, NULL);
815 	}
816 
817 	status = dp_rx_defrag_mic(soc, key, msdu, hdrlen,
818 				  pktlen, mic);
819 
820 	if (QDF_IS_STATUS_ERROR(status))
821 		return status;
822 
823 	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
824 		return QDF_STATUS_E_DEFRAG_ERROR;
825 
826 	return QDF_STATUS_SUCCESS;
827 }
828 
829 /**
830  * dp_rx_frag_pull_hdr() - Pulls the RXTLV & the 802.11 headers
831  * @soc: DP SOC
832  * @nbuf: buffer pointer
833  * @hdrsize: size of the header to be pulled
834  *
835  * Pull the RXTLV & the 802.11 headers
836  *
837  * Return: None
838  */
839 static void dp_rx_frag_pull_hdr(struct dp_soc *soc,
840 				qdf_nbuf_t nbuf, uint16_t hdrsize)
841 {
842 	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));
843 
844 	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + hdrsize);
845 
846 	dp_debug("final pktlen %d .11len %d",
847 		 (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
848 }
849 
850 /**
851  * dp_rx_defrag_pn_check() - Check the current fragment's PN against the prev PN
852  * @soc: DP SOC
853  * @msdu: msdu to get the current PN
854  * @cur_pn128: PN extracted from the current msdu
855  * @prev_pn128: PN of the previous fragment
856  *
857  * Return: 0 if the PN is in sequence, non-zero if it is out of order
858  */
859 static int dp_rx_defrag_pn_check(struct dp_soc *soc, qdf_nbuf_t msdu,
860 				 uint64_t *cur_pn128, uint64_t *prev_pn128)
861 {
862 	int out_of_order = 0;
863 
864 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(msdu), cur_pn128);
865 
866 	if (cur_pn128[1] == prev_pn128[1])
867 		out_of_order = (cur_pn128[0] - prev_pn128[0] != 1);
868 	else
869 		out_of_order = (cur_pn128[1] - prev_pn128[1] != 1);
870 
871 	return out_of_order;
872 }
873 
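/*
 * Editor's example (not part of the driver): the PN is carried as a
 * 128-bit value split across two 64-bit words, and fragments of one
 * MPDU must carry strictly consecutive PNs.
 */
#if 0	/* illustrative only, not compiled */
static void dp_rx_defrag_example_pn(void)
{
	uint64_t prev_pn128[2] = {0x1000, 0};
	uint64_t cur_pn128[2] = {0x1001, 0};
	int out_of_order;

	/* Same comparison as dp_rx_defrag_pn_check() on sample values */
	if (cur_pn128[1] == prev_pn128[1])
		out_of_order = (cur_pn128[0] - prev_pn128[0] != 1);
	else
		out_of_order = (cur_pn128[1] - prev_pn128[1] != 1);
	/* out_of_order == 0 here; a current PN of 0x1003 would be flagged */
	(void)out_of_order;
}
#endif
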
874 /**
875  * dp_rx_construct_fraglist() - Construct a nbuf fraglist
876  * @txrx_peer: Pointer to the txrx peer
877  * @tid: Traffic Identifier (TID)
878  * @head: Pointer to list of fragments
879  * @hdrsize: Size of the header to be pulled
880  *
881  * Construct a nbuf fraglist
882  *
883  * Return: QDF_STATUS
884  */
885 static int
886 dp_rx_construct_fraglist(struct dp_txrx_peer *txrx_peer, int tid,
887 			 qdf_nbuf_t head,
888 			 uint16_t hdrsize)
889 {
890 	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
891 	qdf_nbuf_t msdu = qdf_nbuf_next(head);
892 	qdf_nbuf_t rx_nbuf = msdu;
893 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
894 	uint32_t len = 0;
895 	uint64_t cur_pn128[2] = {0, 0}, prev_pn128[2];
896 	int out_of_order = 0;
897 	int index;
898 	int needs_pn_check = 0;
899 	enum cdp_sec_type sec_type;
900 
901 	prev_pn128[0] = rx_tid->pn128[0];
902 	prev_pn128[1] = rx_tid->pn128[1];
903 
904 	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu) ? dp_sec_mcast :
905 				dp_sec_ucast;
906 	sec_type = txrx_peer->security[index].sec_type;
907 
908 	if (!(sec_type == cdp_sec_type_none || sec_type == cdp_sec_type_wep128 ||
909 	      sec_type == cdp_sec_type_wep104 || sec_type == cdp_sec_type_wep40))
910 		needs_pn_check = 1;
911 
912 	while (msdu) {
913 		if (qdf_likely(needs_pn_check))
914 			out_of_order = dp_rx_defrag_pn_check(soc, msdu,
915 							     &cur_pn128[0],
916 							     &prev_pn128[0]);
917 
918 		if (qdf_unlikely(out_of_order)) {
919 			dp_info_rl("cur_pn128[0] 0x%llx cur_pn128[1] 0x%llx prev_pn128[0] 0x%llx prev_pn128[1] 0x%llx",
920 				   cur_pn128[0], cur_pn128[1],
921 				   prev_pn128[0], prev_pn128[1]);
922 			return QDF_STATUS_E_FAILURE;
923 		}
924 
925 		prev_pn128[0] = cur_pn128[0];
926 		prev_pn128[1] = cur_pn128[1];
927 
928 		/*
929 		 * Broadcast and multicast frames should never be fragmented.
930 		 * Iterating through all msdus and dropping fragments if even
931 		 * one of them has mcast/bcast destination address.
932 		 */
933 		if (hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu)) {
934 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
935 				  "Dropping multicast/broadcast fragments");
936 			return QDF_STATUS_E_FAILURE;
937 		}
938 
939 		dp_rx_frag_pull_hdr(soc, msdu, hdrsize);
940 		len += qdf_nbuf_len(msdu);
941 		msdu = qdf_nbuf_next(msdu);
942 	}
943 
944 	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
945 	qdf_nbuf_set_next(head, NULL);
946 	qdf_nbuf_set_is_frag(head, 1);
947 
948 	dp_debug("head len %d ext len %d data len %d ",
949 		 (uint32_t)qdf_nbuf_len(head),
950 		 (uint32_t)qdf_nbuf_len(rx_nbuf),
951 		 (uint32_t)(head->data_len));
952 
953 	return QDF_STATUS_SUCCESS;
954 }
955 
956 /**
957  * dp_rx_defrag_err() - rx defragmentation error handler
958  * @vdev: handle to vdev object
959  * @nbuf: packet buffer
960  *
961  * This function handles rx error and send MIC error notification
962  *
963  * Return: None
964  */
965 static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
966 {
967 	struct ol_if_ops *tops = NULL;
968 	struct dp_pdev *pdev = vdev->pdev;
969 	int rx_desc_len = pdev->soc->rx_pkt_tlv_size;
970 	uint8_t *orig_hdr;
971 	struct ieee80211_frame *wh;
972 	struct cdp_rx_mic_err_info mic_failure_info;
973 
974 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
975 	wh = (struct ieee80211_frame *)orig_hdr;
976 
977 	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
978 			 (struct qdf_mac_addr *)&wh->i_addr1);
979 	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
980 			 (struct qdf_mac_addr *)&wh->i_addr2);
981 	mic_failure_info.key_id = 0;
982 	mic_failure_info.multicast =
983 		IEEE80211_IS_MULTICAST(wh->i_addr1);
984 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
985 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
986 	mic_failure_info.data = (uint8_t *)wh;
987 	mic_failure_info.vdev_id = vdev->vdev_id;
988 
989 	tops = pdev->soc->cdp_soc.ol_ops;
990 	if (tops->rx_mic_error)
991 		tops->rx_mic_error(pdev->soc->ctrl_psoc, pdev->pdev_id,
992 				   &mic_failure_info);
993 }
994 
995 
996 /**
997  * dp_rx_defrag_nwifi_to_8023() - Transcap 802.11 to 802.3
998  * @soc: dp soc handle
999  * @txrx_peer: txrx_peer handle
1000  * @tid: Traffic Identifier (TID)
1001  * @nbuf: Pointer to the fragment buffer
1002  * @hdrsize: Size of headers
1003  *
1004  * Transcap the fragment from 802.11 to 802.3
1005  *
1006  * Return: None
1007  */
1008 static void
1009 dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
1010 			   int tid, qdf_nbuf_t nbuf, uint16_t hdrsize)
1011 {
1012 	struct llc_snap_hdr_t *llchdr;
1013 	struct ethernet_hdr_t *eth_hdr;
1014 	uint8_t ether_type[2];
1015 	uint16_t fc = 0;
1016 	union dp_align_mac_addr mac_addr;
1017 	uint8_t *rx_desc_info = qdf_mem_malloc(soc->rx_pkt_tlv_size);
1018 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
1019 	struct ieee80211_frame_addr4 wh = {0};
1020 
1021 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), rx_tid->pn128);
1022 
1023 	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));
1024 
1025 	if (!rx_desc_info) {
1026 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1027 			"%s: Memory alloc failed ! ", __func__);
1028 		QDF_ASSERT(0);
1029 		return;
1030 	}
1031 
1032 	qdf_mem_zero(&wh, sizeof(struct ieee80211_frame_addr4));
1033 	if (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
1034 		qdf_mem_copy(&wh, qdf_nbuf_data(nbuf) + soc->rx_pkt_tlv_size,
1035 			     hdrsize);
1036 
1037 	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), soc->rx_pkt_tlv_size);
1038 
1039 	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
1040 					soc->rx_pkt_tlv_size + hdrsize);
1041 	qdf_mem_copy(ether_type, llchdr->ethertype, 2);
1042 
1043 	qdf_nbuf_pull_head(nbuf, (soc->rx_pkt_tlv_size + hdrsize +
1044 				  sizeof(struct llc_snap_hdr_t) -
1045 				  sizeof(struct ethernet_hdr_t)));
1046 
1047 	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));
1048 
1049 	if (hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
1050 						rx_desc_info))
1051 		fc = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_desc_info);
1052 
1053 	dp_debug("Frame control type: 0x%x", fc);
1054 
1055 	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
1056 	case IEEE80211_FC1_DIR_NODS:
1057 		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
1058 				      &mac_addr.raw[0]);
1059 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1060 			QDF_MAC_ADDR_SIZE);
1061 		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
1062 				      &mac_addr.raw[0]);
1063 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
1064 			QDF_MAC_ADDR_SIZE);
1065 		break;
1066 	case IEEE80211_FC1_DIR_TODS:
1067 		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
1068 				      &mac_addr.raw[0]);
1069 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1070 			QDF_MAC_ADDR_SIZE);
1071 		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
1072 				      &mac_addr.raw[0]);
1073 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
1074 			QDF_MAC_ADDR_SIZE);
1075 		break;
1076 	case IEEE80211_FC1_DIR_FROMDS:
1077 		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
1078 				      &mac_addr.raw[0]);
1079 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1080 			QDF_MAC_ADDR_SIZE);
1081 		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
1082 				      &mac_addr.raw[0]);
1083 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
1084 			QDF_MAC_ADDR_SIZE);
1085 		break;
1086 
1087 	case IEEE80211_FC1_DIR_DSTODS:
1088 		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
1089 				      &mac_addr.raw[0]);
1090 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1091 			QDF_MAC_ADDR_SIZE);
1092 		qdf_mem_copy(eth_hdr->src_addr, &wh.i_addr4[0],
1093 			     QDF_MAC_ADDR_SIZE);
1094 		break;
1095 
1096 	default:
1097 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1098 		"%s: Unknown frame control type: 0x%x", __func__, fc);
1099 	}
1100 
1101 	qdf_mem_copy(eth_hdr->ethertype, ether_type,
1102 			sizeof(ether_type));
1103 
1104 	qdf_nbuf_push_head(nbuf, soc->rx_pkt_tlv_size);
1105 	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, soc->rx_pkt_tlv_size);
1106 	qdf_mem_free(rx_desc_info);
1107 }
1108 
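/*
 * Editor's note: the transcap above rewrites the buffer from
 *
 *	[RX TLVs][802.11 header][LLC/SNAP][payload]
 * to
 *	[RX TLVs][802.3 header][payload]
 *
 * by pulling the RX TLVs plus (hdrsize + LLC/SNAP length - Ethernet
 * header length) bytes, rebuilding DA/SA from the address fields
 * selected by the ToDS/FromDS bits, copying the EtherType from the
 * SNAP header, and then restoring the saved RX TLVs in front of the
 * new Ethernet header.
 */
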
1109 #ifdef RX_DEFRAG_DO_NOT_REINJECT
1110 /**
1111  * dp_rx_defrag_deliver() - Deliver defrag packet to stack
1112  * @txrx_peer: Pointer to the peer
1113  * @tid: Traffic Identifier (TID)
1114  * @head: Nbuf to be delivered
1115  *
1116  * Return: None
1117  */
1118 static inline void dp_rx_defrag_deliver(struct dp_txrx_peer *txrx_peer,
1119 					unsigned int tid,
1120 					qdf_nbuf_t head)
1121 {
1122 	struct dp_vdev *vdev = txrx_peer->vdev;
1123 	struct dp_soc *soc = vdev->pdev->soc;
1124 	qdf_nbuf_t deliver_list_head = NULL;
1125 	qdf_nbuf_t deliver_list_tail = NULL;
1126 	uint8_t *rx_tlv_hdr;
1127 
1128 	rx_tlv_hdr = qdf_nbuf_data(head);
1129 
1130 	QDF_NBUF_CB_RX_VDEV_ID(head) = vdev->vdev_id;
1131 	qdf_nbuf_set_tid_val(head, tid);
1132 	qdf_nbuf_pull_head(head, soc->rx_pkt_tlv_size);
1133 
1134 	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail,
1135 			  head);
1136 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, deliver_list_head,
1137 			       deliver_list_tail);
1138 }
1139 
1140 /**
1141  * dp_rx_defrag_reo_reinject() - Reinject the fragment chain back into REO
1142  * @txrx_peer: Pointer to the peer
1143  * @tid: Traffic Identifier (TID)
1144  * @head: Buffer to be reinjected back
1145  *
1146  * Reinject the fragment chain back into REO
1147  *
1148  * Return: QDF_STATUS
1149  */
1150 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
1151 					    unsigned int tid, qdf_nbuf_t head)
1152 {
1153 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1154 
1155 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1156 
1157 	dp_rx_defrag_deliver(txrx_peer, tid, head);
1158 	rx_reorder_array_elem->head = NULL;
1159 	rx_reorder_array_elem->tail = NULL;
1160 	dp_rx_return_head_frag_desc(txrx_peer, tid);
1161 
1162 	return QDF_STATUS_SUCCESS;
1163 }
1164 #else
1165 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1166 /**
1167  * dp_rx_reinject_ring_record_entry() - Record reinject ring history
1168  * @soc: Datapath soc structure
1169  * @paddr: paddr of the buffer reinjected to SW2REO ring
1170  * @sw_cookie: SW cookie of the buffer reinjected to SW2REO ring
1171  * @rbm: Return buffer manager of the buffer reinjected to SW2REO ring
1172  *
1173  * Return: None
1174  */
1175 static inline void
1176 dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1177 				 uint32_t sw_cookie, uint8_t rbm)
1178 {
1179 	struct dp_buf_info_record *record;
1180 	uint32_t idx;
1181 
1182 	if (qdf_unlikely(!soc->rx_reinject_ring_history))
1183 		return;
1184 
1185 	idx = dp_history_get_next_index(&soc->rx_reinject_ring_history->index,
1186 					DP_RX_REINJECT_HIST_MAX);
1187 
1188 	/* No NULL check needed for record since it's an array */
1189 	record = &soc->rx_reinject_ring_history->entry[idx];
1190 
1191 	record->timestamp = qdf_get_log_timestamp();
1192 	record->hbi.paddr = paddr;
1193 	record->hbi.sw_cookie = sw_cookie;
1194 	record->hbi.rbm = rbm;
1195 }
1196 #else
1197 static inline void
1198 dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1199 				 uint32_t sw_cookie, uint8_t rbm)
1200 {
1201 }
1202 #endif
1203 
1204 /**
1205  * dp_rx_defrag_reo_reinject() - Reinject the fragment chain back into REO
1206  * @txrx_peer: Pointer to the txrx_peer
1207  * @tid: Traffic Identifier (TID)
1208  * @head: Buffer to be reinjected back
1209  *
1210  * Reinject the fragment chain back into REO
1211  *
1212  * Return: QDF_STATUS
1213  */
1214 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
1215 					    unsigned int tid, qdf_nbuf_t head)
1216 {
1217 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
1218 	struct dp_soc *soc = pdev->soc;
1219 	struct hal_buf_info buf_info;
1220 	struct hal_buf_info temp_buf_info;
1221 	void *link_desc_va;
1222 	void *msdu0, *msdu_desc_info;
1223 	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
1224 	void *dst_mpdu_desc_info;
1225 	uint64_t dst_qdesc_addr;
1226 	qdf_dma_addr_t paddr;
1227 	uint32_t nbuf_len, seq_no, dst_ind;
1228 	uint32_t ret, cookie;
1229 	hal_ring_desc_t dst_ring_desc =
1230 		txrx_peer->rx_tid[tid].dst_ring_desc;
1231 	hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng;
1232 	struct dp_rx_desc *rx_desc = txrx_peer->rx_tid[tid].head_frag_desc;
1233 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1234 						txrx_peer->rx_tid[tid].array;
1235 	qdf_nbuf_t nbuf_head;
1236 	struct rx_desc_pool *rx_desc_pool = NULL;
1237 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(dst_ring_desc);
1238 	uint8_t rx_defrag_rbm_id = dp_rx_get_defrag_bm_id(soc);
1239 
1240 	/* do duplicate link desc address check */
1241 	dp_rx_link_desc_refill_duplicate_check(
1242 				soc,
1243 				&soc->last_op_info.reo_reinject_link_desc,
1244 				buf_addr_info);
1245 
1246 	nbuf_head = dp_ipa_handle_rx_reo_reinject(soc, head);
1247 	if (qdf_unlikely(!nbuf_head)) {
1248 		dp_err_rl("IPA RX REO reinject failed");
1249 		return QDF_STATUS_E_FAILURE;
1250 	}
1251 
1252 	/* update new allocated skb in case IPA is enabled */
1253 	if (nbuf_head != head) {
1254 		head = nbuf_head;
1255 		rx_desc->nbuf = head;
1256 		rx_reorder_array_elem->head = head;
1257 	}
1258 
1259 	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
1260 	if (!ent_ring_desc) {
1261 		dp_err_rl("HAL src ring next entry NULL");
1262 		return QDF_STATUS_E_FAILURE;
1263 	}
1264 
1265 	hal_rx_reo_buf_paddr_get(soc->hal_soc, dst_ring_desc, &buf_info);
1266 
1267 	/* buffer_addr_info is the first element of ring_desc */
1268 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)dst_ring_desc,
1269 				  &buf_info);
1270 
1271 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1272 
1273 	qdf_assert_always(link_desc_va);
1274 
1275 	msdu0 = hal_rx_msdu0_buffer_addr_lsb(soc->hal_soc, link_desc_va);
1276 	nbuf_len = qdf_nbuf_len(head) - soc->rx_pkt_tlv_size;
1277 
1278 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
1279 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
1280 			UNI_DESC_BUF_TYPE_RX_MSDU_LINK);
1281 
1282 	/* msdu reconfig */
1283 	msdu_desc_info = hal_rx_msdu_desc_info_ptr_get(soc->hal_soc, msdu0);
1284 
1285 	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);
1286 
1287 	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));
1288 
1289 	hal_msdu_desc_info_set(soc->hal_soc, msdu_desc_info, dst_ind, nbuf_len);
1290 
1291 	/* change RX TLV's */
1292 	hal_rx_tlv_msdu_len_set(soc->hal_soc, qdf_nbuf_data(head), nbuf_len);
1293 
1294 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)msdu0,
1295 				  &temp_buf_info);
1296 
1297 	cookie = temp_buf_info.sw_cookie;
1298 	rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
1299 
1300 	/* map the nbuf before reinject it into HW */
1301 	ret = qdf_nbuf_map_nbytes_single(soc->osdev, head,
1302 					 QDF_DMA_FROM_DEVICE,
1303 					 rx_desc_pool->buf_size);
1304 	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1305 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1306 				"%s: nbuf map failed !", __func__);
1307 		return QDF_STATUS_E_FAILURE;
1308 	}
1309 
1310 	dp_ipa_handle_rx_buf_smmu_mapping(soc, head,
1311 					  rx_desc_pool->buf_size, true,
1312 					  __func__, __LINE__);
1313 	dp_audio_smmu_map(soc->osdev,
1314 			  qdf_mem_paddr_from_dmaaddr(soc->osdev,
1315 						     QDF_NBUF_CB_PADDR(head)),
1316 			  QDF_NBUF_CB_PADDR(head), rx_desc_pool->buf_size);
1317 
1318 	/*
1319 	 * The rx frag handler unmapped the buffer and set the rx desc's
1320 	 * unmapped flag to 1; now that the defrag frame has been remapped
1321 	 * for reinjection, reset the flag to 0.
1322 	 */
1323 	rx_desc->unmapped = 0;
1324 
1325 	paddr = qdf_nbuf_get_frag_paddr(head, 0);
1326 
1327 	ret = dp_check_paddr(soc, &head, &paddr, rx_desc_pool);
1328 
1329 	if (ret == QDF_STATUS_E_FAILURE) {
1330 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1331 				"%s: x86 check failed !", __func__);
1332 		return QDF_STATUS_E_FAILURE;
1333 	}
1334 
1335 	hal_rxdma_buff_addr_info_set(soc->hal_soc, msdu0, paddr, cookie,
1336 				     rx_defrag_rbm_id);
1337 
1338 	/* Let's fill the entrance ring now */
1339 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1340 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1341 		"HAL RING Access For REO entrance SRNG Failed: %pK",
1342 		hal_srng);
1343 
1344 		return QDF_STATUS_E_FAILURE;
1345 	}
1346 
1347 	dp_rx_reinject_ring_record_entry(soc, paddr, cookie,
1348 					 rx_defrag_rbm_id);
1349 	paddr = (uint64_t)buf_info.paddr;
1350 	/* buf addr */
1351 	hal_rxdma_buff_addr_info_set(soc->hal_soc, ent_ring_desc, paddr,
1352 				     buf_info.sw_cookie,
1353 				     soc->idle_link_bm_id);
1354 	/* mpdu desc info */
1355 	ent_mpdu_desc_info = hal_ent_mpdu_desc_info(soc->hal_soc,
1356 						    ent_ring_desc);
1357 	dst_mpdu_desc_info = hal_dst_mpdu_desc_info(soc->hal_soc,
1358 						    dst_ring_desc);
1359 
1360 	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
1361 				sizeof(struct rx_mpdu_desc_info));
1362 	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));
1363 
1364 	seq_no = hal_rx_get_rx_sequence(soc->hal_soc, rx_desc->rx_buf_start);
1365 
1366 	hal_mpdu_desc_info_set(soc->hal_soc, ent_ring_desc, ent_mpdu_desc_info,
1367 			       seq_no);
1368 	/* qdesc addr */
1369 	ent_qdesc_addr = hal_get_reo_ent_desc_qdesc_addr(soc->hal_soc,
1370 						(uint8_t *)ent_ring_desc);
1371 
1372 	dst_qdesc_addr = soc->arch_ops.get_reo_qdesc_addr(
1373 						soc->hal_soc,
1374 						(uint8_t *)dst_ring_desc,
1375 						qdf_nbuf_data(head),
1376 						txrx_peer, tid);
1377 
1378 	qdf_mem_copy(ent_qdesc_addr, &dst_qdesc_addr, 5);
1379 
1380 	hal_set_reo_ent_desc_reo_dest_ind(soc->hal_soc,
1381 					  (uint8_t *)ent_ring_desc, dst_ind);
1382 
1383 	hal_srng_access_end(soc->hal_soc, hal_srng);
1384 
1385 	DP_STATS_INC(soc, rx.reo_reinject, 1);
1386 	dp_debug("reinjection done !");
1387 	return QDF_STATUS_SUCCESS;
1388 }
1389 #endif
1390 
1391 /**
1392  * dp_rx_defrag_gcmp_demic() - Remove MIC information from GCMP fragment
1393  * @soc: Datapath soc structure
1394  * @nbuf: Pointer to the fragment buffer
1395  * @hdrlen: 802.11 header length
1396  *
1397  * Remove MIC information from GCMP fragment
1398  *
1399  * Return: QDF_STATUS
1400  */
1401 static QDF_STATUS dp_rx_defrag_gcmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf,
1402 					  uint16_t hdrlen)
1403 {
1404 	uint8_t *ivp, *orig_hdr;
1405 	int rx_desc_len = soc->rx_pkt_tlv_size;
1406 
1407 	/* start of the 802.11 header */
1408 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
1409 
1410 	/*
1411 	 * GCMP header is located after 802.11 header and EXTIV
1412 	 * field should always be set to 1 for GCMP protocol.
1413 	 */
1414 	ivp = orig_hdr + hdrlen;
1415 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
1416 		return QDF_STATUS_E_DEFRAG_ERROR;
1417 
1418 	qdf_nbuf_trim_tail(nbuf, dp_f_gcmp.ic_trailer);
1419 
1420 	return QDF_STATUS_SUCCESS;
1421 }
1422 
1423 QDF_STATUS dp_rx_defrag(struct dp_txrx_peer *txrx_peer, unsigned int tid,
1424 			qdf_nbuf_t frag_list_head,
1425 			qdf_nbuf_t frag_list_tail)
1426 {
1427 	qdf_nbuf_t tmp_next;
1428 	qdf_nbuf_t cur = frag_list_head, msdu;
1429 	uint32_t index, tkip_demic = 0;
1430 	uint16_t hdr_space;
1431 	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
1432 	struct dp_vdev *vdev = txrx_peer->vdev;
1433 	struct dp_soc *soc = vdev->pdev->soc;
1434 	uint8_t status = 0;
1435 
1436 	if (!cur)
1437 		return QDF_STATUS_E_DEFRAG_ERROR;
1438 
1439 	hdr_space = dp_rx_defrag_hdrsize(soc, cur);
1440 	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, cur) ?
1441 		dp_sec_mcast : dp_sec_ucast;
1442 
1443 	/* Remove FCS from all fragments */
1444 	while (cur) {
1445 		tmp_next = qdf_nbuf_next(cur);
1446 		qdf_nbuf_set_next(cur, NULL);
1447 		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
1448 		qdf_nbuf_set_next(cur, tmp_next);
1449 		cur = tmp_next;
1450 	}
1451 	cur = frag_list_head;
1452 
1453 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1454 		  "%s: index %d Security type: %d", __func__,
1455 		  index, txrx_peer->security[index].sec_type);
1456 
1457 	switch (txrx_peer->security[index].sec_type) {
1458 	case cdp_sec_type_tkip:
1459 		tkip_demic = 1;
1460 		fallthrough;
1461 	case cdp_sec_type_tkip_nomic:
1462 		while (cur) {
1463 			tmp_next = qdf_nbuf_next(cur);
1464 			if (dp_rx_defrag_tkip_decap(soc, cur, hdr_space)) {
1465 
1466 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1467 					QDF_TRACE_LEVEL_ERROR,
1468 					"dp_rx_defrag: TKIP decap failed");
1469 
1470 				return QDF_STATUS_E_DEFRAG_ERROR;
1471 			}
1472 			cur = tmp_next;
1473 		}
1474 
1475 		/* On success, grow the header length to be stripped later */
1476 		hdr_space += dp_f_tkip.ic_header;
1477 		break;
1478 
1479 	case cdp_sec_type_aes_ccmp:
1480 		while (cur) {
1481 			tmp_next = qdf_nbuf_next(cur);
1482 			if (dp_rx_defrag_ccmp_demic(soc, cur, hdr_space)) {
1483 
1484 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1485 					QDF_TRACE_LEVEL_ERROR,
1486 					"dp_rx_defrag: CCMP demic failed");
1487 
1488 				return QDF_STATUS_E_DEFRAG_ERROR;
1489 			}
1490 			if (dp_rx_defrag_ccmp_decap(soc, cur, hdr_space)) {
1491 
1492 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1493 					QDF_TRACE_LEVEL_ERROR,
1494 					"dp_rx_defrag: CCMP decap failed");
1495 
1496 				return QDF_STATUS_E_DEFRAG_ERROR;
1497 			}
1498 			cur = tmp_next;
1499 		}
1500 
1501 		break;
1502 
1503 	case cdp_sec_type_wep40:
1504 	case cdp_sec_type_wep104:
1505 	case cdp_sec_type_wep128:
1506 		while (cur) {
1507 			tmp_next = qdf_nbuf_next(cur);
1508 			if (dp_rx_defrag_wep_decap(soc, cur, hdr_space)) {
1509 
1510 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1511 					QDF_TRACE_LEVEL_ERROR,
1512 					"dp_rx_defrag: WEP decap failed");
1513 
1514 				return QDF_STATUS_E_DEFRAG_ERROR;
1515 			}
1516 			cur = tmp_next;
1517 		}
1518 
1519 		break;
1520 	case cdp_sec_type_aes_gcmp:
1521 	case cdp_sec_type_aes_gcmp_256:
1522 		while (cur) {
1523 			tmp_next = qdf_nbuf_next(cur);
1524 			if (dp_rx_defrag_gcmp_demic(soc, cur, hdr_space)) {
1525 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1526 					  QDF_TRACE_LEVEL_ERROR,
1527 					  "dp_rx_defrag: GCMP demic failed");
1528 
1529 				return QDF_STATUS_E_DEFRAG_ERROR;
1530 			}
1531 			cur = tmp_next;
1532 		}
1533 
1534 		break;
1535 	default:
1536 		break;
1537 	}
1538 
1539 	if (tkip_demic) {
1540 		msdu = frag_list_head;
1541 		qdf_mem_copy(key,
1542 			     &txrx_peer->security[index].michael_key[0],
1543 			     IEEE80211_WEP_MICLEN);
1544 		status = dp_rx_defrag_tkip_demic(soc, key, msdu,
1545 						 soc->rx_pkt_tlv_size +
1546 						 hdr_space);
1547 
1548 		if (status) {
1549 			dp_rx_defrag_err(vdev, frag_list_head);
1550 
1551 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1552 				  QDF_TRACE_LEVEL_ERROR,
1553 				  "%s: TKIP demic failed status %d",
1554 				   __func__, status);
1555 
1556 			return QDF_STATUS_E_DEFRAG_ERROR;
1557 		}
1558 	}
1559 
1560 	/* Convert the header to 802.3 header */
1561 	dp_rx_defrag_nwifi_to_8023(soc, txrx_peer, tid, frag_list_head,
1562 				   hdr_space);
1563 	if (qdf_nbuf_next(frag_list_head)) {
1564 		if (dp_rx_construct_fraglist(txrx_peer, tid, frag_list_head,
1565 					     hdr_space))
1566 			return QDF_STATUS_E_DEFRAG_ERROR;
1567 	}
1568 
1569 	return QDF_STATUS_SUCCESS;
1570 }
1571 
1572 void dp_rx_defrag_cleanup(struct dp_txrx_peer *txrx_peer, unsigned int tid)
1573 {
1574 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1575 				txrx_peer->rx_tid[tid].array;
1576 
1577 	if (rx_reorder_array_elem) {
1578 		/* Free up nbufs */
1579 		dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
1580 		rx_reorder_array_elem->head = NULL;
1581 		rx_reorder_array_elem->tail = NULL;
1582 	} else {
1583 		dp_info("Cleanup self peer %pK and TID %u",
1584 			txrx_peer, tid);
1585 	}
1586 
1587 	/* Free up saved ring descriptors */
1588 	dp_rx_clear_saved_desc_info(txrx_peer, tid);
1589 
1590 	txrx_peer->rx_tid[tid].defrag_timeout_ms = 0;
1591 	txrx_peer->rx_tid[tid].curr_frag_num = 0;
1592 	txrx_peer->rx_tid[tid].curr_seq_num = 0;
1593 }
1594 
1595 #ifdef DP_RX_DEFRAG_ADDR1_CHECK_WAR
1596 #ifdef WLAN_FEATURE_11BE_MLO
1597 /**
1598  * dp_rx_defrag_vdev_mac_addr_cmp() - function to check whether mac address
1599  *				matches VDEV mac
1600  * @vdev: dp_vdev object of the VDEV on which this data packet is received
1601  * @mac_addr: Address to compare
1602  *
1603  * Return: 1 if the mac matches,
1604  *         0 if this frame is not correctly destined to this VDEV/MLD
1605  */
1606 static int dp_rx_defrag_vdev_mac_addr_cmp(struct dp_vdev *vdev,
1607 					  uint8_t *mac_addr)
1608 {
1609 	return ((qdf_mem_cmp(mac_addr, &vdev->mac_addr.raw[0],
1610 			     QDF_MAC_ADDR_SIZE) == 0) ||
1611 		(qdf_mem_cmp(mac_addr, &vdev->mld_mac_addr.raw[0],
1612 			     QDF_MAC_ADDR_SIZE) == 0));
1613 }
1614 
1615 #else
1616 static int dp_rx_defrag_vdev_mac_addr_cmp(struct dp_vdev *vdev,
1617 					  uint8_t *mac_addr)
1618 {
1619 	return (qdf_mem_cmp(mac_addr, &vdev->mac_addr.raw[0],
1620 			    QDF_MAC_ADDR_SIZE) == 0);
1621 }
1622 #endif
1623 
1624 static bool dp_rx_defrag_addr1_check(struct dp_soc *soc,
1625 				     struct dp_vdev *vdev,
1626 				     uint8_t *rx_tlv_hdr)
1627 {
1628 	union dp_align_mac_addr mac_addr;
1629 
1630 	/* If address1 is not valid discard the fragment */
1631 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, rx_tlv_hdr,
1632 				  &mac_addr.raw[0]) != QDF_STATUS_SUCCESS) {
1633 		DP_STATS_INC(soc, rx.err.defrag_ad1_invalid, 1);
1634 		return false;
1635 	}
1636 
1637 	/* WAR suggested by HW team to avoid crashing in case of packet
1638 	 * corruption issue
1639 	 *
1640 	 * recipe is to compare VDEV mac or MLD mac address with ADDR1
1641 	 * in case of mismatch consider it as corrupted packet and do
1642 	 * not process further
1643 	 */
1644 	if (!dp_rx_defrag_vdev_mac_addr_cmp(vdev,
1645 					    &mac_addr.raw[0])) {
1646 		DP_STATS_INC(soc, rx.err.defrag_ad1_invalid, 1);
1647 		return false;
1648 	}
1649 
1650 	return true;
1651 }
1652 #else
1653 static inline bool dp_rx_defrag_addr1_check(struct dp_soc *soc,
1654 					    struct dp_vdev *vdev,
1655 					    uint8_t *rx_tlv_hdr)
1656 {
1657 
1658 	return true;
1659 }
1660 #endif
1661 
1662 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
1663 				      struct dp_txrx_peer *txrx_peer,
1664 				      uint16_t tid,
1665 				      uint16_t rxseq, qdf_nbuf_t nbuf)
1666 {
1667 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
1668 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1669 	uint8_t all_frag_present;
1670 	uint32_t msdu_len;
1671 	QDF_STATUS status;
1672 
1673 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1674 
1675 	/*
1676 	 * HW may fill in an unexpected peer_id in the RX PKT TLV. If that
1677 	 * peer_id happens to map to a valid peer which never went through
1678 	 * dp_peer_rx_init() (like a SAP vdev self peer), accessing
1679 	 * rx_reorder_array_elem would be invalid, so drop the frame here.
1680 	 */
1681 	if (!rx_reorder_array_elem) {
1682 		dp_verbose_debug(
1683 			"peer id:%d drop rx frame!",
1684 			txrx_peer->peer_id);
1685 		DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1);
1686 		dp_rx_nbuf_free(nbuf);
1687 		goto fail;
1688 	}
1689 
1690 	if (rx_reorder_array_elem->head &&
1691 	    rxseq != rx_tid->curr_seq_num) {
1692 		/* Drop stored fragments if out of sequence
1693 		 * fragment is received
1694 		 */
1695 		dp_rx_reorder_flush_frag(txrx_peer, tid);
1696 
1697 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1698 			  "%s: No list found for TID %d Seq# %d",
1699 				__func__, tid, rxseq);
1700 		dp_rx_nbuf_free(nbuf);
1701 		goto fail;
1702 	}
1703 
1704 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
1705 						  qdf_nbuf_data(nbuf));
1706 
1707 	qdf_nbuf_set_pktlen(nbuf, (msdu_len + soc->rx_pkt_tlv_size));
1708 
1709 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
1710 					      &rx_reorder_array_elem->head,
1711 			&rx_reorder_array_elem->tail, nbuf,
1712 			&all_frag_present);
1713 
1714 	if (QDF_IS_STATUS_ERROR(status)) {
1715 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1716 			  "%s Fragment insert failed", __func__);
1717 
1718 		goto fail;
1719 	}
1720 
1721 	if (soc->rx.flags.defrag_timeout_check)
1722 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
1723 
1724 	if (!all_frag_present) {
1725 		uint32_t now_ms =
1726 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1727 
1728 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
1729 			now_ms + soc->rx.defrag.timeout_ms;
1730 
1731 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
1732 
1733 		return QDF_STATUS_SUCCESS;
1734 	}
1735 
1736 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
1737 			      rx_reorder_array_elem->tail);
1738 
1739 	if (QDF_IS_STATUS_ERROR(status)) {
1740 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1741 			  "%s Fragment processing failed", __func__);
1742 
1743 		dp_rx_return_head_frag_desc(txrx_peer, tid);
1744 		dp_rx_defrag_cleanup(txrx_peer, tid);
1745 
1746 		goto fail;
1747 	}
1748 
1749 	/* Re-inject the fragments back to REO for further processing */
1750 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
1751 					   rx_reorder_array_elem->head);
1752 	if (QDF_IS_STATUS_SUCCESS(status)) {
1753 		rx_reorder_array_elem->head = NULL;
1754 		rx_reorder_array_elem->tail = NULL;
1755 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1756 			  "%s: Frag seq successfully reinjected",
1757 			__func__);
1758 	} else {
1759 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1760 			  "%s: Frag seq reinjection failed", __func__);
1761 		dp_rx_return_head_frag_desc(txrx_peer, tid);
1762 	}
1763 
1764 	dp_rx_defrag_cleanup(txrx_peer, tid);
1765 	return QDF_STATUS_SUCCESS;
1766 
1767 fail:
1768 	return QDF_STATUS_E_DEFRAG_ERROR;
1769 }
1770 
1771 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
1772 /**
1773  * dp_rx_defrag_save_info_from_ring_desc() - Save info from REO ring descriptor
1774  * @soc: Pointer to the SOC data structure
1775  * @ring_desc: Pointer to the dst ring descriptor
1776  * @rx_desc: Pointer to rx descriptor
1777  * @txrx_peer: Pointer to the peer
1778  * @tid: Traffic Identifier (TID)
1779  *
1780  * Return: QDF_STATUS
1781  */
1782 static QDF_STATUS
1783 dp_rx_defrag_save_info_from_ring_desc(struct dp_soc *soc,
1784 				      hal_ring_desc_t ring_desc,
1785 				      struct dp_rx_desc *rx_desc,
1786 				      struct dp_txrx_peer *txrx_peer,
1787 				      unsigned int tid)
1788 {
1789 	void *dst_ring_desc;
1790 
1791 	dst_ring_desc = qdf_mem_malloc(hal_srng_get_entrysize(soc->hal_soc,
1792 							      REO_DST));
1793 
1794 	if (!dst_ring_desc) {
1795 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1796 			"%s: Memory alloc failed!", __func__);
1797 		QDF_ASSERT(0);
1798 		return QDF_STATUS_E_NOMEM;
1799 	}
1800 
1801 	qdf_mem_copy(dst_ring_desc, ring_desc,
1802 		     hal_srng_get_entrysize(soc->hal_soc, REO_DST));
1803 
1804 	txrx_peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
1805 	txrx_peer->rx_tid[tid].head_frag_desc = rx_desc;
1806 
1807 	return QDF_STATUS_SUCCESS;
1808 }
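/*
 * The descriptor copy saved above is consumed when the reassembled frame
 * is reinjected to REO and is freed via dp_rx_clear_saved_desc_info().
 */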
1809 
1810 /**
1811  * dp_rx_defrag_store_fragment() - Store incoming fragments
1812  * @soc: Pointer to the SOC data structure
1813  * @ring_desc: Pointer to the ring descriptor
1814  * @head: Pointer to the head of the rx descriptor free list
1815  * @tail: Pointer to the tail of the rx descriptor free list
1816  * @mpdu_desc_info: MPDU descriptor info
1817  * @tid: Traffic Identifier
1818  * @rx_desc: Pointer to rx descriptor
1819  * @rx_bfs: Number of rx buffers consumed (returned to the free list)
1820  *
1821  * Return: QDF_STATUS
1822  */
1823 static QDF_STATUS
1824 dp_rx_defrag_store_fragment(struct dp_soc *soc,
1825 			    hal_ring_desc_t ring_desc,
1826 			    union dp_rx_desc_list_elem_t **head,
1827 			    union dp_rx_desc_list_elem_t **tail,
1828 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1829 			    unsigned int tid, struct dp_rx_desc *rx_desc,
1830 			    uint32_t *rx_bfs)
1831 {
1832 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1833 	struct dp_pdev *pdev;
1834 	struct dp_txrx_peer *txrx_peer = NULL;
1835 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1836 	uint16_t peer_id;
1837 	uint8_t fragno, more_frag, all_frag_present = 0;
1838 	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
1839 	QDF_STATUS status;
1840 	struct dp_rx_tid_defrag *rx_tid;
1841 	uint8_t mpdu_sequence_control_valid;
1842 	uint8_t mpdu_frame_control_valid;
1843 	qdf_nbuf_t frag = rx_desc->nbuf;
1844 	uint32_t msdu_len;
1845 
1846 	if (qdf_nbuf_len(frag) > 0) {
1847 		dp_info("Dropping unexpected packet with skb_len: %d "
1848 			"data len: %d cookie: %d",
1849 			(uint32_t)qdf_nbuf_len(frag), frag->data_len,
1850 			rx_desc->cookie);
1851 		DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1);
1852 		goto discard_frag;
1853 	}
1854 
1855 	if (dp_rx_buffer_pool_refill(soc, frag, rx_desc->pool_id)) {
1856 		/* fragment queued back to the pool, free the link desc */
1857 		goto err_free_desc;
1858 	}
1859 
1860 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
1861 						  rx_desc->rx_buf_start);
1862 
1863 	qdf_nbuf_set_pktlen(frag, (msdu_len + soc->rx_pkt_tlv_size));
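	/* Reset any frag (ext) list on the nbuf so this fragment is
	 * handled as a single linear buffer from here on.
	 */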
1864 	qdf_nbuf_append_ext_list(frag, NULL, 0);
1865 
1866 	/* Check if the packet is from a valid peer */
1867 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1868 					       mpdu_desc_info->peer_meta_data);
1869 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &txrx_ref_handle,
1870 					       DP_MOD_ID_RX_ERR);
1871 
1872 	if (!txrx_peer) {
1873 		/* We should not receive anything from an unknown peer;
1874 		 * however, that can happen while in monitor mode.
1875 		 * We don't need to handle that here.
1876 		 */
1877 		dp_info_rl("Unknown peer with peer_id %d, dropping fragment",
1878 			   peer_id);
1879 		DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1);
1880 		goto discard_frag;
1881 	}
1882 
1883 	if (tid >= DP_MAX_TIDS) {
1884 		dp_info("TID out of bounds: %d", tid);
1885 		qdf_assert_always(0);
1886 		goto discard_frag;
1887 	}
1888 
1889 	if (!dp_rx_defrag_addr1_check(soc, txrx_peer->vdev,
1890 				      rx_desc->rx_buf_start)) {
1891 		dp_info("Invalid address 1");
1892 		goto discard_frag;
1893 	}
1894 
1895 	mpdu_sequence_control_valid =
1896 		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
1897 						       rx_desc->rx_buf_start);
1898 
1899 	/* Invalid MPDU sequence control field, MPDU is of no use */
1900 	if (!mpdu_sequence_control_valid) {
1901 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1902 			"Invalid MPDU seq control field, dropping MPDU");
1903 
1904 		qdf_assert(0);
1905 		goto discard_frag;
1906 	}
1907 
1908 	mpdu_frame_control_valid =
1909 		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
1910 						    rx_desc->rx_buf_start);
1911 
1912 	/* Invalid frame control field */
1913 	if (!mpdu_frame_control_valid) {
1914 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1915 			"Invalid frame control field, dropping MPDU");
1916 
1917 		qdf_assert(0);
1918 		goto discard_frag;
1919 	}
1920 
1921 	/* Current mpdu sequence */
1922 	more_frag = dp_rx_frag_get_more_frag_bit(soc, rx_desc->rx_buf_start);
1923 
1924 	/* HW does not populate the fragment number as of now;
1925 	 * it needs to be read from the 802.11 header
1926 	 */
1927 	fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc->rx_buf_start);
1928 
1929 	pdev = txrx_peer->vdev->pdev;
1930 	rx_tid = &txrx_peer->rx_tid[tid];
1931 
1932 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, frag,
1933 			      QDF_TX_RX_STATUS_OK, false);
1934 
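	/* The per-TID defrag state (reorder array, curr_seq_num, waitlist)
	 * touched below is protected by defrag_tid_lock.
	 */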
1935 	qdf_spin_lock_bh(&rx_tid->defrag_tid_lock);
1936 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1937 	if (!rx_reorder_array_elem) {
1938 		dp_err_rl("Rcvd Fragmented pkt before tid setup for peer %pK",
1939 			  txrx_peer);
1940 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1941 		goto discard_frag;
1942 	}
1943 
1944 	/*
1945 	 * !more_frag: no more fragments to be delivered
1946 	 * !fragno: packet is not fragmented
1947 	 * !rx_reorder_array_elem->head: no saved fragments so far
1948 	 */
1949 	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
1950 		/* We should not get into this situation here.
1951 		 * It means an unfragmented packet with the fragment flag set
1952 		 * was delivered over the REO exception ring.
1953 		 * Such a packet typically follows the normal rx path.
1954 		 */
1955 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1956 			"Rcvd unfragmented pkt on REO Err srng, dropping");
1957 
1958 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1959 		qdf_assert(0);
1960 		goto discard_frag;
1961 	}
1962 
1963 	/* Check if the fragment is for the same sequence or a different one */
1964 	dp_debug("rx_tid %d", tid);
1965 	if (rx_reorder_array_elem->head) {
1966 		dp_debug("rxseq %d", rxseq);
1967 		if (rxseq != rx_tid->curr_seq_num) {
1968 
1969 			dp_debug("mismatch cur_seq %d rxseq %d",
1970 				 rx_tid->curr_seq_num, rxseq);
1971 			/* Drop the stored fragments if an out-of-sequence
1972 			 * fragment is received
1973 			 */
1974 			dp_rx_reorder_flush_frag(txrx_peer, tid);
1975 
1976 			DP_STATS_INC(soc, rx.rx_frag_oor, 1);
1977 
1978 			dp_debug("cur rxseq %d", rxseq);
1979 			/*
1980 			 * The sequence number for this fragment becomes the
1981 			 * new sequence number to be processed
1982 			 */
1983 			rx_tid->curr_seq_num = rxseq;
1984 		}
1985 	} else {
1986 		/* Check if we are processing the first fragment; if it is
1987 		 * not the first fragment, discard it.
1988 		 */
1989 		if (fragno) {
1990 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1991 			goto discard_frag;
1992 		}
1993 		dp_debug("cur rxseq %d", rxseq);
1994 		/* Start of a new sequence */
1995 		dp_rx_defrag_cleanup(txrx_peer, tid);
1996 		rx_tid->curr_seq_num = rxseq;
1997 		/* store PN number also */
1998 	}
1999 
2000 	/*
2001 	 * If the earlier sequence was dropped, this will be the fresh start.
2002 	 * Else, continue with next fragment in a given sequence
2003 	 */
2004 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
2005 					      &rx_reorder_array_elem->head,
2006 					      &rx_reorder_array_elem->tail,
2007 					      frag, &all_frag_present);
2008 
2009 	/*
2010 	 * Currently we can have only 6 MSDUs per MPDU. If the current
2011 	 * packet sequence has more than 6 MSDUs for some reason, we will
2012 	 * have to use the next MSDU link descriptor and chain them
2013 	 * together before reinjection.
2014 	 * ring_desc is validated in dp_rx_err_process.
2015 	 */
2016 	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
2017 			(rx_reorder_array_elem->head == frag)) {
2018 
2019 		status = dp_rx_defrag_save_info_from_ring_desc(soc, ring_desc,
2020 							       rx_desc,
2021 							       txrx_peer, tid);
2022 
2023 		if (status != QDF_STATUS_SUCCESS) {
2024 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2025 				"%s: Unable to store ring desc!", __func__);
2026 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2027 			goto discard_frag;
2028 		}
2029 	} else {
2030 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
2031 		(*rx_bfs)++;
2032 
2033 		/* Return the non-head link desc */
2034 		if (dp_rx_link_desc_return(soc, ring_desc,
2035 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2036 		    QDF_STATUS_SUCCESS)
2037 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2038 				  "%s: Failed to return link desc", __func__);
2039 
2040 	}
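	/*
	 * Descriptor bookkeeping so far, in short:
	 *   - head fragment (fragno 0): rx_desc and the REO dst ring
	 *     descriptor stay cached for the final reinjection;
	 *   - any other fragment: rx_desc goes back to the free list and
	 *     its MSDU link descriptor is returned to the idle list.
	 */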
2041 
2042 	if (pdev->soc->rx.flags.defrag_timeout_check)
2043 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
2044 
2045 	/* Yet to receive more fragments for this sequence number */
2046 	if (!all_frag_present) {
2047 		uint32_t now_ms =
2048 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2049 
2050 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
2051 			now_ms + pdev->soc->rx.defrag.timeout_ms;
2052 
2053 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
2054 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2055 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2056 
2057 		return QDF_STATUS_SUCCESS;
2058 	}
2059 
2060 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2061 		  "All fragments received for sequence: %d", rxseq);
2062 
2063 	/* Process the fragments */
2064 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
2065 			      rx_reorder_array_elem->tail);
2066 	if (QDF_IS_STATUS_ERROR(status)) {
2067 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2068 			"Fragment processing failed");
2069 
2070 		dp_rx_add_to_free_desc_list(head, tail,
2071 				txrx_peer->rx_tid[tid].head_frag_desc);
2072 		(*rx_bfs)++;
2073 
2074 		if (dp_rx_link_desc_return(soc,
2075 					txrx_peer->rx_tid[tid].dst_ring_desc,
2076 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2077 				QDF_STATUS_SUCCESS)
2078 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2079 					"%s: Failed to return link desc",
2080 					__func__);
2081 		dp_rx_defrag_cleanup(txrx_peer, tid);
2082 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2083 		goto end;
2084 	}
2085 
2086 	/* Re-inject the fragments back to REO for further processing */
2087 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
2088 					   rx_reorder_array_elem->head);
2089 	if (QDF_IS_STATUS_SUCCESS(status)) {
2090 		rx_reorder_array_elem->head = NULL;
2091 		rx_reorder_array_elem->tail = NULL;
2092 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2093 			  "Fragmented sequence successfully reinjected");
2094 	} else {
2095 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2096 			  "Fragmented sequence reinjection failed");
2097 		dp_rx_return_head_frag_desc(txrx_peer, tid);
2098 	}
2099 
2100 	dp_rx_defrag_cleanup(txrx_peer, tid);
2101 	qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2102 
2103 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2104 
2105 	return QDF_STATUS_SUCCESS;
2106 
2107 discard_frag:
2108 	dp_rx_nbuf_free(frag);
2109 err_free_desc:
2110 	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
2111 	if (dp_rx_link_desc_return(soc, ring_desc,
2112 				   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2113 	    QDF_STATUS_SUCCESS)
2114 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2115 			  "%s: Failed to return link desc", __func__);
2116 	(*rx_bfs)++;
2117 
2118 end:
2119 	if (txrx_peer)
2120 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2121 
2122 	DP_STATS_INC(soc, rx.rx_frag_err, 1);
2123 	return QDF_STATUS_E_DEFRAG_ERROR;
2124 }
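/*
 * Illustrative walk-through for a 3-fragment MPDU:
 *   frag 0 (fragno 0, more_frag 1): becomes the list head, the REO dst
 *       ring descriptor is saved and the TID joins the waitlist;
 *   frag 1 (fragno 1, more_frag 1): chained into the list, its link
 *       descriptor is returned to the idle list;
 *   frag 2 (fragno 2, more_frag 0): all_frag_present is set, so
 *       dp_rx_defrag() reassembles the MPDU and the result is
 *       reinjected to REO.
 */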
2125 
2126 uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
2127 			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
2128 			   struct dp_rx_desc *rx_desc,
2129 			   uint8_t *mac_id,
2130 			   uint32_t quota)
2131 {
2132 	uint32_t rx_bufs_used = 0;
2133 	qdf_nbuf_t msdu = NULL;
2134 	uint32_t tid;
2135 	uint32_t rx_bfs = 0;
2136 	struct dp_pdev *pdev;
2137 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2138 	struct rx_desc_pool *rx_desc_pool;
2139 
2140 	qdf_assert(soc);
2141 	qdf_assert(mpdu_desc_info);
2142 	qdf_assert(rx_desc);
2143 
2144 	dp_debug("Number of MSDUs to process, num_msdus: %d",
2145 		 mpdu_desc_info->msdu_count);
2146 
2147 
2148 	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
2149 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2150 			"No MSDUs to process");
2151 		return rx_bufs_used;
2152 	}
2153 
2154 	/* all buffers in MSDU link belong to same pdev */
2155 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
2156 	if (!pdev) {
2157 		dp_nofl_debug("pdev is null for pool_id = %d",
2158 			      rx_desc->pool_id);
2159 		return rx_bufs_used;
2160 	}
2161 
2162 	*mac_id = rx_desc->pool_id;
2163 
2164 	msdu = rx_desc->nbuf;
2165 
2166 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2167 
2168 	if (rx_desc->unmapped)
2169 		return rx_bufs_used;
2170 
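	/* Unmap the buffer (under the IPA SMMU mapping lock) so the host
	 * can safely parse the RX TLVs from the nbuf data below.
	 */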
2171 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
2172 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
2173 	rx_desc->unmapped = 1;
2174 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2175 
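	/* Cache the start of the RX packet TLVs for the HAL accessors below */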
2176 	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);
2177 
2178 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start);
2179 
2180 	/* Process fragment-by-fragment */
2181 	status = dp_rx_defrag_store_fragment(soc, ring_desc,
2182 					     &pdev->free_list_head,
2183 					     &pdev->free_list_tail,
2184 					     mpdu_desc_info,
2185 					     tid, rx_desc, &rx_bfs);
2186 
2187 	if (rx_bfs)
2188 		rx_bufs_used += rx_bfs;
2189 
2190 	if (!QDF_IS_STATUS_SUCCESS(status))
2191 		dp_info_rl("Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
2192 			   mpdu_desc_info->mpdu_seq,
2193 			   mpdu_desc_info->msdu_count,
2194 			   mpdu_desc_info->mpdu_flags);
2195 
2196 	return rx_bufs_used;
2197 }
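/*
 * dp_rx_frag_handle() is invoked from the REO exception-ring handler
 * (dp_rx_err_process(), which also validates ring_desc) for MPDUs
 * flagged as fragments; the returned count feeds the caller's rx buffer
 * accounting.
 */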
2198 
2199 #endif /* WLAN_SOFTUMAC_SUPPORT */
2200