xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #ifndef RX_DEFRAG_DO_NOT_REINJECT
22 #ifndef DP_BE_WAR
23 #include "li/hal_li_rx.h"
24 #endif
25 #endif
26 #include "dp_types.h"
27 #include "dp_rx.h"
28 #include "dp_peer.h"
29 #include "hal_api.h"
30 #include "qdf_trace.h"
31 #include "qdf_nbuf.h"
32 #include "dp_internal.h"
33 #include "dp_rx_defrag.h"
34 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
35 #include "dp_rx_defrag.h"
36 #include "dp_ipa.h"
37 #include "dp_rx_buffer_pool.h"
38 
39 const struct dp_rx_defrag_cipher dp_f_ccmp = {
40 	"AES-CCM",
41 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
42 	IEEE80211_WEP_MICLEN,
43 	0,
44 };
45 
46 const struct dp_rx_defrag_cipher dp_f_tkip = {
47 	"TKIP",
48 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
49 	IEEE80211_WEP_CRCLEN,
50 	IEEE80211_WEP_MICLEN,
51 };
52 
53 const struct dp_rx_defrag_cipher dp_f_wep = {
54 	"WEP",
55 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
56 	IEEE80211_WEP_CRCLEN,
57 	0,
58 };
59 
60 /*
61  * The header and MIC lengths are the same for both
62  * GCMP-128 and GCMP-256.
63  */
64 const struct dp_rx_defrag_cipher dp_f_gcmp = {
65 	"AES-GCMP",
66 	WLAN_IEEE80211_GCMP_HEADERLEN,
67 	WLAN_IEEE80211_GCMP_MICLEN,
68 	WLAN_IEEE80211_GCMP_MICLEN,
69 };
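
/*
 * Illustrative sketch, not part of the driver: it shows how the cipher
 * table entries above are consumed. ic_header bytes are stripped from
 * the front of each fragment after decap, ic_trailer bytes are trimmed
 * from the tail, and ic_miclen is the MIC length handled by the demic
 * routines. total_crypto_overhead() is a hypothetical helper; the field
 * names assume the dp_rx_defrag_cipher layout from dp_rx_defrag.h.
 */
static inline uint32_t
total_crypto_overhead(const struct dp_rx_defrag_cipher *cipher)
{
	/* per-fragment bytes that never reach the reassembled MSDU */
	return cipher->ic_header + cipher->ic_trailer + cipher->ic_miclen;
}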
70 
71 /*
72  * dp_rx_defrag_frames_free(): Free fragment chain
73  * @frames: Fragment chain
74  *
75  * Iterates through the fragment chain and frees each fragment
76  * Returns: None
77  */
78 static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
79 {
80 	qdf_nbuf_t next, frag = frames;
81 
82 	while (frag) {
83 		next = qdf_nbuf_next(frag);
84 		dp_rx_nbuf_free(frag);
85 		frag = next;
86 	}
87 }
88 
89 /*
90  * dp_rx_clear_saved_desc_info(): Clears descriptor info
91  * @txrx_peer: Pointer to the peer data structure
92  * @tid: Traffic Identifier (TID)
93  *
94  * Frees the cached REO destination ring descriptor and clears
95  * the saved MPDU/MSDU descriptor info for the given peer and TID
96  *
97  * Returns: None
98  */
99 static void dp_rx_clear_saved_desc_info(struct dp_txrx_peer *txrx_peer,
100 					unsigned int tid)
101 {
102 	if (txrx_peer->rx_tid[tid].dst_ring_desc)
103 		qdf_mem_free(txrx_peer->rx_tid[tid].dst_ring_desc);
104 
105 	txrx_peer->rx_tid[tid].dst_ring_desc = NULL;
106 	txrx_peer->rx_tid[tid].head_frag_desc = NULL;
107 }
108 
109 static void dp_rx_return_head_frag_desc(struct dp_txrx_peer *txrx_peer,
110 					unsigned int tid)
111 {
112 	struct dp_soc *soc;
113 	struct dp_pdev *pdev;
114 	struct dp_srng *dp_rxdma_srng;
115 	struct rx_desc_pool *rx_desc_pool;
116 	union dp_rx_desc_list_elem_t *head = NULL;
117 	union dp_rx_desc_list_elem_t *tail = NULL;
118 	uint8_t pool_id;
119 
120 	pdev = txrx_peer->vdev->pdev;
121 	soc = pdev->soc;
122 
123 	if (txrx_peer->rx_tid[tid].head_frag_desc) {
124 		pool_id = txrx_peer->rx_tid[tid].head_frag_desc->pool_id;
125 		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
126 		rx_desc_pool = &soc->rx_desc_buf[pool_id];
127 
128 		dp_rx_add_to_free_desc_list(&head, &tail,
129 					    txrx_peer->rx_tid[tid].head_frag_desc);
130 		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
131 					1, &head, &tail, false);
132 	}
133 
134 	if (txrx_peer->rx_tid[tid].dst_ring_desc) {
135 		if (dp_rx_link_desc_return(soc,
136 					   txrx_peer->rx_tid[tid].dst_ring_desc,
137 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
138 		    QDF_STATUS_SUCCESS)
139 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
140 				  "%s: Failed to return link desc", __func__);
141 	}
142 }
143 
144 /*
145  * dp_rx_reorder_flush_frag(): Flush the frag list
146  * @txrx_peer: Pointer to the peer data structure
147  * @tid: Traffic Identifier (TID)
148  *
149  * Flush the per-TID frag list
150  *
151  * Returns: None
152  */
153 void dp_rx_reorder_flush_frag(struct dp_txrx_peer *txrx_peer,
154 			      unsigned int tid)
155 {
156 	dp_info_rl("Flushing TID %d", tid);
157 
158 	if (!txrx_peer) {
159 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
160 					"%s: NULL peer", __func__);
161 		return;
162 	}
163 
164 	dp_rx_return_head_frag_desc(txrx_peer, tid);
165 	dp_rx_defrag_cleanup(txrx_peer, tid);
166 }
167 
168 /*
169  * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list
170  * @soc: DP SOC
171  *
172  * Flush fragments of all waitlisted TIDs
173  *
174  * Returns: None
175  */
176 void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
177 {
178 	struct dp_rx_tid_defrag *waitlist_elem = NULL;
179 	struct dp_rx_tid_defrag *tmp;
180 	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
181 	TAILQ_HEAD(, dp_rx_tid_defrag) temp_list;
182 	dp_txrx_ref_handle txrx_ref_handle = NULL;
183 
184 	TAILQ_INIT(&temp_list);
185 
186 	dp_debug("Current time %u", now_ms);
187 
188 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
189 	TAILQ_FOREACH_SAFE(waitlist_elem, &soc->rx.defrag.waitlist,
190 			   defrag_waitlist_elem, tmp) {
191 		uint32_t tid;
192 
193 		if (waitlist_elem->defrag_timeout_ms > now_ms)
194 			break;
195 
196 		tid = waitlist_elem->tid;
197 		if (tid >= DP_MAX_TIDS) {
198 			qdf_assert(0);
199 			continue;
200 		}
201 
202 		TAILQ_REMOVE(&soc->rx.defrag.waitlist, waitlist_elem,
203 			     defrag_waitlist_elem);
204 		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
205 
206 		/* Move to temp list and clean-up later */
207 		TAILQ_INSERT_TAIL(&temp_list, waitlist_elem,
208 				  defrag_waitlist_elem);
209 	}
210 	if (waitlist_elem) {
211 		soc->rx.defrag.next_flush_ms =
212 			waitlist_elem->defrag_timeout_ms;
213 	} else {
214 		soc->rx.defrag.next_flush_ms =
215 			now_ms + soc->rx.defrag.timeout_ms;
216 	}
217 
218 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
219 
220 	TAILQ_FOREACH_SAFE(waitlist_elem, &temp_list,
221 			   defrag_waitlist_elem, tmp) {
222 		struct dp_txrx_peer *txrx_peer, *temp_peer = NULL;
223 
224 		qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
225 		TAILQ_REMOVE(&temp_list, waitlist_elem,
226 			     defrag_waitlist_elem);
227 		/* get address of current peer */
228 		txrx_peer = waitlist_elem->defrag_peer;
229 		qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);
230 
231 		temp_peer = dp_txrx_peer_get_ref_by_id(soc, txrx_peer->peer_id,
232 						       &txrx_ref_handle,
233 						       DP_MOD_ID_RX_ERR);
234 		if (temp_peer == txrx_peer) {
235 			qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
236 			dp_rx_reorder_flush_frag(txrx_peer, waitlist_elem->tid);
237 			qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);
238 		}
239 
240 		if (temp_peer)
241 			dp_txrx_peer_unref_delete(txrx_ref_handle,
242 						  DP_MOD_ID_RX_ERR);
243 
244 	}
245 }
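
/*
 * Illustrative sketch, not part of the driver: the flush above uses a
 * two-phase pattern. Expired entries are unlinked into a private temp
 * list while defrag_lock is held, and the potentially slow per-entry
 * cleanup (peer lookup, fragment free, descriptor return) runs after
 * the lock is dropped, so the SOC-wide lock is never held across heavy
 * work. A minimal standalone version of the same pattern, assuming a
 * BSD-style sys/queue.h that provides TAILQ_FOREACH_SAFE:
 */
#include <stdint.h>
#include <sys/queue.h>

struct elem {
	uint32_t timeout_ms;
	TAILQ_ENTRY(elem) link;
};
TAILQ_HEAD(elem_list, elem);

static void flush_expired(struct elem_list *waitlist, uint32_t now_ms,
			  void (*lock)(void), void (*unlock)(void),
			  void (*cleanup)(struct elem *))
{
	struct elem *e, *tmp;
	struct elem_list temp;

	TAILQ_INIT(&temp);

	lock();				/* phase 1: unlink under the lock */
	TAILQ_FOREACH_SAFE(e, waitlist, link, tmp) {
		if (e->timeout_ms > now_ms)
			break;		/* list is ordered by expiry */
		TAILQ_REMOVE(waitlist, e, link);
		TAILQ_INSERT_TAIL(&temp, e, link);
	}
	unlock();

	TAILQ_FOREACH_SAFE(e, &temp, link, tmp) {
		TAILQ_REMOVE(&temp, e, link);
		cleanup(e);		/* phase 2: heavy work, no lock */
	}
}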
246 
247 /*
248  * dp_rx_defrag_waitlist_add(): Update the per-SOC defrag wait list
249  * @txrx_peer: Pointer to the peer data structure
250  * @tid: Traffic Identifier (TID)
251  *
252  * Appends the per-TID fragment entry to the SOC-wide fragment wait list
253  *
254  * Returns: None
255  */
256 static void dp_rx_defrag_waitlist_add(struct dp_txrx_peer *txrx_peer,
257 				      unsigned int tid)
258 {
259 	struct dp_soc *psoc = txrx_peer->vdev->pdev->soc;
260 	struct dp_rx_tid_defrag *waitlist_elem = &txrx_peer->rx_tid[tid];
261 
262 	dp_debug("Adding TID %u to waitlist for peer %pK with peer_id = %d ",
263 		 tid, txrx_peer, txrx_peer->peer_id);
264 
265 	/* TODO: use LIST macros instead of TAIL macros */
266 	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
267 	if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist))
268 		psoc->rx.defrag.next_flush_ms =
269 			waitlist_elem->defrag_timeout_ms;
270 
271 	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, waitlist_elem,
272 			  defrag_waitlist_elem);
273 	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
274 	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
275 }
276 
277 /*
278  * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist
279  * @txrx_peer: Pointer to the peer data structure
280  * @tid: Traffic Identifier (TID)
281  *
282  * Remove fragments from waitlist
283  *
284  * Returns: None
285  */
286 void dp_rx_defrag_waitlist_remove(struct dp_txrx_peer *txrx_peer,
287 				  unsigned int tid)
288 {
289 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
290 	struct dp_soc *soc = pdev->soc;
291 	struct dp_rx_tid_defrag *waitlist_elm;
292 	struct dp_rx_tid_defrag *tmp;
293 
294 	dp_debug("Removing TID %u from waitlist for peer %pK peer_id = %d ",
295 		 tid, txrx_peer, txrx_peer->peer_id);
296 
297 	if (tid >= DP_MAX_TIDS) {
298 		dp_err("TID out of bounds: %d", tid);
299 		qdf_assert_always(0);
300 	}
301 
302 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
303 	TAILQ_FOREACH_SAFE(waitlist_elm, &soc->rx.defrag.waitlist,
304 			   defrag_waitlist_elem, tmp) {
305 		struct dp_txrx_peer *peer_on_waitlist;
306 
307 		/* get address of current peer */
308 		peer_on_waitlist = waitlist_elm->defrag_peer;
309 
310 		/* Ensure it is TID for same peer */
311 		if (peer_on_waitlist == txrx_peer && waitlist_elm->tid == tid) {
312 			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
313 				     waitlist_elm, defrag_waitlist_elem);
314 			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
315 		}
316 	}
317 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
318 }
319 
320 /*
321  * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list
322  * @txrx_peer: Pointer to the peer data structure
323  * @tid: Traffic Identifier (TID)
324  * @head_addr: Pointer to head list
325  * @tail_addr: Pointer to tail list
326  * @frag: Incoming fragment
327  * @all_frag_present: Flag to indicate whether all fragments are received
328  *
329  * Build a per-tid, per-sequence fragment list.
330  *
331  * Returns: Success, if inserted
332  */
333 static QDF_STATUS
334 dp_rx_defrag_fraglist_insert(struct dp_txrx_peer *txrx_peer, unsigned int tid,
335 			     qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr,
336 			     qdf_nbuf_t frag, uint8_t *all_frag_present)
337 {
338 	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
339 	qdf_nbuf_t next;
340 	qdf_nbuf_t prev = NULL;
341 	qdf_nbuf_t cur;
342 	uint16_t head_fragno, cur_fragno, next_fragno;
343 	uint8_t last_morefrag = 1, count = 0;
344 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
345 	uint8_t *rx_desc_info;
346 
347 	qdf_assert(frag);
348 	qdf_assert(head_addr);
349 	qdf_assert(tail_addr);
350 
351 	*all_frag_present = 0;
352 	rx_desc_info = qdf_nbuf_data(frag);
353 	cur_fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc_info);
354 
355 	dp_debug("cur_fragno %d", cur_fragno);
356 	/* If this is the first fragment */
357 	if (!(*head_addr)) {
358 		*head_addr = *tail_addr = frag;
359 		qdf_nbuf_set_next(*tail_addr, NULL);
360 		rx_tid->curr_frag_num = cur_fragno;
361 
362 		goto insert_done;
363 	}
364 
365 	/* In sequence fragment */
366 	if (cur_fragno > rx_tid->curr_frag_num) {
367 		qdf_nbuf_set_next(*tail_addr, frag);
368 		*tail_addr = frag;
369 		qdf_nbuf_set_next(*tail_addr, NULL);
370 		rx_tid->curr_frag_num = cur_fragno;
371 	} else {
372 		/* Out of sequence fragment */
373 		cur = *head_addr;
374 		rx_desc_info = qdf_nbuf_data(cur);
375 		head_fragno = dp_rx_frag_get_mpdu_frag_number(soc,
376 							      rx_desc_info);
377 
378 		if (cur_fragno == head_fragno) {
379 			dp_rx_nbuf_free(frag);
380 			goto insert_fail;
381 		} else if (head_fragno > cur_fragno) {
382 			qdf_nbuf_set_next(frag, cur);
383 			cur = frag;
384 			*head_addr = frag; /* head pointer to be updated */
385 		} else {
386 			while ((cur_fragno > head_fragno) && cur) {
387 				prev = cur;
388 				cur = qdf_nbuf_next(cur);
389 				if (cur) {
390 					rx_desc_info = qdf_nbuf_data(cur);
391 					head_fragno =
392 						dp_rx_frag_get_mpdu_frag_number(
393 								soc,
394 								rx_desc_info);
395 				}
396 			}
397 
398 			if (cur_fragno == head_fragno) {
399 				dp_rx_nbuf_free(frag);
400 				goto insert_fail;
401 			}
402 
403 			qdf_nbuf_set_next(prev, frag);
404 			qdf_nbuf_set_next(frag, cur);
405 		}
406 	}
407 
408 	next = qdf_nbuf_next(*head_addr);
409 
410 	rx_desc_info = qdf_nbuf_data(*tail_addr);
411 	last_morefrag = dp_rx_frag_get_more_frag_bit(soc, rx_desc_info);
412 
413 	/* TODO: optimize the loop */
414 	if (!last_morefrag) {
415 		/* Check if all fragments are present */
416 		do {
417 			rx_desc_info = qdf_nbuf_data(next);
418 			next_fragno =
419 				dp_rx_frag_get_mpdu_frag_number(soc,
420 								rx_desc_info);
421 			count++;
422 
423 			if (next_fragno != count)
424 				break;
425 
426 			next = qdf_nbuf_next(next);
427 		} while (next);
428 
429 		if (!next) {
430 			*all_frag_present = 1;
431 			return QDF_STATUS_SUCCESS;
432 		} else {
433 			/* revisit */
434 		}
435 	}
436 
437 insert_done:
438 	return QDF_STATUS_SUCCESS;
439 
440 insert_fail:
441 	return QDF_STATUS_E_FAILURE;
442 }
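
/*
 * Illustrative sketch, not part of the driver: the ordering logic of
 * dp_rx_defrag_fraglist_insert() above, reduced to a plain singly
 * linked list keyed by fragment number (the driver keeps a fast path
 * for in-sequence arrivals and reads the fragment number out of the
 * RX TLVs). Duplicates are rejected, matching the drop above. The
 * struct frag type is hypothetical, for illustration only.
 */
#include <stddef.h>
#include <stdint.h>

struct frag {
	uint16_t fragno;
	struct frag *next;
};

/* Returns 0 on insert, -1 if a fragment with this number already exists */
static int frag_sorted_insert(struct frag **head, struct frag *f)
{
	struct frag *cur = *head, *prev = NULL;

	while (cur && cur->fragno < f->fragno) {
		prev = cur;
		cur = cur->next;
	}
	if (cur && cur->fragno == f->fragno)
		return -1;	/* duplicate: caller frees the fragment */

	f->next = cur;
	if (prev)
		prev->next = f;
	else
		*head = f;	/* new smallest fragment number */
	return 0;
}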
443 
444 
445 /*
446  * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment
447  * @msdu: Pointer to the fragment
448  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
449  *
450  * decap tkip encrypted fragment
451  *
452  * Returns: QDF_STATUS
453  */
454 static QDF_STATUS
455 dp_rx_defrag_tkip_decap(struct dp_soc *soc,
456 			qdf_nbuf_t msdu, uint16_t hdrlen)
457 {
458 	uint8_t *ivp, *orig_hdr;
459 	int rx_desc_len = soc->rx_pkt_tlv_size;
460 
461 	/* start of 802.11 header info */
462 	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
463 
464 	/* TKIP header is located post 802.11 header */
465 	ivp = orig_hdr + hdrlen;
466 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
467 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
468 			"IEEE80211_WEP_EXTIV is missing in TKIP fragment");
469 		return QDF_STATUS_E_DEFRAG_ERROR;
470 	}
471 
472 	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);
473 
474 	return QDF_STATUS_SUCCESS;
475 }
476 
477 /*
478  * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment
479  * @nbuf: Pointer to the fragment buffer
480  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
481  *
482  * Remove MIC information from CCMP fragment
483  *
484  * Returns: QDF_STATUS
485  */
486 static QDF_STATUS
487 dp_rx_defrag_ccmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
488 {
489 	uint8_t *ivp, *orig_hdr;
490 	int rx_desc_len = soc->rx_pkt_tlv_size;
491 
492 	/* start of the 802.11 header */
493 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
494 
495 	/* CCMP header is located after 802.11 header */
496 	ivp = orig_hdr + hdrlen;
497 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
498 		return QDF_STATUS_E_DEFRAG_ERROR;
499 
500 	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);
501 
502 	return QDF_STATUS_SUCCESS;
503 }
504 
505 /*
506  * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment
507  * @nbuf: Pointer to the fragment
508  * @hdrlen: length of the header information
509  *
510  * decap CCMP encrypted fragment
511  *
512  * Returns: QDF_STATUS
513  */
514 static QDF_STATUS
515 dp_rx_defrag_ccmp_decap(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
516 {
517 	uint8_t *ivp, *origHdr;
518 	int rx_desc_len = soc->rx_pkt_tlv_size;
519 
520 	origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
521 	ivp = origHdr + hdrlen;
522 
523 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
524 		return QDF_STATUS_E_DEFRAG_ERROR;
525 
526 	return QDF_STATUS_SUCCESS;
527 }
528 
529 /*
530  * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment
531  * @msdu: Pointer to the fragment
532  * @hdrlen: length of the header information
533  *
534  * decap WEP encrypted fragment
535  *
536  * Returns: QDF_STATUS
537  */
538 static QDF_STATUS
539 dp_rx_defrag_wep_decap(struct dp_soc *soc, qdf_nbuf_t msdu, uint16_t hdrlen)
540 {
541 	uint8_t *origHdr;
542 	int rx_desc_len = soc->rx_pkt_tlv_size;
543 
544 	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
545 	qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);
546 
547 	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);
548 
549 	return QDF_STATUS_SUCCESS;
550 }
551 
552 /*
553  * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment
554  * @soc: soc handle
555  * @nbuf: Pointer to the fragment
556  *
557  * Calculate the header size of the received fragment
558  *
559  * Returns: header size (uint16_t)
560  */
561 static uint16_t dp_rx_defrag_hdrsize(struct dp_soc *soc, qdf_nbuf_t nbuf)
562 {
563 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
564 	uint16_t size = sizeof(struct ieee80211_frame);
565 	uint16_t fc = 0;
566 	uint32_t to_ds, fr_ds;
567 	uint8_t frm_ctrl_valid;
568 	uint16_t frm_ctrl_field;
569 
570 	to_ds = hal_rx_mpdu_get_to_ds(soc->hal_soc, rx_tlv_hdr);
571 	fr_ds = hal_rx_mpdu_get_fr_ds(soc->hal_soc, rx_tlv_hdr);
572 	frm_ctrl_valid =
573 		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
574 						    rx_tlv_hdr);
575 	frm_ctrl_field = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_tlv_hdr);
576 
577 	if (to_ds && fr_ds)
578 		size += QDF_MAC_ADDR_SIZE;
579 
580 	if (frm_ctrl_valid) {
581 		fc = frm_ctrl_field;
582 
583 		/* use the first byte for validation */
584 		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
585 			size += sizeof(uint16_t);
586 			/* use the second byte for validation */
587 			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
588 				size += sizeof(struct ieee80211_htc);
589 		}
590 	}
591 
592 	return size;
593 }
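
/*
 * Illustrative sketch, not part of the driver: the size computed above
 * follows the 802.11 frame layout directly - 24 bytes of base header
 * (frame control, duration, three addresses, sequence control), +6 for
 * the fourth address when both to_ds and fr_ds are set, +2 for the QoS
 * control field, and +4 for the HT control field when the Order bit is
 * set on a QoS frame. The same arithmetic, standalone:
 */
#include <stdint.h>

static uint16_t sketch_80211_hdrsize(int to_ds, int fr_ds,
				     int is_qos, int has_order_bit)
{
	uint16_t size = 24;		/* fc + dur + 3 addrs + seq ctrl */

	if (to_ds && fr_ds)
		size += 6;		/* addr4 (WDS) */
	if (is_qos) {
		size += 2;		/* QoS control */
		if (has_order_bit)
			size += 4;	/* HT control (+HTC) */
	}
	return size;
}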
594 
595 /*
596  * dp_rx_defrag_michdr(): Calculate a pseudo MIC header
597  * @wh0: Pointer to the wireless header of the fragment
598  * @hdr: Array to hold the pseudo header
599  *
600  * Calculate a pseudo MIC header
601  *
602  * Returns: None
603  */
604 static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
605 				uint8_t hdr[])
606 {
607 	const struct ieee80211_frame_addr4 *wh =
608 		(const struct ieee80211_frame_addr4 *)wh0;
609 
610 	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
611 	case IEEE80211_FC1_DIR_NODS:
612 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
613 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
614 					   wh->i_addr2);
615 		break;
616 	case IEEE80211_FC1_DIR_TODS:
617 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
618 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
619 					   wh->i_addr2);
620 		break;
621 	case IEEE80211_FC1_DIR_FROMDS:
622 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
623 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
624 					   wh->i_addr3);
625 		break;
626 	case IEEE80211_FC1_DIR_DSTODS:
627 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
628 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
629 					   wh->i_addr4);
630 		break;
631 	}
632 
633 	/*
634 	 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for data frame, but
635 	 * it could also be set for deauth, disassoc, action, etc. for
636 	 * a mgt type frame. It comes into picture for MFP.
637 	 */
638 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
639 		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
640 				IEEE80211_FC1_DIR_DSTODS) {
641 			const struct ieee80211_qosframe_addr4 *qwh =
642 				(const struct ieee80211_qosframe_addr4 *)wh;
643 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
644 		} else {
645 			const struct ieee80211_qosframe *qwh =
646 				(const struct ieee80211_qosframe *)wh;
647 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
648 		}
649 	} else {
650 		hdr[12] = 0;
651 	}
652 
653 	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
654 }
655 
656 /*
657  * dp_rx_defrag_mic(): Calculate the Michael MIC
658  * @key: Pointer to the key
659  * @wbuf: fragment buffer
660  * @off: Offset
661  * @data_len: Data length
662  * @mic: Array to hold MIC
663  *
664  * Calculates the Michael MIC over the pseudo header and payload
665  *
666  * Returns: QDF_STATUS
667  */
668 static QDF_STATUS dp_rx_defrag_mic(struct dp_soc *soc, const uint8_t *key,
669 				   qdf_nbuf_t wbuf, uint16_t off,
670 				   uint16_t data_len, uint8_t mic[])
671 {
672 	uint8_t hdr[16] = { 0, };
673 	uint32_t l, r;
674 	const uint8_t *data;
675 	uint32_t space;
676 	int rx_desc_len = soc->rx_pkt_tlv_size;
677 
678 	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
679 		+ rx_desc_len), hdr);
680 
681 	l = dp_rx_get_le32(key);
682 	r = dp_rx_get_le32(key + 4);
683 
684 	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
685 	l ^= dp_rx_get_le32(hdr);
686 	dp_rx_michael_block(l, r);
687 	l ^= dp_rx_get_le32(&hdr[4]);
688 	dp_rx_michael_block(l, r);
689 	l ^= dp_rx_get_le32(&hdr[8]);
690 	dp_rx_michael_block(l, r);
691 	l ^= dp_rx_get_le32(&hdr[12]);
692 	dp_rx_michael_block(l, r);
693 
694 	/* first buffer has special handling */
695 	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
696 	space = qdf_nbuf_len(wbuf) - off;
697 
698 	for (;; ) {
699 		if (space > data_len)
700 			space = data_len;
701 
702 		/* collect 32-bit blocks from current buffer */
703 		while (space >= sizeof(uint32_t)) {
704 			l ^= dp_rx_get_le32(data);
705 			dp_rx_michael_block(l, r);
706 			data += sizeof(uint32_t);
707 			space -= sizeof(uint32_t);
708 			data_len -= sizeof(uint32_t);
709 		}
710 		if (data_len < sizeof(uint32_t))
711 			break;
712 
713 		wbuf = qdf_nbuf_next(wbuf);
714 		if (!wbuf)
715 			return QDF_STATUS_E_DEFRAG_ERROR;
716 
717 		if (space != 0) {
718 			const uint8_t *data_next;
719 			/*
720 			 * Block straddles buffers, split references.
721 			 */
722 			data_next =
723 				(uint8_t *)qdf_nbuf_data(wbuf) + off;
724 			if ((qdf_nbuf_len(wbuf)) <
725 				sizeof(uint32_t) - space) {
726 				return QDF_STATUS_E_DEFRAG_ERROR;
727 			}
728 			switch (space) {
729 			case 1:
730 				l ^= dp_rx_get_le32_split(data[0],
731 					data_next[0], data_next[1],
732 					data_next[2]);
733 				data = data_next + 3;
734 				space = (qdf_nbuf_len(wbuf) - off) - 3;
735 				break;
736 			case 2:
737 				l ^= dp_rx_get_le32_split(data[0], data[1],
738 						    data_next[0], data_next[1]);
739 				data = data_next + 2;
740 				space = (qdf_nbuf_len(wbuf) - off) - 2;
741 				break;
742 			case 3:
743 				l ^= dp_rx_get_le32_split(data[0], data[1],
744 					data[2], data_next[0]);
745 				data = data_next + 1;
746 				space = (qdf_nbuf_len(wbuf) - off) - 1;
747 				break;
748 			}
749 			dp_rx_michael_block(l, r);
750 			data_len -= sizeof(uint32_t);
751 		} else {
752 			/*
753 			 * Setup for next buffer.
754 			 */
755 			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
756 			space = qdf_nbuf_len(wbuf) - off;
757 		}
758 	}
759 	/* Last block and padding (0x5a, 4..7 x 0) */
760 	switch (data_len) {
761 	case 0:
762 		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
763 		break;
764 	case 1:
765 		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
766 		break;
767 	case 2:
768 		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
769 		break;
770 	case 3:
771 		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
772 		break;
773 	}
774 	dp_rx_michael_block(l, r);
775 	dp_rx_michael_block(l, r);
776 	dp_rx_put_le32(mic, l);
777 	dp_rx_put_le32(mic + 4, r);
778 
779 	return QDF_STATUS_SUCCESS;
780 }
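
/*
 * Illustrative sketch, not part of the driver: a standalone reference
 * version of the Michael MIC computed above, for a contiguous buffer
 * (the driver version walks an nbuf chain and handles 32-bit blocks
 * that straddle buffers). It follows the published Michael algorithm
 * from IEEE 802.11i; michael_block() mirrors what dp_rx_michael_block()
 * is expected to do, and hdr is the 16-byte pseudo header built by
 * dp_rx_defrag_michdr().
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void michael_block(uint32_t *l, uint32_t *r)
{
	*r ^= rotl32(*l, 17);			*l += *r;
	*r ^= ((*l & 0xff00ff00) >> 8) |
	      ((*l & 0x00ff00ff) << 8);		*l += *r;	/* XSWAP */
	*r ^= rotl32(*l, 3);			*l += *r;
	*r ^= rotl32(*l, 30);			*l += *r;	/* rotr 2 */
}

static void michael_mic(const uint8_t key[8], const uint8_t hdr[16],
			const uint8_t *data, size_t len, uint8_t mic[8])
{
	uint32_t l = get_le32(key), r = get_le32(key + 4);
	uint8_t last[4] = { 0, 0, 0, 0 };
	size_t i;

	for (i = 0; i < 16; i += 4) {		/* pseudo header first */
		l ^= get_le32(hdr + i);
		michael_block(&l, &r);
	}
	while (len >= 4) {			/* payload, 32-bit LE blocks */
		l ^= get_le32(data);
		michael_block(&l, &r);
		data += 4;
		len -= 4;
	}
	for (i = 0; i < len; i++)		/* final block: leftovers,  */
		last[i] = data[i];		/* then 0x5a, zero padded   */
	last[len] = 0x5a;
	l ^= get_le32(last);
	michael_block(&l, &r);
	michael_block(&l, &r);			/* implicit all-zero block */
	for (i = 0; i < 4; i++) {
		mic[i] = (l >> (8 * i)) & 0xff;
		mic[4 + i] = (r >> (8 * i)) & 0xff;
	}
}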
781 
782 /*
783  * dp_rx_defrag_tkip_demic(): Remove and verify the MIC of a TKIP frame
784  * @key: Pointer to the key
785  * @msdu: fragment buffer
786  * @hdrlen: Length of the header information
787  *
788  * Strips the MIC from the frame and verifies it against the computed MIC
789  *
790  * Returns: QDF_STATUS
791  */
792 static QDF_STATUS dp_rx_defrag_tkip_demic(struct dp_soc *soc,
793 					  const uint8_t *key,
794 					  qdf_nbuf_t msdu, uint16_t hdrlen)
795 {
796 	QDF_STATUS status;
797 	uint32_t pktlen = 0, prev_data_len;
798 	uint8_t mic[IEEE80211_WEP_MICLEN];
799 	uint8_t mic0[IEEE80211_WEP_MICLEN];
800 	qdf_nbuf_t prev = NULL, prev0, next;
801 	uint8_t len0 = 0;
802 
803 	next = msdu;
804 	prev0 = msdu;
805 	while (next) {
806 		pktlen += (qdf_nbuf_len(next) - hdrlen);
807 		prev = next;
808 		dp_debug("pktlen %u",
809 			 (uint32_t)(qdf_nbuf_len(next) - hdrlen));
810 		next = qdf_nbuf_next(next);
811 		if (next && !qdf_nbuf_next(next))
812 			prev0 = prev;
813 	}
814 
815 	if (!prev) {
816 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
817 			  "%s Defrag chaining failed !\n", __func__);
818 		return QDF_STATUS_E_DEFRAG_ERROR;
819 	}
820 
821 	prev_data_len = qdf_nbuf_len(prev) - hdrlen;
822 	if (prev_data_len < dp_f_tkip.ic_miclen) {
823 		if (prev0 == prev) {
824 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
825 				  "%s Fragments don't have MIC header !\n", __func__);
826 			return QDF_STATUS_E_DEFRAG_ERROR;
827 		}
828 		len0 = dp_f_tkip.ic_miclen - (uint8_t)prev_data_len;
829 		qdf_nbuf_copy_bits(prev0, qdf_nbuf_len(prev0) - len0, len0,
830 				   (caddr_t)mic0);
831 		qdf_nbuf_trim_tail(prev0, len0);
832 	}
833 
834 	qdf_nbuf_copy_bits(prev, (qdf_nbuf_len(prev) -
835 			   (dp_f_tkip.ic_miclen - len0)),
836 			   (dp_f_tkip.ic_miclen - len0),
837 			   (caddr_t)(&mic0[len0]));
838 	qdf_nbuf_trim_tail(prev, (dp_f_tkip.ic_miclen - len0));
839 	pktlen -= dp_f_tkip.ic_miclen;
840 
841 	if (((qdf_nbuf_len(prev) - hdrlen) == 0) && prev != msdu) {
842 		dp_rx_nbuf_free(prev);
843 		qdf_nbuf_set_next(prev0, NULL);
844 	}
845 
846 	status = dp_rx_defrag_mic(soc, key, msdu, hdrlen,
847 				  pktlen, mic);
848 
849 	if (QDF_IS_STATUS_ERROR(status))
850 		return status;
851 
852 	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
853 		return QDF_STATUS_E_DEFRAG_ERROR;
854 
855 	return QDF_STATUS_SUCCESS;
856 }
857 
858 /*
859  * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers
860  * @nbuf: buffer pointer
861  * @hdrsize: size of the header to be pulled
862  *
863  * Pull the RXTLV & the 802.11 headers
864  *
865  * Returns: None
866  */
867 static void dp_rx_frag_pull_hdr(struct dp_soc *soc,
868 				qdf_nbuf_t nbuf, uint16_t hdrsize)
869 {
870 	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));
871 
872 	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + hdrsize);
873 
874 	dp_debug("final pktlen %d .11len %d",
875 		 (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
876 }
877 
878 /*
879  * dp_rx_defrag_pn_check(): Check the current fragment's PN against the previous PN
880  * @msdu: msdu to get the current PN from
881  * @cur_pn128: PN extracted from the current msdu
882  * @prev_pn128: PN of the previous fragment
883  *
884  * Returns: 0 if the PNs are consecutive, non-zero if out of order
885  */
886 static int dp_rx_defrag_pn_check(struct dp_soc *soc, qdf_nbuf_t msdu,
887 				 uint64_t *cur_pn128, uint64_t *prev_pn128)
888 {
889 	int out_of_order = 0;
890 
891 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(msdu), cur_pn128);
892 
893 	if (cur_pn128[1] == prev_pn128[1])
894 		out_of_order = (cur_pn128[0] - prev_pn128[0] != 1);
895 	else
896 		out_of_order = (cur_pn128[1] - prev_pn128[1] != 1);
897 
898 	return out_of_order;
899 }
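
/*
 * Illustrative sketch, not part of the driver: the 128-bit PN is held
 * as two 64-bit words, pn[0] (low) and pn[1] (high). Fragments of one
 * MPDU must carry strictly consecutive PNs, so either the high words
 * match and the low words differ by exactly 1, or the low word just
 * wrapped and the high words differ by exactly 1. For example:
 *
 *   prev = {5, 0},                  cur = {6, 0} -> in order
 *   prev = {0xffffffffffffffff, 0}, cur = {0, 1} -> in order (wrap)
 *   prev = {5, 0},                  cur = {8, 0} -> out of order, dropped
 *
 * The same check with the polarity inverted (returns 1 when in order):
 */
#include <stdint.h>

static int pn_in_order(const uint64_t cur[2], const uint64_t prev[2])
{
	if (cur[1] == prev[1])
		return (cur[0] - prev[0]) == 1;	/* same high word */
	return (cur[1] - prev[1]) == 1;		/* low word wrapped */
}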
900 
901 /*
902  * dp_rx_construct_fraglist(): Construct a nbuf fraglist
903  * @txrx_peer: Pointer to the txrx peer
904  * @head: Pointer to list of fragments
905  * @hdrsize: Size of the header to be pulled
906  *
907  * Construct a nbuf fraglist
908  *
909  * Returns: None
910  */
911 static int
912 dp_rx_construct_fraglist(struct dp_txrx_peer *txrx_peer, int tid,
913 			 qdf_nbuf_t head,
914 			 uint16_t hdrsize)
915 {
916 	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
917 	qdf_nbuf_t msdu = qdf_nbuf_next(head);
918 	qdf_nbuf_t rx_nbuf = msdu;
919 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
920 	uint32_t len = 0;
921 	uint64_t cur_pn128[2] = {0, 0}, prev_pn128[2];
922 	int out_of_order = 0;
923 	int index;
924 	int needs_pn_check = 0;
925 	enum cdp_sec_type sec_type;
926 
927 	prev_pn128[0] = rx_tid->pn128[0];
928 	prev_pn128[1] = rx_tid->pn128[1];
929 
930 	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu) ? dp_sec_mcast :
931 				dp_sec_ucast;
932 	sec_type = txrx_peer->security[index].sec_type;
933 
934 	if (!(sec_type == cdp_sec_type_none || sec_type == cdp_sec_type_wep128 ||
935 	      sec_type == cdp_sec_type_wep104 || sec_type == cdp_sec_type_wep40))
936 		needs_pn_check = 1;
937 
938 	while (msdu) {
939 		if (qdf_likely(needs_pn_check))
940 			out_of_order = dp_rx_defrag_pn_check(soc, msdu,
941 							     &cur_pn128[0],
942 							     &prev_pn128[0]);
943 
944 		if (qdf_unlikely(out_of_order)) {
945 			dp_info_rl("cur_pn128[0] 0x%llx cur_pn128[1] 0x%llx prev_pn128[0] 0x%llx prev_pn128[1] 0x%llx",
946 				   cur_pn128[0], cur_pn128[1],
947 				   prev_pn128[0], prev_pn128[1]);
948 			return QDF_STATUS_E_FAILURE;
949 		}
950 
951 		prev_pn128[0] = cur_pn128[0];
952 		prev_pn128[1] = cur_pn128[1];
953 
954 		/*
955 		 * Broadcast and multicast frames should never be fragmented.
956 		 * Iterate through all msdus and drop the whole chain if even
957 		 * one of them has a mcast/bcast destination address.
958 		 */
959 		if (hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu)) {
960 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
961 				  "Dropping multicast/broadcast fragments");
962 			return QDF_STATUS_E_FAILURE;
963 		}
964 
965 		dp_rx_frag_pull_hdr(soc, msdu, hdrsize);
966 		len += qdf_nbuf_len(msdu);
967 		msdu = qdf_nbuf_next(msdu);
968 	}
969 
970 	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
971 	qdf_nbuf_set_next(head, NULL);
972 	qdf_nbuf_set_is_frag(head, 1);
973 
974 	dp_debug("head len %d ext len %d data len %d ",
975 		 (uint32_t)qdf_nbuf_len(head),
976 		 (uint32_t)qdf_nbuf_len(rx_nbuf),
977 		 (uint32_t)(head->data_len));
978 
979 	return QDF_STATUS_SUCCESS;
980 }
981 
982 /**
983  * dp_rx_defrag_err() - rx err handler
984  * @vdev: handle to the vdev object on which the fragment was received
985  * @nbuf: rx fragment buffer carrying the 802.11 header
986  *
987  * Handles an rx MIC error by building a cdp_rx_mic_err_info record
988  * from the 802.11 header of the failed frame: the destination address
989  * is taken from addr1, the transmitter address from addr2, and the
990  * multicast flag from addr1. The TSC is zeroed and the key id is
991  * reported as 0.
992  *
993  * The record is then delivered to the control path through the
994  * ol_ops->rx_mic_error callback so a MIC failure event can be raised
995  *
996  * Return: None
997  */
998 static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
999 {
1000 	struct ol_if_ops *tops = NULL;
1001 	struct dp_pdev *pdev = vdev->pdev;
1002 	int rx_desc_len = pdev->soc->rx_pkt_tlv_size;
1003 	uint8_t *orig_hdr;
1004 	struct ieee80211_frame *wh;
1005 	struct cdp_rx_mic_err_info mic_failure_info;
1006 
1007 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
1008 	wh = (struct ieee80211_frame *)orig_hdr;
1009 
1010 	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
1011 			 (struct qdf_mac_addr *)&wh->i_addr1);
1012 	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
1013 			 (struct qdf_mac_addr *)&wh->i_addr2);
1014 	mic_failure_info.key_id = 0;
1015 	mic_failure_info.multicast =
1016 		IEEE80211_IS_MULTICAST(wh->i_addr1);
1017 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1018 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1019 	mic_failure_info.data = (uint8_t *)wh;
1020 	mic_failure_info.vdev_id = vdev->vdev_id;
1021 
1022 	tops = pdev->soc->cdp_soc.ol_ops;
1023 	if (tops->rx_mic_error)
1024 		tops->rx_mic_error(pdev->soc->ctrl_psoc, pdev->pdev_id,
1025 				   &mic_failure_info);
1026 }
1027 
1028 
1029 /*
1030  * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
1031  * @soc: dp soc handle
1032  * @txrx_peer: txrx_peer handle
1033  * @nbuf: Pointer to the fragment buffer
1034  * @hdrsize: Size of headers
1035  *
1036  * Transcap the fragment from 802.11 to 802.3
1037  *
1038  * Returns: None
1039  */
1040 static void
1041 dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
1042 			   int tid, qdf_nbuf_t nbuf, uint16_t hdrsize)
1043 {
1044 	struct llc_snap_hdr_t *llchdr;
1045 	struct ethernet_hdr_t *eth_hdr;
1046 	uint8_t ether_type[2];
1047 	uint16_t fc = 0;
1048 	union dp_align_mac_addr mac_addr;
1049 	uint8_t *rx_desc_info = qdf_mem_malloc(soc->rx_pkt_tlv_size);
1050 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
1051 	struct ieee80211_frame_addr4 wh = {0};
1052 
1053 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), rx_tid->pn128);
1054 
1055 	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));
1056 
1057 	if (!rx_desc_info) {
1058 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1059 			"%s: Memory alloc failed ! ", __func__);
1060 		QDF_ASSERT(0);
1061 		return;
1062 	}
1063 
1064 	qdf_mem_zero(&wh, sizeof(struct ieee80211_frame_addr4));
1065 	if (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
1066 		qdf_mem_copy(&wh, qdf_nbuf_data(nbuf) + soc->rx_pkt_tlv_size,
1067 			     hdrsize);
1068 
1069 	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), soc->rx_pkt_tlv_size);
1070 
1071 	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
1072 					soc->rx_pkt_tlv_size + hdrsize);
1073 	qdf_mem_copy(ether_type, llchdr->ethertype, 2);
1074 
1075 	qdf_nbuf_pull_head(nbuf, (soc->rx_pkt_tlv_size + hdrsize +
1076 				  sizeof(struct llc_snap_hdr_t) -
1077 				  sizeof(struct ethernet_hdr_t)));
1078 
1079 	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));
1080 
1081 	if (hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
1082 						rx_desc_info))
1083 		fc = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_desc_info);
1084 
1085 	dp_debug("Frame control type: 0x%x", fc);
1086 
1087 	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
1088 	case IEEE80211_FC1_DIR_NODS:
1089 		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
1090 				      &mac_addr.raw[0]);
1091 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1092 			QDF_MAC_ADDR_SIZE);
1093 		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
1094 				      &mac_addr.raw[0]);
1095 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
1096 			QDF_MAC_ADDR_SIZE);
1097 		break;
1098 	case IEEE80211_FC1_DIR_TODS:
1099 		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
1100 				      &mac_addr.raw[0]);
1101 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1102 			QDF_MAC_ADDR_SIZE);
1103 		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
1104 				      &mac_addr.raw[0]);
1105 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
1106 			QDF_MAC_ADDR_SIZE);
1107 		break;
1108 	case IEEE80211_FC1_DIR_FROMDS:
1109 		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
1110 				      &mac_addr.raw[0]);
1111 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1112 			QDF_MAC_ADDR_SIZE);
1113 		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
1114 				      &mac_addr.raw[0]);
1115 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
1116 			QDF_MAC_ADDR_SIZE);
1117 		break;
1118 
1119 	case IEEE80211_FC1_DIR_DSTODS:
1120 		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
1121 				      &mac_addr.raw[0]);
1122 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1123 			QDF_MAC_ADDR_SIZE);
1124 		qdf_mem_copy(eth_hdr->src_addr, &wh.i_addr4[0],
1125 			     QDF_MAC_ADDR_SIZE);
1126 		break;
1127 
1128 	default:
1129 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1130 		"%s: Unknown frame control type: 0x%x", __func__, fc);
1131 	}
1132 
1133 	qdf_mem_copy(eth_hdr->ethertype, ether_type,
1134 			sizeof(ether_type));
1135 
1136 	qdf_nbuf_push_head(nbuf, soc->rx_pkt_tlv_size);
1137 	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, soc->rx_pkt_tlv_size);
1138 	qdf_mem_free(rx_desc_info);
1139 }
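
/*
 * Illustrative sketch, not part of the driver: the DA/SA selection in
 * the switch above is the standard 802.11 address mapping, summarized
 * for reference:
 *
 *   To-DS  From-DS   DA      SA
 *     0      0       addr1   addr2   (IBSS / direct)
 *     0      1       addr1   addr3   (AP -> STA)
 *     1      0       addr3   addr2   (STA -> AP)
 *     1      1       addr3   addr4   (WDS)
 *
 * After picking DA/SA, the LLC/SNAP header is removed and its EtherType
 * becomes the 802.3 type field. A minimal sketch with hypothetical
 * types:
 */
#include <stdint.h>
#include <string.h>

struct eth_hdr_sketch {
	uint8_t dst[6];
	uint8_t src[6];
	uint8_t type[2];
};

static void wifi_to_8023(struct eth_hdr_sketch *eth, int to_ds, int fr_ds,
			 const uint8_t addr1[6], const uint8_t addr2[6],
			 const uint8_t addr3[6], const uint8_t addr4[6],
			 const uint8_t llc_ethertype[2])
{
	memcpy(eth->dst, to_ds ? addr3 : addr1, 6);	/* DA per table */

	if (to_ds && fr_ds)
		memcpy(eth->src, addr4, 6);		/* WDS */
	else if (fr_ds)
		memcpy(eth->src, addr3, 6);		/* AP -> STA */
	else
		memcpy(eth->src, addr2, 6);		/* STA -> AP, IBSS */

	memcpy(eth->type, llc_ethertype, 2);	/* EtherType from LLC/SNAP */
}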
1140 
1141 #ifdef RX_DEFRAG_DO_NOT_REINJECT
1142 /*
1143  * dp_rx_defrag_deliver(): Deliver defrag packet to stack
1144  * @txrx_peer: Pointer to the txrx peer
1145  * @tid: Traffic Identifier (TID)
1146  * @head: Nbuf to be delivered
1147  *
1148  * Returns: None
1149  */
1150 static inline void dp_rx_defrag_deliver(struct dp_txrx_peer *txrx_peer,
1151 					unsigned int tid,
1152 					qdf_nbuf_t head)
1153 {
1154 	struct dp_vdev *vdev = txrx_peer->vdev;
1155 	struct dp_soc *soc = vdev->pdev->soc;
1156 	qdf_nbuf_t deliver_list_head = NULL;
1157 	qdf_nbuf_t deliver_list_tail = NULL;
1158 	uint8_t *rx_tlv_hdr;
1159 
1160 	rx_tlv_hdr = qdf_nbuf_data(head);
1161 
1162 	QDF_NBUF_CB_RX_VDEV_ID(head) = vdev->vdev_id;
1163 	qdf_nbuf_set_tid_val(head, tid);
1164 	qdf_nbuf_pull_head(head, soc->rx_pkt_tlv_size);
1165 
1166 	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail,
1167 			  head);
1168 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, deliver_list_head,
1169 			       deliver_list_tail);
1170 }
1171 
1172 /*
1173  * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
1174  * @txrx_peer: Pointer to the txrx peer
1175  * @tid: Traffic Identifier (TID)
1176  * @head: Buffer to be reinjected back
1177  *
1178  * Reinject the fragment chain back into REO
1179  *
1180  * Returns: QDF_STATUS
1181  */
1182 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
1183 					    unsigned int tid, qdf_nbuf_t head)
1184 {
1185 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1186 
1187 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1188 
1189 	dp_rx_defrag_deliver(txrx_peer, tid, head);
1190 	rx_reorder_array_elem->head = NULL;
1191 	rx_reorder_array_elem->tail = NULL;
1192 	dp_rx_return_head_frag_desc(txrx_peer, tid);
1193 
1194 	return QDF_STATUS_SUCCESS;
1195 }
1196 #else
1197 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1198 /**
1199  * dp_rx_reinject_ring_record_entry() - Record reinject ring history
1200  * @soc: Datapath soc structure
1201  * @paddr: paddr of the buffer reinjected to SW2REO ring
1202  * @sw_cookie: SW cookie of the buffer reinjected to SW2REO ring
1203  * @rbm: Return buffer manager of the buffer reinjected to SW2REO ring
1204  *
1205  * Returns: None
1206  */
1207 static inline void
1208 dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1209 				 uint32_t sw_cookie, uint8_t rbm)
1210 {
1211 	struct dp_buf_info_record *record;
1212 	uint32_t idx;
1213 
1214 	if (qdf_unlikely(!soc->rx_reinject_ring_history))
1215 		return;
1216 
1217 	idx = dp_history_get_next_index(&soc->rx_reinject_ring_history->index,
1218 					DP_RX_REINJECT_HIST_MAX);
1219 
1220 	/* No NULL check needed for record since it's an array */
1221 	record = &soc->rx_reinject_ring_history->entry[idx];
1222 
1223 	record->timestamp = qdf_get_log_timestamp();
1224 	record->hbi.paddr = paddr;
1225 	record->hbi.sw_cookie = sw_cookie;
1226 	record->hbi.rbm = rbm;
1227 }
1228 #else
1229 static inline void
1230 dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1231 				 uint32_t sw_cookie, uint8_t rbm)
1232 {
1233 }
1234 #endif
1235 
1236 /*
1237  * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
1238  * @txrx_peer: Pointer to the txrx_peer
1239  * @tid: Traffic Identifier (TID)
1240  * @head: Buffer to be reinjected back
1241  *
1242  * Reinject the fragment chain back into REO
1243  *
1244  * Returns: QDF_STATUS
1245  */
1246 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
1247 					    unsigned int tid, qdf_nbuf_t head)
1248 {
1249 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
1250 	struct dp_soc *soc = pdev->soc;
1251 	struct hal_buf_info buf_info;
1252 	struct hal_buf_info temp_buf_info;
1253 	void *link_desc_va;
1254 	void *msdu0, *msdu_desc_info;
1255 	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
1256 	void *dst_mpdu_desc_info;
1257 	uint64_t dst_qdesc_addr;
1258 	qdf_dma_addr_t paddr;
1259 	uint32_t nbuf_len, seq_no, dst_ind;
1260 	uint32_t ret, cookie;
1261 	hal_ring_desc_t dst_ring_desc =
1262 		txrx_peer->rx_tid[tid].dst_ring_desc;
1263 	hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng;
1264 	struct dp_rx_desc *rx_desc = txrx_peer->rx_tid[tid].head_frag_desc;
1265 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1266 						txrx_peer->rx_tid[tid].array;
1267 	qdf_nbuf_t nbuf_head;
1268 	struct rx_desc_pool *rx_desc_pool = NULL;
1269 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(dst_ring_desc);
1270 	uint8_t rx_defrag_rbm_id = dp_rx_get_defrag_bm_id(soc);
1271 
1272 	/* do duplicate link desc address check */
1273 	dp_rx_link_desc_refill_duplicate_check(
1274 				soc,
1275 				&soc->last_op_info.reo_reinject_link_desc,
1276 				buf_addr_info);
1277 
1278 	nbuf_head = dp_ipa_handle_rx_reo_reinject(soc, head);
1279 	if (qdf_unlikely(!nbuf_head)) {
1280 		dp_err_rl("IPA RX REO reinject failed");
1281 		return QDF_STATUS_E_FAILURE;
1282 	}
1283 
1284 	/* update new allocated skb in case IPA is enabled */
1285 	if (nbuf_head != head) {
1286 		head = nbuf_head;
1287 		rx_desc->nbuf = head;
1288 		rx_reorder_array_elem->head = head;
1289 	}
1290 
1291 	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
1292 	if (!ent_ring_desc) {
1293 		dp_err_rl("HAL src ring next entry NULL");
1294 		return QDF_STATUS_E_FAILURE;
1295 	}
1296 
1297 	hal_rx_reo_buf_paddr_get(soc->hal_soc, dst_ring_desc, &buf_info);
1298 
1299 	/* buffer_addr_info is the first element of ring_desc */
1300 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)dst_ring_desc,
1301 				  &buf_info);
1302 
1303 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1304 
1305 	qdf_assert_always(link_desc_va);
1306 
1307 	msdu0 = hal_rx_msdu0_buffer_addr_lsb(soc->hal_soc, link_desc_va);
1308 	nbuf_len = qdf_nbuf_len(head) - soc->rx_pkt_tlv_size;
1309 
1310 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
1311 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
1312 			UNI_DESC_BUF_TYPE_RX_MSDU_LINK);
1313 
1314 	/* msdu reconfig */
1315 	msdu_desc_info = hal_rx_msdu_desc_info_ptr_get(soc->hal_soc, msdu0);
1316 
1317 	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);
1318 
1319 	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));
1320 
1321 	hal_msdu_desc_info_set(soc->hal_soc, msdu_desc_info, dst_ind, nbuf_len);
1322 
1323 	/* change RX TLV's */
1324 	hal_rx_tlv_msdu_len_set(soc->hal_soc, qdf_nbuf_data(head), nbuf_len);
1325 
1326 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)msdu0,
1327 				  &temp_buf_info);
1328 
1329 	cookie = temp_buf_info.sw_cookie;
1330 	rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
1331 
1332 	/* map the nbuf before reinject it into HW */
1333 	ret = qdf_nbuf_map_nbytes_single(soc->osdev, head,
1334 					 QDF_DMA_FROM_DEVICE,
1335 					 rx_desc_pool->buf_size);
1336 	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1337 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1338 				"%s: nbuf map failed !", __func__);
1339 		return QDF_STATUS_E_FAILURE;
1340 	}
1341 
1342 	dp_ipa_handle_rx_buf_smmu_mapping(soc, head, rx_desc_pool->buf_size,
1343 					  true, __func__, __LINE__);
1344 
1345 	dp_audio_smmu_map(soc->osdev,
1346 			  qdf_mem_paddr_from_dmaaddr(soc->osdev,
1347 						     QDF_NBUF_CB_PADDR(head)),
1348 			  QDF_NBUF_CB_PADDR(head), rx_desc_pool->buf_size);
1349 
1350 	/*
1351 	 * As part of rx frag handler buffer was unmapped and rx desc
1352 	 * unmapped is set to 1. So again for defrag reinject frame reset
1353 	 * it back to 0.
1354 	 */
1355 	rx_desc->unmapped = 0;
1356 
1357 	paddr = qdf_nbuf_get_frag_paddr(head, 0);
1358 
1359 	ret = dp_check_paddr(soc, &head, &paddr, rx_desc_pool);
1360 
1361 	if (ret == QDF_STATUS_E_FAILURE) {
1362 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1363 				"%s: x86 check failed !", __func__);
1364 		return QDF_STATUS_E_FAILURE;
1365 	}
1366 
1367 	hal_rxdma_buff_addr_info_set(soc->hal_soc, msdu0, paddr, cookie,
1368 				     rx_defrag_rbm_id);
1369 
1370 	/* Let's fill the entrance ring now */
1371 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1372 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1373 		"HAL RING Access For REO entrance SRNG Failed: %pK",
1374 		hal_srng);
1375 
1376 		return QDF_STATUS_E_FAILURE;
1377 	}
1378 
1379 	dp_rx_reinject_ring_record_entry(soc, paddr, cookie,
1380 					 rx_defrag_rbm_id);
1381 	paddr = (uint64_t)buf_info.paddr;
1382 	/* buf addr */
1383 	hal_rxdma_buff_addr_info_set(soc->hal_soc, ent_ring_desc, paddr,
1384 				     buf_info.sw_cookie,
1385 				     soc->idle_link_bm_id);
1386 	/* mpdu desc info */
1387 	ent_mpdu_desc_info = hal_ent_mpdu_desc_info(soc->hal_soc,
1388 						    ent_ring_desc);
1389 	dst_mpdu_desc_info = hal_dst_mpdu_desc_info(soc->hal_soc,
1390 						    dst_ring_desc);
1391 
1392 	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
1393 				sizeof(struct rx_mpdu_desc_info));
1394 	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));
1395 
1396 	seq_no = hal_rx_get_rx_sequence(soc->hal_soc, rx_desc->rx_buf_start);
1397 
1398 	hal_mpdu_desc_info_set(soc->hal_soc, ent_ring_desc, ent_mpdu_desc_info,
1399 			       seq_no);
1400 	/* qdesc addr */
1401 	ent_qdesc_addr = hal_get_reo_ent_desc_qdesc_addr(soc->hal_soc,
1402 						(uint8_t *)ent_ring_desc);
1403 
1404 	dst_qdesc_addr = soc->arch_ops.get_reo_qdesc_addr(
1405 						soc->hal_soc,
1406 						(uint8_t *)dst_ring_desc,
1407 						qdf_nbuf_data(head),
1408 						txrx_peer, tid);
1409 
1410 	qdf_mem_copy(ent_qdesc_addr, &dst_qdesc_addr, 5);
1411 
1412 	hal_set_reo_ent_desc_reo_dest_ind(soc->hal_soc,
1413 					  (uint8_t *)ent_ring_desc, dst_ind);
1414 
1415 	hal_srng_access_end(soc->hal_soc, hal_srng);
1416 
1417 	DP_STATS_INC(soc, rx.reo_reinject, 1);
1418 	dp_debug("reinjection done !");
1419 	return QDF_STATUS_SUCCESS;
1420 }
1421 #endif
1422 
1423 /*
1424  * dp_rx_defrag_gcmp_demic(): Remove MIC information from GCMP fragment
1425  * @soc: Datapath soc structure
1426  * @nbuf: Pointer to the fragment buffer
1427  * @hdrlen: 802.11 header length
1428  *
1429  * Remove MIC information from GCMP fragment
1430  *
1431  * Returns: QDF_STATUS
1432  */
1433 static QDF_STATUS dp_rx_defrag_gcmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf,
1434 					  uint16_t hdrlen)
1435 {
1436 	uint8_t *ivp, *orig_hdr;
1437 	int rx_desc_len = soc->rx_pkt_tlv_size;
1438 
1439 	/* start of the 802.11 header */
1440 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
1441 
1442 	/*
1443 	 * GCMP header is located after 802.11 header and EXTIV
1444 	 * field should always be set to 1 for GCMP protocol.
1445 	 */
1446 	ivp = orig_hdr + hdrlen;
1447 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
1448 		return QDF_STATUS_E_DEFRAG_ERROR;
1449 
1450 	qdf_nbuf_trim_tail(nbuf, dp_f_gcmp.ic_trailer);
1451 
1452 	return QDF_STATUS_SUCCESS;
1453 }
1454 
1455 /*
1456  * dp_rx_defrag(): Defragment the fragment chain
1457  * @txrx_peer: Pointer to the peer
1458  * @tid: Traffic Identifier (TID)
1459  * @frag_list_head: Pointer to head list
1460  * @frag_list_tail: Pointer to tail list
1461  *
1462  * Defragment the fragment chain
1463  *
1464  * Returns: QDF_STATUS
1465  */
1466 static QDF_STATUS dp_rx_defrag(struct dp_txrx_peer *txrx_peer, unsigned int tid,
1467 			       qdf_nbuf_t frag_list_head,
1468 			       qdf_nbuf_t frag_list_tail)
1469 {
1470 	qdf_nbuf_t tmp_next;
1471 	qdf_nbuf_t cur = frag_list_head, msdu;
1472 	uint32_t index, tkip_demic = 0;
1473 	uint16_t hdr_space;
1474 	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
1475 	struct dp_vdev *vdev = txrx_peer->vdev;
1476 	struct dp_soc *soc = vdev->pdev->soc;
1477 	uint8_t status = 0;
1478 
1479 	if (!cur)
1480 		return QDF_STATUS_E_DEFRAG_ERROR;
1481 
1482 	hdr_space = dp_rx_defrag_hdrsize(soc, cur);
1483 	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, cur) ?
1484 		dp_sec_mcast : dp_sec_ucast;
1485 
1486 	/* Remove FCS from all fragments */
1487 	while (cur) {
1488 		tmp_next = qdf_nbuf_next(cur);
1489 		qdf_nbuf_set_next(cur, NULL);
1490 		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
1491 		qdf_nbuf_set_next(cur, tmp_next);
1492 		cur = tmp_next;
1493 	}
1494 	cur = frag_list_head;
1495 
1496 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1497 		  "%s: index %d Security type: %d", __func__,
1498 		  index, txrx_peer->security[index].sec_type);
1499 
1500 	switch (txrx_peer->security[index].sec_type) {
1501 	case cdp_sec_type_tkip:
1502 		tkip_demic = 1;
1503 		fallthrough;
1504 	case cdp_sec_type_tkip_nomic:
1505 		while (cur) {
1506 			tmp_next = qdf_nbuf_next(cur);
1507 			if (dp_rx_defrag_tkip_decap(soc, cur, hdr_space)) {
1508 
1509 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1510 					QDF_TRACE_LEVEL_ERROR,
1511 					"dp_rx_defrag: TKIP decap failed");
1512 
1513 				return QDF_STATUS_E_DEFRAG_ERROR;
1514 			}
1515 			cur = tmp_next;
1516 		}
1517 
1518 		/* If success, increment header to be stripped later */
1519 		hdr_space += dp_f_tkip.ic_header;
1520 		break;
1521 
1522 	case cdp_sec_type_aes_ccmp:
1523 		while (cur) {
1524 			tmp_next = qdf_nbuf_next(cur);
1525 			if (dp_rx_defrag_ccmp_demic(soc, cur, hdr_space)) {
1526 
1527 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1528 					QDF_TRACE_LEVEL_ERROR,
1529 					"dp_rx_defrag: CCMP demic failed");
1530 
1531 				return QDF_STATUS_E_DEFRAG_ERROR;
1532 			}
1533 			if (dp_rx_defrag_ccmp_decap(soc, cur, hdr_space)) {
1534 
1535 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1536 					QDF_TRACE_LEVEL_ERROR,
1537 					"dp_rx_defrag: CCMP decap failed");
1538 
1539 				return QDF_STATUS_E_DEFRAG_ERROR;
1540 			}
1541 			cur = tmp_next;
1542 		}
1543 
1544 		/* If success, increment header to be stripped later */
1545 		hdr_space += dp_f_ccmp.ic_header;
1546 		break;
1547 
1548 	case cdp_sec_type_wep40:
1549 	case cdp_sec_type_wep104:
1550 	case cdp_sec_type_wep128:
1551 		while (cur) {
1552 			tmp_next = qdf_nbuf_next(cur);
1553 			if (dp_rx_defrag_wep_decap(soc, cur, hdr_space)) {
1554 
1555 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1556 					QDF_TRACE_LEVEL_ERROR,
1557 					"dp_rx_defrag: WEP decap failed");
1558 
1559 				return QDF_STATUS_E_DEFRAG_ERROR;
1560 			}
1561 			cur = tmp_next;
1562 		}
1563 
1564 		/* If success, increment header to be stripped later */
1565 		hdr_space += dp_f_wep.ic_header;
1566 		break;
1567 	case cdp_sec_type_aes_gcmp:
1568 	case cdp_sec_type_aes_gcmp_256:
1569 		while (cur) {
1570 			tmp_next = qdf_nbuf_next(cur);
1571 			if (dp_rx_defrag_gcmp_demic(soc, cur, hdr_space)) {
1572 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1573 					  QDF_TRACE_LEVEL_ERROR,
1574 					  "dp_rx_defrag: GCMP demic failed");
1575 
1576 				return QDF_STATUS_E_DEFRAG_ERROR;
1577 			}
1578 			cur = tmp_next;
1579 		}
1580 
1581 		hdr_space += dp_f_gcmp.ic_header;
1582 		break;
1583 	default:
1584 		break;
1585 	}
1586 
1587 	if (tkip_demic) {
1588 		msdu = frag_list_head;
1589 		qdf_mem_copy(key,
1590 			     &txrx_peer->security[index].michael_key[0],
1591 			     IEEE80211_WEP_MICLEN);
1592 		status = dp_rx_defrag_tkip_demic(soc, key, msdu,
1593 						 soc->rx_pkt_tlv_size +
1594 						 hdr_space);
1595 
1596 		if (status) {
1597 			dp_rx_defrag_err(vdev, frag_list_head);
1598 
1599 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1600 				  QDF_TRACE_LEVEL_ERROR,
1601 				  "%s: TKIP demic failed status %d",
1602 				   __func__, status);
1603 
1604 			return QDF_STATUS_E_DEFRAG_ERROR;
1605 		}
1606 	}
1607 
1608 	/* Convert the header to 802.3 header */
1609 	dp_rx_defrag_nwifi_to_8023(soc, txrx_peer, tid, frag_list_head,
1610 				   hdr_space);
1611 	if (qdf_nbuf_next(frag_list_head)) {
1612 		if (dp_rx_construct_fraglist(txrx_peer, tid, frag_list_head,
1613 					     hdr_space))
1614 			return QDF_STATUS_E_DEFRAG_ERROR;
1615 	}
1616 
1617 	return QDF_STATUS_SUCCESS;
1618 }
1619 
1620 /*
1621  * dp_rx_defrag_cleanup(): Clean up activities
1622  * @txrx_peer: Pointer to the peer
1623  * @tid: Traffic Identifier (TID)
1624  *
1625  * Returns: None
1626  */
1627 void dp_rx_defrag_cleanup(struct dp_txrx_peer *txrx_peer, unsigned int tid)
1628 {
1629 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1630 				txrx_peer->rx_tid[tid].array;
1631 
1632 	if (rx_reorder_array_elem) {
1633 		/* Free up nbufs */
1634 		dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
1635 		rx_reorder_array_elem->head = NULL;
1636 		rx_reorder_array_elem->tail = NULL;
1637 	} else {
1638 		dp_info("Cleanup self peer %pK and TID %u",
1639 			txrx_peer, tid);
1640 	}
1641 
1642 	/* Free up saved ring descriptors */
1643 	dp_rx_clear_saved_desc_info(txrx_peer, tid);
1644 
1645 	txrx_peer->rx_tid[tid].defrag_timeout_ms = 0;
1646 	txrx_peer->rx_tid[tid].curr_frag_num = 0;
1647 	txrx_peer->rx_tid[tid].curr_seq_num = 0;
1648 }
1649 
1650 /*
1651  * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor
1652  * @soc: Pointer to the SOC data structure
1653  * @ring_desc: Pointer to the dst ring descriptor
1654  * @rx_desc: Pointer to the rx descriptor of the head fragment
1655  * @txrx_peer: Pointer to the peer
1656  * @tid: Traffic Identifier (TID)
1657  * Returns: QDF_STATUS
1658  */
1659 static QDF_STATUS
1660 dp_rx_defrag_save_info_from_ring_desc(struct dp_soc *soc,
1661 				      hal_ring_desc_t ring_desc,
1662 				      struct dp_rx_desc *rx_desc,
1663 				      struct dp_txrx_peer *txrx_peer,
1664 				      unsigned int tid)
1665 {
1666 	void *dst_ring_desc;
1667 
1668 	dst_ring_desc = qdf_mem_malloc(hal_srng_get_entrysize(soc->hal_soc,
1669 							      REO_DST));
1670 
1671 	if (!dst_ring_desc) {
1672 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1673 			"%s: Memory alloc failed !", __func__);
1674 		QDF_ASSERT(0);
1675 		return QDF_STATUS_E_NOMEM;
1676 	}
1677 
1678 	qdf_mem_copy(dst_ring_desc, ring_desc,
1679 		     hal_srng_get_entrysize(soc->hal_soc, REO_DST));
1680 
1681 	txrx_peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
1682 	txrx_peer->rx_tid[tid].head_frag_desc = rx_desc;
1683 
1684 	return QDF_STATUS_SUCCESS;
1685 }
1686 
1687 #ifdef DP_RX_DEFRAG_ADDR1_CHECK_WAR
1688 #ifdef WLAN_FEATURE_11BE_MLO
1689 /*
1690  * dp_rx_defrag_vdev_mac_addr_cmp() - function to check whether mac address
1691  *				matches VDEV mac
1692  * @vdev: dp_vdev object of the VDEV on which this data packet is received
1693  * @mac_addr: Address to compare
1694  *
1695  * Return: 1 if the mac address matches,
1696  *         0 if this frame is not correctly destined to this VDEV/MLD
1697  */
1698 static int dp_rx_defrag_vdev_mac_addr_cmp(struct dp_vdev *vdev,
1699 					  uint8_t *mac_addr)
1700 {
1701 	return ((qdf_mem_cmp(mac_addr, &vdev->mac_addr.raw[0],
1702 			     QDF_MAC_ADDR_SIZE) == 0) ||
1703 		(qdf_mem_cmp(mac_addr, &vdev->mld_mac_addr.raw[0],
1704 			     QDF_MAC_ADDR_SIZE) == 0));
1705 }
1706 
1707 #else
1708 static int dp_rx_defrag_vdev_mac_addr_cmp(struct dp_vdev *vdev,
1709 					  uint8_t *mac_addr)
1710 {
1711 	return (qdf_mem_cmp(mac_addr, &vdev->mac_addr.raw[0],
1712 			    QDF_MAC_ADDR_SIZE) == 0);
1713 }
1714 #endif
1715 
1716 static bool dp_rx_defrag_addr1_check(struct dp_soc *soc,
1717 				     struct dp_vdev *vdev,
1718 				     uint8_t *rx_tlv_hdr)
1719 {
1720 	union dp_align_mac_addr mac_addr;
1721 
1722 	/* If address1 is not valid discard the fragment */
1723 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, rx_tlv_hdr,
1724 				  &mac_addr.raw[0]) != QDF_STATUS_SUCCESS) {
1725 		DP_STATS_INC(soc, rx.err.defrag_ad1_invalid, 1);
1726 		return false;
1727 	}
1728 
1729 	/* WAR suggested by HW team to avoid crashing in case of packet
1730 	 * corruption issue
1731 	 *
1732 	 * recipe is to compare VDEV mac or MLD mac address with ADDR1
1733 	 * in case of mismatch consider it as corrupted packet and do
1734 	 * not process further
1735 	 */
1736 	if (!dp_rx_defrag_vdev_mac_addr_cmp(vdev,
1737 					    &mac_addr.raw[0])) {
1738 		DP_STATS_INC(soc, rx.err.defrag_ad1_invalid, 1);
1739 		return false;
1740 	}
1741 
1742 	return true;
1743 }
1744 #else
1745 static inline bool dp_rx_defrag_addr1_check(struct dp_soc *soc,
1746 					    struct dp_vdev *vdev,
1747 					    uint8_t *rx_tlv_hdr)
1748 {
1749 
1750 	return true;
1751 }
1752 #endif
1753 
1754 /*
1755  * dp_rx_defrag_store_fragment(): Store incoming fragments
1756  * @soc: Pointer to the SOC data structure
1757  * @ring_desc: Pointer to the ring descriptor
1758  * @head: Free desc list head; @tail: Free desc list tail
1759  * @mpdu_desc_info: MPDU descriptor info
1760  * @tid: Traffic Identifier (TID)
1761  * @rx_desc: Pointer to rx descriptor; @rx_bfs: Number of bfs consumed
1762  *
1763  * Returns: QDF_STATUS
1764  */
1765 static QDF_STATUS
1766 dp_rx_defrag_store_fragment(struct dp_soc *soc,
1767 			    hal_ring_desc_t ring_desc,
1768 			    union dp_rx_desc_list_elem_t **head,
1769 			    union dp_rx_desc_list_elem_t **tail,
1770 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1771 			    unsigned int tid, struct dp_rx_desc *rx_desc,
1772 			    uint32_t *rx_bfs)
1773 {
1774 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1775 	struct dp_pdev *pdev;
1776 	struct dp_txrx_peer *txrx_peer = NULL;
1777 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1778 	uint16_t peer_id;
1779 	uint8_t fragno, more_frag, all_frag_present = 0;
1780 	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
1781 	QDF_STATUS status;
1782 	struct dp_rx_tid_defrag *rx_tid;
1783 	uint8_t mpdu_sequence_control_valid;
1784 	uint8_t mpdu_frame_control_valid;
1785 	qdf_nbuf_t frag = rx_desc->nbuf;
1786 	uint32_t msdu_len;
1787 
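	/* A fragment delivered here is expected to be a fresh, zero-length
	 * nbuf whose pktlen is set only below, once the MSDU length is
	 * known; a non-zero length suggests a stale or corrupt descriptor.
	 */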
1788 	if (qdf_nbuf_len(frag) > 0) {
		dp_info("Dropping unexpected packet with skb_len: %d, "
			"data len: %d, cookie: %d",
1791 			(uint32_t)qdf_nbuf_len(frag), frag->data_len,
1792 			rx_desc->cookie);
1793 		DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1);
1794 		goto discard_frag;
1795 	}
1796 
1797 	if (dp_rx_buffer_pool_refill(soc, frag, rx_desc->pool_id)) {
1798 		/* fragment queued back to the pool, free the link desc */
1799 		goto err_free_desc;
1800 	}
1801 
1802 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
1803 						  rx_desc->rx_buf_start);
1804 
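	/* Size the nbuf to the MSDU length plus the RX TLV header and
	 * reset any chained ext list so the fragment is one linear buffer.
	 */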
1805 	qdf_nbuf_set_pktlen(frag, (msdu_len + soc->rx_pkt_tlv_size));
1806 	qdf_nbuf_append_ext_list(frag, NULL, 0);
1807 
1808 	/* Check if the packet is from a valid peer */
1809 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1810 					       mpdu_desc_info->peer_meta_data);
1811 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &txrx_ref_handle,
1812 					       DP_MOD_ID_RX_ERR);
1813 
1814 	if (!txrx_peer) {
		/* We should not receive anything from an unknown peer;
		 * however, that can happen while we are in monitor mode.
		 * We don't need to handle that here.
		 */
1819 		dp_info_rl("Unknown peer with peer_id %d, dropping fragment",
1820 			   peer_id);
1821 		DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1);
1822 		goto discard_frag;
1823 	}
1824 
1825 	if (tid >= DP_MAX_TIDS) {
1826 		dp_info("TID out of bounds: %d", tid);
1827 		qdf_assert_always(0);
1828 		goto discard_frag;
1829 	}
1830 
1831 	if (!dp_rx_defrag_addr1_check(soc, txrx_peer->vdev,
1832 				      rx_desc->rx_buf_start)) {
1833 		dp_info("Invalid address 1");
1834 		goto discard_frag;
1835 	}
1836 
1837 	mpdu_sequence_control_valid =
1838 		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
1839 						       rx_desc->rx_buf_start);
1840 
1841 	/* Invalid MPDU sequence control field, MPDU is of no use */
1842 	if (!mpdu_sequence_control_valid) {
1843 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1844 			"Invalid MPDU seq control field, dropping MPDU");
1845 
1846 		qdf_assert(0);
1847 		goto discard_frag;
1848 	}
1849 
1850 	mpdu_frame_control_valid =
1851 		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
1852 						    rx_desc->rx_buf_start);
1853 
1854 	/* Invalid frame control field */
1855 	if (!mpdu_frame_control_valid) {
1856 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1857 			"Invalid frame control field, dropping MPDU");
1858 
1859 		qdf_assert(0);
1860 		goto discard_frag;
1861 	}
1862 
1863 	/* Current mpdu sequence */
1864 	more_frag = dp_rx_frag_get_more_frag_bit(soc, rx_desc->rx_buf_start);
1865 
	/* HW does not populate the fragment number as of now;
	 * it needs to be fetched from the 802.11 header
	 */
1869 	fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc->rx_buf_start);
1870 
1871 	pdev = txrx_peer->vdev->pdev;
1872 	rx_tid = &txrx_peer->rx_tid[tid];
1873 
1874 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, frag,
1875 			      QDF_TX_RX_STATUS_OK, false);
1876 
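	/* The per-TID defrag state touched below (reorder array,
	 * curr_seq_num, saved ring descriptors) is protected by
	 * defrag_tid_lock.
	 */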
1877 	qdf_spin_lock_bh(&rx_tid->defrag_tid_lock);
1878 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1879 	if (!rx_reorder_array_elem) {
1880 		dp_err_rl("Rcvd Fragmented pkt before tid setup for peer %pK",
1881 			  txrx_peer);
1882 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1883 		goto discard_frag;
1884 	}
1885 
1886 	/*
1887 	 * !more_frag: no more fragments to be delivered
	 * !fragno: packet is not fragmented
1889 	 * !rx_reorder_array_elem->head: no saved fragments so far
1890 	 */
1891 	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		/* We should not get here: it means an unfragmented packet
		 * with the fragment flag set was delivered over the REO
		 * exception ring, whereas it would normally follow the
		 * regular rx path.
		 */
1897 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1898 			"Rcvd unfragmented pkt on REO Err srng, dropping");
1899 
1900 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1901 		qdf_assert(0);
1902 		goto discard_frag;
1903 	}
1904 
1905 	/* Check if the fragment is for the same sequence or a different one */
1906 	dp_debug("rx_tid %d", tid);
1907 	if (rx_reorder_array_elem->head) {
1908 		dp_debug("rxseq %d\n", rxseq);
1909 		if (rxseq != rx_tid->curr_seq_num) {
1911 			dp_debug("mismatch cur_seq %d rxseq %d\n",
1912 				 rx_tid->curr_seq_num, rxseq);
			/* Drop stored fragments if an out-of-sequence
			 * fragment is received
			 */
1916 			dp_rx_reorder_flush_frag(txrx_peer, tid);
1917 
1918 			DP_STATS_INC(soc, rx.rx_frag_oor, 1);
1919 
1920 			dp_debug("cur rxseq %d\n", rxseq);
1921 			/*
1922 			 * The sequence number for this fragment becomes the
1923 			 * new sequence number to be processed
1924 			 */
1925 			rx_tid->curr_seq_num = rxseq;
1926 		}
1927 	} else {
		/* Check if we are processing the first fragment; if it is
		 * not the first fragment, discard it.
		 */
1931 		if (fragno) {
1932 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1933 			goto discard_frag;
1934 		}
1935 		dp_debug("cur rxseq %d\n", rxseq);
1936 		/* Start of a new sequence */
1937 		dp_rx_defrag_cleanup(txrx_peer, tid);
1938 		rx_tid->curr_seq_num = rxseq;
1939 		/* store PN number also */
1940 	}
1941 
1942 	/*
1943 	 * If the earlier sequence was dropped, this will be the fresh start.
1944 	 * Else, continue with next fragment in a given sequence
1945 	 */
1946 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
1947 					      &rx_reorder_array_elem->head,
1948 					      &rx_reorder_array_elem->tail,
1949 					      frag, &all_frag_present);
1950 
	/*
	 * Currently we can have only 6 MSDUs per MPDU; if the current
	 * packet sequence has more than 6 MSDUs for some reason, we will
	 * have to use the next MSDU link descriptor and chain them
	 * together before reinjection.
	 * ring_desc is validated in dp_rx_err_process().
	 */
1958 	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
1959 			(rx_reorder_array_elem->head == frag)) {
1961 		status = dp_rx_defrag_save_info_from_ring_desc(soc, ring_desc,
1962 							       rx_desc,
1963 							       txrx_peer, tid);
1964 
1965 		if (status != QDF_STATUS_SUCCESS) {
1966 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1967 				"%s: Unable to store ring desc !", __func__);
1968 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1969 			goto discard_frag;
1970 		}
1971 	} else {
1972 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1973 		(*rx_bfs)++;
1974 
1975 		/* Return the non-head link desc */
1976 		if (dp_rx_link_desc_return(soc, ring_desc,
1977 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1978 		    QDF_STATUS_SUCCESS)
1979 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1980 				  "%s: Failed to return link desc", __func__);
1981 
1982 	}
1983 
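	/* A fragment for this TID just arrived: take it off the defrag
	 * timeout waitlist; it is re-added below if the sequence is still
	 * incomplete.
	 */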
1984 	if (pdev->soc->rx.flags.defrag_timeout_check)
1985 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
1986 
1987 	/* Yet to receive more fragments for this sequence number */
1988 	if (!all_frag_present) {
1989 		uint32_t now_ms =
1990 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1991 
1992 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
1993 			now_ms + pdev->soc->rx.defrag.timeout_ms;
1994 
1995 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
1996 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
1997 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1998 
1999 		return QDF_STATUS_SUCCESS;
2000 	}
2001 
2002 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2003 		  "All fragments received for sequence: %d", rxseq);
2004 
2005 	/* Process the fragments */
2006 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
2007 			      rx_reorder_array_elem->tail);
2008 	if (QDF_IS_STATUS_ERROR(status)) {
2009 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2010 			"Fragment processing failed");
2011 
2012 		dp_rx_add_to_free_desc_list(head, tail,
2013 				txrx_peer->rx_tid[tid].head_frag_desc);
2014 		(*rx_bfs)++;
2015 
2016 		if (dp_rx_link_desc_return(soc,
2017 					txrx_peer->rx_tid[tid].dst_ring_desc,
2018 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2019 				QDF_STATUS_SUCCESS)
2020 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2021 					"%s: Failed to return link desc",
2022 					__func__);
2023 		dp_rx_defrag_cleanup(txrx_peer, tid);
2024 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2025 		goto end;
2026 	}
2027 
2028 	/* Re-inject the fragments back to REO for further processing */
2029 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
2030 					   rx_reorder_array_elem->head);
2031 	if (QDF_IS_STATUS_SUCCESS(status)) {
2032 		rx_reorder_array_elem->head = NULL;
2033 		rx_reorder_array_elem->tail = NULL;
2034 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2035 			  "Fragmented sequence successfully reinjected");
2036 	} else {
2037 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2038 		"Fragmented sequence reinjection failed");
2039 		dp_rx_return_head_frag_desc(txrx_peer, tid);
2040 	}
2041 
2042 	dp_rx_defrag_cleanup(txrx_peer, tid);
2043 	qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2044 
2045 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2046 
2047 	return QDF_STATUS_SUCCESS;
2048 
2049 discard_frag:
2050 	dp_rx_nbuf_free(frag);
2051 err_free_desc:
2052 	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
2053 	if (dp_rx_link_desc_return(soc, ring_desc,
2054 				   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2055 	    QDF_STATUS_SUCCESS)
2056 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2057 			  "%s: Failed to return link desc", __func__);
2058 	(*rx_bfs)++;
2059 
2060 end:
2061 	if (txrx_peer)
2062 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2063 
2064 	DP_STATS_INC(soc, rx.rx_frag_err, 1);
2065 	return QDF_STATUS_E_DEFRAG_ERROR;
2066 }
2067 
/**
 * dp_rx_frag_handle() - Handles fragmented Rx frames
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from the ring descriptor
 * @rx_desc: Rx descriptor for the received fragment
 * @mac_id: set to the MAC/pool id the fragment's buffer belongs to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements RX 802.11 fragmentation handling.
 * The handling is mostly the same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to the
 * REO ring (with the proper settings to bypass the fragmentation
 * check, but still use duplicate detection / re-ordering) and route
 * these frames to a different core.
 *
 * Return: uint32_t: No. of elements processed
 */
2087 uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
2088 			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
2089 			   struct dp_rx_desc *rx_desc,
2090 			   uint8_t *mac_id,
2091 			   uint32_t quota)
2092 {
2093 	uint32_t rx_bufs_used = 0;
2094 	qdf_nbuf_t msdu = NULL;
2095 	uint32_t tid;
2096 	uint32_t rx_bfs = 0;
2097 	struct dp_pdev *pdev;
2098 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2099 	struct rx_desc_pool *rx_desc_pool;
2100 
2101 	qdf_assert(soc);
2102 	qdf_assert(mpdu_desc_info);
2103 	qdf_assert(rx_desc);
2104 
2105 	dp_debug("Number of MSDUs to process, num_msdus: %d",
2106 		 mpdu_desc_info->msdu_count);
2107 
2109 	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
2110 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Insufficient MSDUs to process");
2112 		return rx_bufs_used;
2113 	}
2114 
2115 	/* all buffers in MSDU link belong to same pdev */
2116 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
2117 	if (!pdev) {
2118 		dp_nofl_debug("pdev is null for pool_id = %d",
2119 			      rx_desc->pool_id);
2120 		return rx_bufs_used;
2121 	}
2122 
2123 	*mac_id = rx_desc->pool_id;
2124 
2125 	msdu = rx_desc->nbuf;
2126 
2127 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2128 
2129 	if (rx_desc->unmapped)
2130 		return rx_bufs_used;
2131 
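	/* Unmap the fragment buffer before the CPU parses it; the IPA
	 * SMMU mapping lock serializes this against concurrent IPA
	 * map/unmap activity when IPA offload is in use.
	 */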
2132 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
2133 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
2134 	rx_desc->unmapped = 1;
2135 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2136 
2137 	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);
2138 
2139 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start);
2140 
2141 	/* Process fragment-by-fragment */
2142 	status = dp_rx_defrag_store_fragment(soc, ring_desc,
2143 					     &pdev->free_list_head,
2144 					     &pdev->free_list_tail,
2145 					     mpdu_desc_info,
2146 					     tid, rx_desc, &rx_bfs);
2147 
2148 	if (rx_bfs)
2149 		rx_bufs_used += rx_bfs;
2150 
2151 	if (!QDF_IS_STATUS_SUCCESS(status))
2152 		dp_info_rl("Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
2153 			   mpdu_desc_info->mpdu_seq,
2154 			   mpdu_desc_info->msdu_count,
2155 			   mpdu_desc_info->mpdu_flags);
2156 
2157 	return rx_bufs_used;
2158 }
2159 
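/**
 * dp_rx_defrag_add_last_frag() - Add an incoming fragment to the per-TID
 *	reorder list and, once all fragments of the sequence are present,
 *	defragment and re-inject them to REO
 * @soc: Pointer to the SOC data structure
 * @txrx_peer: Pointer to the peer the fragment was received from
 * @tid: Traffic Identifier
 * @rxseq: Sequence number of the incoming fragment
 * @nbuf: Fragment buffer
 *
 * Return: QDF_STATUS
 */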
2160 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
2161 				      struct dp_txrx_peer *txrx_peer,
2162 				      uint16_t tid,
2163 				      uint16_t rxseq, qdf_nbuf_t nbuf)
2164 {
2165 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
2166 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
2167 	uint8_t all_frag_present;
2168 	uint32_t msdu_len;
2169 	QDF_STATUS status;
2170 
2171 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
2172 
	/*
	 * HW may fill in an unexpected peer_id in the RX PKT TLV. If the
	 * peer looked up from that peer_id happens to be valid by
	 * coincidence but never went through dp_peer_rx_init() (e.g. a SAP
	 * vdev self peer), rx_reorder_array_elem would be accessed while
	 * uninitialized.
	 */
2179 	if (!rx_reorder_array_elem) {
2180 		dp_verbose_debug(
2181 			"peer id:%d drop rx frame!",
2182 			txrx_peer->peer_id);
2183 		DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1);
2184 		dp_rx_nbuf_free(nbuf);
2185 		goto fail;
2186 	}
2187 
2188 	if (rx_reorder_array_elem->head &&
2189 	    rxseq != rx_tid->curr_seq_num) {
		/* Drop stored fragments if an out-of-sequence
		 * fragment is received
		 */
2193 		dp_rx_reorder_flush_frag(txrx_peer, tid);
2194 
2195 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Out of sequence fragment for TID %d Seq# %d",
			  __func__, tid, rxseq);
2198 		dp_rx_nbuf_free(nbuf);
2199 		goto fail;
2200 	}
2201 
2202 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2203 						  qdf_nbuf_data(nbuf));
2204 
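	/* As in the regular store path, size the nbuf to the MSDU length
	 * plus the RX TLV header before inserting it into the frag list.
	 */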
2205 	qdf_nbuf_set_pktlen(nbuf, (msdu_len + soc->rx_pkt_tlv_size));
2206 
2207 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
2208 					      &rx_reorder_array_elem->head,
2209 			&rx_reorder_array_elem->tail, nbuf,
2210 			&all_frag_present);
2211 
2212 	if (QDF_IS_STATUS_ERROR(status)) {
2213 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2214 			  "%s Fragment insert failed", __func__);
2215 
2216 		goto fail;
2217 	}
2218 
2219 	if (soc->rx.flags.defrag_timeout_check)
2220 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
2221 
2222 	if (!all_frag_present) {
2223 		uint32_t now_ms =
2224 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2225 
2226 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
2227 			now_ms + soc->rx.defrag.timeout_ms;
2228 
2229 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
2230 
2231 		return QDF_STATUS_SUCCESS;
2232 	}
2233 
2234 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
2235 			      rx_reorder_array_elem->tail);
2236 
2237 	if (QDF_IS_STATUS_ERROR(status)) {
2238 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2239 			  "%s Fragment processing failed", __func__);
2240 
2241 		dp_rx_return_head_frag_desc(txrx_peer, tid);
2242 		dp_rx_defrag_cleanup(txrx_peer, tid);
2243 
2244 		goto fail;
2245 	}
2246 
2247 	/* Re-inject the fragments back to REO for further processing */
2248 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
2249 					   rx_reorder_array_elem->head);
2250 	if (QDF_IS_STATUS_SUCCESS(status)) {
2251 		rx_reorder_array_elem->head = NULL;
2252 		rx_reorder_array_elem->tail = NULL;
2253 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2254 			  "%s: Frag seq successfully reinjected",
2255 			__func__);
2256 	} else {
2257 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2258 			  "%s: Frag seq reinjection failed", __func__);
2259 		dp_rx_return_head_frag_desc(txrx_peer, tid);
2260 	}
2261 
2262 	dp_rx_defrag_cleanup(txrx_peer, tid);
2263 	return QDF_STATUS_SUCCESS;
2264 
2265 fail:
2266 	return QDF_STATUS_E_DEFRAG_ERROR;
2267 }
2268