/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#ifndef RX_DEFRAG_DO_NOT_REINJECT
#ifndef DP_BE_WAR
#include "li/hal_li_rx.h"
#endif
#endif
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "dp_ipa.h"
#include "dp_rx_buffer_pool.h"

const struct dp_rx_defrag_cipher dp_f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct dp_rx_defrag_cipher dp_f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct dp_rx_defrag_cipher dp_f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};

/*
 * The header and MIC lengths are the same for both
 * GCMP-128 and GCMP-256.
 */
const struct dp_rx_defrag_cipher dp_f_gcmp = {
	"AES-GCMP",
	WLAN_IEEE80211_GCMP_HEADERLEN,
	WLAN_IEEE80211_GCMP_MICLEN,
	WLAN_IEEE80211_GCMP_MICLEN,
};
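
/*
 * For reference (based on how these fields are used in this file), the
 * dp_rx_defrag_cipher initializers above are, in order: a name string,
 * ic_header (per-fragment crypto header to skip), ic_trailer
 * (per-fragment trailer such as ICV/MIC to trim) and ic_miclen (the
 * Michael MIC length for TKIP). For example, dp_f_tkip describes an
 * 8-byte IV/keyid/ExtIV header, a 4-byte ICV trailer and an 8-byte
 * Michael MIC.
 */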

/*
 * dp_rx_defrag_frames_free(): Free fragment chain
 * @frames: Fragment chain
 *
 * Iterates through the fragment chain and frees each buffer
 * Returns: None
 */
static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
{
	qdf_nbuf_t next, frag = frames;

	while (frag) {
		next = qdf_nbuf_next(frag);
		dp_rx_nbuf_free(frag);
		frag = next;
	}
}

/*
 * dp_rx_clear_saved_desc_info(): Clears saved descriptor info
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Frees and clears the cached MPDU descriptor info and MSDU link
 * pointer that were saved from the REO ring descriptor. The cache is
 * maintained per peer, per TID.
 *
 * Returns: None
 */
static void dp_rx_clear_saved_desc_info(struct dp_txrx_peer *txrx_peer,
					unsigned int tid)
{
	if (txrx_peer->rx_tid[tid].dst_ring_desc)
		qdf_mem_free(txrx_peer->rx_tid[tid].dst_ring_desc);

	txrx_peer->rx_tid[tid].dst_ring_desc = NULL;
	txrx_peer->rx_tid[tid].head_frag_desc = NULL;
}

static void dp_rx_return_head_frag_desc(struct dp_txrx_peer *txrx_peer,
					unsigned int tid)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	pdev = txrx_peer->vdev->pdev;
	soc = pdev->soc;

	if (txrx_peer->rx_tid[tid].head_frag_desc) {
		pool_id = txrx_peer->rx_tid[tid].head_frag_desc->pool_id;
		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_add_to_free_desc_list(&head, &tail,
					    txrx_peer->rx_tid[tid].head_frag_desc);
		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					1, &head, &tail, false);
	}

	if (txrx_peer->rx_tid[tid].dst_ring_desc) {
		if (dp_rx_link_desc_return(soc,
					   txrx_peer->rx_tid[tid].dst_ring_desc,
					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
		    QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to return link desc", __func__);
	}
}

/*
 * dp_rx_reorder_flush_frag(): Flush the frag list
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Flush the per-TID frag list
 *
 * Returns: None
 */
void dp_rx_reorder_flush_frag(struct dp_txrx_peer *txrx_peer,
			      unsigned int tid)
{
	dp_info_rl("Flushing TID %d", tid);

	if (!txrx_peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL peer", __func__);
		return;
	}

	dp_rx_return_head_frag_desc(txrx_peer, tid);
	dp_rx_defrag_cleanup(txrx_peer, tid);
}

/*
 * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list
 * @soc: DP SOC
 *
 * Flush fragments of all waitlisted TIDs
 *
 * Returns: None
 */
void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
{
	struct dp_rx_tid_defrag *waitlist_elem = NULL;
	struct dp_rx_tid_defrag *tmp;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
	TAILQ_HEAD(, dp_rx_tid_defrag) temp_list;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	TAILQ_INIT(&temp_list);

	dp_debug("Current time %u", now_ms);

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(waitlist_elem, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		uint32_t tid;

		if (waitlist_elem->defrag_timeout_ms > now_ms)
			break;

		tid = waitlist_elem->tid;
		if (tid >= DP_MAX_TIDS) {
			qdf_assert(0);
			continue;
		}

		TAILQ_REMOVE(&soc->rx.defrag.waitlist, waitlist_elem,
			     defrag_waitlist_elem);
		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);

		/* Move to temp list and clean up later */
		TAILQ_INSERT_TAIL(&temp_list, waitlist_elem,
				  defrag_waitlist_elem);
	}
	if (waitlist_elem) {
		soc->rx.defrag.next_flush_ms =
			waitlist_elem->defrag_timeout_ms;
	} else {
		soc->rx.defrag.next_flush_ms =
			now_ms + soc->rx.defrag.timeout_ms;
	}

	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);

	TAILQ_FOREACH_SAFE(waitlist_elem, &temp_list,
			   defrag_waitlist_elem, tmp) {
		struct dp_txrx_peer *txrx_peer, *temp_peer = NULL;

		qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
		TAILQ_REMOVE(&temp_list, waitlist_elem,
			     defrag_waitlist_elem);
		/* get address of current peer */
		txrx_peer = waitlist_elem->defrag_peer;
		qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);

		temp_peer = dp_txrx_peer_get_ref_by_id(soc, txrx_peer->peer_id,
						       &txrx_ref_handle,
						       DP_MOD_ID_RX_ERR);
		if (temp_peer == txrx_peer) {
			qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
			dp_rx_reorder_flush_frag(txrx_peer, waitlist_elem->tid);
			qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);
		}

		if (temp_peer)
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX_ERR);
	}
}
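
/*
 * Note on the flush above: expired entries are first unlinked from the
 * SOC waitlist under defrag_lock and parked on a local temp_list; the
 * actual per-TID flush then runs outside defrag_lock, holding only a
 * peer reference and the per-TID defrag_tid_lock. This two-phase walk
 * keeps the global lock hold time short and avoids taking the per-TID
 * lock while the global lock is held.
 */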

/*
 * dp_rx_defrag_waitlist_add(): Update the per-SOC defrag wait list
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Appends the per-TID fragments to the SOC-wide fragment wait list
 *
 * Returns: None
 */
static void dp_rx_defrag_waitlist_add(struct dp_txrx_peer *txrx_peer,
				      unsigned int tid)
{
	struct dp_soc *psoc = txrx_peer->vdev->pdev->soc;
	struct dp_rx_tid_defrag *waitlist_elem = &txrx_peer->rx_tid[tid];

	dp_debug("Adding TID %u to waitlist for peer %pK with peer_id = %d",
		 tid, txrx_peer, txrx_peer->peer_id);

	/* TODO: use LIST macros instead of TAIL macros */
	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
	if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist))
		psoc->rx.defrag.next_flush_ms =
			waitlist_elem->defrag_timeout_ms;

	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, waitlist_elem,
			  defrag_waitlist_elem);
	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
}

/*
 * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Remove fragments from the waitlist
 *
 * Returns: None
 */
void dp_rx_defrag_waitlist_remove(struct dp_txrx_peer *txrx_peer,
				  unsigned int tid)
{
	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_rx_tid_defrag *waitlist_elm;
	struct dp_rx_tid_defrag *tmp;

	dp_debug("Removing TID %u from waitlist for peer %pK peer_id = %d",
		 tid, txrx_peer, txrx_peer->peer_id);

	if (tid >= DP_MAX_TIDS) {
		dp_err("TID out of bounds: %d", tid);
		qdf_assert_always(0);
	}

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(waitlist_elm, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct dp_txrx_peer *peer_on_waitlist;

		/* get address of current peer */
		peer_on_waitlist = waitlist_elm->defrag_peer;

		/* Ensure it is the TID for the same peer */
		if (peer_on_waitlist == txrx_peer && waitlist_elm->tid == tid) {
			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
				     waitlist_elm, defrag_waitlist_elem);
			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
		}
	}
	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
}

/*
 * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 * @head_addr: Pointer to head list
 * @tail_addr: Pointer to tail list
 * @frag: Incoming fragment
 * @all_frag_present: Flag to indicate whether all fragments are received
 *
 * Build a per-TID, per-sequence fragment list.
 *
 * Returns: QDF_STATUS_SUCCESS if the fragment was inserted,
 *	    QDF_STATUS_E_FAILURE if it was a duplicate and was dropped
 */
static QDF_STATUS
dp_rx_defrag_fraglist_insert(struct dp_txrx_peer *txrx_peer, unsigned int tid,
			     qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr,
			     qdf_nbuf_t frag, uint8_t *all_frag_present)
{
	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
	qdf_nbuf_t next;
	qdf_nbuf_t prev = NULL;
	qdf_nbuf_t cur;
	uint16_t head_fragno, cur_fragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;
	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
	uint8_t *rx_desc_info;

	qdf_assert(frag);
	qdf_assert(head_addr);
	qdf_assert(tail_addr);

	*all_frag_present = 0;
	rx_desc_info = qdf_nbuf_data(frag);
	cur_fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc_info);

	dp_debug("cur_fragno %d\n", cur_fragno);
	/* If this is the first fragment */
	if (!(*head_addr)) {
		*head_addr = *tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;

		goto insert_done;
	}

	/* In-sequence fragment */
	if (cur_fragno > rx_tid->curr_frag_num) {
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;
	} else {
		/* Out-of-sequence fragment */
		cur = *head_addr;
		rx_desc_info = qdf_nbuf_data(cur);
		head_fragno = dp_rx_frag_get_mpdu_frag_number(soc,
							      rx_desc_info);

		if (cur_fragno == head_fragno) {
			dp_rx_nbuf_free(frag);
			goto insert_fail;
		} else if (head_fragno > cur_fragno) {
			qdf_nbuf_set_next(frag, cur);
			cur = frag;
			*head_addr = frag; /* head pointer to be updated */
		} else {
			while ((cur_fragno > head_fragno) && cur) {
				prev = cur;
				cur = qdf_nbuf_next(cur);
				if (cur) {
					rx_desc_info = qdf_nbuf_data(cur);
					head_fragno =
						dp_rx_frag_get_mpdu_frag_number(
								soc,
								rx_desc_info);
				}
			}

			if (cur_fragno == head_fragno) {
				dp_rx_nbuf_free(frag);
				goto insert_fail;
			}

			qdf_nbuf_set_next(prev, frag);
			qdf_nbuf_set_next(frag, cur);
		}
	}

	next = qdf_nbuf_next(*head_addr);

	rx_desc_info = qdf_nbuf_data(*tail_addr);
	last_morefrag = dp_rx_frag_get_more_frag_bit(soc, rx_desc_info);

	/* TODO: optimize the loop */
	if (!last_morefrag) {
		/* Check if all fragments are present */
		do {
			rx_desc_info = qdf_nbuf_data(next);
			next_fragno =
				dp_rx_frag_get_mpdu_frag_number(soc,
								rx_desc_info);
			count++;

			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return QDF_STATUS_SUCCESS;
		} else {
			/* revisit */
		}
	}

insert_done:
	return QDF_STATUS_SUCCESS;

insert_fail:
	return QDF_STATUS_E_FAILURE;
}
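
/*
 * Illustrative example of the insertion logic above: with fragments
 * arriving in the order 0, 2, 1, 3 (fragment numbers taken from the
 * 802.11 sequence control field), frag 0 becomes the head, frag 2 is
 * appended in-sequence, frag 1 is walked in from the head and linked
 * between 0 and 2, and frag 3 is appended. Once the tail fragment has
 * the more-frag bit clear and the walk finds fragments 1, 2, 3, ...
 * contiguous behind the head, *all_frag_present is set.
 */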
/*
 * dp_rx_defrag_tkip_decap(): decap TKIP encrypted fragment
 * @soc: DP SOC
 * @msdu: Pointer to the fragment
 * @hdrlen: 802.11 header length (mostly useful in 4-addr frames)
 *
 * decap TKIP encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_tkip_decap(struct dp_soc *soc,
			qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	/* start of 802.11 header info */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);

	/* TKIP header is located after the 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"IEEE80211_WEP_EXTIV is missing in TKIP fragment");
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);

	return QDF_STATUS_SUCCESS;
}
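
/*
 * TKIP per-fragment layout handled above, for reference:
 *
 *   [rx TLVs][802.11 hdr][IV(3) + keyid(1) + ExtIV(4)][data][ICV(4)]
 *
 * Only the 4-byte ICV trailer (dp_f_tkip.ic_trailer) is trimmed here;
 * the 8-byte IV/ExtIV header is accounted for later via the
 * dp_f_tkip.ic_header adjustment in dp_rx_defrag(), and the Michael
 * MIC spans the reassembled MSDU rather than a single fragment.
 */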

/*
 * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment
 * @soc: DP SOC
 * @nbuf: Pointer to the fragment buffer
 * @hdrlen: 802.11 header length (mostly useful in 4-addr frames)
 *
 * Remove MIC information from CCMP fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_ccmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	/* start of the 802.11 header */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);

	/* CCMP header is located after the 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment
 * @soc: DP SOC
 * @nbuf: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap CCMP encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_ccmp_decap(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = origHdr + hdrlen;

	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment
 * @soc: DP SOC
 * @msdu: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap WEP encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_wep_decap(struct dp_soc *soc, qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *origHdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);

	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment
 * @soc: soc handle
 * @nbuf: Pointer to the fragment
 *
 * Calculate the header size of the received fragment
 *
 * Returns: header size (uint16_t)
 */
static uint16_t dp_rx_defrag_hdrsize(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
	uint16_t size = sizeof(struct ieee80211_frame);
	uint16_t fc = 0;
	uint32_t to_ds, fr_ds;
	uint8_t frm_ctrl_valid;
	uint16_t frm_ctrl_field;

	to_ds = hal_rx_mpdu_get_to_ds(soc->hal_soc, rx_tlv_hdr);
	fr_ds = hal_rx_mpdu_get_fr_ds(soc->hal_soc, rx_tlv_hdr);
	frm_ctrl_valid =
		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						    rx_tlv_hdr);
	frm_ctrl_field = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_tlv_hdr);

	if (to_ds && fr_ds)
		size += QDF_MAC_ADDR_SIZE;

	if (frm_ctrl_valid) {
		fc = frm_ctrl_field;

		/* use the first byte for validation */
		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
			size += sizeof(uint16_t);
			/* use the second byte for validation */
			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
				size += sizeof(struct ieee80211_htc);
		}
	}

	return size;
}
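
/*
 * Worked example: a plain 3-address QoS data frame yields
 * sizeof(struct ieee80211_frame) (24 bytes) + 2 bytes of QoS control
 * = 26. A 4-address (to_ds && fr_ds) QoS frame with the order bit set
 * adds a fourth address (6) and an HT control field (4), giving
 * 24 + 6 + 2 + 4 = 36.
 */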

/*
 * dp_rx_defrag_michdr(): Calculate a pseudo MIC header
 * @wh0: Pointer to the wireless header of the fragment
 * @hdr: Array to hold the pseudo header
 *
 * Calculate a pseudo MIC header
 *
 * Returns: None
 */
static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
				uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr4);
		break;
	}

	/*
	 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for a data frame, but
	 * it could also be set for deauth, disassoc, action, etc. for
	 * a mgmt type frame. This comes into play for MFP.
	 */
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
				IEEE80211_FC1_DIR_DSTODS) {
			const struct ieee80211_qosframe_addr4 *qwh =
				(const struct ieee80211_qosframe_addr4 *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		} else {
			const struct ieee80211_qosframe *qwh =
				(const struct ieee80211_qosframe *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		}
	} else {
		hdr[12] = 0;
	}

	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
}
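
/*
 * Resulting pseudo-header layout consumed by dp_rx_defrag_mic():
 *
 *   hdr[0..5]   destination address (DA)
 *   hdr[6..11]  source address (SA)
 *   hdr[12]     QoS TID (priority), 0 for non-QoS frames
 *   hdr[13..15] reserved, always 0
 *
 * This matches the Michael MIC header defined for TKIP: DA, SA,
 * priority and three zero bytes.
 */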

/*
 * dp_rx_defrag_mic(): Calculate the Michael MIC
 * @soc: DP SOC
 * @key: Pointer to the key
 * @wbuf: fragment buffer
 * @off: Offset
 * @data_len: Data length
 * @mic: Array to hold the MIC
 *
 * Compute the TKIP Michael MIC over the pseudo header and the data
 * spread across the fragment chain
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_mic(struct dp_soc *soc, const uint8_t *key,
				   qdf_nbuf_t wbuf, uint16_t off,
				   uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
		+ rx_desc_len), hdr);

	l = dp_rx_get_le32(key);
	r = dp_rx_get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	l ^= dp_rx_get_le32(hdr);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[4]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[8]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[12]);
	dp_rx_michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
	space = qdf_nbuf_len(wbuf) - off;

	for (;;) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from the current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= dp_rx_get_le32(data);
			dp_rx_michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = qdf_nbuf_next(wbuf);
		if (!wbuf)
			return QDF_STATUS_E_DEFRAG_ERROR;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *)qdf_nbuf_data(wbuf) + off;
			if ((qdf_nbuf_len(wbuf)) <
				sizeof(uint32_t) - space) {
				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			switch (space) {
			case 1:
				l ^= dp_rx_get_le32_split(data[0],
					data_next[0], data_next[1],
					data_next[2]);
				data = data_next + 3;
				space = (qdf_nbuf_len(wbuf) - off) - 3;
				break;
			case 2:
				l ^= dp_rx_get_le32_split(data[0], data[1],
						    data_next[0], data_next[1]);
				data = data_next + 2;
				space = (qdf_nbuf_len(wbuf) - off) - 2;
				break;
			case 3:
				l ^= dp_rx_get_le32_split(data[0], data[1],
					data[2], data_next[0]);
				data = data_next + 1;
				space = (qdf_nbuf_len(wbuf) - off) - 1;
				break;
			}
			dp_rx_michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for the next buffer.
			 */
			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
			space = qdf_nbuf_len(wbuf) - off;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	dp_rx_michael_block(l, r);
	dp_rx_michael_block(l, r);
	dp_rx_put_le32(mic, l);
	dp_rx_put_le32(mic + 4, r);

	return QDF_STATUS_SUCCESS;
}
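
/*
 * For reference, each dp_rx_michael_block(l, r) invocation above is one
 * round of the Michael b-function defined for TKIP in IEEE 802.11:
 *
 *   r ^= rotl32(l, 17);                       l += r;
 *   r ^= swap of the two 16-bit halves of l;  l += r;
 *   r ^= rotl32(l, 3);                        l += r;
 *   r ^= rotr32(l, 2);                        l += r;
 *
 * The 64-bit key is loaded into (l, r), the 16-byte pseudo header and
 * the data are mixed in as 32-bit little-endian words, and the final
 * (l, r) pair is emitted as the 8-byte MIC.
 */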

/*
 * dp_rx_defrag_tkip_demic(): Remove the MIC from the TKIP frame
 * @soc: DP SOC
 * @key: Pointer to the key
 * @msdu: fragment buffer
 * @hdrlen: Length of the header information
 *
 * Verify and remove the Michael MIC from the TKIP frame
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_tkip_demic(struct dp_soc *soc,
					  const uint8_t *key,
					  qdf_nbuf_t msdu, uint16_t hdrlen)
{
	QDF_STATUS status;
	uint32_t pktlen = 0, prev_data_len;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	qdf_nbuf_t prev = NULL, prev0, next;
	uint8_t len0 = 0;

	next = msdu;
	prev0 = msdu;
	while (next) {
		pktlen += (qdf_nbuf_len(next) - hdrlen);
		prev = next;
		dp_debug("pktlen %u",
			 (uint32_t)(qdf_nbuf_len(next) - hdrlen));
		next = qdf_nbuf_next(next);
		if (next && !qdf_nbuf_next(next))
			prev0 = prev;
	}

	if (!prev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Defrag chaining failed!\n", __func__);
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	prev_data_len = qdf_nbuf_len(prev) - hdrlen;
	if (prev_data_len < dp_f_tkip.ic_miclen) {
		if (prev0 == prev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Fragments don't have MIC header!\n",
				  __func__);
			return QDF_STATUS_E_DEFRAG_ERROR;
		}
		len0 = dp_f_tkip.ic_miclen - (uint8_t)prev_data_len;
		qdf_nbuf_copy_bits(prev0, qdf_nbuf_len(prev0) - len0, len0,
				   (caddr_t)mic0);
		qdf_nbuf_trim_tail(prev0, len0);
	}

	qdf_nbuf_copy_bits(prev, (qdf_nbuf_len(prev) -
			   (dp_f_tkip.ic_miclen - len0)),
			   (dp_f_tkip.ic_miclen - len0),
			   (caddr_t)(&mic0[len0]));
	qdf_nbuf_trim_tail(prev, (dp_f_tkip.ic_miclen - len0));
	pktlen -= dp_f_tkip.ic_miclen;

	if (((qdf_nbuf_len(prev) - hdrlen) == 0) && prev != msdu) {
		dp_rx_nbuf_free(prev);
		qdf_nbuf_set_next(prev0, NULL);
	}

	status = dp_rx_defrag_mic(soc, key, msdu, hdrlen,
				  pktlen, mic);

	if (QDF_IS_STATUS_ERROR(status))
		return status;

	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_frag_pull_hdr(): Pulls the RX TLVs & the 802.11 header
 * @soc: DP SOC
 * @nbuf: buffer pointer
 * @hdrsize: size of the header to be pulled
 *
 * Pull the RX TLVs & the 802.11 header
 *
 * Returns: None
 */
static void dp_rx_frag_pull_hdr(struct dp_soc *soc,
				qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));

	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + hdrsize);

	dp_debug("final pktlen %d .11len %d",
		 (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
}

/*
 * dp_rx_defrag_pn_check(): Check the PN of the current fragment
 *			    against the previous one
 * @soc: DP SOC
 * @msdu: msdu to get the current PN
 * @cur_pn128: PN extracted from the current msdu
 * @prev_pn128: Previous PN
 *
 * Returns: 0 on success (the PN is the immediate successor),
 *	    non-zero on failure
 */
static int dp_rx_defrag_pn_check(struct dp_soc *soc, qdf_nbuf_t msdu,
				 uint64_t *cur_pn128, uint64_t *prev_pn128)
{
	int out_of_order = 0;

	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(msdu), cur_pn128);

	if (cur_pn128[1] == prev_pn128[1])
		out_of_order = (cur_pn128[0] - prev_pn128[0] != 1);
	else
		out_of_order = (cur_pn128[1] - prev_pn128[1] != 1);

	return out_of_order;
}
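
/*
 * Worked example: with prev_pn128 = {0x1000, 0x0}, a fragment carrying
 * PN {0x1001, 0x0} passes (same upper word, lower word advanced by
 * exactly 1), while {0x1003, 0x0} or a replayed {0x1000, 0x0} is
 * flagged out of order. When the upper 64-bit words differ (e.g. on
 * lower-word wrap-around), only the upper words are compared and must
 * differ by exactly 1.
 */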

/*
 * dp_rx_construct_fraglist(): Construct a nbuf fraglist
 * @txrx_peer: Pointer to the txrx peer
 * @tid: Traffic Identifier (TID)
 * @head: Pointer to list of fragments
 * @hdrsize: Size of the header to be pulled
 *
 * Construct a nbuf fraglist
 *
 * Returns: QDF_STATUS
 */
static int
dp_rx_construct_fraglist(struct dp_txrx_peer *txrx_peer, int tid,
			 qdf_nbuf_t head,
			 uint16_t hdrsize)
{
	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
	qdf_nbuf_t msdu = qdf_nbuf_next(head);
	qdf_nbuf_t rx_nbuf = msdu;
	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
	uint32_t len = 0;
	uint64_t cur_pn128[2] = {0, 0}, prev_pn128[2];
	int out_of_order = 0;
	int index;
	int needs_pn_check = 0;
	enum cdp_sec_type sec_type;

	prev_pn128[0] = rx_tid->pn128[0];
	prev_pn128[1] = rx_tid->pn128[1];

	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu) ? dp_sec_mcast :
				dp_sec_ucast;
	sec_type = txrx_peer->security[index].sec_type;

	if (!(sec_type == cdp_sec_type_none || sec_type == cdp_sec_type_wep128 ||
	      sec_type == cdp_sec_type_wep104 || sec_type == cdp_sec_type_wep40))
		needs_pn_check = 1;

	while (msdu) {
		if (qdf_likely(needs_pn_check))
			out_of_order = dp_rx_defrag_pn_check(soc, msdu,
							     &cur_pn128[0],
							     &prev_pn128[0]);

		if (qdf_unlikely(out_of_order)) {
			dp_info_rl("cur_pn128[0] 0x%llx cur_pn128[1] 0x%llx prev_pn128[0] 0x%llx prev_pn128[1] 0x%llx",
				   cur_pn128[0], cur_pn128[1],
				   prev_pn128[0], prev_pn128[1]);
			return QDF_STATUS_E_FAILURE;
		}

		prev_pn128[0] = cur_pn128[0];
		prev_pn128[1] = cur_pn128[1];

		/*
		 * Broadcast and multicast frames should never be fragmented.
		 * Iterate through all msdus and drop the fragments if even
		 * one of them has a mcast/bcast destination address.
		 */
		if (hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "Dropping multicast/broadcast fragments");
			return QDF_STATUS_E_FAILURE;
		}

		dp_rx_frag_pull_hdr(soc, msdu, hdrsize);
		len += qdf_nbuf_len(msdu);
		msdu = qdf_nbuf_next(msdu);
	}

	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
	qdf_nbuf_set_next(head, NULL);
	qdf_nbuf_set_is_frag(head, 1);

	dp_debug("head len %d ext len %d data len %d",
		 (uint32_t)qdf_nbuf_len(head),
		 (uint32_t)qdf_nbuf_len(rx_nbuf),
		 (uint32_t)(head->data_len));

	return QDF_STATUS_SUCCESS;
}
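
/*
 * Resulting layout: the head nbuf keeps its RX TLVs and (already
 * translated) header in its linear data, while every subsequent
 * fragment has its TLVs and 802.11 header pulled and is moved onto the
 * head's ext (frag) list. The chain is then handled as a single MSDU
 * whose total length is head len + ext len.
 */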

/**
 * dp_rx_defrag_err() - rx defrag error handler
 * @vdev: handle to vdev object
 * @nbuf: rx defrag frame buffer
 *
 * This function handles the rx error and sends the MIC error
 * notification to the upper layer
 *
 * Return: None
 */
static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct ol_if_ops *tops = NULL;
	struct dp_pdev *pdev = vdev->pdev;
	int rx_desc_len = pdev->soc->rx_pkt_tlv_size;
	uint8_t *orig_hdr;
	struct ieee80211_frame *wh;
	struct cdp_rx_mic_err_info mic_failure_info;

	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	wh = (struct ieee80211_frame *)orig_hdr;

	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr1);
	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr2);
	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(wh->i_addr1);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = (uint8_t *)wh;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);
}
/*
 * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
 * @soc: dp soc handle
 * @txrx_peer: txrx_peer handle
 * @tid: Traffic Identifier (TID)
 * @nbuf: Pointer to the fragment buffer
 * @hdrsize: Size of headers
 *
 * Transcap the fragment from 802.11 to 802.3
 *
 * Returns: None
 */
static void
dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
			   int tid, qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	struct llc_snap_hdr_t *llchdr;
	struct ethernet_hdr_t *eth_hdr;
	uint8_t ether_type[2];
	uint16_t fc = 0;
	union dp_align_mac_addr mac_addr;
	uint8_t *rx_desc_info = qdf_mem_malloc(soc->rx_pkt_tlv_size);
	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];

	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), rx_tid->pn128);

	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));

	if (!rx_desc_info) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: Memory alloc failed!", __func__);
		QDF_ASSERT(0);
		return;
	}

	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), soc->rx_pkt_tlv_size);

	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
					soc->rx_pkt_tlv_size + hdrsize);
	qdf_mem_copy(ether_type, llchdr->ethertype, 2);

	qdf_nbuf_pull_head(nbuf, (soc->rx_pkt_tlv_size + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));

	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));

	if (hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						rx_desc_info))
		fc = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_desc_info);

	dp_debug("Frame control type: 0x%x", fc);

	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_TODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;

	case IEEE80211_FC1_DIR_DSTODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr4(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"%s: Unknown frame control type: 0x%x", __func__, fc);
	}

	qdf_mem_copy(eth_hdr->ethertype, ether_type,
			sizeof(ether_type));

	qdf_nbuf_push_head(nbuf, soc->rx_pkt_tlv_size);
	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, soc->rx_pkt_tlv_size);
	qdf_mem_free(rx_desc_info);
}
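
/*
 * Worked example of the pull above: for a 26-byte 802.11 QoS header and
 * an 8-byte LLC/SNAP header collapsing into a 14-byte Ethernet II
 * header, the pull is rx_pkt_tlv_size + 26 + 8 - 14 bytes, which leaves
 * exactly sizeof(struct ethernet_hdr_t) of the old headers in place to
 * be overwritten with DA/SA/ethertype. The saved TLVs are then pushed
 * back in front of the new Ethernet header.
 */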

#ifdef RX_DEFRAG_DO_NOT_REINJECT
/*
 * dp_rx_defrag_deliver(): Deliver the defragmented packet to the stack
 * @txrx_peer: Pointer to the peer
 * @tid: Traffic Identifier (TID)
 * @head: nbuf to be delivered
 *
 * Returns: None
 */
static inline void dp_rx_defrag_deliver(struct dp_txrx_peer *txrx_peer,
					unsigned int tid,
					qdf_nbuf_t head)
{
	struct dp_vdev *vdev = txrx_peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	uint8_t *rx_tlv_hdr;

	rx_tlv_hdr = qdf_nbuf_data(head);

	QDF_NBUF_CB_RX_VDEV_ID(head) = vdev->vdev_id;
	qdf_nbuf_set_tid_val(head, tid);
	qdf_nbuf_pull_head(head, soc->rx_pkt_tlv_size);

	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail,
			  head);
	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, deliver_list_head,
			       deliver_list_tail);
}

/*
 * dp_rx_defrag_reo_reinject(): Deliver instead of reinjecting into REO
 * @txrx_peer: Pointer to the peer
 * @tid: Traffic Identifier (TID)
 * @head: Buffer to be delivered
 *
 * In this configuration the reassembled frame is delivered directly to
 * the stack instead of being reinjected into REO
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
					    unsigned int tid, qdf_nbuf_t head)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;

	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;

	dp_rx_defrag_deliver(txrx_peer, tid, head);
	rx_reorder_array_elem->head = NULL;
	rx_reorder_array_elem->tail = NULL;
	dp_rx_return_head_frag_desc(txrx_peer, tid);

	return QDF_STATUS_SUCCESS;
}
#else
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_reinject_ring_record_entry() - Record reinject ring history
 * @soc: Datapath soc structure
 * @paddr: paddr of the buffer reinjected to SW2REO ring
 * @sw_cookie: SW cookie of the buffer reinjected to SW2REO ring
 * @rbm: Return buffer manager of the buffer reinjected to SW2REO ring
 *
 * Returns: None
 */
static inline void
dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
				 uint32_t sw_cookie, uint8_t rbm)
{
	struct dp_buf_info_record *record;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_reinject_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->rx_reinject_ring_history->index,
					DP_RX_REINJECT_HIST_MAX);

	/* No NULL check needed for record since it is an array */
	record = &soc->rx_reinject_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = paddr;
	record->hbi.sw_cookie = sw_cookie;
	record->hbi.rbm = rbm;
}
#else
static inline void
dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
				 uint32_t sw_cookie, uint8_t rbm)
{
}
#endif

/*
 * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
 * @txrx_peer: Pointer to the txrx_peer
 * @tid: Traffic Identifier (TID)
 * @head: Buffer to be reinjected back
 *
 * Reinject the fragment chain back into REO
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
					    unsigned int tid, qdf_nbuf_t head)
{
	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct hal_buf_info buf_info;
	struct hal_buf_info temp_buf_info;
	void *link_desc_va;
	void *msdu0, *msdu_desc_info;
	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
	void *dst_mpdu_desc_info;
	uint64_t dst_qdesc_addr;
	qdf_dma_addr_t paddr;
	uint32_t nbuf_len, seq_no, dst_ind;
	uint32_t *mpdu_wrd;
	uint32_t ret, cookie;
	hal_ring_desc_t dst_ring_desc =
		txrx_peer->rx_tid[tid].dst_ring_desc;
	hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng;
	struct dp_rx_desc *rx_desc = txrx_peer->rx_tid[tid].head_frag_desc;
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
						txrx_peer->rx_tid[tid].array;
	qdf_nbuf_t nbuf_head;
	struct rx_desc_pool *rx_desc_pool = NULL;
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(dst_ring_desc);
	uint8_t rx_defrag_rbm_id = dp_rx_get_defrag_bm_id(soc);

	/* do duplicate link desc address check */
	dp_rx_link_desc_refill_duplicate_check(
				soc,
				&soc->last_op_info.reo_reinject_link_desc,
				buf_addr_info);

	nbuf_head = dp_ipa_handle_rx_reo_reinject(soc, head);
	if (qdf_unlikely(!nbuf_head)) {
		dp_err_rl("IPA RX REO reinject failed");
		return QDF_STATUS_E_FAILURE;
	}

	/* update the newly allocated skb in case IPA is enabled */
	if (nbuf_head != head) {
		head = nbuf_head;
		rx_desc->nbuf = head;
		rx_reorder_array_elem->head = head;
	}

	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
	if (!ent_ring_desc) {
		dp_err_rl("HAL src ring next entry NULL");
		return QDF_STATUS_E_FAILURE;
	}

	hal_rx_reo_buf_paddr_get(soc->hal_soc, dst_ring_desc, &buf_info);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)dst_ring_desc,
				  &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	qdf_assert_always(link_desc_va);

	msdu0 = hal_rx_msdu0_buffer_addr_lsb(soc->hal_soc, link_desc_va);
	nbuf_len = qdf_nbuf_len(head) - soc->rx_pkt_tlv_size;

	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
			UNI_DESC_BUF_TYPE_RX_MSDU_LINK);

	/* msdu reconfig */
	msdu_desc_info = hal_rx_msdu_desc_info_ptr_get(soc->hal_soc, msdu0);

	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);

	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));

	hal_msdu_desc_info_set(soc->hal_soc, msdu_desc_info, dst_ind, nbuf_len);

	/* change RX TLVs */
	hal_rx_tlv_msdu_len_set(soc->hal_soc, qdf_nbuf_data(head), nbuf_len);

	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)msdu0,
				  &temp_buf_info);

	cookie = temp_buf_info.sw_cookie;
	rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];

	/* map the nbuf before reinjecting it into HW */
	ret = qdf_nbuf_map_nbytes_single(soc->osdev, head,
					 QDF_DMA_FROM_DEVICE,
					 rx_desc_pool->buf_size);
	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: nbuf map failed!", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	dp_ipa_handle_rx_buf_smmu_mapping(soc, head,
					  rx_desc_pool->buf_size,
					  true);

	/*
	 * As part of the rx frag handler the buffer was unmapped and
	 * rx_desc->unmapped was set to 1. So for the defrag reinject
	 * frame, reset it back to 0.
	 */
	rx_desc->unmapped = 0;

	paddr = qdf_nbuf_get_frag_paddr(head, 0);

	ret = dp_check_paddr(soc, &head, &paddr, rx_desc_pool);

	if (ret == QDF_STATUS_E_FAILURE) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: x86 check failed!", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	hal_rxdma_buff_addr_info_set(soc->hal_soc, msdu0, paddr, cookie,
				     rx_defrag_rbm_id);

	/* Now fill the REO entrance ring */
	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"HAL RING Access For REO entrance SRNG Failed: %pK",
		hal_srng);

		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_reinject_ring_record_entry(soc, paddr, cookie,
					 rx_defrag_rbm_id);
	paddr = (uint64_t)buf_info.paddr;
	/* buf addr */
	hal_rxdma_buff_addr_info_set(soc->hal_soc, ent_ring_desc, paddr,
				     buf_info.sw_cookie,
				     soc->idle_link_bm_id);
	/* mpdu desc info */
	ent_mpdu_desc_info = hal_ent_mpdu_desc_info(soc->hal_soc,
						    ent_ring_desc);
	dst_mpdu_desc_info = hal_dst_mpdu_desc_info(soc->hal_soc,
						    dst_ring_desc);

	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
				sizeof(struct rx_mpdu_desc_info));
	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));

	mpdu_wrd = (uint32_t *)dst_mpdu_desc_info;
	seq_no = hal_rx_get_rx_sequence(soc->hal_soc, qdf_nbuf_data(head));

	hal_mpdu_desc_info_set(soc->hal_soc, ent_mpdu_desc_info, seq_no);
	/* qdesc addr */
	ent_qdesc_addr = hal_get_reo_ent_desc_qdesc_addr(soc->hal_soc,
						(uint8_t *)ent_ring_desc);

	dst_qdesc_addr = hal_rx_get_qdesc_addr(soc->hal_soc,
					       (uint8_t *)dst_ring_desc,
					       qdf_nbuf_data(head));

	qdf_mem_copy(ent_qdesc_addr, &dst_qdesc_addr, 5);

	hal_set_reo_ent_desc_reo_dest_ind(soc->hal_soc,
					  (uint8_t *)ent_ring_desc, dst_ind);

	hal_srng_access_end(soc->hal_soc, hal_srng);

	DP_STATS_INC(soc, rx.reo_reinject, 1);
	dp_debug("reinjection done!");
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * dp_rx_defrag_gcmp_demic(): Remove MIC information from GCMP fragment
 * @soc: Datapath soc structure
 * @nbuf: Pointer to the fragment buffer
 * @hdrlen: 802.11 header length
 *
 * Remove MIC information from GCMP fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_gcmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	/* start of the 802.11 header */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);

	/*
	 * GCMP header is located after the 802.11 header and the EXTIV
	 * field should always be set to 1 for the GCMP protocol.
	 */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	qdf_nbuf_trim_tail(nbuf, dp_f_gcmp.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag(): Defragment the fragment chain
 * @txrx_peer: Pointer to the peer
 * @tid: Traffic Identifier (TID)
 * @frag_list_head: Pointer to head list
 * @frag_list_tail: Pointer to tail list
 *
 * Defragment the fragment chain
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag(struct dp_txrx_peer *txrx_peer, unsigned int tid,
			       qdf_nbuf_t frag_list_head,
			       qdf_nbuf_t frag_list_tail)
{
	qdf_nbuf_t tmp_next, prev;
	qdf_nbuf_t cur = frag_list_head, msdu;
	uint32_t index, tkip_demic = 0;
	uint16_t hdr_space;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
	struct dp_vdev *vdev = txrx_peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint8_t status = 0;

	if (!cur)
		return QDF_STATUS_E_DEFRAG_ERROR;

	hdr_space = dp_rx_defrag_hdrsize(soc, cur);
	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, cur) ?
		dp_sec_mcast : dp_sec_ucast;

	/* Remove FCS from all fragments */
	while (cur) {
		tmp_next = qdf_nbuf_next(cur);
		qdf_nbuf_set_next(cur, NULL);
		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		prev = cur;
		qdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list_head;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: index %d Security type: %d", __func__,
		  index, txrx_peer->security[index].sec_type);

	switch (txrx_peer->security[index].sec_type) {
	case cdp_sec_type_tkip:
		tkip_demic = 1;
		fallthrough;
	case cdp_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_tkip_decap(soc, cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: TKIP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment the header to be stripped later */
		hdr_space += dp_f_tkip.ic_header;
		break;

	case cdp_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_ccmp_demic(soc, cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: CCMP demic failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			if (dp_rx_defrag_ccmp_decap(soc, cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: CCMP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment the header to be stripped later */
		hdr_space += dp_f_ccmp.ic_header;
		break;

	case cdp_sec_type_wep40:
	case cdp_sec_type_wep104:
	case cdp_sec_type_wep128:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_wep_decap(soc, cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: WEP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment the header to be stripped later */
		hdr_space += dp_f_wep.ic_header;
		break;
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_gcmp_demic(soc, cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: GCMP demic failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		hdr_space += dp_f_gcmp.ic_header;
		break;
	default:
		break;
	}

	if (tkip_demic) {
		msdu = frag_list_head;
		qdf_mem_copy(key,
			     &txrx_peer->security[index].michael_key[0],
			     IEEE80211_WEP_MICLEN);
		status = dp_rx_defrag_tkip_demic(soc, key, msdu,
						 soc->rx_pkt_tlv_size +
						 hdr_space);

		if (status) {
			dp_rx_defrag_err(vdev, frag_list_head);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: TKIP demic failed status %d",
				   __func__, status);

			return QDF_STATUS_E_DEFRAG_ERROR;
		}
	}

	/* Convert the header to an 802.3 header */
	dp_rx_defrag_nwifi_to_8023(soc, txrx_peer, tid, frag_list_head,
				   hdr_space);
	if (qdf_nbuf_next(frag_list_head)) {
		if (dp_rx_construct_fraglist(txrx_peer, tid, frag_list_head,
					     hdr_space))
			return QDF_STATUS_E_DEFRAG_ERROR;
	}

	return QDF_STATUS_SUCCESS;
}
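
/*
 * Processing order recap for a TKIP chain, as implemented above:
 * 1) trim the FCS from every fragment, 2) validate the ExtIV and trim
 * the per-fragment ICV (decap), 3) verify and strip the Michael MIC
 * that spans the reassembled data (demic), 4) translate the head
 * fragment's 802.11/LLC headers to 802.3, and 5) chain the remaining
 * fragments onto the head as an ext list. The crypto IV/ExtIV bytes
 * themselves are skipped via the hdr_space += ic_header adjustments.
 */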

/*
 * dp_rx_defrag_cleanup(): Clean up activities
 * @txrx_peer: Pointer to the peer
 * @tid: Traffic Identifier (TID)
 *
 * Returns: None
 */
void dp_rx_defrag_cleanup(struct dp_txrx_peer *txrx_peer, unsigned int tid)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
				txrx_peer->rx_tid[tid].array;

	if (rx_reorder_array_elem) {
		/* Free up nbufs */
		dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
	} else {
		dp_info("Cleanup self peer %pK and TID %u",
			txrx_peer, tid);
	}

	/* Free up saved ring descriptors */
	dp_rx_clear_saved_desc_info(txrx_peer, tid);

	txrx_peer->rx_tid[tid].defrag_timeout_ms = 0;
	txrx_peer->rx_tid[tid].curr_frag_num = 0;
	txrx_peer->rx_tid[tid].curr_seq_num = 0;
}

/*
 * dp_rx_defrag_save_info_from_ring_desc(): Save info from the REO ring
 *					    descriptor
 * @ring_desc: Pointer to the dst ring descriptor
 * @rx_desc: Pointer to the rx descriptor of the head fragment
 * @txrx_peer: Pointer to the peer
 * @tid: Traffic Identifier (TID)
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_save_info_from_ring_desc(hal_ring_desc_t ring_desc,
				      struct dp_rx_desc *rx_desc,
				      struct dp_txrx_peer *txrx_peer,
				      unsigned int tid)
{
	void *dst_ring_desc = qdf_mem_malloc(
			sizeof(struct reo_destination_ring));

	if (!dst_ring_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: Memory alloc failed!", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(dst_ring_desc, ring_desc,
		       sizeof(struct reo_destination_ring));

	txrx_peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
	txrx_peer->rx_tid[tid].head_frag_desc = rx_desc;

	return QDF_STATUS_SUCCESS;
}
1674 
1675 /*
1676  * dp_rx_defrag_store_fragment(): Store incoming fragments
1677  * @soc: Pointer to the SOC data structure
1678  * @ring_desc: Pointer to the ring descriptor
1679  * @mpdu_desc_info: MPDU descriptor info
1680  * @tid: Traffic Identifier
1681  * @rx_desc: Pointer to rx descriptor
1682  * @rx_bfs: Number of bfs consumed
1683  *
1684  * Returns: QDF_STATUS
1685  */
1686 static QDF_STATUS
1687 dp_rx_defrag_store_fragment(struct dp_soc *soc,
1688 			    hal_ring_desc_t ring_desc,
1689 			    union dp_rx_desc_list_elem_t **head,
1690 			    union dp_rx_desc_list_elem_t **tail,
1691 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1692 			    unsigned int tid, struct dp_rx_desc *rx_desc,
1693 			    uint32_t *rx_bfs)
1694 {
1695 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1696 	struct dp_pdev *pdev;
1697 	struct dp_txrx_peer *txrx_peer = NULL;
1698 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1699 	uint16_t peer_id;
1700 	uint8_t fragno, more_frag, all_frag_present = 0;
1701 	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
1702 	QDF_STATUS status;
1703 	struct dp_rx_tid_defrag *rx_tid;
1704 	uint8_t mpdu_sequence_control_valid;
1705 	uint8_t mpdu_frame_control_valid;
1706 	qdf_nbuf_t frag = rx_desc->nbuf;
1707 	uint32_t msdu_len;
1708 
1709 	if (qdf_nbuf_len(frag) > 0) {
1710 		dp_info("Dropping unexpected packet with skb_len: %d,"
1711 			"data len: %d, cookie: %d",
1712 			(uint32_t)qdf_nbuf_len(frag), frag->data_len,
1713 			rx_desc->cookie);
1714 		DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1);
1715 		goto discard_frag;
1716 	}
1717 
1718 	if (dp_rx_buffer_pool_refill(soc, frag, rx_desc->pool_id)) {
1719 		/* fragment queued back to the pool, free the link desc */
1720 		goto err_free_desc;
1721 	}
1722 
1723 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
1724 						  rx_desc->rx_buf_start);
1725 
1726 	qdf_nbuf_set_pktlen(frag, (msdu_len + soc->rx_pkt_tlv_size));
1727 	qdf_nbuf_append_ext_list(frag, NULL, 0);
1728 
1729 	/* Check if the packet is from a valid peer */
1730 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1731 					       mpdu_desc_info->peer_meta_data);
1732 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &txrx_ref_handle,
1733 					       DP_MOD_ID_RX_ERR);
1734 
1735 	if (!txrx_peer) {
1736 		/* We should not receive anything from unknown peer
1737 		 * however, that might happen while we are in the monitor mode.
1738 		 * We don't need to handle that here
1739 		 */
1740 		dp_info_rl("Unknown peer with peer_id %d, dropping fragment",
1741 			   peer_id);
1742 		DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1);
1743 		goto discard_frag;
1744 	}
1745 
1746 	if (tid >= DP_MAX_TIDS) {
1747 		dp_info("TID out of bounds: %d", tid);
1748 		qdf_assert_always(0);
1749 		goto discard_frag;
1750 	}
1751 
1752 	mpdu_sequence_control_valid =
1753 		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
1754 						       rx_desc->rx_buf_start);
1755 
1756 	/* Invalid MPDU sequence control field, MPDU is of no use */
1757 	if (!mpdu_sequence_control_valid) {
1758 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1759 			"Invalid MPDU seq control field, dropping MPDU");
1760 
1761 		qdf_assert(0);
1762 		goto discard_frag;
1763 	}
1764 
1765 	mpdu_frame_control_valid =
1766 		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
1767 						    rx_desc->rx_buf_start);
1768 
1769 	/* Invalid frame control field */
1770 	if (!mpdu_frame_control_valid) {
1771 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1772 			"Invalid frame control field, dropping MPDU");
1773 
1774 		qdf_assert(0);
1775 		goto discard_frag;
1776 	}
1777 
1778 	/* Get the More Fragments bit of the current MPDU */
1779 	more_frag = dp_rx_frag_get_more_frag_bit(soc, rx_desc->rx_buf_start);
1780 
1781 	/* HW does not populate the fragment number as of now; it has to
1782 	 * be read from the 802.11 header (sequence control, bits 0-3)
1783 	 */
1784 	fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc->rx_buf_start);
1785 
1786 	pdev = txrx_peer->vdev->pdev;
1787 	rx_tid = &txrx_peer->rx_tid[tid];
1788 
1789 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, frag,
1790 			      QDF_TX_RX_STATUS_OK, false);
1791 
1792 	qdf_spin_lock_bh(&rx_tid->defrag_tid_lock);
1793 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1794 	if (!rx_reorder_array_elem) {
1795 		dp_err_rl("Rcvd fragmented pkt before TID setup for peer %pK",
1796 			  txrx_peer);
1797 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1798 		goto discard_frag;
1799 	}
1800 
1801 	/*
1802 	 * !more_frag: no more fragments to be delivered
1803 	 * !fragno: packet is not fragmented
1804 	 * !rx_reorder_array_elem->head: no saved fragments so far
1805 	 */
1806 	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
1807 		/* We should not get into this situation here.
1808 		 * It means an unfragmented packet with the fragment flag
1809 		 * set was delivered over the REO exception ring, whereas
1810 		 * such packets typically follow the normal rx path.
1811 		 */
1812 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1813 			"Rcvd unfragmented pkt on REO Err srng, dropping");
1814 
1815 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1816 		qdf_assert(0);
1817 		goto discard_frag;
1818 	}
1819 
1820 	/* Check if the fragment is for the same sequence or a different one */
1821 	dp_debug("rx_tid %d", tid);
1822 	if (rx_reorder_array_elem->head) {
1823 		dp_debug("rxseq %d", rxseq);
1824 		if (rxseq != rx_tid->curr_seq_num) {
1826 			dp_debug("mismatch cur_seq %d rxseq %d",
1827 				 rx_tid->curr_seq_num, rxseq);
1828 			/* Drop stored fragments if out of sequence
1829 			 * fragment is received
1830 			 */
1831 			dp_rx_reorder_flush_frag(txrx_peer, tid);
1832 
1833 			DP_STATS_INC(soc, rx.rx_frag_oor, 1);
1834 
1835 			dp_debug("cur rxseq %d", rxseq);
1836 			/*
1837 			 * The sequence number for this fragment becomes the
1838 			 * new sequence number to be processed
1839 			 */
1840 			rx_tid->curr_seq_num = rxseq;
1841 		}
1842 	} else {
1843 		/* Check if we are processing the first fragment;
1844 		 * if it is not the first, discard it.
1845 		 */
1846 		if (fragno) {
1847 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1848 			goto discard_frag;
1849 		}
1850 		dp_debug("cur rxseq %d", rxseq);
1851 		/* Start of a new sequence */
1852 		dp_rx_defrag_cleanup(txrx_peer, tid);
1853 		rx_tid->curr_seq_num = rxseq;
1854 		/* also store the PN */
1855 	}
1856 
1857 	/*
1858 	 * If the earlier sequence was dropped, this will be the fresh start.
1859 	 * Else, continue with the next fragment of the current sequence.
1860 	 */
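	/*
	 * dp_rx_defrag_fraglist_insert() keeps the per-TID fragment list
	 * ordered by fragment number and reports, via all_frag_present,
	 * when the list is gap-free and the final fragment (More
	 * Fragments bit clear) has been received.
	 */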
1861 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
1862 					      &rx_reorder_array_elem->head,
1863 					      &rx_reorder_array_elem->tail,
1864 					      frag, &all_frag_present);
1865 
1866 	/*
1867 	 * Currently, we can have only 6 MSDUs per MPDU; if the current
1868 	 * packet sequence has more than 6 MSDUs for some reason, we will
1869 	 * have to use the next MSDU link descriptor and chain them together
1870 	 * before reinjection.
1871 	 * ring_desc is validated in dp_rx_err_process.
1872 	 */
1873 	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
1874 			(rx_reorder_array_elem->head == frag)) {
1875 
1876 		status = dp_rx_defrag_save_info_from_ring_desc(ring_desc,
1877 					rx_desc, txrx_peer, tid);
1878 
1879 		if (status != QDF_STATUS_SUCCESS) {
1880 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1881 				"%s: Unable to store ring desc !", __func__);
1882 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1883 			goto discard_frag;
1884 		}
1885 	} else {
1886 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1887 		(*rx_bfs)++;
1888 
1889 		/* Return the non-head link desc */
1890 		if (dp_rx_link_desc_return(soc, ring_desc,
1891 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1892 		    QDF_STATUS_SUCCESS)
1893 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1894 				  "%s: Failed to return link desc", __func__);
1895 
1896 	}
1897 
1898 	if (pdev->soc->rx.flags.defrag_timeout_check)
1899 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
1900 
1901 	/* More fragments are yet to arrive for this sequence number */
1902 	if (!all_frag_present) {
1903 		uint32_t now_ms =
1904 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1905 
1906 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
1907 			now_ms + pdev->soc->rx.defrag.timeout_ms;
1908 
1909 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
1910 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
1911 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1912 
1913 		return QDF_STATUS_SUCCESS;
1914 	}
1915 
1916 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1917 		  "All fragments received for sequence: %d", rxseq);
1918 
1919 	/* Process the fragments */
1920 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
1921 			      rx_reorder_array_elem->tail);
1922 	if (QDF_IS_STATUS_ERROR(status)) {
1923 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1924 			"Fragment processing failed");
1925 
1926 		dp_rx_add_to_free_desc_list(head, tail,
1927 				txrx_peer->rx_tid[tid].head_frag_desc);
1928 		(*rx_bfs)++;
1929 
1930 		if (dp_rx_link_desc_return(soc,
1931 					txrx_peer->rx_tid[tid].dst_ring_desc,
1932 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1933 				QDF_STATUS_SUCCESS)
1934 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1935 					"%s: Failed to return link desc",
1936 					__func__);
1937 		dp_rx_defrag_cleanup(txrx_peer, tid);
1938 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1939 		goto end;
1940 	}
1941 
1942 	/* Re-inject the fragments back to REO for further processing */
1943 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
1944 					   rx_reorder_array_elem->head);
1945 	if (QDF_IS_STATUS_SUCCESS(status)) {
1946 		rx_reorder_array_elem->head = NULL;
1947 		rx_reorder_array_elem->tail = NULL;
1948 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1949 			  "Fragmented sequence successfully reinjected");
1950 	} else {
1951 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1952 			  "Fragmented sequence reinjection failed");
1953 		dp_rx_return_head_frag_desc(txrx_peer, tid);
1954 	}
1955 
1956 	dp_rx_defrag_cleanup(txrx_peer, tid);
1957 	qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1958 
1959 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
1960 
1961 	return QDF_STATUS_SUCCESS;
1962 
1963 discard_frag:
1964 	dp_rx_nbuf_free(frag);
1965 err_free_desc:
1966 	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1967 	if (dp_rx_link_desc_return(soc, ring_desc,
1968 				   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1969 	    QDF_STATUS_SUCCESS)
1970 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1971 			  "%s: Failed to return link desc", __func__);
1972 	(*rx_bfs)++;
1973 
1974 end:
1975 	if (txrx_peer)
1976 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
1977 
1978 	DP_STATS_INC(soc, rx.rx_frag_err, 1);
1979 	return QDF_STATUS_E_DEFRAG_ERROR;
1980 }
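
/*
 * Illustrative sketch (compiled out unless the hypothetical
 * DP_RX_DEFRAG_DOC_SKETCH guard is defined): since HW does not provide
 * the fragment number, it has to be parsed out of the 802.11 header, as
 * dp_rx_frag_get_mpdu_frag_number() does above. The struct layout and
 * helper names below are simplified assumptions for illustration, not
 * the HAL definitions used by this file.
 */
#ifdef DP_RX_DEFRAG_DOC_SKETCH
struct sketch_ieee80211_hdr {
	uint16_t frame_control;		/* little-endian on air */
	uint16_t duration_id;
	uint8_t addr1[QDF_MAC_ADDR_SIZE];
	uint8_t addr2[QDF_MAC_ADDR_SIZE];
	uint8_t addr3[QDF_MAC_ADDR_SIZE];
	uint16_t seq_ctrl;		/* bits 0-3: frag no, 4-15: seq no */
};

/* Fragment number: lowest 4 bits of the sequence control field */
static inline uint8_t sketch_get_fragno(const struct sketch_ieee80211_hdr *hdr)
{
	return qdf_le16_to_cpu(hdr->seq_ctrl) & 0x0F;
}

/* More Fragments flag: bit 10 of the frame control field */
static inline bool sketch_get_more_frag(const struct sketch_ieee80211_hdr *hdr)
{
	return !!(qdf_le16_to_cpu(hdr->frame_control) & 0x0400);
}
#endif /* DP_RX_DEFRAG_DOC_SKETCH */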
1981 
1982 /**
1983  * dp_rx_frag_handle() - Handles fragmented Rx frames
1984  *
1985  * @soc: core txrx main context
1986  * @ring_desc: opaque pointer to the REO error ring descriptor
1987  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
1988  * @rx_desc: rx descriptor of the fragment
1989  * @mac_id: pointer filled with the lmac id of the rx buffer pool
1990  * @quota: No. of units (packets) that can be serviced in one shot.
1991  *
1992  * This function implements RX 802.11 fragmentation handling.
1993  * The handling is mostly the same as legacy fragmentation handling.
1994  * If required, this function can re-inject the frames back to the
1995  * REO ring (with proper settings to bypass the fragmentation check
1996  * but use duplicate detection / re-ordering) and route these frames
1997  * to a different core.
1998  *
1999  * Return: uint32_t: No. of elements processed
2000  */
2001 uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
2002 			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
2003 			   struct dp_rx_desc *rx_desc,
2004 			   uint8_t *mac_id,
2005 			   uint32_t quota)
2006 {
2007 	uint32_t rx_bufs_used = 0;
2008 	qdf_nbuf_t msdu = NULL;
2009 	uint32_t tid;
2010 	uint32_t rx_bfs = 0;
2011 	struct dp_pdev *pdev;
2012 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2013 	struct rx_desc_pool *rx_desc_pool;
2014 
2015 	qdf_assert(soc);
2016 	qdf_assert(mpdu_desc_info);
2017 	qdf_assert(rx_desc);
2018 
2019 	dp_debug("Number of MSDUs to process, num_msdus: %d",
2020 		 mpdu_desc_info->msdu_count);
2021 
2023 	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
2024 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2025 			"No MSDUs to process");
2026 		return rx_bufs_used;
2027 	}
2028 
2029 	/* all buffers in the MSDU link belong to the same pdev */
2030 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
2031 	if (!pdev) {
2032 		dp_nofl_debug("pdev is null for pool_id = %d",
2033 			      rx_desc->pool_id);
2034 		return rx_bufs_used;
2035 	}
2036 
2037 	*mac_id = rx_desc->pool_id;
2038 
2039 	msdu = rx_desc->nbuf;
2040 
2041 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2042 
2043 	if (rx_desc->unmapped)
2044 		return rx_bufs_used;
2045 
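	/*
	 * Unmap the rx buffer before the CPU parses its TLVs below; the
	 * IPA SMMU mapping lock serializes this unmap against IPA
	 * map/unmap operations on the same rx buffer pool.
	 */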
2046 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
2047 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
2048 	rx_desc->unmapped = 1;
2049 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2050 
2051 	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);
2052 
2053 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start);
2054 
2055 	/* Process fragment-by-fragment */
2056 	status = dp_rx_defrag_store_fragment(soc, ring_desc,
2057 					     &pdev->free_list_head,
2058 					     &pdev->free_list_tail,
2059 					     mpdu_desc_info,
2060 					     tid, rx_desc, &rx_bfs);
2061 
2062 	if (rx_bfs)
2063 		rx_bufs_used += rx_bfs;
2064 
2065 	if (!QDF_IS_STATUS_SUCCESS(status))
2066 		dp_info_rl("Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
2067 			   mpdu_desc_info->mpdu_seq,
2068 			   mpdu_desc_info->msdu_count,
2069 			   mpdu_desc_info->mpdu_flags);
2070 
2071 	return rx_bufs_used;
2072 }
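
/*
 * Illustrative sketch (compiled out unless the hypothetical
 * DP_RX_DEFRAG_DOC_SKETCH guard is defined): the waitlist timeout
 * arithmetic used when a fragment sequence is still incomplete. The TID
 * stores an absolute expiry time in ms, which the waitlist flush logic
 * elsewhere in this file can compare against the current time. These
 * helpers are assumptions for illustration, not driver APIs.
 */
#ifdef DP_RX_DEFRAG_DOC_SKETCH
/* Absolute deadline, mirroring how defrag_timeout_ms is armed above */
static inline uint32_t sketch_defrag_deadline_ms(uint32_t timeout_ms)
{
	return qdf_system_ticks_to_msecs(qdf_system_ticks()) + timeout_ms;
}

/* True once the armed deadline has passed */
static inline bool sketch_defrag_expired(uint32_t defrag_timeout_ms)
{
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

	return now_ms >= defrag_timeout_ms;
}
#endif /* DP_RX_DEFRAG_DOC_SKETCH */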
2073 
2074 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
2075 				      struct dp_txrx_peer *txrx_peer,
2076 				      uint16_t tid,
2077 				      uint16_t rxseq, qdf_nbuf_t nbuf)
2078 {
2079 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
2080 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
2081 	uint8_t all_frag_present;
2082 	uint32_t msdu_len;
2083 	QDF_STATUS status;
2084 
2085 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
2086 
2087 	/*
2088 	 * HW may fill an unexpected peer_id into the RX PKT TLV. If that
2089 	 * peer_id happens to map to a valid peer which nevertheless never
2090 	 * went through dp_peer_rx_init() (e.g. a SAP vdev self peer),
2091 	 * rx_reorder_array_elem would be accessed while uninitialized.
2092 	 */
2093 	if (!rx_reorder_array_elem) {
2094 		dp_verbose_debug(
2095 			"peer id:%d drop rx frame!",
2096 			txrx_peer->peer_id);
2097 		DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1);
2098 		dp_rx_nbuf_free(nbuf);
2099 		goto fail;
2100 	}
2101 
2102 	if (rx_reorder_array_elem->head &&
2103 	    rxseq != rx_tid->curr_seq_num) {
2104 		/* Drop stored fragments if out of sequence
2105 		 * fragment is received
2106 		 */
2107 		dp_rx_reorder_flush_frag(txrx_peer, tid);
2108 
2109 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2110 			  "%s: Out of sequence frag for TID %d Seq# %d, flushing held frags",
2111 			  __func__, tid, rxseq);
2112 		dp_rx_nbuf_free(nbuf);
2113 		goto fail;
2114 	}
2115 
2116 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2117 						  qdf_nbuf_data(nbuf));
2118 
2119 	qdf_nbuf_set_pktlen(nbuf, (msdu_len + soc->rx_pkt_tlv_size));
2120 
2121 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
2122 					      &rx_reorder_array_elem->head,
2123 					      &rx_reorder_array_elem->tail,
2124 					      nbuf, &all_frag_present);
2125 
2126 	if (QDF_IS_STATUS_ERROR(status)) {
2127 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2128 			  "%s Fragment insert failed", __func__);
2129 
2130 		goto fail;
2131 	}
2132 
2133 	if (soc->rx.flags.defrag_timeout_check)
2134 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
2135 
2136 	if (!all_frag_present) {
2137 		uint32_t now_ms =
2138 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2139 
2140 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
2141 			now_ms + soc->rx.defrag.timeout_ms;
2142 
2143 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
2144 
2145 		return QDF_STATUS_SUCCESS;
2146 	}
2147 
2148 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
2149 			      rx_reorder_array_elem->tail);
2150 
2151 	if (QDF_IS_STATUS_ERROR(status)) {
2152 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2153 			  "%s Fragment processing failed", __func__);
2154 
2155 		dp_rx_return_head_frag_desc(txrx_peer, tid);
2156 		dp_rx_defrag_cleanup(txrx_peer, tid);
2157 
2158 		goto fail;
2159 	}
2160 
2161 	/* Re-inject the fragments back to REO for further processing */
2162 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
2163 					   rx_reorder_array_elem->head);
2164 	if (QDF_IS_STATUS_SUCCESS(status)) {
2165 		rx_reorder_array_elem->head = NULL;
2166 		rx_reorder_array_elem->tail = NULL;
2167 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2168 			  "%s: Frag seq successfully reinjected",
2169 			  __func__);
2170 	} else {
2171 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2172 			  "%s: Frag seq reinjection failed", __func__);
2173 		dp_rx_return_head_frag_desc(txrx_peer, tid);
2174 	}
2175 
2176 	dp_rx_defrag_cleanup(txrx_peer, tid);
2177 	return QDF_STATUS_SUCCESS;
2178 
2179 fail:
2180 	return QDF_STATUS_E_DEFRAG_ERROR;
2181 }
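
/*
 * Illustrative sketch (compiled out unless the hypothetical
 * DP_RX_DEFRAG_DOC_SKETCH guard is defined): the sequence-tracking rule
 * shared by dp_rx_defrag_store_fragment() and dp_rx_defrag_add_last_frag():
 * once fragments are held for a TID, a fragment from a different sequence
 * number forces the held fragments to be flushed. The state struct is a
 * hypothetical reduction of struct dp_rx_tid_defrag.
 */
#ifdef DP_RX_DEFRAG_DOC_SKETCH
struct sketch_defrag_state {
	uint16_t curr_seq_num;	/* sequence currently being reassembled */
	bool have_frags;	/* any fragments currently held */
};

/* True if held fragments must be flushed before accepting @rxseq */
static inline bool sketch_must_flush(const struct sketch_defrag_state *st,
				     uint16_t rxseq)
{
	return st->have_frags && rxseq != st->curr_seq_num;
}
#endif /* DP_RX_DEFRAG_DOC_SKETCH */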
2182