/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#ifndef RX_DEFRAG_DO_NOT_REINJECT
#ifndef DP_BE_WAR
#include "li/hal_li_rx.h"
#endif
#endif
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "dp_ipa.h"
#include "dp_rx_buffer_pool.h"

const struct dp_rx_defrag_cipher dp_f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct dp_rx_defrag_cipher dp_f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct dp_rx_defrag_cipher dp_f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};

/*
 * The header and MIC lengths are the same for both
 * GCMP-128 and GCMP-256.
 */
const struct dp_rx_defrag_cipher dp_f_gcmp = {
	"AES-GCMP",
	WLAN_IEEE80211_GCMP_HEADERLEN,
	WLAN_IEEE80211_GCMP_MICLEN,
	WLAN_IEEE80211_GCMP_MICLEN,
};
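
/*
 * Field semantics, as used by the code below: ic_header is the cipher's
 * security header length inserted after the 802.11 header, ic_trailer
 * is the per-fragment trailer (e.g. the WEP/TKIP ICV) trimmed from the
 * tail, and ic_miclen is the MIC length. TKIP, for instance, carries an
 * 8-byte IV/extended-IV header, a 4-byte ICV trailer and an 8-byte
 * Michael MIC.
 */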

/*
 * dp_rx_defrag_frames_free(): Free fragment chain
 * @frames: Fragment chain
 *
 * Iterates through the fragment chain and frees each fragment
 * Returns: None
 */
static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
{
	qdf_nbuf_t next, frag = frames;

	while (frag) {
		next = qdf_nbuf_next(frag);
		qdf_nbuf_free(frag);
		frag = next;
	}
}

/*
 * dp_rx_clear_saved_desc_info(): Clears descriptor info
 * @peer: Pointer to the peer data structure
 * @tid: Traffic ID (TID)
 *
 * Frees the cached MPDU descriptor info and clears the saved head
 * fragment descriptor. The cache is maintained per peer, per TID
 *
 * Returns: None
 */
static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid)
{
	if (peer->rx_tid[tid].dst_ring_desc)
		qdf_mem_free(peer->rx_tid[tid].dst_ring_desc);

	peer->rx_tid[tid].dst_ring_desc = NULL;
	peer->rx_tid[tid].head_frag_desc = NULL;
}

static void dp_rx_return_head_frag_desc(struct dp_peer *peer,
					unsigned int tid)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	pdev = peer->vdev->pdev;
	soc = pdev->soc;

	if (peer->rx_tid[tid].head_frag_desc) {
		pool_id = peer->rx_tid[tid].head_frag_desc->pool_id;
		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_add_to_free_desc_list(&head, &tail,
					    peer->rx_tid[tid].head_frag_desc);
		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					1, &head, &tail);
	}

	if (peer->rx_tid[tid].dst_ring_desc) {
		if (dp_rx_link_desc_return(soc,
					   peer->rx_tid[tid].dst_ring_desc,
					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
		    QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to return link desc", __func__);
	}
}

/*
 * dp_rx_reorder_flush_frag(): Flush the frag list
 * @peer: Pointer to the peer data structure
 * @tid: Traffic ID (TID)
 *
 * Flush the per-TID frag list
 *
 * Returns: None
 */
void dp_rx_reorder_flush_frag(struct dp_peer *peer,
			 unsigned int tid)
{
	dp_info_rl("Flushing TID %d", tid);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s: NULL peer", __func__);
		return;
	}

	dp_rx_return_head_frag_desc(peer, tid);
	dp_rx_defrag_cleanup(peer, tid);
}

/*
 * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list
 * @soc: DP SOC
 *
 * Flush fragments of all waitlisted TIDs
 *
 * Returns: None
 */
void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
{
	struct dp_rx_tid *rx_reorder = NULL;
	struct dp_rx_tid *tmp;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
	TAILQ_HEAD(, dp_rx_tid) temp_list;

	TAILQ_INIT(&temp_list);

	dp_debug("Current time %u", now_ms);

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		uint32_t tid;

		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		if (tid >= DP_MAX_TIDS) {
			qdf_assert(0);
			continue;
		}

		TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder,
			     defrag_waitlist_elem);
		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);

		/* Move to temp list and clean-up later */
		TAILQ_INSERT_TAIL(&temp_list, rx_reorder,
				  defrag_waitlist_elem);
	}
	if (rx_reorder) {
		soc->rx.defrag.next_flush_ms =
			rx_reorder->defrag_timeout_ms;
	} else {
		soc->rx.defrag.next_flush_ms =
			now_ms + soc->rx.defrag.timeout_ms;
	}

	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
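
	/*
	 * Flush outside defrag_lock: dp_rx_reorder_flush_frag() frees
	 * descriptors and takes the per-TID lock, so expired entries were
	 * first moved to temp_list under the lock and are only cleaned up
	 * here, after it has been dropped.
	 */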

	TAILQ_FOREACH_SAFE(rx_reorder, &temp_list,
			   defrag_waitlist_elem, tmp) {
		struct dp_peer *peer, *temp_peer = NULL;

		qdf_spin_lock_bh(&rx_reorder->tid_lock);
		TAILQ_REMOVE(&temp_list, rx_reorder,
			     defrag_waitlist_elem);
		/* get address of current peer */
		peer =
			container_of(rx_reorder, struct dp_peer,
				     rx_tid[rx_reorder->tid]);
		qdf_spin_unlock_bh(&rx_reorder->tid_lock);

		temp_peer = dp_peer_get_ref_by_id(soc, peer->peer_id,
						  DP_MOD_ID_RX_ERR);
		if (temp_peer == peer) {
			qdf_spin_lock_bh(&rx_reorder->tid_lock);
			dp_rx_reorder_flush_frag(peer, rx_reorder->tid);
			qdf_spin_unlock_bh(&rx_reorder->tid_lock);
		}

		if (temp_peer)
			dp_peer_unref_delete(temp_peer, DP_MOD_ID_RX_ERR);

	}
}

/*
 * dp_rx_defrag_waitlist_add(): Update the per-SOC defrag wait list
 * @peer: Pointer to the peer data structure
 * @tid: Traffic ID (TID)
 *
 * Appends per-TID fragments to the global fragment wait list
 *
 * Returns: None
 */
static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid)
{
	struct dp_soc *psoc = peer->vdev->pdev->soc;
	struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid];

	dp_debug("Adding TID %u to waitlist for peer %pK at MAC address "QDF_MAC_ADDR_FMT,
		 tid, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	/* TODO: use LIST macros instead of TAIL macros */
	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
	if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist))
		psoc->rx.defrag.next_flush_ms = rx_reorder->defrag_timeout_ms;
	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder,
				defrag_waitlist_elem);
	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
}

/*
 * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist
 * @peer: Pointer to the peer data structure
 * @tid: Traffic ID (TID)
 *
 * Remove fragments from waitlist
 *
 * Returns: None
 */
void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_rx_tid *rx_reorder;
	struct dp_rx_tid *tmp;

	dp_debug("Removing TID %u from waitlist for peer %pK at MAC address "QDF_MAC_ADDR_FMT,
		 tid, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	if (tid >= DP_MAX_TIDS) {
		dp_err("TID out of bounds: %d", tid);
		qdf_assert_always(0);
	}

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct dp_peer *peer_on_waitlist;

		/* get address of current peer */
		peer_on_waitlist =
			container_of(rx_reorder, struct dp_peer,
				     rx_tid[rx_reorder->tid]);

		/* Ensure it is TID for same peer */
		if (peer_on_waitlist == peer && rx_reorder->tid == tid) {
			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
				rx_reorder, defrag_waitlist_elem);
			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
		}
	}
	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
}

/*
 * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list
 * @peer: Pointer to the peer data structure
 * @tid: Traffic ID (TID)
 * @head_addr: Pointer to head list
 * @tail_addr: Pointer to tail list
 * @frag: Incoming fragment
 * @all_frag_present: Flag to indicate whether all fragments are received
 *
 * Build a per-TID, per-sequence fragment list.
 *
 * Returns: Success, if inserted
 */
static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid,
	qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag,
	uint8_t *all_frag_present)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	qdf_nbuf_t next;
	qdf_nbuf_t prev = NULL;
	qdf_nbuf_t cur;
	uint16_t head_fragno, cur_fragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	uint8_t *rx_desc_info;

	qdf_assert(frag);
	qdf_assert(head_addr);
	qdf_assert(tail_addr);

	*all_frag_present = 0;
	rx_desc_info = qdf_nbuf_data(frag);
	cur_fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc_info);

	dp_debug("cur_fragno %d\n", cur_fragno);
	/* If this is the first fragment */
	if (!(*head_addr)) {
		*head_addr = *tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;

		goto insert_done;
	}

	/* In sequence fragment */
	if (cur_fragno > rx_tid->curr_frag_num) {
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;
	} else {
		/* Out of sequence fragment */
		cur = *head_addr;
		rx_desc_info = qdf_nbuf_data(cur);
		head_fragno = dp_rx_frag_get_mpdu_frag_number(soc,
							      rx_desc_info);

		if (cur_fragno == head_fragno) {
			qdf_nbuf_free(frag);
			goto insert_fail;
		} else if (head_fragno > cur_fragno) {
			qdf_nbuf_set_next(frag, cur);
			cur = frag;
			*head_addr = frag; /* head pointer to be updated */
		} else {
			while ((cur_fragno > head_fragno) && cur) {
				prev = cur;
				cur = qdf_nbuf_next(cur);
				if (cur) {
					rx_desc_info = qdf_nbuf_data(cur);
					head_fragno =
						dp_rx_frag_get_mpdu_frag_number(
								soc,
								rx_desc_info);
				}
			}

			if (cur_fragno == head_fragno) {
				qdf_nbuf_free(frag);
				goto insert_fail;
			}

			qdf_nbuf_set_next(prev, frag);
			qdf_nbuf_set_next(frag, cur);
		}
	}
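	/*
	 * Example: for fragments arriving as 0, 2, 1, frag 0 seeds
	 * *head_addr, frag 2 is appended in sequence (curr_frag_num = 2),
	 * and frag 1 takes the out-of-sequence path above, walking from
	 * the head and getting linked between 0 and 2.
	 */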

	next = qdf_nbuf_next(*head_addr);

	rx_desc_info = qdf_nbuf_data(*tail_addr);
	last_morefrag = dp_rx_frag_get_more_frag_bit(soc, rx_desc_info);

	/* TODO: optimize the loop */
	if (!last_morefrag) {
		/* Check if all fragments are present */
		do {
			rx_desc_info = qdf_nbuf_data(next);
			next_fragno =
				dp_rx_frag_get_mpdu_frag_number(soc,
								rx_desc_info);
			count++;

			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return QDF_STATUS_SUCCESS;
		} else {
			/* revisit */
		}
	}

insert_done:
	return QDF_STATUS_SUCCESS;

insert_fail:
	return QDF_STATUS_E_FAILURE;
}


/*
 * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment
 * @soc: DP SOC
 * @msdu: Pointer to the fragment
 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
 *
 * decap tkip encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_tkip_decap(struct dp_soc *soc,
			qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	/* start of 802.11 header info */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);

	/* TKIP header is located after the 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"IEEE80211_WEP_EXTIV is missing in TKIP fragment");
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment
 * @soc: DP SOC
 * @nbuf: Pointer to the fragment buffer
 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
 *
 * Remove MIC information from CCMP fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_ccmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	/* start of the 802.11 header */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);

	/* CCMP header is located after 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment
 * @soc: DP SOC
 * @nbuf: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap CCMP encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_ccmp_decap(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = orig_hdr + hdrlen;

	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	/* Let's pull the header later */

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment
 * @soc: DP SOC
 * @msdu: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap WEP encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_wep_decap(struct dp_soc *soc, qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_move(orig_hdr + dp_f_wep.ic_header, orig_hdr, hdrlen);

	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment
 * @soc: soc handle
 * @nbuf: Pointer to the fragment
 *
 * Calculate the header size of the received fragment
 *
 * Returns: header size (uint16_t)
 */
static uint16_t dp_rx_defrag_hdrsize(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
	uint16_t size = sizeof(struct ieee80211_frame);
	uint16_t fc = 0;
	uint32_t to_ds, fr_ds;
	uint8_t frm_ctrl_valid;
	uint16_t frm_ctrl_field;

	to_ds = hal_rx_mpdu_get_to_ds(soc->hal_soc, rx_tlv_hdr);
	fr_ds = hal_rx_mpdu_get_fr_ds(soc->hal_soc, rx_tlv_hdr);
	frm_ctrl_valid =
		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						    rx_tlv_hdr);
	frm_ctrl_field = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_tlv_hdr);

	if (to_ds && fr_ds)
		size += QDF_MAC_ADDR_SIZE;

	if (frm_ctrl_valid) {
		fc = frm_ctrl_field;

		/* use the first byte for validation */
		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
			size += sizeof(uint16_t);
			/* use the second byte for validation */
			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
				size += sizeof(struct ieee80211_htc);
		}
	}

	return size;
}
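
/*
 * Example header sizes from the logic above: a 3-address QoS data
 * frame is 24 + 2 = 26 bytes; a 4-address (WDS) QoS data frame is
 * 30 + 2 = 32 bytes, plus 4 more bytes of HT control when the order
 * bit is set in the second frame-control byte.
 */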

/*
 * dp_rx_defrag_michdr(): Calculate a pseudo MIC header
 * @wh0: Pointer to the wireless header of the fragment
 * @hdr: Array to hold the pseudo header
 *
 * Calculate a pseudo MIC header
 *
 * Returns: None
 */
static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
				uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr4);
		break;
	}

	/*
	 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for data frame, but
	 * it could also be set for deauth, disassoc, action, etc. for
	 * a mgt type frame. It comes into picture for MFP.
	 */
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
				IEEE80211_FC1_DIR_DSTODS) {
			const struct ieee80211_qosframe_addr4 *qwh =
				(const struct ieee80211_qosframe_addr4 *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		} else {
			const struct ieee80211_qosframe *qwh =
				(const struct ieee80211_qosframe *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		}
	} else {
		hdr[12] = 0;
	}

	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
}
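
/*
 * Resulting pseudo-header layout (16 bytes, per the Michael MIC
 * construction): hdr[0..5] = DA, hdr[6..11] = SA, hdr[12] = QoS
 * TID/priority (0 for non-QoS frames), hdr[13..15] = 0 (reserved).
 */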

/*
 * dp_rx_defrag_mic(): Calculate the Michael MIC
 * @soc: DP SOC
 * @key: Pointer to the key
 * @wbuf: fragment buffer
 * @off: Offset
 * @data_len: Data length
 * @mic: Array to hold MIC
 *
 * Calculate the Michael MIC over the pseudo header and the payload
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_mic(struct dp_soc *soc, const uint8_t *key,
				   qdf_nbuf_t wbuf, uint16_t off,
				   uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
		+ rx_desc_len), hdr);

	l = dp_rx_get_le32(key);
	r = dp_rx_get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	l ^= dp_rx_get_le32(hdr);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[4]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[8]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[12]);
	dp_rx_michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
	space = qdf_nbuf_len(wbuf) - off;

	for (;;) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= dp_rx_get_le32(data);
			dp_rx_michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = qdf_nbuf_next(wbuf);
		if (!wbuf)
			return QDF_STATUS_E_DEFRAG_ERROR;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *)qdf_nbuf_data(wbuf) + off;
			if ((qdf_nbuf_len(wbuf)) <
				sizeof(uint32_t) - space) {
				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			switch (space) {
			case 1:
				l ^= dp_rx_get_le32_split(data[0],
					data_next[0], data_next[1],
					data_next[2]);
				data = data_next + 3;
				space = (qdf_nbuf_len(wbuf) - off) - 3;
				break;
			case 2:
				l ^= dp_rx_get_le32_split(data[0], data[1],
						    data_next[0], data_next[1]);
				data = data_next + 2;
				space = (qdf_nbuf_len(wbuf) - off) - 2;
				break;
			case 3:
				l ^= dp_rx_get_le32_split(data[0], data[1],
					data[2], data_next[0]);
				data = data_next + 1;
				space = (qdf_nbuf_len(wbuf) - off) - 1;
				break;
			}
			dp_rx_michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
			space = qdf_nbuf_len(wbuf) - off;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	dp_rx_michael_block(l, r);
	dp_rx_michael_block(l, r);
	dp_rx_put_le32(mic, l);
	dp_rx_put_le32(mic + 4, r);

	return QDF_STATUS_SUCCESS;
}
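
/*
 * dp_rx_michael_block() (assumed to be provided by dp_rx_defrag.h)
 * mixes one little-endian 32-bit word into the (l, r) Michael state;
 * the standard Michael block function interleaves XORs of rotated or
 * byte-swapped copies of l into r with 32-bit additions of r into l.
 * The final two invocations above absorb the mandatory 0x5a byte and
 * zero padding, after which (l, r) is emitted as the 8-byte MIC.
 */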

/*
 * dp_rx_defrag_tkip_demic(): Remove and verify the MIC of a TKIP frame
 * @soc: DP SOC
 * @key: Pointer to the key
 * @msdu: fragment buffer
 * @hdrlen: Length of the header information
 *
 * Remove the MIC information from the TKIP frame and verify it
 * against the locally computed Michael MIC
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_tkip_demic(struct dp_soc *soc,
					  const uint8_t *key,
					  qdf_nbuf_t msdu, uint16_t hdrlen)
{
	QDF_STATUS status;
	uint32_t pktlen = 0, prev_data_len;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	qdf_nbuf_t prev = NULL, prev0, next;
	uint8_t len0 = 0;

	next = msdu;
	prev0 = msdu;
	while (next) {
		pktlen += (qdf_nbuf_len(next) - hdrlen);
		prev = next;
		dp_debug("frag data len %u",
			 (uint32_t)(qdf_nbuf_len(next) - hdrlen));
		next = qdf_nbuf_next(next);
		if (next && !qdf_nbuf_next(next))
			prev0 = prev;
	}

	if (!prev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Defrag chaining failed !\n", __func__);
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	prev_data_len = qdf_nbuf_len(prev) - hdrlen;
	if (prev_data_len < dp_f_tkip.ic_miclen) {
		if (prev0 == prev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Fragments don't have MIC header !\n", __func__);
			return QDF_STATUS_E_DEFRAG_ERROR;
		}
		len0 = dp_f_tkip.ic_miclen - (uint8_t)prev_data_len;
		qdf_nbuf_copy_bits(prev0, qdf_nbuf_len(prev0) - len0, len0,
				   (caddr_t)mic0);
		qdf_nbuf_trim_tail(prev0, len0);
	}

	qdf_nbuf_copy_bits(prev, (qdf_nbuf_len(prev) -
			   (dp_f_tkip.ic_miclen - len0)),
			   (dp_f_tkip.ic_miclen - len0),
			   (caddr_t)(&mic0[len0]));
	qdf_nbuf_trim_tail(prev, (dp_f_tkip.ic_miclen - len0));
	pktlen -= dp_f_tkip.ic_miclen;

	if (((qdf_nbuf_len(prev) - hdrlen) == 0) && prev != msdu) {
		qdf_nbuf_free(prev);
		qdf_nbuf_set_next(prev0, NULL);
	}

	status = dp_rx_defrag_mic(soc, key, msdu, hdrlen,
				  pktlen, mic);

	if (QDF_IS_STATUS_ERROR(status))
		return status;

	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers
 * @soc: DP SOC
 * @nbuf: buffer pointer
 * @hdrsize: size of the header to be pulled
 *
 * Pull the RXTLV & the 802.11 headers
 *
 * Returns: None
 */
static void dp_rx_frag_pull_hdr(struct dp_soc *soc,
				qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));

	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + hdrsize);

	dp_debug("final pktlen %d .11len %d",
		 (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
}

/*
 * dp_rx_defrag_pn_check(): Check the PN of the current fragment against
 * the previous fragment's PN
 * @soc: DP SOC
 * @msdu: msdu to get the current PN
 * @cur_pn128: PN extracted from current msdu
 * @prev_pn128: Prev PN
 *
 * Returns: 0 on success, non zero on failure
 */
static int dp_rx_defrag_pn_check(struct dp_soc *soc, qdf_nbuf_t msdu,
				 uint64_t *cur_pn128, uint64_t *prev_pn128)
{
	int out_of_order = 0;

	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(msdu), cur_pn128);

	if (cur_pn128[1] == prev_pn128[1])
		out_of_order = (cur_pn128[0] - prev_pn128[0] != 1);
	else
		out_of_order = (cur_pn128[1] - prev_pn128[1] != 1);

	return out_of_order;
}
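
/*
 * The 128-bit PN is handled as two 64-bit words; fragments of one
 * MPDU must carry strictly consecutive PNs. E.g. prev_pn = 5 and
 * cur_pn = 6 is in order, while cur_pn = 8 (or a replayed 5) makes
 * out_of_order non-zero and the caller drops the chain.
 */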

/*
 * dp_rx_construct_fraglist(): Construct a nbuf fraglist
 * @peer: Pointer to the peer
 * @tid: Traffic ID (TID)
 * @head: Pointer to list of fragments
 * @hdrsize: Size of the header to be pulled
 *
 * Construct a nbuf fraglist
 *
 * Returns: QDF_STATUS
 */
static int
dp_rx_construct_fraglist(struct dp_peer *peer, int tid, qdf_nbuf_t head,
			 uint16_t hdrsize)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	qdf_nbuf_t msdu = qdf_nbuf_next(head);
	qdf_nbuf_t rx_nbuf = msdu;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	uint32_t len = 0;
	uint64_t cur_pn128[2] = {0, 0}, prev_pn128[2];
	int out_of_order = 0;
	int index;
	int needs_pn_check = 0;
	enum cdp_sec_type sec_type;

	prev_pn128[0] = rx_tid->pn128[0];
	prev_pn128[1] = rx_tid->pn128[1];

	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu) ? dp_sec_mcast :
				dp_sec_ucast;
	sec_type = peer->security[index].sec_type;

	if (!(sec_type == cdp_sec_type_none || sec_type == cdp_sec_type_wep128 ||
	      sec_type == cdp_sec_type_wep104 || sec_type == cdp_sec_type_wep40))
		needs_pn_check = 1;

	while (msdu) {
		if (qdf_likely(needs_pn_check))
			out_of_order = dp_rx_defrag_pn_check(soc, msdu,
							     &cur_pn128[0],
							     &prev_pn128[0]);

		if (qdf_unlikely(out_of_order)) {
			dp_info_rl("cur_pn128[0] 0x%llx cur_pn128[1] 0x%llx prev_pn128[0] 0x%llx prev_pn128[1] 0x%llx",
				   cur_pn128[0], cur_pn128[1],
				   prev_pn128[0], prev_pn128[1]);
			return QDF_STATUS_E_FAILURE;
		}

		prev_pn128[0] = cur_pn128[0];
		prev_pn128[1] = cur_pn128[1];

		/*
		 * Broadcast and multicast frames should never be fragmented.
		 * Iterating through all msdus and dropping fragments if even
		 * one of them has a mcast/bcast destination address.
		 */
		if (hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "Dropping multicast/broadcast fragments");
			return QDF_STATUS_E_FAILURE;
		}

		dp_rx_frag_pull_hdr(soc, msdu, hdrsize);
		len += qdf_nbuf_len(msdu);
		msdu = qdf_nbuf_next(msdu);
	}

	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
	qdf_nbuf_set_next(head, NULL);
	qdf_nbuf_set_is_frag(head, 1);

	dp_debug("head len %d ext len %d data len %d ",
		 (uint32_t)qdf_nbuf_len(head),
		 (uint32_t)qdf_nbuf_len(rx_nbuf),
		 (uint32_t)(head->data_len));

	return QDF_STATUS_SUCCESS;
}
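
/*
 * After qdf_nbuf_append_ext_list() the head nbuf keeps its own data
 * (RX TLVs plus the already-converted 802.3 header and first MSDU
 * payload) while the remaining fragments, stripped of TLVs and 802.11
 * headers above, hang off the head as an extension (frag) list of
 * total length 'len'.
 */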

/**
 * dp_rx_defrag_err() - rx defragmentation error handler
 * @vdev: handle to vdev object
 * @nbuf: frame on which the MIC failure was detected
 *
 * This function handles the rx error and sends a MIC error
 * notification to the upper layers
 *
 * Return: None
 */
static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct ol_if_ops *tops = NULL;
	struct dp_pdev *pdev = vdev->pdev;
	int rx_desc_len = pdev->soc->rx_pkt_tlv_size;
	uint8_t *orig_hdr;
	struct ieee80211_frame *wh;
	struct cdp_rx_mic_err_info mic_failure_info;

	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	wh = (struct ieee80211_frame *)orig_hdr;

	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr1);
	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr2);
	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(wh->i_addr1);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = (uint8_t *)wh;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);
}


/*
 * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
 * @soc: dp soc handle
 * @peer: Pointer to the peer
 * @tid: Traffic ID (TID)
 * @nbuf: Pointer to the fragment buffer
 * @hdrsize: Size of headers
 *
 * Transcap the fragment from 802.11 to 802.3
 *
 * Returns: None
 */
static void
dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc, struct dp_peer *peer, int tid,
			   qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	struct llc_snap_hdr_t *llchdr;
	struct ethernet_hdr_t *eth_hdr;
	uint8_t ether_type[2];
	uint16_t fc = 0;
	union dp_align_mac_addr mac_addr;
	uint8_t *rx_desc_info = qdf_mem_malloc(soc->rx_pkt_tlv_size);
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), rx_tid->pn128);

	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));

	if (!rx_desc_info) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: Memory alloc failed ! ", __func__);
		QDF_ASSERT(0);
		return;
	}

	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), soc->rx_pkt_tlv_size);

	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
					soc->rx_pkt_tlv_size + hdrsize);
	qdf_mem_copy(ether_type, llchdr->ethertype, 2);

	qdf_nbuf_pull_head(nbuf, (soc->rx_pkt_tlv_size + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));

	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));

	if (hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						rx_desc_info))
		fc = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_desc_info);

	dp_debug("Frame control field: 0x%x", fc);

	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_TODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;

	case IEEE80211_FC1_DIR_DSTODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr4(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"%s: Unknown frame control type: 0x%x", __func__, fc);
	}

	qdf_mem_copy(eth_hdr->ethertype, ether_type,
			sizeof(ether_type));

	qdf_nbuf_push_head(nbuf, soc->rx_pkt_tlv_size);
	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, soc->rx_pkt_tlv_size);
	qdf_mem_free(rx_desc_info);
}
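
/*
 * Transcap example for a 3-address QoS frame: the 26-byte 802.11
 * header plus the 8-byte LLC/SNAP header are replaced by a 14-byte
 * Ethernet II header, i.e. qdf_nbuf_pull_head() above removes
 * rx_pkt_tlv_size + 26 + 8 - 14 bytes before the saved RX TLVs are
 * pushed back in front of the new Ethernet header.
 */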

#ifdef RX_DEFRAG_DO_NOT_REINJECT
/*
 * dp_rx_defrag_deliver(): Deliver defrag packet to stack
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @head: Nbuf to be delivered
 *
 * Returns: None
 */
static inline void dp_rx_defrag_deliver(struct dp_peer *peer,
					unsigned int tid,
					qdf_nbuf_t head)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	uint8_t *rx_tlv_hdr;

	rx_tlv_hdr = qdf_nbuf_data(head);

	QDF_NBUF_CB_RX_VDEV_ID(head) = vdev->vdev_id;
	qdf_nbuf_set_tid_val(head, tid);
	qdf_nbuf_pull_head(head, soc->rx_pkt_tlv_size);

	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail,
			  head);
	dp_rx_deliver_to_stack(soc, vdev, peer, deliver_list_head,
			       deliver_list_tail);
}

/*
 * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @head: Buffer to be reinjected back
 *
 * Reinject the fragment chain back into REO
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
					    unsigned int tid, qdf_nbuf_t head)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;

	rx_reorder_array_elem = peer->rx_tid[tid].array;

	dp_rx_defrag_deliver(peer, tid, head);
	rx_reorder_array_elem->head = NULL;
	rx_reorder_array_elem->tail = NULL;
	dp_rx_return_head_frag_desc(peer, tid);

	return QDF_STATUS_SUCCESS;
}
#else
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_reinject_ring_record_entry() - Record reinject ring history
 * @soc: Datapath soc structure
 * @paddr: paddr of the buffer reinjected to SW2REO ring
 * @sw_cookie: SW cookie of the buffer reinjected to SW2REO ring
 * @rbm: Return buffer manager of the buffer reinjected to SW2REO ring
 *
 * Returns: None
 */
static inline void
dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
				 uint32_t sw_cookie, uint8_t rbm)
{
	struct dp_buf_info_record *record;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_reinject_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->rx_reinject_ring_history->index,
					DP_RX_REINJECT_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_reinject_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = paddr;
	record->hbi.sw_cookie = sw_cookie;
	record->hbi.rbm = rbm;
}
#else
static inline void
dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
				 uint32_t sw_cookie, uint8_t rbm)
{
}
#endif

/*
 * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @head: Buffer to be reinjected back
 *
 * Reinject the fragment chain back into REO
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
					    unsigned int tid, qdf_nbuf_t head)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct hal_buf_info buf_info;
	struct hal_buf_info temp_buf_info;
	void *link_desc_va;
	void *msdu0, *msdu_desc_info;
	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
	void *dst_mpdu_desc_info, *dst_qdesc_addr;
	qdf_dma_addr_t paddr;
	uint32_t nbuf_len, seq_no, dst_ind;
	uint32_t *mpdu_wrd;
	uint32_t ret, cookie;
	hal_ring_desc_t dst_ring_desc =
		peer->rx_tid[tid].dst_ring_desc;
	hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng;
	struct dp_rx_desc *rx_desc = peer->rx_tid[tid].head_frag_desc;
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
						peer->rx_tid[tid].array;
	qdf_nbuf_t nbuf_head;
	struct rx_desc_pool *rx_desc_pool = NULL;
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(dst_ring_desc);

	/* do duplicate link desc address check */
	dp_rx_link_desc_refill_duplicate_check(
				soc,
				&soc->last_op_info.reo_reinject_link_desc,
				buf_addr_info);

	nbuf_head = dp_ipa_handle_rx_reo_reinject(soc, head);
	if (qdf_unlikely(!nbuf_head)) {
		dp_err_rl("IPA RX REO reinject failed");
		return QDF_STATUS_E_FAILURE;
	}

	/* update new allocated skb in case IPA is enabled */
	if (nbuf_head != head) {
		head = nbuf_head;
		rx_desc->nbuf = head;
		rx_reorder_array_elem->head = head;
	}

	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
	if (!ent_ring_desc) {
		dp_err_rl("HAL src ring next entry NULL");
		return QDF_STATUS_E_FAILURE;
	}

	hal_rx_reo_buf_paddr_get(soc->hal_soc, dst_ring_desc, &buf_info);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)dst_ring_desc,
				  &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	qdf_assert_always(link_desc_va);

	msdu0 = hal_rx_msdu0_buffer_addr_lsb(soc->hal_soc, link_desc_va);
	nbuf_len = qdf_nbuf_len(head) - soc->rx_pkt_tlv_size;

	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
			UNI_DESC_BUF_TYPE_RX_MSDU_LINK);

	/* msdu reconfig */
	msdu_desc_info = hal_rx_msdu_desc_info_ptr_get(soc->hal_soc, msdu0);

	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);

	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));

	hal_msdu_desc_info_set(soc->hal_soc, msdu_desc_info, dst_ind, nbuf_len);

	/* change RX TLV's */
	hal_rx_tlv_msdu_len_set(soc->hal_soc, qdf_nbuf_data(head), nbuf_len);

	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)msdu0,
				  &temp_buf_info);

	cookie = temp_buf_info.sw_cookie;
	rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];

	/* map the nbuf before reinjecting it into HW */
	ret = qdf_nbuf_map_nbytes_single(soc->osdev, head,
					 QDF_DMA_FROM_DEVICE,
					 rx_desc_pool->buf_size);
	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: nbuf map failed !", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	dp_ipa_handle_rx_buf_smmu_mapping(soc, head,
					  rx_desc_pool->buf_size,
					  true);

	/*
	 * As part of the rx frag handler the buffer was unmapped and
	 * rx_desc->unmapped was set to 1. Reset it back to 0 for the
	 * defrag reinject frame.
	 */
	rx_desc->unmapped = 0;

	paddr = qdf_nbuf_get_frag_paddr(head, 0);

	ret = dp_check_paddr(soc, &head, &paddr, rx_desc_pool);

	if (ret == QDF_STATUS_E_FAILURE) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: x86 check failed !", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	hal_rxdma_buff_addr_info_set(soc->hal_soc, msdu0, paddr, cookie,
				     DP_DEFRAG_RBM(soc->wbm_sw0_bm_id));

	/* Let's fill the entrance ring now */
	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"HAL RING Access For REO entrance SRNG Failed: %pK",
		hal_srng);

		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_reinject_ring_record_entry(soc, paddr, cookie,
					 DP_DEFRAG_RBM(soc->wbm_sw0_bm_id));
	paddr = (uint64_t)buf_info.paddr;
	/* buf addr */
	hal_rxdma_buff_addr_info_set(soc->hal_soc, ent_ring_desc, paddr,
				     buf_info.sw_cookie,
				     HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
	/* mpdu desc info */
	ent_mpdu_desc_info = hal_ent_mpdu_desc_info(soc->hal_soc,
						    ent_ring_desc);
	dst_mpdu_desc_info = hal_dst_mpdu_desc_info(soc->hal_soc,
						    dst_ring_desc);

	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
				sizeof(struct rx_mpdu_desc_info));
	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));

	mpdu_wrd = (uint32_t *)dst_mpdu_desc_info;
	seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd);

	hal_mpdu_desc_info_set(soc->hal_soc, ent_mpdu_desc_info, seq_no);
	/* qdesc addr */
	ent_qdesc_addr = (uint8_t *)ent_ring_desc +
		REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;

	dst_qdesc_addr = (uint8_t *)dst_ring_desc +
		REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;

	qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8);

	HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5,
			REO_DESTINATION_INDICATION, dst_ind);

	hal_srng_access_end(soc->hal_soc, hal_srng);

	DP_STATS_INC(soc, rx.reo_reinject, 1);
	dp_debug("reinjection done !");
	return QDF_STATUS_SUCCESS;
}
#endif
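
/*
 * Reinjection summary (non RX_DEFRAG_DO_NOT_REINJECT builds): the
 * defragmented nbuf is remapped for DMA, the saved MSDU link
 * descriptor is rewritten with the new buffer address and length, and
 * the MPDU descriptor info plus the REO queue descriptor address are
 * copied from the cached destination ring entry into a SW2REO
 * entrance ring entry, so the hardware re-runs the regular REO path
 * on the rebuilt MPDU.
 */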

/*
 * dp_rx_defrag_gcmp_demic(): Remove MIC information from GCMP fragment
 * @soc: Datapath soc structure
 * @nbuf: Pointer to the fragment buffer
 * @hdrlen: 802.11 header length
 *
 * Remove MIC information from GCMP fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_gcmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	/* start of the 802.11 header */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);

	/*
	 * GCMP header is located after 802.11 header and EXTIV
	 * field should always be set to 1 for GCMP protocol.
	 */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	qdf_nbuf_trim_tail(nbuf, dp_f_gcmp.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag(): Defragment the fragment chain
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @frag_list_head: Pointer to head list
 * @frag_list_tail: Pointer to tail list
 *
 * Defragment the fragment chain
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
			qdf_nbuf_t frag_list_head, qdf_nbuf_t frag_list_tail)
{
	qdf_nbuf_t tmp_next, prev;
	qdf_nbuf_t cur = frag_list_head, msdu;
	uint32_t index, tkip_demic = 0;
	uint16_t hdr_space;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint8_t status = 0;

	if (!cur)
		return QDF_STATUS_E_DEFRAG_ERROR;

	hdr_space = dp_rx_defrag_hdrsize(soc, cur);
	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, cur) ?
		dp_sec_mcast : dp_sec_ucast;

	/* Remove FCS from all fragments */
	while (cur) {
		tmp_next = qdf_nbuf_next(cur);
		qdf_nbuf_set_next(cur, NULL);
		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		prev = cur;
		qdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list_head;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: index %d Security type: %d", __func__,
		  index, peer->security[index].sec_type);

	switch (peer->security[index].sec_type) {
	case cdp_sec_type_tkip:
		tkip_demic = 1;
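		/* fall through: TKIP with MIC also takes the decap path
		 * below; the Michael MIC itself is verified later via
		 * dp_rx_defrag_tkip_demic().
		 */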

	case cdp_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_tkip_decap(soc, cur, hdr_space)) {

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: TKIP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_tkip.ic_header;
		break;

	case cdp_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_ccmp_demic(soc, cur, hdr_space)) {

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: CCMP demic failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			if (dp_rx_defrag_ccmp_decap(soc, cur, hdr_space)) {

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: CCMP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_ccmp.ic_header;
		break;

	case cdp_sec_type_wep40:
	case cdp_sec_type_wep104:
	case cdp_sec_type_wep128:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_wep_decap(soc, cur, hdr_space)) {

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: WEP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_wep.ic_header;
		break;
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_gcmp_demic(soc, cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: GCMP demic failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		hdr_space += dp_f_gcmp.ic_header;
		break;
	default:
		break;
	}

	if (tkip_demic) {
		msdu = frag_list_head;
		qdf_mem_copy(key,
			     &peer->security[index].michael_key[0],
			     IEEE80211_WEP_MICLEN);
		status = dp_rx_defrag_tkip_demic(soc, key, msdu,
						 soc->rx_pkt_tlv_size +
						 hdr_space);

		if (status) {
			dp_rx_defrag_err(vdev, frag_list_head);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: TKIP demic failed status %d",
				   __func__, status);

			return QDF_STATUS_E_DEFRAG_ERROR;
		}
	}

	/* Convert the header to 802.3 header */
	dp_rx_defrag_nwifi_to_8023(soc, peer, tid, frag_list_head, hdr_space);
	if (qdf_nbuf_next(frag_list_head)) {
		if (dp_rx_construct_fraglist(peer, tid, frag_list_head, hdr_space))
			return QDF_STATUS_E_DEFRAG_ERROR;
	}

	return QDF_STATUS_SUCCESS;
}
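
/*
 * Per-cipher handling above, as implemented: TKIP fragments are
 * decapped (ICV trimmed) and, unless the nomic variant is in use,
 * Michael-demic'd over the whole chain; CCMP fragments are demic'd
 * and decapped; WEP fragments have the 802.11 header shifted over the
 * IV and the ICV trimmed; GCMP fragments are demic'd only. In every
 * case hdr_space grows by the cipher's ic_header so the security
 * header is stripped along with the 802.11 header during reassembly.
 */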

/*
 * dp_rx_defrag_cleanup(): Clean up activities
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 *
 * Returns: None
 */
void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
				peer->rx_tid[tid].array;

	if (rx_reorder_array_elem) {
		/* Free up nbufs */
		dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
	} else {
		dp_info("Cleanup self peer %pK and TID %u at MAC address "QDF_MAC_ADDR_FMT,
			peer, tid, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
	}

	/* Free up saved ring descriptors */
	dp_rx_clear_saved_desc_info(peer, tid);

	peer->rx_tid[tid].defrag_timeout_ms = 0;
	peer->rx_tid[tid].curr_frag_num = 0;
	peer->rx_tid[tid].curr_seq_num = 0;
}

/*
 * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor
 * @ring_desc: Pointer to the dst ring descriptor
 * @rx_desc: Pointer to the rx descriptor of the head fragment
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_save_info_from_ring_desc(hal_ring_desc_t ring_desc,
				      struct dp_rx_desc *rx_desc,
				      struct dp_peer *peer,
				      unsigned int tid)
{
	void *dst_ring_desc = qdf_mem_malloc(
			sizeof(struct reo_destination_ring));

	if (!dst_ring_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: Memory alloc failed !", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(dst_ring_desc, ring_desc,
		       sizeof(struct reo_destination_ring));

	peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
	peer->rx_tid[tid].head_frag_desc = rx_desc;

	return QDF_STATUS_SUCCESS;
}
1662 
1663 /*
1664  * dp_rx_defrag_store_fragment(): Store incoming fragments
1665  * @soc: Pointer to the SOC data structure
1666  * @ring_desc: Pointer to the ring descriptor
1667  * @mpdu_desc_info: MPDU descriptor info
1668  * @tid: Traffic Identifier
1669  * @rx_desc: Pointer to rx descriptor
1670  * @rx_bfs: Number of bfs consumed
1671  *
1672  * Returns: QDF_STATUS
1673  */
1674 static QDF_STATUS
1675 dp_rx_defrag_store_fragment(struct dp_soc *soc,
1676 			    hal_ring_desc_t ring_desc,
1677 			    union dp_rx_desc_list_elem_t **head,
1678 			    union dp_rx_desc_list_elem_t **tail,
1679 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1680 			    unsigned int tid, struct dp_rx_desc *rx_desc,
1681 			    uint32_t *rx_bfs)
1682 {
1683 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1684 	struct dp_pdev *pdev;
1685 	struct dp_peer *peer = NULL;
1686 	uint16_t peer_id;
1687 	uint8_t fragno, more_frag, all_frag_present = 0;
1688 	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
1689 	QDF_STATUS status;
1690 	struct dp_rx_tid *rx_tid;
1691 	uint8_t mpdu_sequence_control_valid;
1692 	uint8_t mpdu_frame_control_valid;
1693 	qdf_nbuf_t frag = rx_desc->nbuf;
1694 	uint32_t msdu_len;
1695 
1696 	if (qdf_nbuf_len(frag) > 0) {
1697 		dp_info("Dropping unexpected packet with skb_len: %d,"
1698 			"data len: %d, cookie: %d",
1699 			(uint32_t)qdf_nbuf_len(frag), frag->data_len,
1700 			rx_desc->cookie);
1701 		DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1);
1702 		goto discard_frag;
1703 	}
1704 
1705 	if (dp_rx_buffer_pool_refill(soc, frag, rx_desc->pool_id)) {
1706 		/* fragment queued back to the pool, free the link desc */
1707 		goto err_free_desc;
1708 	}
1709 
1710 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
1711 						  rx_desc->rx_buf_start);
1712 
1713 	qdf_nbuf_set_pktlen(frag, (msdu_len + soc->rx_pkt_tlv_size));
1714 	qdf_nbuf_append_ext_list(frag, NULL, 0);
1715 
1716 	/* Check if the packet is from a valid peer */
1717 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
1718 					mpdu_desc_info->peer_meta_data);
1719 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
1720 
1721 	if (!peer) {
1722 		/* We should not receive anything from unknown peer
1723 		 * however, that might happen while we are in the monitor mode.
1724 		 * We don't need to handle that here
1725 		 */
1726 		dp_info_rl("Unknown peer with peer_id %d, dropping fragment",
1727 			   peer_id);
1728 		DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1);
1729 		goto discard_frag;
1730 	}
1731 
1732 	if (tid >= DP_MAX_TIDS) {
1733 		dp_info("TID out of bounds: %d", tid);
1734 		qdf_assert_always(0);
1735 		goto discard_frag;
1736 	}
1737 
1738 	pdev = peer->vdev->pdev;
1739 	rx_tid = &peer->rx_tid[tid];
1740 
1741 	mpdu_sequence_control_valid =
1742 		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
1743 						       rx_desc->rx_buf_start);
1744 
1745 	/* Invalid MPDU sequence control field, MPDU is of no use */
1746 	if (!mpdu_sequence_control_valid) {
1747 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1748 			"Invalid MPDU seq control field, dropping MPDU");
1749 
1750 		qdf_assert(0);
1751 		goto discard_frag;
1752 	}
1753 
1754 	mpdu_frame_control_valid =
1755 		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
1756 						    rx_desc->rx_buf_start);
1757 
1758 	/* Invalid frame control field */
1759 	if (!mpdu_frame_control_valid) {
1760 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1761 			"Invalid frame control field, dropping MPDU");
1762 
1763 		qdf_assert(0);
1764 		goto discard_frag;
1765 	}
1766 
1767 	/* Current mpdu sequence */
1768 	more_frag = dp_rx_frag_get_more_frag_bit(soc, rx_desc->rx_buf_start);
1769 
1770 	/* HW does not populate the fragment number as of now
1771 	 * need to get from the 802.11 header
1772 	 */
1773 	fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc->rx_buf_start);
1774 
1775 	rx_reorder_array_elem = peer->rx_tid[tid].array;
1776 	if (!rx_reorder_array_elem) {
1777 		dp_err_rl("Rcvd Fragmented pkt before tid setup for peer %pK",
1778 			  peer);
1779 		goto discard_frag;
1780 	}
1781 
1782 	/*
1783 	 * !more_frag: no more fragments to be delivered
1784 	 * !frag_no: packet is not fragmented
1785 	 * !rx_reorder_array_elem->head: no saved fragments so far
1786 	 */
1787 	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
1788 		/* We should not get into this situation here.
1789 		 * It would mean an unfragmented packet with the fragment
1790 		 * flag set was delivered over the REO exception ring,
1791 		 * whereas such packets normally follow the regular rx path.
1792 		 */
1793 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1794 			"Rcvd unfragmented pkt on REO Err srng, dropping");
1795 
1796 		qdf_assert(0);
1797 		goto discard_frag;
1798 	}
1799 
1800 	/* Check if the fragment is for the same sequence or a different one */
1801 	dp_debug("rx_tid %d", tid);
1802 	if (rx_reorder_array_elem->head) {
1803 		dp_debug("rxseq %d", rxseq);
1804 		if (rxseq != rx_tid->curr_seq_num) {
1806 			dp_debug("mismatch cur_seq %d rxseq %d",
1807 				 rx_tid->curr_seq_num, rxseq);
1808 			/* Drop stored fragments if out of sequence
1809 			 * fragment is received
1810 			 */
1811 			dp_rx_reorder_flush_frag(peer, tid);
1812 
1813 			DP_STATS_INC(soc, rx.rx_frag_oor, 1);
1814 
1815 			dp_debug("cur rxseq %d", rxseq);
1816 			/*
1817 			 * The sequence number for this fragment becomes the
1818 			 * new sequence number to be processed
1819 			 */
1820 			rx_tid->curr_seq_num = rxseq;
1821 		}
1822 	} else {
1823 		dp_debug("cur rxseq %d", rxseq);
1824 		/* Start of a new sequence */
1825 		dp_rx_defrag_cleanup(peer, tid);
1826 		rx_tid->curr_seq_num = rxseq;
1827 		/* store PN number also */
1828 	}
1829 
1830 	/*
1831 	 * If the earlier sequence was dropped, this will be a fresh start.
1832 	 * Else, continue with the next fragment of the current sequence.
1833 	 */
1834 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1835 	status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head,
1836 					      &rx_reorder_array_elem->tail,
1837 					      frag, &all_frag_present);
1838 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
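	/*
	 * all_frag_present is set by the insert only once every fragment of
	 * the sequence, up to and including the one with the more-fragments
	 * bit clear, has been collected (inferred from how the flag is
	 * consumed below; not verified against the insert internals).
	 */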
1839 
1840 	/*
1841 	 * Currently we can have only 6 MSDUs per MPDU. If the current
1842 	 * packet sequence has more than 6 MSDUs for some reason, we will
1843 	 * have to use the next MSDU link descriptor and chain them together
1844 	 * before reinjection.
1845 	 * ring_desc is validated in dp_rx_err_process().
1846 	 */
1847 	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
1848 			(rx_reorder_array_elem->head == frag)) {
1850 		status = dp_rx_defrag_save_info_from_ring_desc(ring_desc,
1851 					rx_desc, peer, tid);
1852 
1853 		if (status != QDF_STATUS_SUCCESS) {
1854 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1855 				"%s: Unable to store ring desc!", __func__);
1856 			goto discard_frag;
1857 		}
1858 	} else {
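		/*
		 * Non-head fragment: its descriptors are not needed for the
		 * eventual reinjection, so recycle the rx descriptor to the
		 * local free list and return the link descriptor to the WBM
		 * idle list right away.
		 */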
1859 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1860 		(*rx_bfs)++;
1861 
1862 		/* Return the non-head link desc */
1863 		if (dp_rx_link_desc_return(soc, ring_desc,
1864 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1865 		    QDF_STATUS_SUCCESS)
1866 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1867 				  "%s: Failed to return link desc", __func__);
1868 
1869 	}
1870 
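	/*
	 * While this sequence is actively being processed, take the TID off
	 * the defrag waitlist; it is re-armed below with a fresh timeout if
	 * fragments are still outstanding.
	 */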
1871 	if (pdev->soc->rx.flags.defrag_timeout_check)
1872 		dp_rx_defrag_waitlist_remove(peer, tid);
1873 
1874 	/* Yet to receive more fragments for this sequence number */
1875 	if (!all_frag_present) {
1876 		uint32_t now_ms =
1877 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1878 
1879 		peer->rx_tid[tid].defrag_timeout_ms =
1880 			now_ms + pdev->soc->rx.defrag.timeout_ms;
1881 
1882 		dp_rx_defrag_waitlist_add(peer, tid);
1883 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1884 
1885 		return QDF_STATUS_SUCCESS;
1886 	}
1887 
1888 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1889 		  "All fragments received for sequence: %d", rxseq);
1890 
1891 	/* Process the fragments */
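	/*
	 * Reassembly sketch (inferred, not verified against the
	 * dp_rx_defrag() internals): the per-fragment security
	 * encapsulation is stripped and the fragments are chained into a
	 * single MPDU. On failure, the saved head-fragment descriptor and
	 * destination ring descriptor are recycled before cleanup.
	 */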
1892 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1893 	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
1894 		rx_reorder_array_elem->tail);
1895 	if (QDF_IS_STATUS_ERROR(status)) {
1896 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1897 			"Fragment processing failed");
1898 
1899 		dp_rx_add_to_free_desc_list(head, tail,
1900 				peer->rx_tid[tid].head_frag_desc);
1901 		(*rx_bfs)++;
1902 
1903 		if (dp_rx_link_desc_return(soc,
1904 					peer->rx_tid[tid].dst_ring_desc,
1905 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1906 				QDF_STATUS_SUCCESS)
1907 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1908 					"%s: Failed to return link desc",
1909 					__func__);
1910 		dp_rx_defrag_cleanup(peer, tid);
1911 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1912 		goto end;
1913 	}
1914 
1915 	/* Re-inject the fragments back to REO for further processing */
1916 	status = dp_rx_defrag_reo_reinject(peer, tid,
1917 			rx_reorder_array_elem->head);
1918 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1919 	if (QDF_IS_STATUS_SUCCESS(status)) {
1920 		rx_reorder_array_elem->head = NULL;
1921 		rx_reorder_array_elem->tail = NULL;
1922 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1923 			  "Fragmented sequence successfully reinjected");
1924 	} else {
1925 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1926 			  "Fragmented sequence reinjection failed");
1927 		dp_rx_return_head_frag_desc(peer, tid);
1928 	}
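	/*
	 * The per-TID defrag state is reset below regardless of whether the
	 * reinjection succeeded, so the next fragmented sequence starts
	 * clean.
	 */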
1929 
1930 	dp_rx_defrag_cleanup(peer, tid);
1931 
1932 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1933 
1934 	return QDF_STATUS_SUCCESS;
1935 
1936 discard_frag:
1937 	qdf_nbuf_free(frag);
1938 err_free_desc:
1939 	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1940 	if (dp_rx_link_desc_return(soc, ring_desc,
1941 				   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1942 	    QDF_STATUS_SUCCESS)
1943 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1944 			  "%s: Failed to return link desc", __func__);
1945 	(*rx_bfs)++;
1946 
1947 end:
1948 	if (peer)
1949 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1950 
1951 	DP_STATS_INC(soc, rx.rx_frag_err, 1);
1952 	return QDF_STATUS_E_DEFRAG_ERROR;
1953 }
1954 
1955 /**
1956  * dp_rx_frag_handle() - Handles fragmented Rx frames
1957  *
1958  * @soc: core txrx main context
1959  * @ring_desc: opaque pointer to the REO error ring descriptor
1960  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
1961  * @rx_desc: Rx descriptor of the fragment
1962  * @mac_id: output parameter, filled with the mac id of the Rx pool
1963  * @quota: No. of units (packets) that can be serviced in one shot.
1964  *
1965  * This function implements Rx 802.11 fragmentation handling.
1966  * The handling is mostly the same as the legacy fragmentation handling.
1967  * If required, this function can re-inject the frames back to the
1968  * REO ring (with proper settings to bypass the fragmentation check
1969  * but still use duplicate detection / re-ordering) and route these
1970  * frames to a different core.
1971  *
1972  * Return: uint32_t: No. of elements processed
1973  */
1974 uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
1975 			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1976 			   struct dp_rx_desc *rx_desc,
1977 			   uint8_t *mac_id,
1978 			   uint32_t quota)
1979 {
1980 	uint32_t rx_bufs_used = 0;
1981 	qdf_nbuf_t msdu = NULL;
1982 	uint32_t tid;
1983 	uint32_t rx_bfs = 0;
1984 	struct dp_pdev *pdev;
1985 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1986 	struct rx_desc_pool *rx_desc_pool;
1987 
1988 	qdf_assert(soc);
1989 	qdf_assert(mpdu_desc_info);
1990 	qdf_assert(rx_desc);
1991 
1992 	dp_debug("Number of MSDUs to process, num_msdus: %d",
1993 		 mpdu_desc_info->msdu_count);
1994 
1996 	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
1997 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1998 			"No MSDUs to process");
1999 		return rx_bufs_used;
2000 	}
2001 
2002 	/* all buffers in MSDU link belong to same pdev */
2003 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
2004 	if (!pdev) {
2005 		dp_nofl_debug("pdev is null for pool_id = %d",
2006 			      rx_desc->pool_id);
2007 		return rx_bufs_used;
2008 	}
2009 
2010 	*mac_id = rx_desc->pool_id;
2011 
2012 	msdu = rx_desc->nbuf;
2013 
2014 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2015 
2016 	if (rx_desc->unmapped)
2017 		return rx_bufs_used;
2018 
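	/*
	 * Unmap the buffer before the CPU parses its TLVs. The IPA SMMU
	 * mapping lock presumably serializes this teardown against IPA's
	 * concurrent use of the same buffer pool.
	 */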
2019 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
2020 	dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
2021 					  rx_desc_pool->buf_size,
2022 					  false);
2023 	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2024 				     QDF_DMA_FROM_DEVICE,
2025 				     rx_desc_pool->buf_size);
2026 	rx_desc->unmapped = 1;
2027 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2028 
2029 	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);
2030 
2031 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start);
2032 
2033 	/* Process fragment-by-fragment */
2034 	status = dp_rx_defrag_store_fragment(soc, ring_desc,
2035 					     &pdev->free_list_head,
2036 					     &pdev->free_list_tail,
2037 					     mpdu_desc_info,
2038 					     tid, rx_desc, &rx_bfs);
2039 
2040 	if (rx_bfs)
2041 		rx_bufs_used += rx_bfs;
2042 
2043 	if (!QDF_IS_STATUS_SUCCESS(status))
2044 		dp_info_rl("Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
2045 			   mpdu_desc_info->mpdu_seq,
2046 			   mpdu_desc_info->msdu_count,
2047 			   mpdu_desc_info->mpdu_flags);
2048 
2049 	return rx_bufs_used;
2050 }
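
/*
 * Usage sketch (editorial; the real call site is dp_rx_err_process(),
 * whose exact variable names may differ):
 *
 *	uint8_t mac_id = 0;
 *	uint32_t rx_bufs_reaped;
 *
 *	rx_bufs_reaped = dp_rx_frag_handle(soc, ring_desc, &mpdu_desc_info,
 *					   rx_desc, &mac_id, quota);
 *	rx_bufs_used[mac_id] += rx_bufs_reaped;
 */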
2051 
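/**
 * dp_rx_defrag_add_last_frag() - Add a fragment to the per-TID reorder
 *	list and complete defragmentation once all fragments are present
 * @soc: core txrx main context
 * @peer: pointer to the peer structure
 * @tid: transmit ID (TID)
 * @rxseq: sequence number of the arriving fragment
 * @nbuf: buffer holding the fragment
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_DEFRAG_ERROR otherwise
 */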
2052 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
2053 				      struct dp_peer *peer, uint16_t tid,
2054 				      uint16_t rxseq, qdf_nbuf_t nbuf)
2055 {
2056 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
2057 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
2058 	uint8_t all_frag_present;
2059 	uint32_t msdu_len;
2060 	QDF_STATUS status;
2061 
2062 	rx_reorder_array_elem = peer->rx_tid[tid].array;
2063 
2064 	/*
2065 	 * HW may fill in an unexpected peer_id in the RX PKT TLV.
2066 	 * If that peer_id maps to a valid peer which never went through
2067 	 * dp_peer_rx_init() (e.g. a SAP vdev self peer), the reorder
2068 	 * array is not allocated and must not be dereferenced.
2069 	 */
2070 	if (!rx_reorder_array_elem) {
2071 		dp_verbose_debug(
2072 			"peer id:%d mac: "QDF_MAC_ADDR_FMT" drop rx frame!",
2073 			peer->peer_id,
2074 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2075 		DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1);
2076 		qdf_nbuf_free(nbuf);
2077 		goto fail;
2078 	}
2079 
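	/*
	 * Unlike dp_rx_defrag_store_fragment(), an out-of-sequence fragment
	 * on this path is dropped together with the stored ones instead of
	 * starting a new sequence, presumably because this path only
	 * completes a flow that is already in progress.
	 */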
2080 	if (rx_reorder_array_elem->head &&
2081 	    rxseq != rx_tid->curr_seq_num) {
2082 		/* Drop stored fragments if out of sequence
2083 		 * fragment is received
2084 		 */
2085 		dp_rx_reorder_flush_frag(peer, tid);
2086 
2087 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2088 			  "%s: Out of sequence fragment, flushing TID %d Seq# %d",
2089 				__func__, tid, rxseq);
2090 		qdf_nbuf_free(nbuf);
2091 		goto fail;
2092 	}
2093 
2094 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2095 						  qdf_nbuf_data(nbuf));
2096 
2097 	qdf_nbuf_set_pktlen(nbuf, (msdu_len + soc->rx_pkt_tlv_size));
2098 
2099 	status = dp_rx_defrag_fraglist_insert(peer, tid,
2100 					      &rx_reorder_array_elem->head,
2101 					      &rx_reorder_array_elem->tail,
2102 					      nbuf, &all_frag_present);
2103 
2104 	if (QDF_IS_STATUS_ERROR(status)) {
2105 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2106 			  "%s Fragment insert failed", __func__);
2107 
2108 		goto fail;
2109 	}
2110 
2111 	if (soc->rx.flags.defrag_timeout_check)
2112 		dp_rx_defrag_waitlist_remove(peer, tid);
2113 
2114 	if (!all_frag_present) {
2115 		uint32_t now_ms =
2116 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2117 
2118 		peer->rx_tid[tid].defrag_timeout_ms =
2119 			now_ms + soc->rx.defrag.timeout_ms;
2120 
2121 		dp_rx_defrag_waitlist_add(peer, tid);
2122 
2123 		return QDF_STATUS_SUCCESS;
2124 	}
2125 
2126 	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
2127 			      rx_reorder_array_elem->tail);
2128 
2129 	if (QDF_IS_STATUS_ERROR(status)) {
2130 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2131 			  "%s Fragment processing failed", __func__);
2132 
2133 		dp_rx_return_head_frag_desc(peer, tid);
2134 		dp_rx_defrag_cleanup(peer, tid);
2135 
2136 		goto fail;
2137 	}
2138 
2139 	/* Re-inject the fragments back to REO for further processing */
2140 	status = dp_rx_defrag_reo_reinject(peer, tid,
2141 					   rx_reorder_array_elem->head);
2142 	if (QDF_IS_STATUS_SUCCESS(status)) {
2143 		rx_reorder_array_elem->head = NULL;
2144 		rx_reorder_array_elem->tail = NULL;
2145 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2146 			  "%s: Frag seq successfully reinjected",
2147 			__func__);
2148 	} else {
2149 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2150 			  "%s: Frag seq reinjection failed", __func__);
2151 		dp_rx_return_head_frag_desc(peer, tid);
2152 	}
2153 
2154 	dp_rx_defrag_cleanup(peer, tid);
2155 	return QDF_STATUS_SUCCESS;
2156 
2157 fail:
2158 	return QDF_STATUS_E_DEFRAG_ERROR;
2159 }
2160