1 /*
2  * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "hal_api.h"
24 #include "qdf_trace.h"
25 #include "qdf_nbuf.h"
26 #include "dp_internal.h"
27 #include "dp_rx_defrag.h"
28 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
30 
31 const struct dp_rx_defrag_cipher dp_f_ccmp = {
32 	"AES-CCM",
33 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
34 	IEEE80211_WEP_MICLEN,
35 	0,
36 };
37 
38 const struct dp_rx_defrag_cipher dp_f_tkip = {
39 	"TKIP",
40 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
41 	IEEE80211_WEP_CRCLEN,
42 	IEEE80211_WEP_MICLEN,
43 };
44 
45 const struct dp_rx_defrag_cipher dp_f_wep = {
46 	"WEP",
47 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
48 	IEEE80211_WEP_CRCLEN,
49 	0,
50 };
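/*
 * Reference sketch (illustrative, not from the original sources): the
 * dp_rx_defrag_cipher fields describe the per-MPDU overhead a cipher
 * adds around the payload. For TKIP the over-the-air layout is roughly:
 *
 *   [802.11 hdr][IV+KeyID(4)|ExtIV(4)][payload][MIC(8)][ICV(4)]
 *                \----- ic_header ---/      ic_miclen/ ic_trailer/
 *
 * The defrag path trims ic_trailer from every fragment, verifies and
 * strips the MIC from the tail of the reassembled frame, and pulls
 * ic_header together with the 802.11 header.
 */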
51 
52 /*
53  * dp_rx_defrag_frames_free(): Free fragment chain
54  * @frames: Fragment chain
55  *
56  * Iterates through the fragment chain and frees them
57  * Returns: None
58  */
59 static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
60 {
61 	qdf_nbuf_t next, frag = frames;
62 
63 	while (frag) {
64 		next = qdf_nbuf_next(frag);
65 		qdf_nbuf_free(frag);
66 		frag = next;
67 	}
68 }
69 
70 /*
71  * dp_rx_clear_saved_desc_info(): Clears descriptor info
72  * @peer: Pointer to the peer data structure
73  * @tid: Traffic ID (TID)
74  *
75  * Frees the MPDU descriptor info cached from the REO destination
76  * ring descriptor. The cache is maintained per peer, per TID
77  *
78  * Returns: None
79  */
80 static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid)
81 {
82 	if (peer->rx_tid[tid].dst_ring_desc)
83 		qdf_mem_free(peer->rx_tid[tid].dst_ring_desc);
84 
85 	peer->rx_tid[tid].dst_ring_desc = NULL;
86 }
87 
88 static void dp_rx_return_head_frag_desc(struct dp_peer *peer,
89 					unsigned int tid)
90 {
91 	struct dp_soc *soc;
92 	struct dp_pdev *pdev;
93 	struct dp_srng *dp_rxdma_srng;
94 	struct rx_desc_pool *rx_desc_pool;
95 	union dp_rx_desc_list_elem_t *head = NULL;
96 	union dp_rx_desc_list_elem_t *tail = NULL;
97 
98 	if (peer->rx_tid[tid].head_frag_desc) {
99 		pdev = peer->vdev->pdev;
100 		soc = pdev->soc;
101 		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
102 		rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
103 
104 		dp_rx_add_to_free_desc_list(&head, &tail,
105 					    peer->rx_tid[tid].head_frag_desc);
106 		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
107 					1, &head, &tail);
108 	}
109 }
110 
111 /*
112  * dp_rx_reorder_flush_frag(): Flush the frag list
113  * @peer: Pointer to the peer data structure
114  * @tid: Traffic ID (TID)
115  *
116  * Flush the per-TID frag list
117  *
118  * Returns: None
119  */
120 void dp_rx_reorder_flush_frag(struct dp_peer *peer,
121 			 unsigned int tid)
122 {
123 	struct dp_soc *soc;
124 
125 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
126 		  FL("Flushing TID %d"), tid);
127 
128 	if (!peer) {
129 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
130 					"%s: NULL peer", __func__);
131 		return;
132 	}
133 
134 	soc = peer->vdev->pdev->soc;
135 
136 	if (peer->rx_tid[tid].dst_ring_desc) {
137 		if (dp_rx_link_desc_return(soc,
138 					peer->rx_tid[tid].dst_ring_desc,
139 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
140 					QDF_STATUS_SUCCESS)
141 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
142 					"%s: Failed to return link desc",
143 					__func__);
144 	}
145 
146 	dp_rx_return_head_frag_desc(peer, tid);
147 	dp_rx_defrag_cleanup(peer, tid);
148 }
149 
150 /*
151  * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list
152  * @soc: DP SOC
153  *
154  * Flush fragments of all waitlisted TIDs
155  *
156  * Returns: None
157  */
158 void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
159 {
160 	struct dp_rx_tid *rx_reorder;
161 	struct dp_rx_tid *tmp;
162 	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
163 	TAILQ_HEAD(, dp_rx_tid) temp_list;
164 
165 	TAILQ_INIT(&temp_list);
166 
167 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
168 		  FL("Current time %u"), now_ms);
169 
170 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
171 	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
172 			   defrag_waitlist_elem, tmp) {
173 		uint32_t tid;
174 
175 		if (rx_reorder->defrag_timeout_ms > now_ms)
176 			break;
177 
178 		tid = rx_reorder->tid;
179 		if (tid >= DP_MAX_TIDS) {
180 			qdf_assert(0);
181 			continue;
182 		}
183 
184 		TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder,
185 			     defrag_waitlist_elem);
186 		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
187 
188 		/* Move to temp list and clean-up later */
189 		TAILQ_INSERT_TAIL(&temp_list, rx_reorder,
190 				  defrag_waitlist_elem);
191 	}
192 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
193 
194 	TAILQ_FOREACH_SAFE(rx_reorder, &temp_list,
195 			   defrag_waitlist_elem, tmp) {
196 		struct dp_peer *peer;
197 
198 		/* get address of current peer */
199 		peer =
200 			container_of(rx_reorder, struct dp_peer,
201 				     rx_tid[rx_reorder->tid]);
202 
203 		qdf_spin_lock_bh(&rx_reorder->tid_lock);
204 		dp_rx_reorder_flush_frag(peer, rx_reorder->tid);
205 		qdf_spin_unlock_bh(&rx_reorder->tid_lock);
206 	}
207 }
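/*
 * Note (inferred from the code above, not in the original sources):
 * the early 'break' in the first loop relies on the waitlist being
 * ordered by defrag_timeout_ms. That holds because entries are only
 * appended (TAILQ_INSERT_TAIL) with a fixed per-soc timeout added to
 * the current time, so later entries always expire later.
 */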
208 
209 /*
210  * dp_rx_defrag_waitlist_add(): Update per-SOC defrag wait list
211  * @peer: Pointer to the peer data structure
212  * @tid: Traffic ID (TID)
213  *
214  * Appends the per-TID fragment entry to the SOC-wide wait list
215  *
216  * Returns: None
217  */
218 static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid)
219 {
220 	struct dp_soc *psoc = peer->vdev->pdev->soc;
221 	struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid];
222 
223 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
224 		  FL("Adding TID %u to waitlist for peer %pK"),
225 		  tid, peer);
226 
227 	/* TODO: use LIST macros instead of TAIL macros */
228 	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
229 	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder,
230 				defrag_waitlist_elem);
231 	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
232 	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
233 }
234 
235 /*
236  * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist
237  * @peer: Pointer to the peer data structure
238  * @tid: Traffic ID (TID)
239  *
240  * Remove fragments from waitlist
241  *
242  * Returns: None
243  */
244 void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
245 {
246 	struct dp_pdev *pdev = peer->vdev->pdev;
247 	struct dp_soc *soc = pdev->soc;
248 	struct dp_rx_tid *rx_reorder;
	struct dp_rx_tid *tmp;
249 
250 	if (tid >= DP_MAX_TIDS) {
251 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
252 			  "TID out of bounds: %d", tid);
253 		qdf_assert(0);
254 		return;
255 	}
256 
257 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
258 	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
259 			   defrag_waitlist_elem, tmp) {
260 		struct dp_peer *peer_on_waitlist;
261 
262 		/* get address of current peer */
263 		peer_on_waitlist =
264 			container_of(rx_reorder, struct dp_peer,
265 				     rx_tid[rx_reorder->tid]);
266 
267 		/* Ensure it is TID for same peer */
268 		if (peer_on_waitlist == peer && rx_reorder->tid == tid) {
269 			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
270 				rx_reorder, defrag_waitlist_elem);
271 			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
272 		}
273 	}
274 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
275 }
276 
277 /*
278  * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list
279  * @peer: Pointer to the peer data structure
280  * @tid: Traffic ID (TID)
281  * @head_addr: Pointer to head list
282  * @tail_addr: Pointer to tail list
283  * @frag: Incoming fragment
284  * @all_frag_present: Flag to indicate whether all fragments are received
285  *
286  * Build a per-TID, per-sequence fragment list.
287  *
288  * Returns: Success, if inserted
289  */
290 static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid,
291 	qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag,
292 	uint8_t *all_frag_present)
293 {
294 	qdf_nbuf_t next;
295 	qdf_nbuf_t prev = NULL;
296 	qdf_nbuf_t cur;
297 	uint16_t head_fragno, cur_fragno, next_fragno;
298 	uint8_t last_morefrag = 1, count = 0;
299 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
300 	uint8_t *rx_desc_info;
301 
302 
303 	qdf_assert(frag);
304 	qdf_assert(head_addr);
305 	qdf_assert(tail_addr);
306 
307 	*all_frag_present = 0;
308 	rx_desc_info = qdf_nbuf_data(frag);
309 	cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
310 
311 	/* If this is the first fragment */
312 	if (!(*head_addr)) {
313 		*head_addr = *tail_addr = frag;
314 		qdf_nbuf_set_next(*tail_addr, NULL);
315 		rx_tid->curr_frag_num = cur_fragno;
316 
317 		goto insert_done;
318 	}
319 
320 	/* In sequence fragment */
321 	if (cur_fragno > rx_tid->curr_frag_num) {
322 		qdf_nbuf_set_next(*tail_addr, frag);
323 		*tail_addr = frag;
324 		qdf_nbuf_set_next(*tail_addr, NULL);
325 		rx_tid->curr_frag_num = cur_fragno;
326 	} else {
327 		/* Out of sequence fragment */
328 		cur = *head_addr;
329 		rx_desc_info = qdf_nbuf_data(cur);
330 		head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
331 
332 		if (cur_fragno == head_fragno) {
333 			qdf_nbuf_free(frag);
334 			goto insert_fail;
335 		} else if (head_fragno > cur_fragno) {
336 			qdf_nbuf_set_next(frag, cur);
337 			cur = frag;
338 			*head_addr = frag; /* head pointer to be updated */
339 		} else {
340 			while ((cur_fragno > head_fragno) && cur) {
341 				prev = cur;
342 				cur = qdf_nbuf_next(cur);
343 				/* don't dereference past the chain end */
344 				if (cur) {
345 					rx_desc_info = qdf_nbuf_data(cur);
346 					head_fragno =
347 					    dp_rx_frag_get_mpdu_frag_number(
								rx_desc_info);
				}
			}
348 
349 			if (cur_fragno == head_fragno) {
350 				qdf_nbuf_free(frag);
351 				goto insert_fail;
352 			}
353 
354 			qdf_nbuf_set_next(prev, frag);
355 			qdf_nbuf_set_next(frag, cur);
356 		}
357 	}
358 
359 	next = qdf_nbuf_next(*head_addr);
360 
361 	rx_desc_info = qdf_nbuf_data(*tail_addr);
362 	last_morefrag = dp_rx_frag_get_more_frag_bit(rx_desc_info);
363 
364 	/* TODO: optimize the loop */
365 	if (!last_morefrag) {
366 		/* Check if all fragments are present */
367 		do {
368 			rx_desc_info = qdf_nbuf_data(next);
369 			next_fragno =
370 				dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
371 			count++;
372 
373 			if (next_fragno != count)
374 				break;
375 
376 			next = qdf_nbuf_next(next);
377 		} while (next);
378 
379 		if (!next) {
380 			*all_frag_present = 1;
381 			return QDF_STATUS_SUCCESS;
382 		}
383 	}
384 
385 insert_done:
386 	return QDF_STATUS_SUCCESS;
387 
388 insert_fail:
389 	return QDF_STATUS_E_FAILURE;
390 }
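/*
 * Worked example (illustrative): for fragments arriving in the order
 * 0, 2, 1, 3 with the more-frag bit clear only on fragment 3, the
 * list built above evolves as
 *   0  ->  0,2  ->  0,1,2  ->  0,1,2,3
 * and *all_frag_present is set only on the final insert, once the
 * tail has more-frag clear and the walk from the head finds fragment
 * numbers 1, 2, 3 in order.
 */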
391 
392 
393 /*
394  * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment
395  * @msdu: Pointer to the fragment
396  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
397  *
398  * decap tkip encrypted fragment
399  *
400  * Returns: QDF_STATUS
401  */
402 static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
403 {
404 	uint8_t *ivp, *orig_hdr;
405 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
406 
407 	/* start of 802.11 header info */
408 	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
409 
410 	/* TKIP header is located post 802.11 header */
411 	ivp = orig_hdr + hdrlen;
412 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
413 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
414 			"IEEE80211_WEP_EXTIV is missing in TKIP fragment");
415 		return QDF_STATUS_E_DEFRAG_ERROR;
416 	}
417 
418 	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);
419 
420 	return QDF_STATUS_SUCCESS;
421 }
422 
423 /*
424  * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment
425  * @nbuf: Pointer to the fragment buffer
426  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
427  *
428  * Remove MIC information from CCMP fragment
429  *
430  * Returns: QDF_STATUS
431  */
432 static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen)
433 {
434 	uint8_t *ivp, *orig_hdr;
435 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
436 
437 	/* start of the 802.11 header */
438 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
439 
440 	/* CCMP header is located after 802.11 header */
441 	ivp = orig_hdr + hdrlen;
442 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
443 		return QDF_STATUS_E_DEFRAG_ERROR;
444 
445 	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);
446 
447 	return QDF_STATUS_SUCCESS;
448 }
449 
450 /*
451  * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment
452  * @nbuf: Pointer to the fragment
453  * @hdrlen: length of the header information
454  *
455  * decap CCMP encrypted fragment
456  *
457  * Returns: QDF_STATUS
458  */
459 static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen)
460 {
461 	uint8_t *ivp, *origHdr;
462 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
463 
464 	origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
465 	ivp = origHdr + hdrlen;
466 
467 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
468 		return QDF_STATUS_E_DEFRAG_ERROR;
469 
470 	/* Let's pull the header later */
471 
472 	return QDF_STATUS_SUCCESS;
473 }
474 
475 /*
476  * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment
477  * @msdu: Pointer to the fragment
478  * @hdrlen: length of the header information
479  *
480  * decap WEP encrypted fragment
481  *
482  * Returns: QDF_STATUS
483  */
484 static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
485 {
486 	uint8_t *origHdr;
487 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
488 
489 	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
490 	qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);
491 
492 	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);
493 
494 	return QDF_STATUS_SUCCESS;
495 }
496 
497 /*
498  * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment
499  * @nbuf: Pointer to the fragment
500  *
501  * Calculate the header size of the received fragment
502  *
503  * Returns: header size (uint16_t)
504  */
505 static uint16_t dp_rx_defrag_hdrsize(qdf_nbuf_t nbuf)
506 {
507 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
508 	uint16_t size = sizeof(struct ieee80211_frame);
509 	uint16_t fc = 0;
510 	uint32_t to_ds, fr_ds;
511 	uint8_t frm_ctrl_valid;
512 	uint16_t frm_ctrl_field;
513 
514 	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
515 	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
516 	frm_ctrl_valid = hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr);
517 	frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr);
518 
519 	if (to_ds && fr_ds)
520 		size += IEEE80211_ADDR_LEN;
521 
522 	if (frm_ctrl_valid) {
523 		fc = frm_ctrl_field;
524 
525 		/* check the QoS bit in the first FC byte */
526 		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
527 			size += sizeof(uint16_t);
528 			/* check the Order bit in the second FC byte */
529 			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
530 				size += sizeof(struct ieee80211_htc);
531 		}
532 	}
533 
534 	return size;
535 }
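/*
 * Example (assumed frame shapes, for illustration): a QoS data frame
 * from a STA to an AP (ToDS=1, FromDS=0) yields
 *   sizeof(struct ieee80211_frame) (24) + QoS control (2) = 26 bytes,
 * while a 4-address QoS frame carrying an HT Control field yields
 *   24 + IEEE80211_ADDR_LEN (6) + 2 + sizeof(struct ieee80211_htc) (4)
 *   = 36 bytes.
 */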
536 
537 /*
538  * dp_rx_defrag_michdr(): Calculate a pseudo MIC header
539  * @wh0: Pointer to the wireless header of the fragment
540  * @hdr: Array to hold the pseudo header
541  *
542  * Calculate a pseudo MIC header
543  *
544  * Returns: None
545  */
546 static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
547 				uint8_t hdr[])
548 {
549 	const struct ieee80211_frame_addr4 *wh =
550 		(const struct ieee80211_frame_addr4 *)wh0;
551 
552 	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
553 	case IEEE80211_FC1_DIR_NODS:
554 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
555 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
556 					   wh->i_addr2);
557 		break;
558 	case IEEE80211_FC1_DIR_TODS:
559 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
560 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
561 					   wh->i_addr2);
562 		break;
563 	case IEEE80211_FC1_DIR_FROMDS:
564 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
565 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
566 					   wh->i_addr3);
567 		break;
568 	case IEEE80211_FC1_DIR_DSTODS:
569 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
570 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
571 					   wh->i_addr4);
572 		break;
573 	}
574 
575 	/*
576 	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but
577 	 * it could also be set for deauth, disassoc, action, etc. for
578 	 * a mgt type frame. It comes into picture for MFP.
579 	 */
580 	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
581 		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
582 				IEEE80211_FC1_DIR_DSTODS) {
583 			const struct ieee80211_qosframe_addr4 *qwh =
584 				(const struct ieee80211_qosframe_addr4 *)wh;
585 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
586 		} else {
587 			const struct ieee80211_qosframe *qwh =
588 				(const struct ieee80211_qosframe *)wh;
589 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
590 		}
591 	} else {
592 		hdr[12] = 0;
593 	}
594 
595 	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
596 }
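/*
 * Layout note (illustrative): the 16-byte Michael pseudo header
 * produced above is
 *   hdr[0..5]   = DA
 *   hdr[6..11]  = SA
 *   hdr[12]     = QoS TID (0 for non-QoS frames)
 *   hdr[13..15] = 0 (reserved)
 * and is mixed into the MIC as four little-endian 32-bit words by
 * dp_rx_defrag_mic() below.
 */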
597 
598 /*
599  * dp_rx_defrag_mic(): Calculate the Michael MIC
600  * @key: Pointer to the key
601  * @wbuf: fragment buffer
602  * @off: Offset
603  * @data_len: Data length
604  * @mic: Array to hold MIC
605  *
606  * Compute the Michael MIC over the MSDU payload
607  *
608  * Returns: QDF_STATUS
609  */
610 static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf,
611 		uint16_t off, uint16_t data_len, uint8_t mic[])
612 {
613 	uint8_t hdr[16] = { 0, };
614 	uint32_t l, r;
615 	const uint8_t *data;
616 	uint32_t space;
617 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
618 
619 	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
620 		+ rx_desc_len), hdr);
621 
622 	l = dp_rx_get_le32(key);
623 	r = dp_rx_get_le32(key + 4);
624 
625 	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
626 	l ^= dp_rx_get_le32(hdr);
627 	dp_rx_michael_block(l, r);
628 	l ^= dp_rx_get_le32(&hdr[4]);
629 	dp_rx_michael_block(l, r);
630 	l ^= dp_rx_get_le32(&hdr[8]);
631 	dp_rx_michael_block(l, r);
632 	l ^= dp_rx_get_le32(&hdr[12]);
633 	dp_rx_michael_block(l, r);
634 
635 	/* first buffer has special handling */
636 	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
637 	space = qdf_nbuf_len(wbuf) - off;
638 
639 	for (;; ) {
640 		if (space > data_len)
641 			space = data_len;
642 
643 		/* collect 32-bit blocks from current buffer */
644 		while (space >= sizeof(uint32_t)) {
645 			l ^= dp_rx_get_le32(data);
646 			dp_rx_michael_block(l, r);
647 			data += sizeof(uint32_t);
648 			space -= sizeof(uint32_t);
649 			data_len -= sizeof(uint32_t);
650 		}
651 		if (data_len < sizeof(uint32_t))
652 			break;
653 
654 		wbuf = qdf_nbuf_next(wbuf);
655 		if (wbuf == NULL)
656 			return QDF_STATUS_E_DEFRAG_ERROR;
657 
658 		if (space != 0) {
659 			const uint8_t *data_next;
660 			/*
661 			 * Block straddles buffers, split references.
662 			 */
663 			data_next =
664 				(uint8_t *)qdf_nbuf_data(wbuf) + off;
665 			if ((qdf_nbuf_len(wbuf)) <
666 				sizeof(uint32_t) - space) {
667 				return QDF_STATUS_E_DEFRAG_ERROR;
668 			}
669 			switch (space) {
670 			case 1:
671 				l ^= dp_rx_get_le32_split(data[0],
672 					data_next[0], data_next[1],
673 					data_next[2]);
674 				data = data_next + 3;
675 				space = (qdf_nbuf_len(wbuf) - off) - 3;
676 				break;
677 			case 2:
678 				l ^= dp_rx_get_le32_split(data[0], data[1],
679 						    data_next[0], data_next[1]);
680 				data = data_next + 2;
681 				space = (qdf_nbuf_len(wbuf) - off) - 2;
682 				break;
683 			case 3:
684 				l ^= dp_rx_get_le32_split(data[0], data[1],
685 					data[2], data_next[0]);
686 				data = data_next + 1;
687 				space = (qdf_nbuf_len(wbuf) - off) - 1;
688 				break;
689 			}
690 			dp_rx_michael_block(l, r);
691 			data_len -= sizeof(uint32_t);
692 		} else {
693 			/*
694 			 * Setup for next buffer.
695 			 */
696 			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
697 			space = qdf_nbuf_len(wbuf) - off;
698 		}
699 	}
700 	/* Last block and padding (0x5a, 4..7 x 0) */
701 	switch (data_len) {
702 	case 0:
703 		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
704 		break;
705 	case 1:
706 		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
707 		break;
708 	case 2:
709 		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
710 		break;
711 	case 3:
712 		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
713 		break;
714 	}
715 	dp_rx_michael_block(l, r);
716 	dp_rx_michael_block(l, r);
717 	dp_rx_put_le32(mic, l);
718 	dp_rx_put_le32(mic + 4, r);
719 
720 	return QDF_STATUS_SUCCESS;
721 }
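/*
 * Reference sketch (the real helper lives in dp_rx_defrag.h; shown
 * here only for readability): per the TKIP Michael specification,
 * each dp_rx_michael_block(l, r) step mixes the two halves roughly as
 *
 *   r ^= rol32(l, 17);                    l += r;
 *   r ^= ((l & 0xff00ff00) >> 8) |
 *        ((l & 0x00ff00ff) << 8);         l += r;
 *   r ^= rol32(l, 3);                     l += r;
 *   r ^= ror32(l, 2);                     l += r;
 */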
722 
723 /*
724  * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame
725  * @key: Pointer to the key
726  * @msdu: fragment buffer
727  * @hdrlen: Length of the header information
728  *
729  * Remove MIC information from the TKIP frame
730  *
731  * Returns: QDF_STATUS
732  */
733 static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key,
734 					qdf_nbuf_t msdu, uint16_t hdrlen)
735 {
736 	QDF_STATUS status;
737 	uint32_t pktlen = 0;
738 	uint8_t mic[IEEE80211_WEP_MICLEN];
739 	uint8_t mic0[IEEE80211_WEP_MICLEN];
740 	qdf_nbuf_t prev = NULL, next;
741 
742 	next = msdu;
743 	while (next) {
744 		pktlen += (qdf_nbuf_len(next) - hdrlen);
745 		prev = next;
746 		dp_debug("%s pktlen %u", __func__,
747 			 (uint32_t)(qdf_nbuf_len(next) - hdrlen));
748 		next = qdf_nbuf_next(next);
749 	}
750 
751 	if (!prev) {
752 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
753 			  "%s: Defrag chaining failed !", __func__);
754 		return QDF_STATUS_E_DEFRAG_ERROR;
755 	}
756 
757 	qdf_nbuf_copy_bits(prev, qdf_nbuf_len(prev) - dp_f_tkip.ic_miclen,
758 			   dp_f_tkip.ic_miclen, (caddr_t)mic0);
759 	qdf_nbuf_trim_tail(prev, dp_f_tkip.ic_miclen);
760 	pktlen -= dp_f_tkip.ic_miclen;
761 
762 	status = dp_rx_defrag_mic(key, msdu, hdrlen,
763 				pktlen, mic);
764 
765 	if (QDF_IS_STATUS_ERROR(status))
766 		return status;
767 
768 	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
769 		return QDF_STATUS_E_DEFRAG_ERROR;
770 
771 	return QDF_STATUS_SUCCESS;
772 }
773 
774 /*
775  * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers
776  * @nbuf: buffer pointer
777  * @hdrsize: size of the header to be pulled
778  *
779  * Pull the RXTLV & the 802.11 headers
780  *
781  * Returns: None
782  */
783 static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize)
784 {
785 	qdf_nbuf_pull_head(nbuf,
786 			RX_PKT_TLVS_LEN + hdrsize);
787 
788 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
789 		  "%s: final pktlen %d .11len %d",
790 		  __func__, (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
791 }
792 
793 /*
794  * dp_rx_construct_fraglist(): Construct a nbuf fraglist
795  * @peer: Pointer to the peer
796  * @head: Pointer to list of fragments
797  * @hdrsize: Size of the header to be pulled
798  *
799  * Construct a nbuf fraglist
800  *
801  * Returns: None
802  */
803 static void
804 dp_rx_construct_fraglist(struct dp_peer *peer,
805 		qdf_nbuf_t head, uint16_t hdrsize)
806 {
807 	qdf_nbuf_t msdu = qdf_nbuf_next(head);
808 	qdf_nbuf_t rx_nbuf = msdu;
809 	uint32_t len = 0;
810 
811 	while (msdu) {
812 		dp_rx_frag_pull_hdr(msdu, hdrsize);
813 		len += qdf_nbuf_len(msdu);
814 		msdu = qdf_nbuf_next(msdu);
815 	}
816 
817 	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
818 	qdf_nbuf_set_next(head, NULL);
819 
820 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
821 		  "%s: head len %d ext len %d data len %d ",
822 		  __func__,
823 		  (uint32_t)qdf_nbuf_len(head),
824 		  (uint32_t)qdf_nbuf_len(rx_nbuf),
825 		  (uint32_t)(head->data_len));
826 }
827 
828 /**
829  * dp_rx_defrag_err() - rx defragmentation error handler
830  * @vdev: handle to vdev object
831  * @nbuf: frame buffer on which the MIC error was detected
832  *
833  * This function handles the rx error and sends a MIC error notification
841  *
842  * Return: None
843  */
844 static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
845 {
846 	struct ol_if_ops *tops = NULL;
847 	struct dp_pdev *pdev = vdev->pdev;
848 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
849 	uint8_t *orig_hdr;
850 	struct ieee80211_frame *wh;
851 
852 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
853 	wh = (struct ieee80211_frame *)orig_hdr;
854 
855 	tops = pdev->soc->cdp_soc.ol_ops;
856 	if (tops->rx_mic_error)
857 		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);
858 }
859 
860 
861 /*
862  * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
863  * @nbuf: Pointer to the fragment buffer
864  * @hdrsize: Size of headers
865  *
866  * Transcap the fragment from 802.11 to 802.3
867  *
868  * Returns: None
869  */
870 static void
871 dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize)
872 {
873 	struct llc_snap_hdr_t *llchdr;
874 	struct ethernet_hdr_t *eth_hdr;
875 	uint8_t ether_type[2];
876 	uint16_t fc = 0;
877 	union dp_align_mac_addr mac_addr;
878 	uint8_t *rx_desc_info = qdf_mem_malloc(RX_PKT_TLVS_LEN);
879 
880 	if (rx_desc_info == NULL) {
881 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
882 			"%s: Memory alloc failed ! ", __func__);
883 		QDF_ASSERT(0);
884 		return;
885 	}
886 
887 	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN);
888 
889 	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
890 					RX_PKT_TLVS_LEN + hdrsize);
891 	qdf_mem_copy(ether_type, llchdr->ethertype, 2);
892 
893 	qdf_nbuf_pull_head(nbuf, (RX_PKT_TLVS_LEN + hdrsize +
894 				  sizeof(struct llc_snap_hdr_t) -
895 				  sizeof(struct ethernet_hdr_t)));
896 
897 	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));
898 
899 	if (hal_rx_get_mpdu_frame_control_valid(rx_desc_info))
900 		fc = hal_rx_get_frame_ctrl_field(rx_desc_info);
901 
902 	dp_debug("%s: frame control type: 0x%x", __func__, fc);
903 
904 	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
905 	case IEEE80211_FC1_DIR_NODS:
906 		hal_rx_mpdu_get_addr1(rx_desc_info,
907 			&mac_addr.raw[0]);
908 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
909 			IEEE80211_ADDR_LEN);
910 		hal_rx_mpdu_get_addr2(rx_desc_info,
911 			&mac_addr.raw[0]);
912 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
913 			IEEE80211_ADDR_LEN);
914 		break;
915 	case IEEE80211_FC1_DIR_TODS:
916 		hal_rx_mpdu_get_addr3(rx_desc_info,
917 			&mac_addr.raw[0]);
918 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
919 			IEEE80211_ADDR_LEN);
920 		hal_rx_mpdu_get_addr2(rx_desc_info,
921 			&mac_addr.raw[0]);
922 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
923 			IEEE80211_ADDR_LEN);
924 		break;
925 	case IEEE80211_FC1_DIR_FROMDS:
926 		hal_rx_mpdu_get_addr1(rx_desc_info,
927 			&mac_addr.raw[0]);
928 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
929 			IEEE80211_ADDR_LEN);
930 		hal_rx_mpdu_get_addr3(rx_desc_info,
931 			&mac_addr.raw[0]);
932 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
933 			IEEE80211_ADDR_LEN);
934 		break;
935 
936 	case IEEE80211_FC1_DIR_DSTODS:
937 		hal_rx_mpdu_get_addr3(rx_desc_info,
938 			&mac_addr.raw[0]);
939 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
940 			IEEE80211_ADDR_LEN);
941 		hal_rx_mpdu_get_addr4(rx_desc_info,
942 			&mac_addr.raw[0]);
943 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
944 			IEEE80211_ADDR_LEN);
945 		break;
946 
947 	default:
948 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
949 		"%s: Unknown frame control type: 0x%x", __func__, fc);
950 	}
951 
952 	qdf_mem_copy(eth_hdr->ethertype, ether_type,
953 			sizeof(ether_type));
954 
955 	qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN);
956 	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, RX_PKT_TLVS_LEN);
957 	qdf_mem_free(rx_desc_info);
958 }
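/*
 * Illustration (not from the original sources): the in-place
 * transform above turns, e.g., a ToDS frame from
 *   [RX TLVs][802.11 hdr][LLC/SNAP][payload]
 * into
 *   [RX TLVs][DA(6)|SA(6)|ethertype(2)][payload]
 * with DA taken from addr3, SA from addr2 and the ethertype carried
 * over from the LLC/SNAP header.
 */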
959 
960 /*
961  * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
962  * @peer: Pointer to the peer
963  * @tid: Traffic Identifier
964  * @head: Buffer to be reinjected back
965  *
966  * Reinject the fragment chain back into REO
967  *
968  * Returns: QDF_STATUS
969  */
970 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
971 					unsigned tid, qdf_nbuf_t head)
972 {
973 	struct dp_pdev *pdev = peer->vdev->pdev;
974 	struct dp_soc *soc = pdev->soc;
975 	struct hal_buf_info buf_info;
976 	void *link_desc_va;
977 	void *msdu0, *msdu_desc_info;
978 	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
979 	void *dst_mpdu_desc_info, *dst_qdesc_addr;
980 	qdf_dma_addr_t paddr;
981 	uint32_t nbuf_len, seq_no, dst_ind;
982 	uint32_t *mpdu_wrd;
983 	uint32_t ret, cookie;
984 
985 	void *dst_ring_desc =
986 		peer->rx_tid[tid].dst_ring_desc;
987 	void *hal_srng = soc->reo_reinject_ring.hal_srng;
988 
989 	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
990 	if (!ent_ring_desc) {
991 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
992 			  "HAL src ring next entry NULL");
993 		return QDF_STATUS_E_FAILURE;
994 	}
995 
996 	hal_rx_reo_buf_paddr_get(dst_ring_desc, &buf_info);
997 
998 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
999 
1000 	qdf_assert(link_desc_va);
1001 
1002 	msdu0 = (uint8_t *)link_desc_va +
1003 		RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET;
1004 
1005 	nbuf_len = qdf_nbuf_len(head) - RX_PKT_TLVS_LEN;
1006 
1007 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
1008 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
1009 			UNI_DESC_BUF_TYPE_RX_MSDU_LINK);
1010 
1011 	/* msdu reconfig */
1012 	msdu_desc_info = (uint8_t *)msdu0 +
1013 		RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET;
1014 
1015 	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);
1016 
1017 	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));
1018 
1019 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1020 			FIRST_MSDU_IN_MPDU_FLAG, 1);
1021 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1022 			LAST_MSDU_IN_MPDU_FLAG, 1);
1023 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1024 			MSDU_CONTINUATION, 0x0);
1025 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1026 			REO_DESTINATION_INDICATION, dst_ind);
1027 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1028 			MSDU_LENGTH, nbuf_len);
1029 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1030 			SA_IS_VALID, 1);
1031 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1032 			DA_IS_VALID, 1);
1033 
1034 	/* change RX TLV's */
1035 	hal_rx_msdu_start_msdu_len_set(
1036 			qdf_nbuf_data(head), nbuf_len);
1037 
1038 	cookie = HAL_RX_BUF_COOKIE_GET(msdu0);
1039 
1040 	/* map the nbuf before reinject it into HW */
1041 	ret = qdf_nbuf_map_single(soc->osdev, head,
1042 					QDF_DMA_BIDIRECTIONAL);
1043 
1044 	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1045 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1046 				"%s: nbuf map failed !", __func__);
1047 		return QDF_STATUS_E_FAILURE;
1048 	}
1049 
1050 	paddr = qdf_nbuf_get_frag_paddr(head, 0);
1051 
1052 	ret = check_x86_paddr(soc, &head, &paddr, pdev);
1053 
1054 	if (ret == QDF_STATUS_E_FAILURE) {
1055 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1056 				"%s: x86 check failed !", __func__);
1057 		return QDF_STATUS_E_FAILURE;
1058 	}
1059 
1060 	hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_WBM2SW_RBM);
1061 
1062 	/* Now fill the REO entrance ring */
1063 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1064 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1065 		"HAL RING Access For REO entrance SRNG Failed: %pK",
1066 		hal_srng);
1067 
1068 		return QDF_STATUS_E_FAILURE;
1069 	}
1070 
1071 	paddr = (uint64_t)buf_info.paddr;
1072 	/* buf addr */
1073 	hal_rxdma_buff_addr_info_set(ent_ring_desc, paddr,
1074 				     buf_info.sw_cookie,
1075 				     HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
1076 	/* mpdu desc info */
1077 	ent_mpdu_desc_info = (uint8_t *)ent_ring_desc +
1078 	RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET;
1079 
1080 	dst_mpdu_desc_info = (uint8_t *)dst_ring_desc +
1081 	REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET;
1082 
1083 	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
1084 				sizeof(struct rx_mpdu_desc_info));
1085 	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));
1086 
1087 	mpdu_wrd = (uint32_t *)dst_mpdu_desc_info;
1088 	seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd);
1089 
1090 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1091 			MSDU_COUNT, 0x1);
1092 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1093 			MPDU_SEQUENCE_NUMBER, seq_no);
1094 
1095 	/* unset frag bit */
1096 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1097 			FRAGMENT_FLAG, 0x0);
1098 
1099 	/* set sa/da valid bits */
1100 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1101 			SA_IS_VALID, 0x1);
1102 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1103 			DA_IS_VALID, 0x1);
1104 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1105 			RAW_MPDU, 0x0);
1106 
1107 	/* qdesc addr */
1108 	ent_qdesc_addr = (uint8_t *)ent_ring_desc +
1109 		REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;
1110 
1111 	dst_qdesc_addr = (uint8_t *)dst_ring_desc +
1112 		REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;
1113 
1114 	qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8);
1115 
1116 	HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5,
1117 			REO_DESTINATION_INDICATION, dst_ind);
1118 
1119 	hal_srng_access_end(soc->hal_soc, hal_srng);
1120 
1121 	DP_STATS_INC(soc, rx.reo_reinject, 1);
1122 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1123 		  "%s: reinjection done !", __func__);
1124 	return QDF_STATUS_SUCCESS;
1125 }
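/*
 * Summary (derived from the code above): reinjection rebuilds a
 * single-MSDU link descriptor around the defragmented nbuf, copies
 * the saved MPDU descriptor info and REO queue address into a REO
 * entrance ring entry, clears FRAGMENT_FLAG, and hands the MPDU back
 * to REO as if it were a freshly received, unfragmented frame.
 */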
1126 
1127 /*
1128  * dp_rx_defrag(): Defragment the fragment chain
1129  * @peer: Pointer to the peer
1130  * @tid: Traffic Identifier
1131  * @frag_list_head: Pointer to head list
1132  * @frag_list_tail: Pointer to tail list
1133  *
1134  * Defragment the fragment chain
1135  *
1136  * Returns: QDF_STATUS
1137  */
1138 static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
1139 			qdf_nbuf_t frag_list_head, qdf_nbuf_t frag_list_tail)
1140 {
1141 	qdf_nbuf_t tmp_next;
1142 	qdf_nbuf_t cur = frag_list_head, msdu;
1143 	uint32_t index, tkip_demic = 0;
1144 	uint16_t hdr_space;
1145 	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
1146 	struct dp_vdev *vdev = peer->vdev;
1147 	struct dp_soc *soc = vdev->pdev->soc;
1148 	uint8_t status = 0;
1149 
1150 	hdr_space = dp_rx_defrag_hdrsize(cur);
1151 	index = hal_rx_msdu_is_wlan_mcast(cur) ?
1152 		dp_sec_mcast : dp_sec_ucast;
1153 
1154 	/* Remove FCS from all fragments */
1155 	while (cur) {
1156 		tmp_next = qdf_nbuf_next(cur);
1157 		qdf_nbuf_set_next(cur, NULL);
1158 		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
1160 		qdf_nbuf_set_next(cur, tmp_next);
1161 		cur = tmp_next;
1162 	}
1163 	cur = frag_list_head;
1164 
1165 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1166 		  "%s: index %d Security type: %d", __func__,
1167 		  index, peer->security[index].sec_type);
1168 
1169 	switch (peer->security[index].sec_type) {
1170 	case cdp_sec_type_tkip:
1171 		tkip_demic = 1;
1172 		/* fall through */
1173 	case cdp_sec_type_tkip_nomic:
1174 		while (cur) {
1175 			tmp_next = qdf_nbuf_next(cur);
1176 			if (dp_rx_defrag_tkip_decap(cur, hdr_space)) {
1177 
1178 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1179 					QDF_TRACE_LEVEL_ERROR,
1180 					"dp_rx_defrag: TKIP decap failed");
1181 
1182 				return QDF_STATUS_E_DEFRAG_ERROR;
1183 			}
1184 			cur = tmp_next;
1185 		}
1186 
1187 		/* If success, increment header to be stripped later */
1188 		hdr_space += dp_f_tkip.ic_header;
1189 		break;
1190 
1191 	case cdp_sec_type_aes_ccmp:
1192 		while (cur) {
1193 			tmp_next = qdf_nbuf_next(cur);
1194 			if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) {
1195 
1196 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1197 					QDF_TRACE_LEVEL_ERROR,
1198 					"dp_rx_defrag: CCMP demic failed");
1199 
1200 				return QDF_STATUS_E_DEFRAG_ERROR;
1201 			}
1202 			if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) {
1203 
1204 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1205 					QDF_TRACE_LEVEL_ERROR,
1206 					"dp_rx_defrag: CCMP decap failed");
1207 
1208 				return QDF_STATUS_E_DEFRAG_ERROR;
1209 			}
1210 			cur = tmp_next;
1211 		}
1212 
1213 		/* If success, increment header to be stripped later */
1214 		hdr_space += dp_f_ccmp.ic_header;
1215 		break;
1216 
1217 	case cdp_sec_type_wep40:
1218 	case cdp_sec_type_wep104:
1219 	case cdp_sec_type_wep128:
1220 		while (cur) {
1221 			tmp_next = qdf_nbuf_next(cur);
1222 			if (dp_rx_defrag_wep_decap(cur, hdr_space)) {
1223 
1224 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1225 					QDF_TRACE_LEVEL_ERROR,
1226 					"dp_rx_defrag: WEP decap failed");
1227 
1228 				return QDF_STATUS_E_DEFRAG_ERROR;
1229 			}
1230 			cur = tmp_next;
1231 		}
1232 
1233 		/* If success, increment header to be stripped later */
1234 		hdr_space += dp_f_wep.ic_header;
1235 		break;
1236 	default:
1237 		QDF_TRACE(QDF_MODULE_ID_TXRX,
1238 			QDF_TRACE_LEVEL_ERROR,
1239 			"dp_rx_defrag: Did not match any security type");
1240 		break;
1241 	}
1242 
1243 	if (tkip_demic) {
1244 		msdu = frag_list_head;
1245 		if (soc->cdp_soc.ol_ops->rx_frag_tkip_demic) {
1246 			status = soc->cdp_soc.ol_ops->rx_frag_tkip_demic(
1247 				(void *)peer->ctrl_peer, msdu, hdr_space);
1248 		} else {
1249 			qdf_mem_copy(key,
1250 				     &peer->security[index].michael_key[0],
1251 				IEEE80211_WEP_MICLEN);
1252 			status = dp_rx_defrag_tkip_demic(key, msdu,
1253 							 RX_PKT_TLVS_LEN +
1254 							 hdr_space);
1255 
1256 			if (status) {
1257 				dp_rx_defrag_err(vdev, frag_list_head);
1258 
1259 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1260 					  QDF_TRACE_LEVEL_ERROR,
1261 					  "%s: TKIP demic failed status %d",
1262 					  __func__, status);
1263 
1264 				return QDF_STATUS_E_DEFRAG_ERROR;
1265 			}
1266 		}
1267 	}
1268 
1269 	/* Convert the header to 802.3 header */
1270 	dp_rx_defrag_nwifi_to_8023(frag_list_head, hdr_space);
1271 	dp_rx_construct_fraglist(peer, frag_list_head, hdr_space);
1272 
1273 	return QDF_STATUS_SUCCESS;
1274 }
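/*
 * Note (illustrative figures): on success, every continuation fragment
 * has had RX_PKT_TLVS_LEN + hdr_space bytes pulled, where hdr_space is
 * the 802.11 header (e.g. 26 bytes for a QoS frame) plus the cipher IV
 * header (8 bytes for TKIP or CCMP, 4 for WEP); the MIC/ICV trailers
 * were already trimmed per fragment above.
 */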
1275 
1276 /*
1277  * dp_rx_defrag_cleanup(): Clean up activities
1278  * @peer: Pointer to the peer
1279  * @tid: Traffic Identifier
1280  *
1281  * Returns: None
1282  */
1283 void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid)
1284 {
1285 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1286 				peer->rx_tid[tid].array;
1287 
1288 	if (!rx_reorder_array_elem) {
1289 		/*
1290 		 * if this condition is hit then somebody
1291 		 * must have reset this pointer to NULL.
1292 		 * array pointer usually points to base variable
1293 		 * of TID queue structure: "struct dp_rx_tid"
1294 		 */
1295 		QDF_ASSERT(0);
1296 		return;
1297 	}
1298 	/* Free up nbufs */
1299 	dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
1300 
1301 	/* Free up saved ring descriptors */
1302 	dp_rx_clear_saved_desc_info(peer, tid);
1303 
1304 	rx_reorder_array_elem->head = NULL;
1305 	rx_reorder_array_elem->tail = NULL;
1306 	peer->rx_tid[tid].defrag_timeout_ms = 0;
1307 	peer->rx_tid[tid].curr_frag_num = 0;
1308 	peer->rx_tid[tid].curr_seq_num = 0;
1309 	peer->rx_tid[tid].head_frag_desc = NULL;
1310 }
1311 
1312 /*
1313  * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor
1314  * @ring_desc: Pointer to the dst ring descriptor
 * @rx_desc: Pointer to the rx descriptor of the head fragment
1315  * @peer: Pointer to the peer
1316  * @tid: Traffic Identifier
1317  *
1318  * Returns: QDF_STATUS
1319  */
1320 static QDF_STATUS dp_rx_defrag_save_info_from_ring_desc(void *ring_desc,
1321 	struct dp_rx_desc *rx_desc, struct dp_peer *peer, unsigned tid)
1322 {
1323 	void *dst_ring_desc = qdf_mem_malloc(
1324 			sizeof(struct reo_destination_ring));
1325 
1326 	if (dst_ring_desc == NULL) {
1327 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1328 			"%s: Memory alloc failed !", __func__);
1329 		QDF_ASSERT(0);
1330 		return QDF_STATUS_E_NOMEM;
1331 	}
1332 
1333 	qdf_mem_copy(dst_ring_desc, ring_desc,
1334 		       sizeof(struct reo_destination_ring));
1335 
1336 	peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
1337 	peer->rx_tid[tid].head_frag_desc = rx_desc;
1338 
1339 	return QDF_STATUS_SUCCESS;
1340 }
1341 
1342 /*
1343  * dp_rx_defrag_store_fragment(): Store incoming fragments
1344  * @soc: Pointer to the SOC data structure
1345  * @ring_desc: Pointer to the ring descriptor
 * @head: Pointer to the head of the local descriptor free-list
 * @tail: Pointer to the tail of the local descriptor free-list
1346  * @mpdu_desc_info: MPDU descriptor info
1347  * @tid: Traffic Identifier
1348  * @rx_desc: Pointer to rx descriptor
1349  * @rx_bfs: Set to 1 when an rx buffer is consumed
1350  *
1351  * Returns: QDF_STATUS
1352  */
1353 static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc,
1354 			void *ring_desc,
1355 			union dp_rx_desc_list_elem_t **head,
1356 			union dp_rx_desc_list_elem_t **tail,
1357 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1358 			unsigned tid, struct dp_rx_desc *rx_desc,
1359 			uint32_t *rx_bfs)
1360 {
1361 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1362 	struct dp_pdev *pdev;
1363 	struct dp_peer *peer;
1364 	uint16_t peer_id;
1365 	uint8_t fragno, more_frag, all_frag_present = 0;
1366 	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
1367 	QDF_STATUS status;
1368 	struct dp_rx_tid *rx_tid;
1369 	uint8_t mpdu_sequence_control_valid;
1370 	uint8_t mpdu_frame_control_valid;
1371 	qdf_nbuf_t frag = rx_desc->nbuf;
1372 
1373 	/* Check if the packet is from a valid peer */
1374 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
1375 					mpdu_desc_info->peer_meta_data);
1376 	peer = dp_peer_find_by_id(soc, peer_id);
1377 
1378 	if (!peer) {
1379 		/* We should not receive anything from an unknown peer;
1380 		 * however, that might happen while we are in monitor mode.
1381 		 * We don't need to handle that here.
1382 		 */
1383 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1384 			"Unknown peer, dropping the fragment");
1385 
1386 		qdf_nbuf_free(frag);
1387 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1388 		*rx_bfs = 1;
1389 
1390 		goto end;
1391 	}
1392 
1393 	pdev = peer->vdev->pdev;
1394 	rx_tid = &peer->rx_tid[tid];
1395 
1396 	rx_reorder_array_elem = peer->rx_tid[tid].array;
1397 
1398 	mpdu_sequence_control_valid =
1399 		hal_rx_get_mpdu_sequence_control_valid(rx_desc->rx_buf_start);
1400 
1401 	/* Invalid MPDU sequence control field, MPDU is of no use */
1402 	if (!mpdu_sequence_control_valid) {
1403 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1404 			"Invalid MPDU seq control field, dropping MPDU");
1405 		qdf_nbuf_free(frag);
1406 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1407 		*rx_bfs = 1;
1408 
1409 		qdf_assert(0);
1410 		goto end;
1411 	}
1412 
1413 	mpdu_frame_control_valid =
1414 		hal_rx_get_mpdu_frame_control_valid(rx_desc->rx_buf_start);
1415 
1416 	/* Invalid frame control field */
1417 	if (!mpdu_frame_control_valid) {
1418 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1419 			"Invalid frame control field, dropping MPDU");
1420 		qdf_nbuf_free(frag);
1421 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1422 		*rx_bfs = 1;
1423 
1424 		qdf_assert(0);
1425 		goto end;
1426 	}
1427 
1428 	/* Current mpdu sequence */
1429 	more_frag = dp_rx_frag_get_more_frag_bit(rx_desc->rx_buf_start);
1430 
1431 	/* HW does not populate the fragment number as of now
1432 	 * need to get from the 802.11 header
1433 	 */
1434 	fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc->rx_buf_start);
1435 
1436 	/*
1437 	 * !more_frag: no more fragments to be delivered
1438 	 * !fragno: packet is not fragmented
1439 	 * !rx_reorder_array_elem->head: no saved fragments so far
1440 	 */
1441 	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
1442 		/* We should not get into this situation here.
1443 		 * It means an unfragmented packet with fragment flag
1444 		 * is delivered over the REO exception ring.
1445 		 * Typically it follows normal rx path.
1446 		 */
1447 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1448 			"Rcvd unfragmented pkt on REO Err srng, dropping");
1449 		qdf_nbuf_free(frag);
1450 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1451 		*rx_bfs = 1;
1452 
1453 		qdf_assert(0);
1454 		goto end;
1455 	}
1456 
1457 	/* Check if the fragment is for the same sequence or a different one */
1458 	if (rx_reorder_array_elem->head) {
1459 		if (rxseq != rx_tid->curr_seq_num) {
1460 
1461 			/* Drop stored fragments if out of sequence
1462 			 * fragment is received
1463 			 */
1464 			dp_rx_reorder_flush_frag(peer, tid);
1465 
1466 			DP_STATS_INC(soc, rx.rx_frag_err, 1);
1467 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1468 				"seq number mismatch, dropping earlier sequence");
1472 
1473 			/*
1474 			 * The sequence number for this fragment becomes the
1475 			 * new sequence number to be processed
1476 			 */
1477 			rx_tid->curr_seq_num = rxseq;
1478 		}
1479 	} else {
1480 		/* Start of a new sequence */
1481 		dp_rx_defrag_cleanup(peer, tid);
1482 		rx_tid->curr_seq_num = rxseq;
1483 	}
1484 
1485 	/*
1486 	 * If the earlier sequence was dropped, this will be the fresh start.
1487 	 * Else, continue with next fragment in a given sequence
1488 	 */
1489 	status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head,
1490 			&rx_reorder_array_elem->tail, frag,
1491 			&all_frag_present);
1492 
1493 	/*
1494 	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
1495 	 * packet sequence has more than 6 MSDUs for some reason, we will
1496 	 * have to use the next MSDU link descriptor and chain them together
1497 	 * before reinjection
1498 	 */
1499 	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
1500 			(rx_reorder_array_elem->head == frag)) {
1501 
1502 		status = dp_rx_defrag_save_info_from_ring_desc(ring_desc,
1503 					rx_desc, peer, tid);
1504 
1505 		if (status != QDF_STATUS_SUCCESS) {
1506 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1507 				"%s: Unable to store ring desc !", __func__);
1508 			goto end;
1509 		}
1510 	} else {
1511 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1512 		*rx_bfs = 1;
1513 
1514 		/* Return the non-head link desc */
1515 		if (dp_rx_link_desc_return(soc, ring_desc,
1516 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1517 				QDF_STATUS_SUCCESS)
1518 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1519 					"%s: Failed to return link desc",
1520 					__func__);
1521 
1522 	}
1523 
1524 	if (pdev->soc->rx.flags.defrag_timeout_check)
1525 		dp_rx_defrag_waitlist_remove(peer, tid);
1526 
1527 	/* Yet to receive more fragments for this sequence number */
1528 	if (!all_frag_present) {
1529 		uint32_t now_ms =
1530 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1531 
1532 		peer->rx_tid[tid].defrag_timeout_ms =
1533 			now_ms + pdev->soc->rx.defrag.timeout_ms;
1534 
1535 		dp_rx_defrag_waitlist_add(peer, tid);
1536 		dp_peer_unref_del_find_by_id(peer);
1537 
1538 		return QDF_STATUS_SUCCESS;
1539 	}
1540 
1541 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1542 		  "All fragments received for sequence: %d", rxseq);
1543 
1544 	/* Process the fragments */
1545 	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
1546 		rx_reorder_array_elem->tail);
1547 	if (QDF_IS_STATUS_ERROR(status)) {
1548 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1549 			"Fragment processing failed");
1550 
1551 		dp_rx_add_to_free_desc_list(head, tail,
1552 				peer->rx_tid[tid].head_frag_desc);
1553 		*rx_bfs = 1;
1554 
1555 		if (dp_rx_link_desc_return(soc,
1556 					peer->rx_tid[tid].dst_ring_desc,
1557 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1558 				QDF_STATUS_SUCCESS)
1559 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1560 					"%s: Failed to return link desc",
1561 					__func__);
1562 		dp_rx_defrag_cleanup(peer, tid);
1563 		goto end;
1564 	}
1565 
1566 	/* Re-inject the fragments back to REO for further processing */
1567 	status = dp_rx_defrag_reo_reinject(peer, tid,
1568 			rx_reorder_array_elem->head);
1569 	if (QDF_IS_STATUS_SUCCESS(status)) {
1570 		rx_reorder_array_elem->head = NULL;
1571 		rx_reorder_array_elem->tail = NULL;
1572 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1573 			  "Fragmented sequence successfully reinjected");
1574 	} else {
1575 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1576 		"Fragmented sequence reinjection failed");
1577 		dp_rx_return_head_frag_desc(peer, tid);
1578 	}
1579 
1580 	dp_rx_defrag_cleanup(peer, tid);
1581 
1582 	dp_peer_unref_del_find_by_id(peer);
1583 
1584 	return QDF_STATUS_SUCCESS;
1585 
1586 end:
1587 	if (peer)
1588 		dp_peer_unref_del_find_by_id(peer);
1589 
1590 	DP_STATS_INC(soc, rx.rx_frag_err, 1);
1591 	return QDF_STATUS_E_DEFRAG_ERROR;
1592 }
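/*
 * Design note (derived from the code above): each (peer, TID) holds at
 * most one partially reassembled sequence. A fragment bearing a new
 * sequence number flushes the old one, and the first stored fragment
 * pins the REO destination descriptor plus its rx descriptor so the
 * reassembled MPDU can later be reinjected via
 * dp_rx_defrag_reo_reinject().
 */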
1593 
1594 /**
1595  * dp_rx_frag_handle() - Handles fragmented Rx frames
1596  *
1597  * @soc: core txrx main context
1598  * @ring_desc: opaque pointer to the REO error ring descriptor
1599  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
1600  * @mac_id: output; set to the pool/mac id of the consumed rx descriptors
1602  * @quota: No. of units (packets) that can be serviced in one shot.
1603  *
1604  * This function implements RX 802.11 fragmentation handling.
1605  * The handling is mostly the same as legacy fragmentation handling.
1606  * If required, this function can re-inject the frames back to the
1607  * REO ring (with proper settings to bypass the fragmentation check
1608  * but use duplicate detection / re-ordering) and route these frames
1609  * to a different core.
1610  *
1611  * Return: uint32_t: No. of elements processed
1612  */
1613 uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
1614 		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1615 		uint8_t *mac_id,
1616 		uint32_t quota)
1617 {
1618 	uint32_t rx_bufs_used = 0;
1619 	void *link_desc_va;
1620 	struct hal_buf_info buf_info;
1621 	struct hal_rx_msdu_list msdu_list; /* per MPDU list of MSDUs */
1622 	qdf_nbuf_t msdu = NULL;
1623 	uint32_t tid, msdu_len;
1624 	int idx, rx_bfs = 0;
1625 	struct dp_pdev *pdev;
1626 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1627 	struct dp_rx_desc *rx_desc = NULL;
1628 
1629 	qdf_assert(soc);
1630 	qdf_assert(mpdu_desc_info);
1631 
1632 	/* Fragment from a valid peer */
1633 	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);
1634 
1635 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1636 
1637 	qdf_assert(link_desc_va);
1638 
1639 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1640 		"Number of MSDUs to process, num_msdus: %d",
1641 		mpdu_desc_info->msdu_count);
1642 
1643 
1644 	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
1645 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1646 			"Not sufficient MSDUs to process");
1647 		return rx_bufs_used;
1648 	}
1649 
1650 	/* Get msdu_list for the given MPDU */
1651 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1652 			     &mpdu_desc_info->msdu_count);
1653 
1654 	/* Process all MSDUs in the current MPDU */
1655 	for (idx = 0; (idx < mpdu_desc_info->msdu_count); idx++) {
1656 		/* reuse the rx_desc declared above instead of shadowing it */
1657 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
1658 				msdu_list.sw_cookie[idx]);
1659 
1660 		qdf_assert_always(rx_desc);
1661 
1662 		/* all buffers in MSDU link belong to same pdev */
1663 		pdev = soc->pdev_list[rx_desc->pool_id];
1664 		*mac_id = rx_desc->pool_id;
1665 
1666 		msdu = rx_desc->nbuf;
1667 
1668 		qdf_nbuf_unmap_single(soc->osdev, msdu,
1669 				QDF_DMA_BIDIRECTIONAL);
1670 
1671 		rx_desc->rx_buf_start = qdf_nbuf_data(msdu);
1672 
1673 		msdu_len = hal_rx_msdu_start_msdu_len_get(
1674 				rx_desc->rx_buf_start);
1675 
1676 		qdf_nbuf_set_pktlen(msdu, (msdu_len + RX_PKT_TLVS_LEN));
1677 		qdf_nbuf_append_ext_list(msdu, NULL, 0);
1678 
1679 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1680 						rx_desc->rx_buf_start);
1681 
1682 		/* Process fragment-by-fragment */
1683 		status = dp_rx_defrag_store_fragment(soc, ring_desc,
1684 						     &pdev->free_list_head,
1685 						     &pdev->free_list_tail,
1686 						     mpdu_desc_info,
1687 						     tid, rx_desc, &rx_bfs);
1688 
1689 		if (rx_bfs)
1690 			rx_bufs_used++;
1691 
1692 		if (!QDF_IS_STATUS_SUCCESS(status)) {
1693 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1694 				"Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
1695 				mpdu_desc_info->mpdu_seq,
1696 				mpdu_desc_info->msdu_count,
1697 				mpdu_desc_info->mpdu_flags);
1698 
1699 			/* No point in processing rest of the fragments */
1700 			break;
1701 		}
1702 	}
1703 
1704 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1705 		/* drop any remaining buffers in current descriptor */
1706 		idx++;
1707 		for (; (idx < mpdu_desc_info->msdu_count); idx++) {
1708 			rx_desc =
1709 				dp_rx_cookie_2_va_rxdma_buf(soc,
1710 							    msdu_list.sw_cookie[idx]);
1711 			qdf_assert(rx_desc);
1712 			msdu = rx_desc->nbuf;
1713 			qdf_nbuf_unmap_single(soc->osdev, msdu,
1714 					      QDF_DMA_BIDIRECTIONAL);
1715 			qdf_nbuf_free(msdu);
1716 			dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1717 						    &pdev->free_list_tail,
1718 						    rx_desc);
1719 			rx_bufs_used++;
1720 		}
1721 		if (dp_rx_link_desc_return(soc, ring_desc,
1722 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1723 					   QDF_STATUS_SUCCESS)
1724 			dp_err("Failed to return link desc");
1725 	}
1726 
1727 	return rx_bufs_used;
1728 }
1729 
1730 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
1731 				      struct dp_peer *peer, uint16_t tid,
1732 		uint16_t rxseq, qdf_nbuf_t nbuf)
1733 {
1734 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1735 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1736 	uint8_t all_frag_present;
1737 	uint32_t msdu_len;
1738 	QDF_STATUS status;
1739 
1740 	rx_reorder_array_elem = peer->rx_tid[tid].array;
1741 
1742 	if (rx_reorder_array_elem->head &&
1743 	    rxseq != rx_tid->curr_seq_num) {
1744 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1745 			  "%s: sequence mismatch for TID %d, dropping Seq# %d",
1746 				__func__, tid, rxseq);
1747 		qdf_nbuf_free(nbuf);
1748 		goto fail;
1749 	}
1750 
1751 	msdu_len = hal_rx_msdu_start_msdu_len_get(qdf_nbuf_data(nbuf));
1752 
1753 	qdf_nbuf_set_pktlen(nbuf, (msdu_len + RX_PKT_TLVS_LEN));
1754 
1755 	status = dp_rx_defrag_fraglist_insert(peer, tid,
1756 					      &rx_reorder_array_elem->head,
1757 			&rx_reorder_array_elem->tail, nbuf,
1758 			&all_frag_present);
1759 
1760 	if (QDF_IS_STATUS_ERROR(status)) {
1761 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1762 			  "%s Fragment insert failed", __func__);
1763 
1764 		goto fail;
1765 	}
1766 
1767 	if (soc->rx.flags.defrag_timeout_check)
1768 		dp_rx_defrag_waitlist_remove(peer, tid);
1769 
1770 	if (!all_frag_present) {
1771 		uint32_t now_ms =
1772 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1773 
1774 		peer->rx_tid[tid].defrag_timeout_ms =
1775 			now_ms + soc->rx.defrag.timeout_ms;
1776 
1777 		dp_rx_defrag_waitlist_add(peer, tid);
1778 
1779 		return QDF_STATUS_SUCCESS;
1780 	}
1781 
1782 	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
1783 			      rx_reorder_array_elem->tail);
1784 
1785 	if (QDF_IS_STATUS_ERROR(status)) {
1786 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1787 			  "%s Fragment processing failed", __func__);
1788 
1789 		dp_rx_return_head_frag_desc(peer, tid);
1790 		dp_rx_defrag_cleanup(peer, tid);
1791 
1792 		goto fail;
1793 	}
1794 
1795 	/* Re-inject the fragments back to REO for further processing */
1796 	status = dp_rx_defrag_reo_reinject(peer, tid,
1797 					   rx_reorder_array_elem->head);
1798 	if (QDF_IS_STATUS_SUCCESS(status)) {
1799 		rx_reorder_array_elem->head = NULL;
1800 		rx_reorder_array_elem->tail = NULL;
1801 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1802 			  "%s: Frag seq successfully reinjected",
1803 			__func__);
1804 	} else {
1805 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1806 			  "%s: Frag seq reinjection failed",
1807 			__func__);
1808 		dp_rx_return_head_frag_desc(peer, tid);
1809 	}
1810 
1811 	dp_rx_defrag_cleanup(peer, tid);
1812 	return QDF_STATUS_SUCCESS;
1813 
1814 fail:
1815 	return QDF_STATUS_E_DEFRAG_ERROR;
1816 }
1817