xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c (revision 1397a33f48ea6455be40871470b286e535820eb8)
1 /*
2  * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "hal_api.h"
24 #include "qdf_trace.h"
25 #include "qdf_nbuf.h"
26 #include "dp_internal.h"
27 #include "dp_rx_defrag.h"
28 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
29 #include "dp_rx_defrag.h"
30 
31 const struct dp_rx_defrag_cipher dp_f_ccmp = {
32 	"AES-CCM",
33 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
34 	IEEE80211_WEP_MICLEN,
35 	0,
36 };
37 
38 const struct dp_rx_defrag_cipher dp_f_tkip = {
39 	"TKIP",
40 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
41 	IEEE80211_WEP_CRCLEN,
42 	IEEE80211_WEP_MICLEN,
43 };
44 
45 const struct dp_rx_defrag_cipher dp_f_wep = {
46 	"WEP",
47 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
48 	IEEE80211_WEP_CRCLEN,
49 	0,
50 };
51 
52 /*
53  * dp_rx_defrag_frames_free(): Free fragment chain
54  * @frames: Fragment chain
55  *
56  * Iterates through the fragment chain and frees them
57  * Returns: None
58  */
59 static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
60 {
61 	qdf_nbuf_t next, frag = frames;
62 
63 	while (frag) {
64 		next = qdf_nbuf_next(frag);
65 		qdf_nbuf_free(frag);
66 		frag = next;
67 	}
68 }
69 
70 /*
71  * dp_rx_clear_saved_desc_info(): Clears descriptor info
72  * @peer: Pointer to the peer data structure
73  * @tid: Transmit ID (TID)
74  *
75  * Saves MPDU descriptor info and MSDU link pointer from REO
76  * ring descriptor. The cache is created per peer, per TID
77  *
78  * Returns: None
79  */
80 static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid)
81 {
82 	if (peer->rx_tid[tid].dst_ring_desc)
83 		qdf_mem_free(peer->rx_tid[tid].dst_ring_desc);
84 
85 	peer->rx_tid[tid].dst_ring_desc = NULL;
86 }
87 
88 static void dp_rx_return_head_frag_desc(struct dp_peer *peer,
89 					unsigned int tid)
90 {
91 	struct dp_soc *soc;
92 	struct dp_pdev *pdev;
93 	struct dp_srng *dp_rxdma_srng;
94 	struct rx_desc_pool *rx_desc_pool;
95 	union dp_rx_desc_list_elem_t *head = NULL;
96 	union dp_rx_desc_list_elem_t *tail = NULL;
97 
98 	if (peer->rx_tid[tid].head_frag_desc) {
99 		pdev = peer->vdev->pdev;
100 		soc = pdev->soc;
101 		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
102 		rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
103 
104 		dp_rx_add_to_free_desc_list(&head, &tail,
105 					    peer->rx_tid[tid].head_frag_desc);
106 		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
107 					1, &head, &tail);
108 	}
109 }
110 
111 /*
112  * dp_rx_reorder_flush_frag(): Flush the frag list
113  * @peer: Pointer to the peer data structure
114  * @tid: Transmit ID (TID)
115  *
116  * Flush the per-TID frag list
117  *
118  * Returns: None
119  */
120 void dp_rx_reorder_flush_frag(struct dp_peer *peer,
121 			 unsigned int tid)
122 {
123 	struct dp_soc *soc;
124 
125 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
126 		  FL("Flushing TID %d"), tid);
127 
128 	if (!peer) {
129 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
130 					"%s: NULL peer", __func__);
131 		return;
132 	}
133 
134 	soc = peer->vdev->pdev->soc;
135 
136 	if (peer->rx_tid[tid].dst_ring_desc) {
137 		if (dp_rx_link_desc_return(soc,
138 					peer->rx_tid[tid].dst_ring_desc,
139 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
140 					QDF_STATUS_SUCCESS)
141 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
142 					"%s: Failed to return link desc",
143 					__func__);
144 	}
145 
146 	dp_rx_return_head_frag_desc(peer, tid);
147 	dp_rx_defrag_cleanup(peer, tid);
148 }
149 
150 /*
151  * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list
152  * @soc: DP SOC
153  *
154  * Flush fragments of all waitlisted TID's
155  *
156  * Returns: None
157  */
158 void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
159 {
160 	struct dp_rx_tid *rx_reorder;
161 	struct dp_rx_tid *tmp;
162 	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
163 	TAILQ_HEAD(, dp_rx_tid) temp_list;
164 
165 	TAILQ_INIT(&temp_list);
166 
167 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
168 		  FL("Current time  %u"), now_ms);
169 
170 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
171 	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
172 			   defrag_waitlist_elem, tmp) {
173 		uint32_t tid;
174 
175 		if (rx_reorder->defrag_timeout_ms > now_ms)
176 			break;
177 
178 		tid = rx_reorder->tid;
179 		if (tid >= DP_MAX_TIDS) {
180 			qdf_assert(0);
181 			continue;
182 		}
183 
184 		TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder,
185 			     defrag_waitlist_elem);
186 		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
187 
188 		/* Move to temp list and clean-up later */
189 		TAILQ_INSERT_TAIL(&temp_list, rx_reorder,
190 				  defrag_waitlist_elem);
191 	}
192 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
193 
194 	TAILQ_FOREACH_SAFE(rx_reorder, &temp_list,
195 			   defrag_waitlist_elem, tmp) {
196 		struct dp_peer *peer;
197 
198 		/* get address of current peer */
199 		peer =
200 			container_of(rx_reorder, struct dp_peer,
201 				     rx_tid[rx_reorder->tid]);
202 
203 		qdf_spin_lock_bh(&rx_reorder->tid_lock);
204 		dp_rx_reorder_flush_frag(peer, rx_reorder->tid);
205 		qdf_spin_unlock_bh(&rx_reorder->tid_lock);
206 	}
207 }
208 
209 /*
210  * dp_rx_defrag_waitlist_add(): Update per-PDEV defrag wait list
211  * @peer: Pointer to the peer data structure
212  * @tid: Transmit ID (TID)
213  *
214  * Appends per-tid fragments to global fragment wait list
215  *
216  * Returns: None
217  */
218 static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid)
219 {
220 	struct dp_soc *psoc = peer->vdev->pdev->soc;
221 	struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid];
222 
223 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
224 		  FL("Adding TID %u to waitlist for peer %pK"),
225 		  tid, peer);
226 
227 	/* TODO: use LIST macros instead of TAIL macros */
228 	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
229 	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder,
230 				defrag_waitlist_elem);
231 	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
232 	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
233 }
234 
235 /*
236  * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist
237  * @peer: Pointer to the peer data structure
238  * @tid: Transmit ID (TID)
239  *
240  * Remove fragments from waitlist
241  *
242  * Returns: None
243  */
244 void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
245 {
246 	struct dp_pdev *pdev = peer->vdev->pdev;
247 	struct dp_soc *soc = pdev->soc;
248 	struct dp_rx_tid *rx_reorder;
249 
250 	if (tid > DP_MAX_TIDS) {
251 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
252 			  "TID out of bounds: %d", tid);
253 		qdf_assert(0);
254 		return;
255 	}
256 
257 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
258 	TAILQ_FOREACH(rx_reorder, &soc->rx.defrag.waitlist,
259 			   defrag_waitlist_elem) {
260 		struct dp_peer *peer_on_waitlist;
261 
262 		/* get address of current peer */
263 		peer_on_waitlist =
264 			container_of(rx_reorder, struct dp_peer,
265 				     rx_tid[rx_reorder->tid]);
266 
267 		/* Ensure it is TID for same peer */
268 		if (peer_on_waitlist == peer && rx_reorder->tid == tid) {
269 			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
270 				rx_reorder, defrag_waitlist_elem);
271 			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
272 		}
273 	}
274 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
275 }
276 
277 /*
278  * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list
279  * @peer: Pointer to the peer data structure
280  * @tid: Transmit ID (TID)
281  * @head_addr: Pointer to head list
282  * @tail_addr: Pointer to tail list
283  * @frag: Incoming fragment
284  * @all_frag_present: Flag to indicate whether all fragments are received
285  *
286  * Build a per-tid, per-sequence fragment list.
287  *
288  * Returns: Success, if inserted
289  */
290 static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid,
291 	qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag,
292 	uint8_t *all_frag_present)
293 {
294 	qdf_nbuf_t next;
295 	qdf_nbuf_t prev = NULL;
296 	qdf_nbuf_t cur;
297 	uint16_t head_fragno, cur_fragno, next_fragno;
298 	uint8_t last_morefrag = 1, count = 0;
299 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
300 	uint8_t *rx_desc_info;
301 
302 
303 	qdf_assert(frag);
304 	qdf_assert(head_addr);
305 	qdf_assert(tail_addr);
306 
307 	*all_frag_present = 0;
308 	rx_desc_info = qdf_nbuf_data(frag);
309 	cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
310 
311 	/* If this is the first fragment */
312 	if (!(*head_addr)) {
313 		*head_addr = *tail_addr = frag;
314 		qdf_nbuf_set_next(*tail_addr, NULL);
315 		rx_tid->curr_frag_num = cur_fragno;
316 
317 		goto insert_done;
318 	}
319 
320 	/* In sequence fragment */
321 	if (cur_fragno > rx_tid->curr_frag_num) {
322 		qdf_nbuf_set_next(*tail_addr, frag);
323 		*tail_addr = frag;
324 		qdf_nbuf_set_next(*tail_addr, NULL);
325 		rx_tid->curr_frag_num = cur_fragno;
326 	} else {
327 		/* Out of sequence fragment */
328 		cur = *head_addr;
329 		rx_desc_info = qdf_nbuf_data(cur);
330 		head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
331 
332 		if (cur_fragno == head_fragno) {
333 			qdf_nbuf_free(frag);
334 			goto insert_fail;
335 		} else if (head_fragno > cur_fragno) {
336 			qdf_nbuf_set_next(frag, cur);
337 			cur = frag;
338 			*head_addr = frag; /* head pointer to be updated */
339 		} else {
340 			while ((cur_fragno > head_fragno) && cur != NULL) {
341 				prev = cur;
342 				cur = qdf_nbuf_next(cur);
343 				rx_desc_info = qdf_nbuf_data(cur);
344 				head_fragno =
345 					dp_rx_frag_get_mpdu_frag_number(
346 								rx_desc_info);
347 			}
348 
349 			if (cur_fragno == head_fragno) {
350 				qdf_nbuf_free(frag);
351 				goto insert_fail;
352 			}
353 
354 			qdf_nbuf_set_next(prev, frag);
355 			qdf_nbuf_set_next(frag, cur);
356 		}
357 	}
358 
359 	next = qdf_nbuf_next(*head_addr);
360 
361 	rx_desc_info = qdf_nbuf_data(*tail_addr);
362 	last_morefrag = dp_rx_frag_get_more_frag_bit(rx_desc_info);
363 
364 	/* TODO: optimize the loop */
365 	if (!last_morefrag) {
366 		/* Check if all fragments are present */
367 		do {
368 			rx_desc_info = qdf_nbuf_data(next);
369 			next_fragno =
370 				dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
371 			count++;
372 
373 			if (next_fragno != count)
374 				break;
375 
376 			next = qdf_nbuf_next(next);
377 		} while (next);
378 
379 		if (!next) {
380 			*all_frag_present = 1;
381 			return QDF_STATUS_SUCCESS;
382 		}
383 	}
384 
385 insert_done:
386 	return QDF_STATUS_SUCCESS;
387 
388 insert_fail:
389 	return QDF_STATUS_E_FAILURE;
390 }
391 
392 
393 /*
394  * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment
395  * @msdu: Pointer to the fragment
396  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
397  *
398  * decap tkip encrypted fragment
399  *
400  * Returns: QDF_STATUS
401  */
402 static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
403 {
404 	uint8_t *ivp, *orig_hdr;
405 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
406 
407 	/* start of 802.11 header info */
408 	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
409 
410 	/* TKIP header is located post 802.11 header */
411 	ivp = orig_hdr + hdrlen;
412 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
413 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
414 			"IEEE80211_WEP_EXTIV is missing in TKIP fragment");
415 		return QDF_STATUS_E_DEFRAG_ERROR;
416 	}
417 
418 	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);
419 
420 	return QDF_STATUS_SUCCESS;
421 }
422 
423 /*
424  * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment
425  * @nbuf: Pointer to the fragment buffer
426  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
427  *
428  * Remove MIC information from CCMP fragment
429  *
430  * Returns: QDF_STATUS
431  */
432 static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen)
433 {
434 	uint8_t *ivp, *orig_hdr;
435 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
436 
437 	/* start of the 802.11 header */
438 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
439 
440 	/* CCMP header is located after 802.11 header */
441 	ivp = orig_hdr + hdrlen;
442 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
443 		return QDF_STATUS_E_DEFRAG_ERROR;
444 
445 	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);
446 
447 	return QDF_STATUS_SUCCESS;
448 }
449 
450 /*
451  * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment
452  * @nbuf: Pointer to the fragment
453  * @hdrlen: length of the header information
454  *
455  * decap CCMP encrypted fragment
456  *
457  * Returns: QDF_STATUS
458  */
459 static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen)
460 {
461 	uint8_t *ivp, *origHdr;
462 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
463 
464 	origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
465 	ivp = origHdr + hdrlen;
466 
467 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
468 		return QDF_STATUS_E_DEFRAG_ERROR;
469 
470 	/* Let's pull the header later */
471 
472 	return QDF_STATUS_SUCCESS;
473 }
474 
475 /*
476  * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment
477  * @msdu: Pointer to the fragment
478  * @hdrlen: length of the header information
479  *
480  * decap WEP encrypted fragment
481  *
482  * Returns: QDF_STATUS
483  */
484 static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
485 {
486 	uint8_t *origHdr;
487 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
488 
489 	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
490 	qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);
491 
492 	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);
493 
494 	return QDF_STATUS_SUCCESS;
495 }
496 
497 /*
498  * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment
499  * @nbuf: Pointer to the fragment
500  *
501  * Calculate the header size of the received fragment
502  *
503  * Returns: header size (uint16_t)
504  */
505 static uint16_t dp_rx_defrag_hdrsize(qdf_nbuf_t nbuf)
506 {
507 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
508 	uint16_t size = sizeof(struct ieee80211_frame);
509 	uint16_t fc = 0;
510 	uint32_t to_ds, fr_ds;
511 	uint8_t frm_ctrl_valid;
512 	uint16_t frm_ctrl_field;
513 
514 	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
515 	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
516 	frm_ctrl_valid = hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr);
517 	frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr);
518 
519 	if (to_ds && fr_ds)
520 		size += IEEE80211_ADDR_LEN;
521 
522 	if (frm_ctrl_valid) {
523 		fc = frm_ctrl_field;
524 
525 		/* use 1-st byte for validation */
526 		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
527 			size += sizeof(uint16_t);
528 			/* use 2-nd byte for validation */
529 			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
530 				size += sizeof(struct ieee80211_htc);
531 		}
532 	}
533 
534 	return size;
535 }
536 
537 /*
538  * dp_rx_defrag_michdr(): Calculate a pseudo MIC header
539  * @wh0: Pointer to the wireless header of the fragment
540  * @hdr: Array to hold the pseudo header
541  *
542  * Calculate a pseudo MIC header
543  *
544  * Returns: None
545  */
546 static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
547 				uint8_t hdr[])
548 {
549 	const struct ieee80211_frame_addr4 *wh =
550 		(const struct ieee80211_frame_addr4 *)wh0;
551 
552 	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
553 	case IEEE80211_FC1_DIR_NODS:
554 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
555 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
556 					   wh->i_addr2);
557 		break;
558 	case IEEE80211_FC1_DIR_TODS:
559 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
560 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
561 					   wh->i_addr2);
562 		break;
563 	case IEEE80211_FC1_DIR_FROMDS:
564 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
565 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
566 					   wh->i_addr3);
567 		break;
568 	case IEEE80211_FC1_DIR_DSTODS:
569 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
570 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
571 					   wh->i_addr4);
572 		break;
573 	}
574 
575 	/*
576 	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but
577 	 * it could also be set for deauth, disassoc, action, etc. for
578 	 * a mgt type frame. It comes into picture for MFP.
579 	 */
580 	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
581 		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
582 				IEEE80211_FC1_DIR_DSTODS) {
583 			const struct ieee80211_qosframe_addr4 *qwh =
584 				(const struct ieee80211_qosframe_addr4 *)wh;
585 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
586 		} else {
587 			const struct ieee80211_qosframe *qwh =
588 				(const struct ieee80211_qosframe *)wh;
589 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
590 		}
591 	} else {
592 		hdr[12] = 0;
593 	}
594 
595 	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
596 }
597 
598 /*
599  * dp_rx_defrag_mic(): Calculate MIC header
600  * @key: Pointer to the key
601  * @wbuf: fragment buffer
602  * @off: Offset
603  * @data_len: Data length
604  * @mic: Array to hold MIC
605  *
606  * Calculate a pseudo MIC header
607  *
608  * Returns: QDF_STATUS
609  */
610 static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf,
611 		uint16_t off, uint16_t data_len, uint8_t mic[])
612 {
613 	uint8_t hdr[16] = { 0, };
614 	uint32_t l, r;
615 	const uint8_t *data;
616 	uint32_t space;
617 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
618 
619 	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
620 		+ rx_desc_len), hdr);
621 
622 	l = dp_rx_get_le32(key);
623 	r = dp_rx_get_le32(key + 4);
624 
625 	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
626 	l ^= dp_rx_get_le32(hdr);
627 	dp_rx_michael_block(l, r);
628 	l ^= dp_rx_get_le32(&hdr[4]);
629 	dp_rx_michael_block(l, r);
630 	l ^= dp_rx_get_le32(&hdr[8]);
631 	dp_rx_michael_block(l, r);
632 	l ^= dp_rx_get_le32(&hdr[12]);
633 	dp_rx_michael_block(l, r);
634 
635 	/* first buffer has special handling */
636 	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
637 	space = qdf_nbuf_len(wbuf) - off;
638 
639 	for (;; ) {
640 		if (space > data_len)
641 			space = data_len;
642 
643 		/* collect 32-bit blocks from current buffer */
644 		while (space >= sizeof(uint32_t)) {
645 			l ^= dp_rx_get_le32(data);
646 			dp_rx_michael_block(l, r);
647 			data += sizeof(uint32_t);
648 			space -= sizeof(uint32_t);
649 			data_len -= sizeof(uint32_t);
650 		}
651 		if (data_len < sizeof(uint32_t))
652 			break;
653 
654 		wbuf = qdf_nbuf_next(wbuf);
655 		if (wbuf == NULL)
656 			return QDF_STATUS_E_DEFRAG_ERROR;
657 
658 		if (space != 0) {
659 			const uint8_t *data_next;
660 			/*
661 			 * Block straddles buffers, split references.
662 			 */
663 			data_next =
664 				(uint8_t *)qdf_nbuf_data(wbuf) + off;
665 			if ((qdf_nbuf_len(wbuf)) <
666 				sizeof(uint32_t) - space) {
667 				return QDF_STATUS_E_DEFRAG_ERROR;
668 			}
669 			switch (space) {
670 			case 1:
671 				l ^= dp_rx_get_le32_split(data[0],
672 					data_next[0], data_next[1],
673 					data_next[2]);
674 				data = data_next + 3;
675 				space = (qdf_nbuf_len(wbuf) - off) - 3;
676 				break;
677 			case 2:
678 				l ^= dp_rx_get_le32_split(data[0], data[1],
679 						    data_next[0], data_next[1]);
680 				data = data_next + 2;
681 				space = (qdf_nbuf_len(wbuf) - off) - 2;
682 				break;
683 			case 3:
684 				l ^= dp_rx_get_le32_split(data[0], data[1],
685 					data[2], data_next[0]);
686 				data = data_next + 1;
687 				space = (qdf_nbuf_len(wbuf) - off) - 1;
688 				break;
689 			}
690 			dp_rx_michael_block(l, r);
691 			data_len -= sizeof(uint32_t);
692 		} else {
693 			/*
694 			 * Setup for next buffer.
695 			 */
696 			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
697 			space = qdf_nbuf_len(wbuf) - off;
698 		}
699 	}
700 	/* Last block and padding (0x5a, 4..7 x 0) */
701 	switch (data_len) {
702 	case 0:
703 		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
704 		break;
705 	case 1:
706 		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
707 		break;
708 	case 2:
709 		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
710 		break;
711 	case 3:
712 		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
713 		break;
714 	}
715 	dp_rx_michael_block(l, r);
716 	dp_rx_michael_block(l, r);
717 	dp_rx_put_le32(mic, l);
718 	dp_rx_put_le32(mic + 4, r);
719 
720 	return QDF_STATUS_SUCCESS;
721 }
722 
723 /*
724  * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame
725  * @key: Pointer to the key
726  * @msdu: fragment buffer
727  * @hdrlen: Length of the header information
728  *
729  * Remove MIC information from the TKIP frame
730  *
731  * Returns: QDF_STATUS
732  */
733 static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key,
734 					qdf_nbuf_t msdu, uint16_t hdrlen)
735 {
736 	QDF_STATUS status;
737 	uint32_t pktlen = 0;
738 	uint8_t mic[IEEE80211_WEP_MICLEN];
739 	uint8_t mic0[IEEE80211_WEP_MICLEN];
740 	qdf_nbuf_t prev = NULL, next;
741 
742 	next = msdu;
743 	while (next) {
744 		pktlen += (qdf_nbuf_len(next) - hdrlen);
745 		prev = next;
746 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
747 			  "%s pktlen %u", __func__,
748 			  (uint32_t)(qdf_nbuf_len(next) - hdrlen));
749 		next = qdf_nbuf_next(next);
750 	}
751 
752 	if (!prev) {
753 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
754 			  "%s Defrag chaining failed !\n", __func__);
755 		return QDF_STATUS_E_DEFRAG_ERROR;
756 	}
757 
758 	qdf_nbuf_copy_bits(prev, qdf_nbuf_len(prev) - dp_f_tkip.ic_miclen,
759 			   dp_f_tkip.ic_miclen, (caddr_t)mic0);
760 	qdf_nbuf_trim_tail(prev, dp_f_tkip.ic_miclen);
761 	pktlen -= dp_f_tkip.ic_miclen;
762 
763 	status = dp_rx_defrag_mic(key, msdu, hdrlen,
764 				pktlen, mic);
765 
766 	if (QDF_IS_STATUS_ERROR(status))
767 		return status;
768 
769 	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
770 		return QDF_STATUS_E_DEFRAG_ERROR;
771 
772 	return QDF_STATUS_SUCCESS;
773 }
774 
775 /*
776  * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers
777  * @nbuf: buffer pointer
778  * @hdrsize: size of the header to be pulled
779  *
780  * Pull the RXTLV & the 802.11 headers
781  *
782  * Returns: None
783  */
784 static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize)
785 {
786 	qdf_nbuf_pull_head(nbuf,
787 			RX_PKT_TLVS_LEN + hdrsize);
788 
789 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
790 		  "%s: final pktlen %d .11len %d",
791 		  __func__, (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
792 }
793 
794 /*
795  * dp_rx_construct_fraglist(): Construct a nbuf fraglist
796  * @peer: Pointer to the peer
797  * @head: Pointer to list of fragments
798  * @hdrsize: Size of the header to be pulled
799  *
800  * Construct a nbuf fraglist
801  *
802  * Returns: None
803  */
804 static void
805 dp_rx_construct_fraglist(struct dp_peer *peer,
806 		qdf_nbuf_t head, uint16_t hdrsize)
807 {
808 	qdf_nbuf_t msdu = qdf_nbuf_next(head);
809 	qdf_nbuf_t rx_nbuf = msdu;
810 	uint32_t len = 0;
811 
812 	while (msdu) {
813 		dp_rx_frag_pull_hdr(msdu, hdrsize);
814 		len += qdf_nbuf_len(msdu);
815 		msdu = qdf_nbuf_next(msdu);
816 	}
817 
818 	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
819 	qdf_nbuf_set_next(head, NULL);
820 
821 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
822 		  "%s: head len %d ext len %d data len %d ",
823 		  __func__,
824 		  (uint32_t)qdf_nbuf_len(head),
825 		  (uint32_t)qdf_nbuf_len(rx_nbuf),
826 		  (uint32_t)(head->data_len));
827 }
828 
829 /**
830  * dp_rx_defrag_err() - rx err handler
831  * @pdev: handle to pdev object
832  * @vdev_id: vdev id
833  * @peer_mac_addr: peer mac address
834  * @tid: TID
835  * @tsf32: TSF
836  * @err_type: error type
837  * @rx_frame: rx frame
838  * @pn: PN Number
839  * @key_id: key id
840  *
841  * This function handles rx error and send MIC error notification
842  *
843  * Return: None
844  */
845 static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
846 {
847 	struct ol_if_ops *tops = NULL;
848 	struct dp_pdev *pdev = vdev->pdev;
849 	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
850 	uint8_t *orig_hdr;
851 	struct ieee80211_frame *wh;
852 
853 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
854 	wh = (struct ieee80211_frame *)orig_hdr;
855 
856 	tops = pdev->soc->cdp_soc.ol_ops;
857 	if (tops->rx_mic_error)
858 		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);
859 }
860 
861 
862 /*
863  * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
864  * @nbuf: Pointer to the fragment buffer
865  * @hdrsize: Size of headers
866  *
867  * Transcap the fragment from 802.11 to 802.3
868  *
869  * Returns: None
870  */
871 static void
872 dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize)
873 {
874 	struct llc_snap_hdr_t *llchdr;
875 	struct ethernet_hdr_t *eth_hdr;
876 	uint8_t ether_type[2];
877 	uint16_t fc = 0;
878 	union dp_align_mac_addr mac_addr;
879 	uint8_t *rx_desc_info = qdf_mem_malloc(RX_PKT_TLVS_LEN);
880 
881 	if (rx_desc_info == NULL) {
882 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
883 			"%s: Memory alloc failed ! ", __func__);
884 		QDF_ASSERT(0);
885 		return;
886 	}
887 
888 	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN);
889 
890 	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
891 					RX_PKT_TLVS_LEN + hdrsize);
892 	qdf_mem_copy(ether_type, llchdr->ethertype, 2);
893 
894 	qdf_nbuf_pull_head(nbuf, (RX_PKT_TLVS_LEN + hdrsize +
895 				  sizeof(struct llc_snap_hdr_t) -
896 				  sizeof(struct ethernet_hdr_t)));
897 
898 	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));
899 
900 	if (hal_rx_get_mpdu_frame_control_valid(rx_desc_info))
901 		fc = hal_rx_get_frame_ctrl_field(rx_desc_info);
902 
903 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
904 		"%s: frame control type: 0x%x", __func__, fc);
905 
906 	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
907 	case IEEE80211_FC1_DIR_NODS:
908 		hal_rx_mpdu_get_addr1(rx_desc_info,
909 			&mac_addr.raw[0]);
910 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
911 			IEEE80211_ADDR_LEN);
912 		hal_rx_mpdu_get_addr2(rx_desc_info,
913 			&mac_addr.raw[0]);
914 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
915 			IEEE80211_ADDR_LEN);
916 		break;
917 	case IEEE80211_FC1_DIR_TODS:
918 		hal_rx_mpdu_get_addr3(rx_desc_info,
919 			&mac_addr.raw[0]);
920 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
921 			IEEE80211_ADDR_LEN);
922 		hal_rx_mpdu_get_addr2(rx_desc_info,
923 			&mac_addr.raw[0]);
924 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
925 			IEEE80211_ADDR_LEN);
926 		break;
927 	case IEEE80211_FC1_DIR_FROMDS:
928 		hal_rx_mpdu_get_addr1(rx_desc_info,
929 			&mac_addr.raw[0]);
930 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
931 			IEEE80211_ADDR_LEN);
932 		hal_rx_mpdu_get_addr3(rx_desc_info,
933 			&mac_addr.raw[0]);
934 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
935 			IEEE80211_ADDR_LEN);
936 		break;
937 
938 	case IEEE80211_FC1_DIR_DSTODS:
939 		hal_rx_mpdu_get_addr3(rx_desc_info,
940 			&mac_addr.raw[0]);
941 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
942 			IEEE80211_ADDR_LEN);
943 		hal_rx_mpdu_get_addr4(rx_desc_info,
944 			&mac_addr.raw[0]);
945 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
946 			IEEE80211_ADDR_LEN);
947 		break;
948 
949 	default:
950 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
951 		"%s: Unknown frame control type: 0x%x", __func__, fc);
952 	}
953 
954 	qdf_mem_copy(eth_hdr->ethertype, ether_type,
955 			sizeof(ether_type));
956 
957 	qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN);
958 	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, RX_PKT_TLVS_LEN);
959 	qdf_mem_free(rx_desc_info);
960 }
961 
962 /*
963  * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
964  * @peer: Pointer to the peer
965  * @tid: Transmit Identifier
966  * @head: Buffer to be reinjected back
967  *
968  * Reinject the fragment chain back into REO
969  *
970  * Returns: QDF_STATUS
971  */
972  static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
973 					unsigned tid, qdf_nbuf_t head)
974 {
975 	struct dp_pdev *pdev = peer->vdev->pdev;
976 	struct dp_soc *soc = pdev->soc;
977 	struct hal_buf_info buf_info;
978 	void *link_desc_va;
979 	void *msdu0, *msdu_desc_info;
980 	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
981 	void *dst_mpdu_desc_info, *dst_qdesc_addr;
982 	qdf_dma_addr_t paddr;
983 	uint32_t nbuf_len, seq_no, dst_ind;
984 	uint32_t *mpdu_wrd;
985 	uint32_t ret, cookie;
986 
987 	void *dst_ring_desc =
988 		peer->rx_tid[tid].dst_ring_desc;
989 	void *hal_srng = soc->reo_reinject_ring.hal_srng;
990 
991 	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
992 	if (!ent_ring_desc) {
993 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
994 			  "HAL src ring next entry NULL");
995 		return QDF_STATUS_E_FAILURE;
996 	}
997 
998 	hal_rx_reo_buf_paddr_get(dst_ring_desc, &buf_info);
999 
1000 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1001 
1002 	qdf_assert(link_desc_va);
1003 
1004 	msdu0 = (uint8_t *)link_desc_va +
1005 		RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET;
1006 
1007 	nbuf_len = qdf_nbuf_len(head) - RX_PKT_TLVS_LEN;
1008 
1009 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
1010 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
1011 			UNI_DESC_BUF_TYPE_RX_MSDU_LINK);
1012 
1013 	/* msdu reconfig */
1014 	msdu_desc_info = (uint8_t *)msdu0 +
1015 		RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET;
1016 
1017 	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);
1018 
1019 	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));
1020 
1021 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1022 			FIRST_MSDU_IN_MPDU_FLAG, 1);
1023 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1024 			LAST_MSDU_IN_MPDU_FLAG, 1);
1025 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1026 			MSDU_CONTINUATION, 0x0);
1027 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1028 			REO_DESTINATION_INDICATION, dst_ind);
1029 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1030 			MSDU_LENGTH, nbuf_len);
1031 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1032 			SA_IS_VALID, 1);
1033 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
1034 			DA_IS_VALID, 1);
1035 
1036 	/* change RX TLV's */
1037 	hal_rx_msdu_start_msdu_len_set(
1038 			qdf_nbuf_data(head), nbuf_len);
1039 
1040 	cookie = HAL_RX_BUF_COOKIE_GET(msdu0);
1041 
1042 	/* map the nbuf before reinject it into HW */
1043 	ret = qdf_nbuf_map_single(soc->osdev, head,
1044 					QDF_DMA_BIDIRECTIONAL);
1045 
1046 	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1047 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1048 				"%s: nbuf map failed !", __func__);
1049 		return QDF_STATUS_E_FAILURE;
1050 	}
1051 
1052 	paddr = qdf_nbuf_get_frag_paddr(head, 0);
1053 
1054 	ret = check_x86_paddr(soc, &head, &paddr, pdev);
1055 
1056 	if (ret == QDF_STATUS_E_FAILURE) {
1057 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1058 				"%s: x86 check failed !", __func__);
1059 		return QDF_STATUS_E_FAILURE;
1060 	}
1061 
1062 	hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_WBM2SW_RBM);
1063 
1064 	/* Lets fill entrance ring now !!! */
1065 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1066 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1067 		"HAL RING Access For REO entrance SRNG Failed: %pK",
1068 		hal_srng);
1069 
1070 		return QDF_STATUS_E_FAILURE;
1071 	}
1072 
1073 	paddr = (uint64_t)buf_info.paddr;
1074 	/* buf addr */
1075 	hal_rxdma_buff_addr_info_set(ent_ring_desc, paddr,
1076 				     buf_info.sw_cookie,
1077 				     HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
1078 	/* mpdu desc info */
1079 	ent_mpdu_desc_info = (uint8_t *)ent_ring_desc +
1080 	RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET;
1081 
1082 	dst_mpdu_desc_info = (uint8_t *)dst_ring_desc +
1083 	REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET;
1084 
1085 	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
1086 				sizeof(struct rx_mpdu_desc_info));
1087 	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));
1088 
1089 	mpdu_wrd = (uint32_t *)dst_mpdu_desc_info;
1090 	seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd);
1091 
1092 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1093 			MSDU_COUNT, 0x1);
1094 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1095 			MPDU_SEQUENCE_NUMBER, seq_no);
1096 
1097 	/* unset frag bit */
1098 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1099 			FRAGMENT_FLAG, 0x0);
1100 
1101 	/* set sa/da valid bits */
1102 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1103 			SA_IS_VALID, 0x1);
1104 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1105 			DA_IS_VALID, 0x1);
1106 	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
1107 			RAW_MPDU, 0x0);
1108 
1109 	/* qdesc addr */
1110 	ent_qdesc_addr = (uint8_t *)ent_ring_desc +
1111 		REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;
1112 
1113 	dst_qdesc_addr = (uint8_t *)dst_ring_desc +
1114 		REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;
1115 
1116 	qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8);
1117 
1118 	HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5,
1119 			REO_DESTINATION_INDICATION, dst_ind);
1120 
1121 	hal_srng_access_end(soc->hal_soc, hal_srng);
1122 
1123 	DP_STATS_INC(soc, rx.reo_reinject, 1);
1124 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
1125 		  "%s: reinjection done !", __func__);
1126 	return QDF_STATUS_SUCCESS;
1127 }
1128 
1129 /*
1130  * dp_rx_defrag(): Defragment the fragment chain
1131  * @peer: Pointer to the peer
1132  * @tid: Transmit Identifier
1133  * @frag_list_head: Pointer to head list
1134  * @frag_list_tail: Pointer to tail list
1135  *
1136  * Defragment the fragment chain
1137  *
1138  * Returns: QDF_STATUS
1139  */
1140 static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
1141 			qdf_nbuf_t frag_list_head, qdf_nbuf_t frag_list_tail)
1142 {
1143 	qdf_nbuf_t tmp_next, prev;
1144 	qdf_nbuf_t cur = frag_list_head, msdu;
1145 	uint32_t index, tkip_demic = 0;
1146 	uint16_t hdr_space;
1147 	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
1148 	struct dp_vdev *vdev = peer->vdev;
1149 	struct dp_soc *soc = vdev->pdev->soc;
1150 	uint8_t status = 0;
1151 
1152 	hdr_space = dp_rx_defrag_hdrsize(cur);
1153 	index = hal_rx_msdu_is_wlan_mcast(cur) ?
1154 		dp_sec_mcast : dp_sec_ucast;
1155 
1156 	/* Remove FCS from all fragments */
1157 	while (cur) {
1158 		tmp_next = qdf_nbuf_next(cur);
1159 		qdf_nbuf_set_next(cur, NULL);
1160 		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
1161 		prev = cur;
1162 		qdf_nbuf_set_next(cur, tmp_next);
1163 		cur = tmp_next;
1164 	}
1165 	cur = frag_list_head;
1166 
1167 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1168 		  "%s: index %d Security type: %d", __func__,
1169 		  index, peer->security[index].sec_type);
1170 
1171 	switch (peer->security[index].sec_type) {
1172 	case cdp_sec_type_tkip:
1173 		tkip_demic = 1;
1174 
1175 	case cdp_sec_type_tkip_nomic:
1176 		while (cur) {
1177 			tmp_next = qdf_nbuf_next(cur);
1178 			if (dp_rx_defrag_tkip_decap(cur, hdr_space)) {
1179 
1180 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1181 					QDF_TRACE_LEVEL_ERROR,
1182 					"dp_rx_defrag: TKIP decap failed");
1183 
1184 				return QDF_STATUS_E_DEFRAG_ERROR;
1185 			}
1186 			cur = tmp_next;
1187 		}
1188 
1189 		/* If success, increment header to be stripped later */
1190 		hdr_space += dp_f_tkip.ic_header;
1191 		break;
1192 
1193 	case cdp_sec_type_aes_ccmp:
1194 		while (cur) {
1195 			tmp_next = qdf_nbuf_next(cur);
1196 			if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) {
1197 
1198 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1199 					QDF_TRACE_LEVEL_ERROR,
1200 					"dp_rx_defrag: CCMP demic failed");
1201 
1202 				return QDF_STATUS_E_DEFRAG_ERROR;
1203 			}
1204 			if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) {
1205 
1206 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1207 					QDF_TRACE_LEVEL_ERROR,
1208 					"dp_rx_defrag: CCMP decap failed");
1209 
1210 				return QDF_STATUS_E_DEFRAG_ERROR;
1211 			}
1212 			cur = tmp_next;
1213 		}
1214 
1215 		/* If success, increment header to be stripped later */
1216 		hdr_space += dp_f_ccmp.ic_header;
1217 		break;
1218 
1219 	case cdp_sec_type_wep40:
1220 	case cdp_sec_type_wep104:
1221 	case cdp_sec_type_wep128:
1222 		while (cur) {
1223 			tmp_next = qdf_nbuf_next(cur);
1224 			if (dp_rx_defrag_wep_decap(cur, hdr_space)) {
1225 
1226 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1227 					QDF_TRACE_LEVEL_ERROR,
1228 					"dp_rx_defrag: WEP decap failed");
1229 
1230 				return QDF_STATUS_E_DEFRAG_ERROR;
1231 			}
1232 			cur = tmp_next;
1233 		}
1234 
1235 		/* If success, increment header to be stripped later */
1236 		hdr_space += dp_f_wep.ic_header;
1237 		break;
1238 	default:
1239 		QDF_TRACE(QDF_MODULE_ID_TXRX,
1240 			QDF_TRACE_LEVEL_ERROR,
1241 			"dp_rx_defrag: Did not match any security type");
1242 		break;
1243 	}
1244 
1245 	if (tkip_demic) {
1246 		msdu = frag_list_head;
1247 		if (soc->cdp_soc.ol_ops->rx_frag_tkip_demic) {
1248 			status = soc->cdp_soc.ol_ops->rx_frag_tkip_demic(
1249 				(void *)peer->ctrl_peer, msdu, hdr_space);
1250 		} else {
1251 			qdf_mem_copy(key,
1252 				     &peer->security[index].michael_key[0],
1253 				IEEE80211_WEP_MICLEN);
1254 			status = dp_rx_defrag_tkip_demic(key, msdu,
1255 							 RX_PKT_TLVS_LEN +
1256 							 hdr_space);
1257 
1258 			if (status) {
1259 				dp_rx_defrag_err(vdev, frag_list_head);
1260 
1261 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1262 					  QDF_TRACE_LEVEL_ERROR,
1263 					  "%s: TKIP demic failed status %d",
1264 					  __func__, status);
1265 
1266 				return QDF_STATUS_E_DEFRAG_ERROR;
1267 			}
1268 		}
1269 	}
1270 
1271 	/* Convert the header to 802.3 header */
1272 	dp_rx_defrag_nwifi_to_8023(frag_list_head, hdr_space);
1273 	dp_rx_construct_fraglist(peer, frag_list_head, hdr_space);
1274 
1275 	return QDF_STATUS_SUCCESS;
1276 }
1277 
1278 /*
1279  * dp_rx_defrag_cleanup(): Clean up activities
1280  * @peer: Pointer to the peer
1281  * @tid: Transmit Identifier
1282  *
1283  * Returns: None
1284  */
1285 void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid)
1286 {
1287 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1288 				peer->rx_tid[tid].array;
1289 
1290 	if (!rx_reorder_array_elem) {
1291 		/*
1292 		 * if this condition is hit then somebody
1293 		 * must have reset this pointer to NULL.
1294 		 * array pointer usually points to base variable
1295 		 * of TID queue structure: "struct dp_rx_tid"
1296 		 */
1297 		QDF_ASSERT(0);
1298 		return;
1299 	}
1300 	/* Free up nbufs */
1301 	dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
1302 
1303 	/* Free up saved ring descriptors */
1304 	dp_rx_clear_saved_desc_info(peer, tid);
1305 
1306 	rx_reorder_array_elem->head = NULL;
1307 	rx_reorder_array_elem->tail = NULL;
1308 	peer->rx_tid[tid].defrag_timeout_ms = 0;
1309 	peer->rx_tid[tid].curr_frag_num = 0;
1310 	peer->rx_tid[tid].curr_seq_num = 0;
1311 	peer->rx_tid[tid].head_frag_desc = NULL;
1312 }
1313 
1314 /*
1315  * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor
1316  * @ring_desc: Pointer to the dst ring descriptor
1317  * @peer: Pointer to the peer
1318  * @tid: Transmit Identifier
1319  *
1320  * Returns: None
1321  */
1322 static QDF_STATUS dp_rx_defrag_save_info_from_ring_desc(void *ring_desc,
1323 	struct dp_rx_desc *rx_desc, struct dp_peer *peer, unsigned tid)
1324 {
1325 	void *dst_ring_desc = qdf_mem_malloc(
1326 			sizeof(struct reo_destination_ring));
1327 
1328 	if (dst_ring_desc == NULL) {
1329 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1330 			"%s: Memory alloc failed !", __func__);
1331 		QDF_ASSERT(0);
1332 		return QDF_STATUS_E_NOMEM;
1333 	}
1334 
1335 	qdf_mem_copy(dst_ring_desc, ring_desc,
1336 		       sizeof(struct reo_destination_ring));
1337 
1338 	peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
1339 	peer->rx_tid[tid].head_frag_desc = rx_desc;
1340 
1341 	return QDF_STATUS_SUCCESS;
1342 }
1343 
1344 /*
1345  * dp_rx_defrag_store_fragment(): Store incoming fragments
1346  * @soc: Pointer to the SOC data structure
1347  * @ring_desc: Pointer to the ring descriptor
1348  * @mpdu_desc_info: MPDU descriptor info
1349  * @tid: Traffic Identifier
1350  * @rx_desc: Pointer to rx descriptor
1351  * @rx_bfs: Number of bfs consumed
1352  *
1353  * Returns: QDF_STATUS
1354  */
1355 static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc,
1356 			void *ring_desc,
1357 			union dp_rx_desc_list_elem_t **head,
1358 			union dp_rx_desc_list_elem_t **tail,
1359 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1360 			unsigned tid, struct dp_rx_desc *rx_desc,
1361 			uint32_t *rx_bfs)
1362 {
1363 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1364 	struct dp_pdev *pdev;
1365 	struct dp_peer *peer;
1366 	uint16_t peer_id;
1367 	uint8_t fragno, more_frag, all_frag_present = 0;
1368 	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
1369 	QDF_STATUS status;
1370 	struct dp_rx_tid *rx_tid;
1371 	uint8_t mpdu_sequence_control_valid;
1372 	uint8_t mpdu_frame_control_valid;
1373 	qdf_nbuf_t frag = rx_desc->nbuf;
1374 
1375 	/* Check if the packet is from a valid peer */
1376 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
1377 					mpdu_desc_info->peer_meta_data);
1378 	peer = dp_peer_find_by_id(soc, peer_id);
1379 
1380 	if (!peer) {
1381 		/* We should not receive anything from unknown peer
1382 		 * however, that might happen while we are in the monitor mode.
1383 		 * We don't need to handle that here
1384 		 */
1385 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1386 			"Unknown peer, dropping the fragment");
1387 
1388 		qdf_nbuf_free(frag);
1389 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1390 		*rx_bfs = 1;
1391 
1392 		goto end;
1393 	}
1394 
1395 	pdev = peer->vdev->pdev;
1396 	rx_tid = &peer->rx_tid[tid];
1397 
1398 	rx_reorder_array_elem = peer->rx_tid[tid].array;
1399 
1400 	mpdu_sequence_control_valid =
1401 		hal_rx_get_mpdu_sequence_control_valid(rx_desc->rx_buf_start);
1402 
1403 	/* Invalid MPDU sequence control field, MPDU is of no use */
1404 	if (!mpdu_sequence_control_valid) {
1405 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1406 			"Invalid MPDU seq control field, dropping MPDU");
1407 		qdf_nbuf_free(frag);
1408 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1409 		*rx_bfs = 1;
1410 
1411 		qdf_assert(0);
1412 		goto end;
1413 	}
1414 
1415 	mpdu_frame_control_valid =
1416 		hal_rx_get_mpdu_frame_control_valid(rx_desc->rx_buf_start);
1417 
1418 	/* Invalid frame control field */
1419 	if (!mpdu_frame_control_valid) {
1420 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1421 			"Invalid frame control field, dropping MPDU");
1422 		qdf_nbuf_free(frag);
1423 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1424 		*rx_bfs = 1;
1425 
1426 		qdf_assert(0);
1427 		goto end;
1428 	}
1429 
1430 	/* Current mpdu sequence */
1431 	more_frag = dp_rx_frag_get_more_frag_bit(rx_desc->rx_buf_start);
1432 
1433 	/* HW does not populate the fragment number as of now
1434 	 * need to get from the 802.11 header
1435 	 */
1436 	fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc->rx_buf_start);
1437 
1438 	/*
1439 	 * !more_frag: no more fragments to be delivered
1440 	 * !frag_no: packet is not fragmented
1441 	 * !rx_reorder_array_elem->head: no saved fragments so far
1442 	 */
1443 	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
1444 		/* We should not get into this situation here.
1445 		 * It means an unfragmented packet with fragment flag
1446 		 * is delivered over the REO exception ring.
1447 		 * Typically it follows normal rx path.
1448 		 */
1449 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1450 			"Rcvd unfragmented pkt on REO Err srng, dropping");
1451 		qdf_nbuf_free(frag);
1452 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1453 		*rx_bfs = 1;
1454 
1455 		qdf_assert(0);
1456 		goto end;
1457 	}
1458 
1459 	/* Check if the fragment is for the same sequence or a different one */
1460 	if (rx_reorder_array_elem->head) {
1461 		if (rxseq != rx_tid->curr_seq_num) {
1462 
1463 			/* Drop stored fragments if out of sequence
1464 			 * fragment is received
1465 			 */
1466 			dp_rx_reorder_flush_frag(peer, tid);
1467 
1468 			DP_STATS_INC(soc, rx.rx_frag_err, 1);
1469 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1470 				"%s mismatch, dropping earlier sequence ",
1471 				(rxseq == rx_tid->curr_seq_num)
1472 				? "address"
1473 				: "seq number");
1474 
1475 			/*
1476 			 * The sequence number for this fragment becomes the
1477 			 * new sequence number to be processed
1478 			 */
1479 			rx_tid->curr_seq_num = rxseq;
1480 		}
1481 	} else {
1482 		/* Start of a new sequence */
1483 		dp_rx_defrag_cleanup(peer, tid);
1484 		rx_tid->curr_seq_num = rxseq;
1485 	}
1486 
1487 	/*
1488 	 * If the earlier sequence was dropped, this will be the fresh start.
1489 	 * Else, continue with next fragment in a given sequence
1490 	 */
1491 	status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head,
1492 			&rx_reorder_array_elem->tail, frag,
1493 			&all_frag_present);
1494 
1495 	/*
1496 	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
1497 	 * packet sequence has more than 6 MSDUs for some reason, we will
1498 	 * have to use the next MSDU link descriptor and chain them together
1499 	 * before reinjection
1500 	 */
1501 	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
1502 			(rx_reorder_array_elem->head == frag)) {
1503 
1504 		status = dp_rx_defrag_save_info_from_ring_desc(ring_desc,
1505 					rx_desc, peer, tid);
1506 
1507 		if (status != QDF_STATUS_SUCCESS) {
1508 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1509 				"%s: Unable to store ring desc !", __func__);
1510 			goto end;
1511 		}
1512 	} else {
1513 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
1514 		*rx_bfs = 1;
1515 
1516 		/* Return the non-head link desc */
1517 		if (dp_rx_link_desc_return(soc, ring_desc,
1518 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1519 				QDF_STATUS_SUCCESS)
1520 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1521 					"%s: Failed to return link desc",
1522 					__func__);
1523 
1524 	}
1525 
1526 	if (pdev->soc->rx.flags.defrag_timeout_check)
1527 		dp_rx_defrag_waitlist_remove(peer, tid);
1528 
1529 	/* Yet to receive more fragments for this sequence number */
1530 	if (!all_frag_present) {
1531 		uint32_t now_ms =
1532 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1533 
1534 		peer->rx_tid[tid].defrag_timeout_ms =
1535 			now_ms + pdev->soc->rx.defrag.timeout_ms;
1536 
1537 		dp_rx_defrag_waitlist_add(peer, tid);
1538 		dp_peer_unref_del_find_by_id(peer);
1539 
1540 		return QDF_STATUS_SUCCESS;
1541 	}
1542 
1543 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1544 		  "All fragments received for sequence: %d", rxseq);
1545 
1546 	/* Process the fragments */
1547 	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
1548 		rx_reorder_array_elem->tail);
1549 	if (QDF_IS_STATUS_ERROR(status)) {
1550 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1551 			"Fragment processing failed");
1552 
1553 		dp_rx_add_to_free_desc_list(head, tail,
1554 				peer->rx_tid[tid].head_frag_desc);
1555 		*rx_bfs = 1;
1556 
1557 		if (dp_rx_link_desc_return(soc,
1558 					peer->rx_tid[tid].dst_ring_desc,
1559 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1560 				QDF_STATUS_SUCCESS)
1561 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1562 					"%s: Failed to return link desc",
1563 					__func__);
1564 		dp_rx_defrag_cleanup(peer, tid);
1565 		goto end;
1566 	}
1567 
1568 	/* Re-inject the fragments back to REO for further processing */
1569 	status = dp_rx_defrag_reo_reinject(peer, tid,
1570 			rx_reorder_array_elem->head);
1571 	if (QDF_IS_STATUS_SUCCESS(status)) {
1572 		rx_reorder_array_elem->head = NULL;
1573 		rx_reorder_array_elem->tail = NULL;
1574 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1575 			  "Fragmented sequence successfully reinjected");
1576 	} else {
1577 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1578 		"Fragmented sequence reinjection failed");
1579 		dp_rx_return_head_frag_desc(peer, tid);
1580 	}
1581 
1582 	dp_rx_defrag_cleanup(peer, tid);
1583 
1584 	dp_peer_unref_del_find_by_id(peer);
1585 
1586 	return QDF_STATUS_SUCCESS;
1587 
1588 end:
1589 	if (peer)
1590 		dp_peer_unref_del_find_by_id(peer);
1591 
1592 	DP_STATS_INC(soc, rx.rx_frag_err, 1);
1593 	return QDF_STATUS_E_DEFRAG_ERROR;
1594 }
1595 
1596 /**
1597  * dp_rx_frag_handle() - Handles fragmented Rx frames
1598  *
1599  * @soc: core txrx main context
1600  * @ring_desc: opaque pointer to the REO error ring descriptor
1601  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
1602  * @head: head of the local descriptor free-list
1603  * @tail: tail of the local descriptor free-list
1604  * @quota: No. of units (packets) that can be serviced in one shot.
1605  *
1606  * This function implements RX 802.11 fragmentation handling
1607  * The handling is mostly same as legacy fragmentation handling.
1608  * If required, this function can re-inject the frames back to
1609  * REO ring (with proper setting to by-pass fragmentation check
1610  * but use duplicate detection / re-ordering and routing these frames
1611  * to a different core.
1612  *
1613  * Return: uint32_t: No. of elements processed
1614  */
1615 uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
1616 		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1617 		uint8_t *mac_id,
1618 		uint32_t quota)
1619 {
1620 	uint32_t rx_bufs_used = 0;
1621 	void *link_desc_va;
1622 	struct hal_buf_info buf_info;
1623 	struct hal_rx_msdu_list msdu_list; /* per MPDU list of MSDUs */
1624 	qdf_nbuf_t msdu = NULL;
1625 	uint32_t tid, msdu_len;
1626 	int idx, rx_bfs = 0;
1627 	struct dp_pdev *pdev;
1628 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1629 	struct dp_rx_desc *rx_desc = NULL;
1630 
1631 	qdf_assert(soc);
1632 	qdf_assert(mpdu_desc_info);
1633 
1634 	/* Fragment from a valid peer */
1635 	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);
1636 
1637 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1638 
1639 	qdf_assert(link_desc_va);
1640 
1641 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1642 		"Number of MSDUs to process, num_msdus: %d",
1643 		mpdu_desc_info->msdu_count);
1644 
1645 
1646 	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
1647 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1648 			"Not sufficient MSDUs to process");
1649 		return rx_bufs_used;
1650 	}
1651 
1652 	/* Get msdu_list for the given MPDU */
1653 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1654 			     &mpdu_desc_info->msdu_count);
1655 
1656 	/* Process all MSDUs in the current MPDU */
1657 	for (idx = 0; (idx < mpdu_desc_info->msdu_count); idx++) {
1658 		struct dp_rx_desc *rx_desc =
1659 			dp_rx_cookie_2_va_rxdma_buf(soc,
1660 				msdu_list.sw_cookie[idx]);
1661 
1662 		qdf_assert_always(rx_desc);
1663 
1664 		/* all buffers in MSDU link belong to same pdev */
1665 		pdev = soc->pdev_list[rx_desc->pool_id];
1666 		*mac_id = rx_desc->pool_id;
1667 
1668 		msdu = rx_desc->nbuf;
1669 
1670 		qdf_nbuf_unmap_single(soc->osdev, msdu,
1671 				QDF_DMA_BIDIRECTIONAL);
1672 
1673 		rx_desc->rx_buf_start = qdf_nbuf_data(msdu);
1674 
1675 		msdu_len = hal_rx_msdu_start_msdu_len_get(
1676 				rx_desc->rx_buf_start);
1677 
1678 		qdf_nbuf_set_pktlen(msdu, (msdu_len + RX_PKT_TLVS_LEN));
1679 		qdf_nbuf_append_ext_list(msdu, NULL, 0);
1680 
1681 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1682 						rx_desc->rx_buf_start);
1683 
1684 		/* Process fragment-by-fragment */
1685 		status = dp_rx_defrag_store_fragment(soc, ring_desc,
1686 						     &pdev->free_list_head,
1687 						     &pdev->free_list_tail,
1688 						     mpdu_desc_info,
1689 						     tid, rx_desc, &rx_bfs);
1690 
1691 		if (rx_bfs)
1692 			rx_bufs_used++;
1693 
1694 		if (!QDF_IS_STATUS_SUCCESS(status)) {
1695 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1696 				"Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
1697 				mpdu_desc_info->mpdu_seq,
1698 				mpdu_desc_info->msdu_count,
1699 				mpdu_desc_info->mpdu_flags);
1700 
1701 			/* No point in processing rest of the fragments */
1702 			break;
1703 		}
1704 	}
1705 
1706 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1707 		/* drop any remaining buffers in current descriptor */
1708 		idx++;
1709 		for (; (idx < mpdu_desc_info->msdu_count); idx++) {
1710 			rx_desc =
1711 				dp_rx_cookie_2_va_rxdma_buf(soc,
1712 							    msdu_list.sw_cookie[idx]);
1713 			qdf_assert(rx_desc);
1714 			msdu = rx_desc->nbuf;
1715 			qdf_nbuf_unmap_single(soc->osdev, msdu,
1716 					      QDF_DMA_BIDIRECTIONAL);
1717 			qdf_nbuf_free(msdu);
1718 			dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1719 						    &pdev->free_list_tail,
1720 						    rx_desc);
1721 			rx_bufs_used++;
1722 		}
1723 		if (dp_rx_link_desc_return(soc, ring_desc,
1724 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
1725 					   QDF_STATUS_SUCCESS)
1726 			dp_err("Failed to return link desc");
1727 	}
1728 
1729 	return rx_bufs_used;
1730 }
1731 
1732 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
1733 				      struct dp_peer *peer, uint16_t tid,
1734 		uint16_t rxseq, qdf_nbuf_t nbuf)
1735 {
1736 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1737 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1738 	uint8_t all_frag_present;
1739 	uint32_t msdu_len;
1740 	QDF_STATUS status;
1741 
1742 	rx_reorder_array_elem = peer->rx_tid[tid].array;
1743 
1744 	if (rx_reorder_array_elem->head &&
1745 	    rxseq != rx_tid->curr_seq_num) {
1746 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1747 			  "%s: No list found for TID %d Seq# %d",
1748 				__func__, tid, rxseq);
1749 		qdf_nbuf_free(nbuf);
1750 		goto fail;
1751 	}
1752 
1753 	msdu_len = hal_rx_msdu_start_msdu_len_get(qdf_nbuf_data(nbuf));
1754 
1755 	qdf_nbuf_set_pktlen(nbuf, (msdu_len + RX_PKT_TLVS_LEN));
1756 
1757 	status = dp_rx_defrag_fraglist_insert(peer, tid,
1758 					      &rx_reorder_array_elem->head,
1759 			&rx_reorder_array_elem->tail, nbuf,
1760 			&all_frag_present);
1761 
1762 	if (QDF_IS_STATUS_ERROR(status)) {
1763 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1764 			  "%s Fragment insert failed", __func__);
1765 
1766 		goto fail;
1767 	}
1768 
1769 	if (soc->rx.flags.defrag_timeout_check)
1770 		dp_rx_defrag_waitlist_remove(peer, tid);
1771 
1772 	if (!all_frag_present) {
1773 		uint32_t now_ms =
1774 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1775 
1776 		peer->rx_tid[tid].defrag_timeout_ms =
1777 			now_ms + soc->rx.defrag.timeout_ms;
1778 
1779 		dp_rx_defrag_waitlist_add(peer, tid);
1780 
1781 		return QDF_STATUS_SUCCESS;
1782 	}
1783 
1784 	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
1785 			      rx_reorder_array_elem->tail);
1786 
1787 	if (QDF_IS_STATUS_ERROR(status)) {
1788 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1789 			  "%s Fragment processing failed", __func__);
1790 
1791 		dp_rx_return_head_frag_desc(peer, tid);
1792 		dp_rx_defrag_cleanup(peer, tid);
1793 
1794 		goto fail;
1795 	}
1796 
1797 	/* Re-inject the fragments back to REO for further processing */
1798 	status = dp_rx_defrag_reo_reinject(peer, tid,
1799 					   rx_reorder_array_elem->head);
1800 	if (QDF_IS_STATUS_SUCCESS(status)) {
1801 		rx_reorder_array_elem->head = NULL;
1802 		rx_reorder_array_elem->tail = NULL;
1803 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1804 			  "%s: Frag seq successfully reinjected",
1805 			__func__);
1806 	} else {
1807 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1808 			  "%s: Frag seq reinjection failed",
1809 			__func__);
1810 		dp_rx_return_head_frag_desc(peer, tid);
1811 	}
1812 
1813 	dp_rx_defrag_cleanup(peer, tid);
1814 	return QDF_STATUS_SUCCESS;
1815 
1816 fail:
1817 	return QDF_STATUS_E_DEFRAG_ERROR;
1818 }
1819