xref: /wlan-dirver/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c (revision bf57c2c31e6d81d5645b6ca42f1e2fde23df07f7)
1 /*
2  * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /*-
29  * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  * notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  * notice, this list of conditions and the following disclaimer in the
39  * documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
42  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
43  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
44  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
45  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
46  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
47  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
48  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51  */
52 #include <ol_htt_api.h>
53 #include <ol_txrx_api.h>
54 #include <ol_txrx_htt_api.h>
55 #include <ol_htt_rx_api.h>
56 #include <ol_rx_reorder.h>
57 #include <ol_rx_pn.h>
58 #include <ol_rx_fwd.h>
59 #include <ol_rx.h>
60 #include <ol_txrx_internal.h>
61 #include <ol_ctrl_txrx_api.h>
62 #include <ol_txrx_peer_find.h>
63 #include <qdf_nbuf.h>
64 #include <qdf_util.h>
65 #include <athdefs.h>
66 #include <qdf_mem.h>
67 #include <ol_rx_defrag.h>
68 #include <enet.h>
69 #include <qdf_time.h>           /* qdf_system_time */
70 
/* True when two 802.11 addresses match (qdf_mem_cmp is memcmp-like: 0 == equal) */
#define DEFRAG_IEEE80211_ADDR_EQ(a1, a2) \
	(!qdf_mem_cmp(a1, a2, IEEE80211_ADDR_LEN))

/* Copy one 802.11 address (IEEE80211_ADDR_LEN bytes) */
#define DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	qdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)

/* True when the frame is a QoS data frame (and therefore carries i_qos) */
#define DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh)->i_fc[0] & \
	  (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
	 (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))

/* Extract the TID from the first QoS-control byte */
#define DEFRAG_IEEE80211_QOS_GET_TID(_x) \
	((_x)->i_qos[0] & IEEE80211_QOS_TID)
84 
/*
 * AES-CCM defrag parameters: 8-byte IV/KeyID/ExtIV header is stripped
 * (ic_header); the 8-byte MIC is removed as a trailer (ic_trailer);
 * no Michael MIC (ic_miclen = 0).
 */
const struct ol_rx_defrag_cipher f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};
91 
/*
 * TKIP defrag parameters: 8-byte IV/KeyID/ExtIV header (ic_header);
 * the WEP ICV is the trailer (ic_trailer); the 8-byte Michael MIC
 * (ic_miclen) is verified and stripped separately in tkip_demic.
 */
const struct ol_rx_defrag_cipher f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};
98 
/*
 * WEP defrag parameters: 4-byte IV/KeyID header (ic_header); the ICV
 * (CRC) is the trailer (ic_trailer); no Michael MIC.
 */
const struct ol_rx_defrag_cipher f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};
105 
106 #if defined(CONFIG_HL_SUPPORT)
107 
108 /**
109  * ol_rx_frag_get_mac_hdr() - retrieve mac header
110  * @htt_pdev: pointer to htt pdev handle
111  * @frag: rx fragment
112  *
113  * Return: pointer to ieee mac header of frag
114  */
115 static struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
116 	htt_pdev_handle htt_pdev, qdf_nbuf_t frag)
117 {
118 	void *rx_desc;
119 	int rx_desc_len;
120 
121 	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
122 	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
123 	return (struct ieee80211_frame *)(qdf_nbuf_data(frag) + rx_desc_len);
124 }
125 
126 /**
127  * ol_rx_frag_pull_hdr() - point to payload of rx frag
128  * @htt_pdev: pointer to htt pdev handle
129  * @frag: rx fragment
130  * @hdrsize: header size
131  *
132  * Return: None
133  */
134 static void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
135 	qdf_nbuf_t frag, int hdrsize)
136 {
137 	void *rx_desc;
138 	int rx_desc_len;
139 
140 	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
141 	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
142 	qdf_nbuf_pull_head(frag, rx_desc_len + hdrsize);
143 }
144 
145 /**
146  * ol_rx_frag_desc_adjust() - adjust rx frag descriptor position
147  * @pdev: pointer to txrx handle
148  * @msdu: msdu
149  * @rx_desc_old_position: rx descriptor old position
150  * @ind_old_position:index of old position
151  * @rx_desc_len: rx desciptor length
152  *
153  * Return: None
154  */
155 static void
156 ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
157 		       qdf_nbuf_t msdu,
158 			void **rx_desc_old_position,
159 			void **ind_old_position, int *rx_desc_len)
160 {
161 	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(pdev->htt_pdev,
162 									msdu);
163 	*ind_old_position = *rx_desc_old_position - HTT_RX_IND_HL_BYTES;
164 	*rx_desc_len = htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev,
165 			*rx_desc_old_position);
166 }
167 
168 /**
169  * ol_rx_frag_restructure() - point to payload for HL
170  * @pdev: physical device object
171  * @msdu: the buffer containing the MSDU payload
172  * @rx_desc_old_position: rx MSDU descriptor
173  * @ind_old_position: rx msdu indication
174  * @f_type: pointing to rx defrag cipher
175  * @rx_desc_len: length by which rx descriptor to move
176  *
177  * Return: None
178  */
179 static void
180 ol_rx_frag_restructure(
181 	ol_txrx_pdev_handle pdev,
182 	qdf_nbuf_t msdu,
183 	void *rx_desc_old_position,
184 	void *ind_old_position,
185 	const struct ol_rx_defrag_cipher *f_type,
186 	int rx_desc_len)
187 {
188 	if ((ind_old_position == NULL) || (rx_desc_old_position == NULL)) {
189 		ol_txrx_err("ind_old_position,rx_desc_old_position is NULL\n");
190 		ASSERT(0);
191 		return;
192 	}
193 	/* move rx description*/
194 	qdf_mem_move(rx_desc_old_position + f_type->ic_header,
195 		     rx_desc_old_position, rx_desc_len);
196 	/* move rx indication*/
197 	qdf_mem_move(ind_old_position + f_type->ic_header, ind_old_position,
198 		     HTT_RX_IND_HL_BYTES);
199 }
200 
201 /**
202  * ol_rx_get_desc_len() - point to payload for HL
203  * @htt_pdev: the HTT instance the rx data was received on
204  * @wbuf: buffer containing the MSDU payload
205  * @rx_desc_old_position: rx MSDU descriptor
206  *
207  * Return: Return the HL rx desc size
208  */
209 static
210 int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
211 			qdf_nbuf_t wbuf,
212 			void **rx_desc_old_position)
213 {
214 	int rx_desc_len = 0;
215 	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(htt_pdev, wbuf);
216 	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev,
217 			*rx_desc_old_position);
218 
219 	return rx_desc_len;
220 }
221 
222 /**
223  * ol_rx_defrag_push_rx_desc() - point to payload for HL
224  * @nbuf: buffer containing the MSDU payload
225  * @rx_desc_old_position: rx MSDU descriptor
226  * @ind_old_position: rx msdu indication
227  * @rx_desc_len: HL rx desc size
228  *
229  * Return: Return the HL rx desc size
230  */
231 static
232 void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
233 				void *rx_desc_old_position,
234 				void *ind_old_position,
235 				int rx_desc_len)
236 {
237 	qdf_nbuf_push_head(nbuf, rx_desc_len);
238 	qdf_mem_move(
239 		qdf_nbuf_data(nbuf), rx_desc_old_position, rx_desc_len);
240 	qdf_mem_move(
241 		qdf_nbuf_data(nbuf) - HTT_RX_IND_HL_BYTES, ind_old_position,
242 		HTT_RX_IND_HL_BYTES);
243 }
244 #else
245 
246 static inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
247 	htt_pdev_handle htt_pdev,
248 	qdf_nbuf_t frag)
249 {
250 	return
251 		(struct ieee80211_frame *) qdf_nbuf_data(frag);
252 }
253 
254 static inline void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
255 	qdf_nbuf_t frag, int hdrsize)
256 {
257 	qdf_nbuf_pull_head(frag, hdrsize);
258 }
259 
260 static inline void
261 ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
262 		       qdf_nbuf_t msdu,
263 		       void **rx_desc_old_position,
264 		       void **ind_old_position, int *rx_desc_len)
265 {
266 	*rx_desc_old_position = NULL;
267 	*ind_old_position = NULL;
268 	*rx_desc_len = 0;
269 }
270 
271 static inline void
272 ol_rx_frag_restructure(
273 		ol_txrx_pdev_handle pdev,
274 		qdf_nbuf_t msdu,
275 		void *rx_desc_old_position,
276 		void *ind_old_position,
277 		const struct ol_rx_defrag_cipher *f_type,
278 		int rx_desc_len)
279 {
280 	/* no op */
281 }
282 
283 static inline
284 int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
285 			qdf_nbuf_t wbuf,
286 			void **rx_desc_old_position)
287 {
288 	return 0;
289 }
290 
291 static inline
292 void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
293 			void *rx_desc_old_position,
294 			void *ind_old_position,
295 			int rx_desc_len)
296 {
297 	return;
298 }
299 #endif /* CONFIG_HL_SUPPORT */
300 
301 #ifdef WDI_EVENT_ENABLE
302 static inline
303 void ol_rx_frag_send_pktlog_event(struct ol_txrx_pdev_t *pdev,
304 	struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, uint8_t pktlog_bit)
305 {
306 	ol_rx_send_pktlog_event(pdev, peer, msdu, pktlog_bit);
307 }
308 
309 #else
static inline
void ol_rx_frag_send_pktlog_event(struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, uint8_t pktlog_bit)
{
	/* no-op: packet logging (WDI_EVENT_ENABLE) is compiled out */
}
315 
316 #endif
317 
/**
 * ol_rx_frag_indication_handler() - process an incoming rx fragment indication
 * @pdev: txrx pdev the indication arrived on
 * @rx_frag_ind_msg: HTT rx fragment indication message
 * @peer_id: ID of the transmitting peer
 * @tid: traffic ID of the fragment
 *
 * Pops the fragment from HTT, performs flush handling for
 * non-full-reorder-offload targets, and hands the fragment to the
 * reorder/defrag machinery; fragments from unknown peers are freed.
 */
void
ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
			      qdf_nbuf_t rx_frag_ind_msg,
			      uint16_t peer_id, uint8_t tid)
{
	uint16_t seq_num;
	uint16_t seq_num_start, seq_num_end;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	qdf_nbuf_t head_msdu, tail_msdu;
	void *rx_mpdu_desc;
	uint8_t pktlog_bit;
	uint32_t msdu_count = 0;
	int ret;

	/* reject out-of-range TIDs coming from the target */
	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("%s:  invalid tid, %u\n", __FUNCTION__, tid);
		return;
	}

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
	    htt_rx_ind_flush(pdev->htt_pdev, rx_frag_ind_msg) && peer) {
		htt_rx_frag_ind_flush_seq_num_range(pdev->htt_pdev,
						    rx_frag_ind_msg,
						    &seq_num_start,
						    &seq_num_end);
		/*
		 * Assuming flush indication for frags sent from target is
		 * separate from normal frames
		 */
		ol_rx_reorder_flush_frag(htt_pdev, peer, tid, seq_num_start);
	}
	pktlog_bit =
		(htt_rx_amsdu_rx_in_order_get_pktlog(rx_frag_ind_msg) == 0x01);
	ret = htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
			      &tail_msdu, &msdu_count);
	/* Return if msdu pop fails from rx hash table, as recovery
	 * is triggered and we exit gracefully.
	 */
	if (!ret)
		return;
	/*
	 * NOTE(review): head_msdu is used below without a NULL check -
	 * confirm htt_rx_frag_pop() guarantees a non-NULL head when it
	 * returns non-zero.
	 */
	if (peer) {
		/* fragments are delivered from the target one at a time */
		qdf_assert(head_msdu == tail_msdu);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev, head_msdu);
		} else {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev,
							   rx_frag_ind_msg);
		}
		seq_num = htt_rx_mpdu_desc_seq_num(htt_pdev, rx_mpdu_desc);
		OL_RX_ERR_STATISTICS_1(pdev, peer->vdev, peer, rx_mpdu_desc,
				       OL_RX_ERR_NONE_FRAG);
		ol_rx_frag_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		ol_rx_reorder_store_frag(pdev, peer, tid, seq_num, head_msdu);
	} else {
		/* invalid frame - discard it */
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu);
		else
			htt_rx_mpdu_desc_list_next(htt_pdev, rx_frag_ind_msg);

		ol_rx_frag_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
	}
	/* request HTT to provide new rx MSDU buffers for the target to fill. */
	htt_rx_msdu_buff_replenish(htt_pdev);
}
393 
394 /*
395  * Flushing fragments
396  */
397 void
398 ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev,
399 			 struct ol_txrx_peer_t *peer,
400 			 unsigned int tid, uint16_t seq_num)
401 {
402 	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
403 	int seq;
404 
405 	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
406 	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];
407 	if (rx_reorder_array_elem->head) {
408 		ol_rx_frames_free(htt_pdev, rx_reorder_array_elem->head);
409 		rx_reorder_array_elem->head = NULL;
410 		rx_reorder_array_elem->tail = NULL;
411 	}
412 }
413 
/**
 * ol_rx_reorder_store_frag() - accumulate one rx fragment for reassembly
 * @pdev: txrx pdev
 * @peer: transmitting peer
 * @tid: traffic ID
 * @seq_num: 802.11 sequence number reported by the target
 * @frag: the received fragment
 *
 * Unfragmented frames with an empty pending list are defragged (i.e.
 * delivered) immediately; otherwise the fragment is merged into the
 * per-tid fragment list, and the list is defragged once complete.
 */
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned int tid, uint16_t seq_num, qdf_nbuf_t frag)
{
	struct ieee80211_frame *fmac_hdr, *mac_hdr;
	uint8_t fragno, more_frag, all_frag_present = 0;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	uint16_t frxseq, rxseq, seq;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	/* fragments are always stored at reorder-array index 0 */
	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	qdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	/* split the 802.11 sequence-control field into seq and frag number */
	rxseq = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
	fragno = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;

	/* fast path: unfragmented frame and nothing pending */
	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		rx_reorder_array_elem->head = frag;
		rx_reorder_array_elem->tail = frag;
		qdf_nbuf_set_next(frag, NULL);
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		return;
	}
	if (rx_reorder_array_elem->head) {
		fmac_hdr = (struct ieee80211_frame *)
			ol_rx_frag_get_mac_hdr(htt_pdev,
					       rx_reorder_array_elem->head);
		frxseq = qdf_le16_to_cpu(*(uint16_t *) fmac_hdr->i_seq) >>
			IEEE80211_SEQ_SEQ_SHIFT;
		/*
		 * New fragment must match the pending list's sequence
		 * number and addresses; otherwise drop the stale list
		 * and start over with this fragment.
		 */
		if (rxseq != frxseq
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr1,
						 fmac_hdr->i_addr1)
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr2,
						 fmac_hdr->i_addr2)) {
			ol_rx_frames_free(htt_pdev,
					  rx_reorder_array_elem->head);
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
			ol_txrx_err("\n ol_rx_reorder_store:%s mismatch\n",
				   (rxseq == frxseq)
				   ? "address"
				   : "seq number");
		}
	}

	ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head,
			      &rx_reorder_array_elem->tail, frag,
			      &all_frag_present);

	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_remove(peer, tid);

	if (all_frag_present) {
		/* whole MPDU collected: reassemble and deliver */
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		peer->tids_rx_reorder[tid].defrag_timeout_ms = 0;
		peer->tids_last_seq[tid] = seq_num;
	} else if (pdev->rx.flags.defrag_timeout_check) {
		/* incomplete: arm the defrag expiry for this tid */
		uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

		peer->tids_rx_reorder[tid].defrag_timeout_ms =
			now_ms + pdev->rx.defrag.timeout_ms;
		ol_rx_defrag_waitlist_add(peer, tid);
	}
}
492 
/**
 * ol_rx_fraglist_insert() - insert a fragment into the pending frag list
 * @htt_pdev: the HTT instance
 * @head_addr: in/out head of the fragment list
 * @tail_addr: in/out tail of the fragment list
 * @frag: fragment to insert, keyed by its 802.11 fragment number
 * @all_frag_present: out flag, set to 1 only when the tail has
 *	More-Frag clear and fragment numbers 1..n follow the head
 *	consecutively
 */
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
		      qdf_nbuf_t *head_addr,
		      qdf_nbuf_t *tail_addr,
		      qdf_nbuf_t frag, uint8_t *all_frag_present)
{
	qdf_nbuf_t next, prev = NULL, cur = *head_addr;
	struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
	uint8_t fragno, cur_fragno, lfragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;

	qdf_assert(frag);

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	fragno = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;

	/* empty list: fragment becomes both head and tail */
	if (!(*head_addr)) {
		*head_addr = frag;
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		return;
	}
	/* For efficiency, compare with tail first */
	lmac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, *tail_addr);
	lfragno = qdf_le16_to_cpu(*(uint16_t *) lmac_hdr->i_seq) &
		  IEEE80211_SEQ_FRAG_MASK;
	if (fragno > lfragno) {
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
	} else {
		/*
		 * Walk until a node with fragno >= frag's fragno is seen.
		 * The fragno <= lfragno guard above bounds the walk, so
		 * cur cannot run off the end of the list.
		 *
		 * NOTE(review): on loop exit, prev is the first node with
		 * fragno >= frag's, and frag is linked AFTER prev - a
		 * middle insertion (e.g. list 0,2 plus frag 1) appears to
		 * come out as 0,2,1. Confirm whether out-of-order middle
		 * arrival is reachable, and fix the link order if so.
		 */
		do {
			cmac_hdr = (struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, cur);
			cur_fragno =
				qdf_le16_to_cpu(*(uint16_t *) cmac_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			prev = cur;
			cur = qdf_nbuf_next(cur);
		} while (fragno > cur_fragno);

		/* duplicate fragment number: drop the new arrival */
		if (fragno == cur_fragno) {
			htt_rx_desc_frame_free(htt_pdev, frag);
			*all_frag_present = 0;
			return;
		}

		qdf_nbuf_set_next(prev, frag);
		qdf_nbuf_set_next(frag, cur);
	}
	/*
	 * Completeness check: the tail must have More-Frag clear, and the
	 * nodes after the head must carry fragment numbers 1, 2, ... with
	 * no gaps.
	 */
	next = qdf_nbuf_next(*head_addr);
	lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
								    *tail_addr);
	last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
	if (!last_morefrag) {
		do {
			next_hdr =
				(struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, next);
			next_fragno =
				qdf_le16_to_cpu(*(uint16_t *) next_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			count++;
			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return;
		}
	}
	*all_frag_present = 0;
}
575 
576 /*
577  * add tid to pending fragment wait list
578  */
579 void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned int tid)
580 {
581 	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
582 	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];
583 
584 	TAILQ_INSERT_TAIL(&pdev->rx.defrag.waitlist, rx_reorder,
585 			  defrag_waitlist_elem);
586 }
587 
588 /*
589  * remove tid from pending fragment wait list
590  */
591 void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned int tid)
592 {
593 	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
594 	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];
595 
596 	if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) {
597 
598 		TAILQ_REMOVE(&pdev->rx.defrag.waitlist, rx_reorder,
599 			     defrag_waitlist_elem);
600 
601 		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
602 		rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
603 	} else if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) {
604 		ol_txrx_alert("waitlist->tqe_prv = NULL\n");
605 		QDF_ASSERT(0);
606 		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
607 	}
608 }
609 
#ifndef container_of
/* Fallback: recover a pointer to the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
#endif
614 
/**
 * ol_rx_defrag_waitlist_flush() - discard fragment lists whose defrag
 * timeout has expired
 * @pdev: txrx pdev whose waitlist is scanned
 *
 * Walks the waitlist head-first and stops at the first entry whose
 * expiry lies in the future; the peer is recovered from the per-tid
 * reorder entry via pointer arithmetic plus container_of.
 */
void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
{
	struct ol_rx_reorder_t *rx_reorder, *tmp;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

	TAILQ_FOREACH_SAFE(rx_reorder, &pdev->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct ol_txrx_peer_t *peer;
		struct ol_rx_reorder_t *rx_reorder_base;
		unsigned int tid;

		/* entries are appended in expiry order; first live entry
		 * means the rest are live too
		 */
		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		if (tid >= OL_TXRX_NUM_EXT_TIDS) {
			ol_txrx_err("%s:  invalid tid, %u\n", __FUNCTION__, tid);
			WARN_ON(1);
			continue;
		}
		/* get index 0 of the rx_reorder array */
		rx_reorder_base = rx_reorder - tid;
		peer =
			container_of(rx_reorder_base, struct ol_txrx_peer_t,
				     tids_rx_reorder[0]);

		ol_rx_defrag_waitlist_remove(peer, tid);
		ol_rx_reorder_flush_frag(pdev->htt_pdev, peer, tid,
					 0 /* frags always stored at seq 0 */);
	}
}
649 
/**
 * ol_rx_defrag() - PN-check, decrypt-decap and reassemble a fragment list
 * @pdev: txrx pdev
 * @peer: transmitting peer (security state is read from here)
 * @tid: traffic ID
 * @frag_list: complete, ordered list of fragments for one MPDU
 *
 * Runs the replay (PN) check and FCS trim per fragment, performs the
 * per-cipher decap (TKIP/CCMP/WEP), recombines the fragments into a
 * single MSDU, verifies the Michael MIC for TKIP, and forwards the
 * result through the rx-forwarding check.
 */
void
ol_rx_defrag(ol_txrx_pdev_handle pdev,
	     struct ol_txrx_peer_t *peer, unsigned int tid,
	     qdf_nbuf_t frag_list)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	qdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list;
	uint8_t index, tkip_demic = 0;
	uint16_t hdr_space;
	void *rx_desc;
	struct ieee80211_frame *wh;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	vdev = peer->vdev;

	/* bypass defrag for safe mode */
	if (vdev->safemode) {
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			ol_rx_in_order_deliver(vdev, peer, tid, frag_list);
		else
			ol_rx_deliver(vdev, peer, tid, frag_list);
		return;
	}

	/* pass 1: replay check and FCS removal on every fragment */
	while (cur) {
		tmp_next = qdf_nbuf_next(cur);
		qdf_nbuf_set_next(cur, NULL);
		if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) {
			/* PN check failed,discard frags */
			/*
			 * NOTE(review): this path frees the list before
			 * cur (via frag_list, when prev != NULL) and the
			 * list after cur (tmp_next), but cur itself -
			 * already unlinked above - does not appear to be
			 * freed anywhere here. Confirm whether
			 * ol_rx_pn_check_base() consumes it on failure;
			 * otherwise this leaks one buffer.
			 */
			if (prev) {
				qdf_nbuf_set_next(prev, NULL);
				ol_rx_frames_free(htt_pdev, frag_list);
			}
			ol_rx_frames_free(htt_pdev, tmp_next);
			ol_txrx_err("ol_rx_defrag: PN Check failed\n");
			return;
		}
		/* remove FCS from each fragment */
		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		prev = cur;
		qdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list;
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, cur);
	hdr_space = ol_rx_frag_hdrsize(wh);
	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag_list);
	qdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc));
	/* pick the unicast or multicast key context for this frame */
	index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;

	/* pass 2: per-cipher decap of every fragment */
	switch (peer->security[index].sec_type) {
	case htt_sec_type_tkip:
		tkip_demic = 1;
	/* fall-through to rest of tkip ops */
	case htt_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) {
				/* TKIP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("\n ol_rx_defrag: TKIP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) {
				/* CCMP demic failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("\n ol_rx_defrag: CCMP demic failed\n");
				return;
			}
			if (!ol_rx_frag_ccmp_decap(pdev, cur, hdr_space)) {
				/* CCMP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("\n ol_rx_defrag: CCMP decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_wep40:
	case htt_sec_type_wep104:
	case htt_sec_type_wep128:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) {
				/* wep decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("\n ol_rx_defrag: wep decap failed\n");
				return;
			}
			cur = tmp_next;
		}
		break;

	default:
		break;
	}

	/* splice the stripped fragments into one MSDU */
	msdu = ol_rx_defrag_decap_recombine(htt_pdev, frag_list, hdr_space);
	if (!msdu)
		return;

	if (tkip_demic) {
		/* verify the Michael MIC over the reassembled frame */
		qdf_mem_copy(key,
			     peer->security[index].michael_key,
			     sizeof(peer->security[index].michael_key));
		if (!ol_rx_frag_tkip_demic(pdev, key, msdu, hdr_space)) {
			htt_rx_desc_frame_free(htt_pdev, msdu);
			ol_rx_err(pdev->ctrl_pdev,
				  vdev->vdev_id, peer->mac_addr.raw, tid, 0,
				  OL_RX_DEFRAG_ERR, msdu, NULL, 0);
			ol_txrx_err("\n ol_rx_defrag: TKIP demic failed\n");
			return;
		}
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, msdu);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh))
		ol_rx_defrag_qos_decap(pdev, msdu, hdr_space);
	if (ol_cfg_frame_type(pdev->ctrl_pdev) == wlan_frm_fmt_802_3)
		ol_rx_defrag_nwifi_to_8023(pdev, msdu);

	ol_rx_fwd_check(vdev, peer, tid, msdu);
}
785 
786 /*
787  * Handling TKIP processing for defragmentation
788  */
789 int
790 ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
791 		      qdf_nbuf_t msdu, uint16_t hdrlen)
792 {
793 	uint8_t *ivp, *origHdr;
794 
795 	void *rx_desc_old_position = NULL;
796 	void *ind_old_position = NULL;
797 	int rx_desc_len = 0;
798 
799 	ol_rx_frag_desc_adjust(pdev,
800 			       msdu,
801 			       &rx_desc_old_position,
802 			       &ind_old_position, &rx_desc_len);
803 	/* Header should have extended IV */
804 	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
805 
806 	ivp = origHdr + hdrlen;
807 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
808 		return OL_RX_DEFRAG_ERR;
809 
810 	qdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
811 	ol_rx_frag_restructure(
812 			pdev,
813 			msdu,
814 			rx_desc_old_position,
815 			ind_old_position,
816 			&f_tkip,
817 			rx_desc_len);
818 	qdf_nbuf_pull_head(msdu, f_tkip.ic_header);
819 	qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
820 	return OL_RX_DEFRAG_OK;
821 }
822 
823 /*
824  * Handling WEP processing for defragmentation
825  */
826 int
827 ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t hdrlen)
828 {
829 	uint8_t *origHdr;
830 	void *rx_desc_old_position = NULL;
831 	void *ind_old_position = NULL;
832 	int rx_desc_len = 0;
833 
834 	ol_rx_frag_desc_adjust(pdev,
835 			       msdu,
836 			       &rx_desc_old_position,
837 			       &ind_old_position, &rx_desc_len);
838 	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
839 	qdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen);
840 	ol_rx_frag_restructure(
841 			pdev,
842 			msdu,
843 			rx_desc_old_position,
844 			ind_old_position,
845 			&f_wep,
846 			rx_desc_len);
847 	qdf_nbuf_pull_head(msdu, f_wep.ic_header);
848 	qdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
849 	return OL_RX_DEFRAG_OK;
850 }
851 
852 /*
853  * Verify and strip MIC from the frame.
854  */
855 int
856 ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key,
857 		      qdf_nbuf_t msdu, uint16_t hdrlen)
858 {
859 	int status;
860 	uint32_t pktlen;
861 	uint8_t mic[IEEE80211_WEP_MICLEN];
862 	uint8_t mic0[IEEE80211_WEP_MICLEN];
863 	void *rx_desc_old_position = NULL;
864 	void *ind_old_position = NULL;
865 	int rx_desc_len = 0;
866 
867 	ol_rx_frag_desc_adjust(pdev,
868 			       msdu,
869 			       &rx_desc_old_position,
870 			       &ind_old_position, &rx_desc_len);
871 
872 	pktlen = ol_rx_defrag_len(msdu) - rx_desc_len;
873 
874 	status = ol_rx_defrag_mic(pdev, key, msdu, hdrlen,
875 				  pktlen - (hdrlen + f_tkip.ic_miclen), mic);
876 	if (status != OL_RX_DEFRAG_OK)
877 		return OL_RX_DEFRAG_ERR;
878 
879 	ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len,
880 			      f_tkip.ic_miclen, (caddr_t) mic0);
881 	if (!qdf_mem_cmp(mic, mic0, f_tkip.ic_miclen))
882 		return OL_RX_DEFRAG_ERR;
883 
884 	qdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen);
885 	return OL_RX_DEFRAG_OK;
886 }
887 
888 /*
889  * Handling CCMP processing for defragmentation
890  */
891 int
892 ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
893 		      qdf_nbuf_t nbuf, uint16_t hdrlen)
894 {
895 	uint8_t *ivp, *origHdr;
896 	void *rx_desc_old_position = NULL;
897 	void *ind_old_position = NULL;
898 	int rx_desc_len = 0;
899 
900 	ol_rx_frag_desc_adjust(pdev,
901 			       nbuf,
902 			       &rx_desc_old_position,
903 			       &ind_old_position, &rx_desc_len);
904 
905 	origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
906 	ivp = origHdr + hdrlen;
907 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
908 		return OL_RX_DEFRAG_ERR;
909 
910 	qdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen);
911 	ol_rx_frag_restructure(
912 			pdev,
913 			nbuf,
914 			rx_desc_old_position,
915 			ind_old_position,
916 			&f_ccmp,
917 			rx_desc_len);
918 	qdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);
919 
920 	return OL_RX_DEFRAG_OK;
921 }
922 
923 /*
924  * Verify and strip MIC from the frame.
925  */
926 int
927 ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
928 		      qdf_nbuf_t wbuf, uint16_t hdrlen)
929 {
930 	uint8_t *ivp, *origHdr;
931 	void *rx_desc_old_position = NULL;
932 	void *ind_old_position = NULL;
933 	int rx_desc_len = 0;
934 
935 	ol_rx_frag_desc_adjust(pdev,
936 			       wbuf,
937 			       &rx_desc_old_position,
938 			       &ind_old_position, &rx_desc_len);
939 
940 	origHdr = (uint8_t *) (qdf_nbuf_data(wbuf) + rx_desc_len);
941 
942 	ivp = origHdr + hdrlen;
943 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
944 		return OL_RX_DEFRAG_ERR;
945 
946 	qdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);
947 
948 	return OL_RX_DEFRAG_OK;
949 }
950 
951 /*
952  * Craft pseudo header used to calculate the MIC.
953  */
954 void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[])
955 {
956 	const struct ieee80211_frame_addr4 *wh =
957 		(const struct ieee80211_frame_addr4 *)wh0;
958 
959 	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
960 	case IEEE80211_FC1_DIR_NODS:
961 		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);   /* DA */
962 		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
963 					   wh->i_addr2);
964 		break;
965 	case IEEE80211_FC1_DIR_TODS:
966 		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);   /* DA */
967 		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
968 					   wh->i_addr2);
969 		break;
970 	case IEEE80211_FC1_DIR_FROMDS:
971 		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);   /* DA */
972 		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
973 					   wh->i_addr3);
974 		break;
975 	case IEEE80211_FC1_DIR_DSTODS:
976 		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);   /* DA */
977 		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
978 					   wh->i_addr4);
979 		break;
980 	}
981 	/*
982 	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but
983 	 * it could also be set for deauth, disassoc, action, etc. for
984 	 * a mgt type frame. It comes into picture for MFP.
985 	 */
986 	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
987 		const struct ieee80211_qosframe *qwh =
988 			(const struct ieee80211_qosframe *)wh;
989 		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
990 	} else {
991 		hdr[12] = 0;
992 	}
993 	hdr[13] = hdr[14] = hdr[15] = 0;        /* reserved */
994 }
995 
996 /*
997  * Michael_mic for defragmentation
998  */
999 int
1000 ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
1001 		 const uint8_t *key,
1002 		 qdf_nbuf_t wbuf,
1003 		 uint16_t off, uint16_t data_len, uint8_t mic[])
1004 {
1005 	uint8_t hdr[16] = { 0, };
1006 	uint32_t l, r;
1007 	const uint8_t *data;
1008 	uint32_t space;
1009 	void *rx_desc_old_position = NULL;
1010 	void *ind_old_position = NULL;
1011 	int rx_desc_len = 0;
1012 	htt_pdev_handle htt_pdev = pdev->htt_pdev;
1013 
1014 	ol_rx_frag_desc_adjust(pdev,
1015 			       wbuf,
1016 			       &rx_desc_old_position,
1017 			       &ind_old_position, &rx_desc_len);
1018 
1019 	ol_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) +
1020 						       rx_desc_len), hdr);
1021 	l = get_le32(key);
1022 	r = get_le32(key + 4);
1023 
1024 	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
1025 	l ^= get_le32(hdr);
1026 	michael_block(l, r);
1027 	l ^= get_le32(&hdr[4]);
1028 	michael_block(l, r);
1029 	l ^= get_le32(&hdr[8]);
1030 	michael_block(l, r);
1031 	l ^= get_le32(&hdr[12]);
1032 	michael_block(l, r);
1033 
1034 	/* first buffer has special handling */
1035 	data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len + off;
1036 	space = ol_rx_defrag_len(wbuf) - rx_desc_len - off;
1037 	for (;; ) {
1038 		if (space > data_len)
1039 			space = data_len;
1040 
1041 		/* collect 32-bit blocks from current buffer */
1042 		while (space >= sizeof(uint32_t)) {
1043 			l ^= get_le32(data);
1044 			michael_block(l, r);
1045 			data += sizeof(uint32_t);
1046 			space -= sizeof(uint32_t);
1047 			data_len -= sizeof(uint32_t);
1048 		}
1049 		if (data_len < sizeof(uint32_t))
1050 			break;
1051 
1052 		wbuf = qdf_nbuf_next(wbuf);
1053 		if (wbuf == NULL)
1054 			return OL_RX_DEFRAG_ERR;
1055 
1056 		rx_desc_len = ol_rx_get_desc_len(htt_pdev, wbuf,
1057 						 &rx_desc_old_position);
1058 
1059 		if (space != 0) {
1060 			const uint8_t *data_next;
1061 			/*
1062 			 * Block straddles buffers, split references.
1063 			 */
1064 			data_next =
1065 				(uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
1066 			if ((ol_rx_defrag_len(wbuf) - rx_desc_len) <
1067 			    sizeof(uint32_t) - space) {
1068 				return OL_RX_DEFRAG_ERR;
1069 			}
1070 			switch (space) {
1071 			case 1:
1072 				l ^= get_le32_split(data[0], data_next[0],
1073 						    data_next[1], data_next[2]);
1074 				data = data_next + 3;
1075 				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
1076 					- 3;
1077 				break;
1078 			case 2:
1079 				l ^= get_le32_split(data[0], data[1],
1080 						    data_next[0], data_next[1]);
1081 				data = data_next + 2;
1082 				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
1083 					- 2;
1084 				break;
1085 			case 3:
1086 				l ^= get_le32_split(data[0], data[1], data[2],
1087 						    data_next[0]);
1088 				data = data_next + 1;
1089 				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
1090 					- 1;
1091 				break;
1092 			}
1093 			michael_block(l, r);
1094 			data_len -= sizeof(uint32_t);
1095 		} else {
1096 			/*
1097 			 * Setup for next buffer.
1098 			 */
1099 			data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
1100 			space = ol_rx_defrag_len(wbuf) - rx_desc_len;
1101 		}
1102 	}
1103 	/* Last block and padding (0x5a, 4..7 x 0) */
1104 	switch (data_len) {
1105 	case 0:
1106 		l ^= get_le32_split(0x5a, 0, 0, 0);
1107 		break;
1108 	case 1:
1109 		l ^= get_le32_split(data[0], 0x5a, 0, 0);
1110 		break;
1111 	case 2:
1112 		l ^= get_le32_split(data[0], data[1], 0x5a, 0);
1113 		break;
1114 	case 3:
1115 		l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
1116 		break;
1117 	}
1118 	michael_block(l, r);
1119 	michael_block(l, r);
1120 	put_le32(mic, l);
1121 	put_le32(mic + 4, r);
1122 
1123 	return OL_RX_DEFRAG_OK;
1124 }
1125 
1126 /*
1127  * Calculate headersize
1128  */
1129 uint16_t ol_rx_frag_hdrsize(const void *data)
1130 {
1131 	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
1132 	uint16_t size = sizeof(struct ieee80211_frame);
1133 
1134 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
1135 		size += IEEE80211_ADDR_LEN;
1136 
1137 	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
1138 		size += sizeof(uint16_t);
1139 		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
1140 			size += sizeof(struct ieee80211_htc);
1141 	}
1142 	return size;
1143 }
1144 
1145 /*
1146  * Recombine and decap fragments
1147  */
1148 qdf_nbuf_t
1149 ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
1150 			     qdf_nbuf_t frag_list, uint16_t hdrsize)
1151 {
1152 	qdf_nbuf_t tmp;
1153 	qdf_nbuf_t msdu = frag_list;
1154 	qdf_nbuf_t rx_nbuf = frag_list;
1155 	struct ieee80211_frame *wh;
1156 
1157 	msdu = qdf_nbuf_next(msdu);
1158 	qdf_nbuf_set_next(rx_nbuf, NULL);
1159 	while (msdu) {
1160 		htt_rx_msdu_desc_free(htt_pdev, msdu);
1161 		tmp = qdf_nbuf_next(msdu);
1162 		qdf_nbuf_set_next(msdu, NULL);
1163 		ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize);
1164 		if (!ol_rx_defrag_concat(rx_nbuf, msdu)) {
1165 			ol_rx_frames_free(htt_pdev, tmp);
1166 			htt_rx_desc_frame_free(htt_pdev, rx_nbuf);
1167 			qdf_nbuf_free(msdu);
1168 			/* msdu rx desc already freed above */
1169 			return NULL;
1170 		}
1171 		msdu = tmp;
1172 	}
1173 	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
1174 							      rx_nbuf);
1175 	wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
1176 	*(uint16_t *) wh->i_seq &= ~IEEE80211_SEQ_FRAG_MASK;
1177 
1178 	return rx_nbuf;
1179 }
1180 
1181 void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu)
1182 {
1183 	struct ieee80211_frame wh;
1184 	uint32_t hdrsize;
1185 	struct llc_snap_hdr_t llchdr;
1186 	struct ethernet_hdr_t *eth_hdr;
1187 	void *rx_desc_old_position = NULL;
1188 	void *ind_old_position = NULL;
1189 	int rx_desc_len = 0;
1190 	struct ieee80211_frame *wh_ptr;
1191 
1192 	ol_rx_frag_desc_adjust(pdev,
1193 			       msdu,
1194 			       &rx_desc_old_position,
1195 			       &ind_old_position, &rx_desc_len);
1196 
1197 	wh_ptr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu) + rx_desc_len);
1198 	qdf_mem_copy(&wh, wh_ptr, sizeof(wh));
1199 	hdrsize = sizeof(struct ieee80211_frame);
1200 	qdf_mem_copy(&llchdr, ((uint8_t *) (qdf_nbuf_data(msdu) +
1201 					    rx_desc_len)) + hdrsize,
1202 		     sizeof(struct llc_snap_hdr_t));
1203 
1204 	/*
1205 	 * Now move the data pointer to the beginning of the mac header :
1206 	 * new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
1207 	 */
1208 	qdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
1209 				  sizeof(struct llc_snap_hdr_t) -
1210 				  sizeof(struct ethernet_hdr_t)));
1211 	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(msdu));
1212 	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
1213 	case IEEE80211_FC1_DIR_NODS:
1214 		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
1215 			     IEEE80211_ADDR_LEN);
1216 		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
1217 		break;
1218 	case IEEE80211_FC1_DIR_TODS:
1219 		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
1220 			     IEEE80211_ADDR_LEN);
1221 		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
1222 		break;
1223 	case IEEE80211_FC1_DIR_FROMDS:
1224 		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
1225 			     IEEE80211_ADDR_LEN);
1226 		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr3, IEEE80211_ADDR_LEN);
1227 		break;
1228 	case IEEE80211_FC1_DIR_DSTODS:
1229 		break;
1230 	}
1231 
1232 	qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
1233 		     sizeof(llchdr.ethertype));
1234 
1235 	ol_rx_defrag_push_rx_desc(msdu, rx_desc_old_position,
1236 					  ind_old_position, rx_desc_len);
1237 }
1238 
1239 /*
1240  * Handling QOS for defragmentation
1241  */
1242 void
1243 ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
1244 		       qdf_nbuf_t nbuf, uint16_t hdrlen)
1245 {
1246 	struct ieee80211_frame *wh;
1247 	uint16_t qoslen;
1248 	void *rx_desc_old_position = NULL;
1249 	void *ind_old_position = NULL;
1250 	int rx_desc_len = 0;
1251 
1252 	ol_rx_frag_desc_adjust(pdev,
1253 			       nbuf,
1254 			       &rx_desc_old_position,
1255 			       &ind_old_position, &rx_desc_len);
1256 
1257 	wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) + rx_desc_len);
1258 	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
1259 		qoslen = sizeof(struct ieee80211_qoscntl);
1260 		/* Qos frame with Order bit set indicates a HTC frame */
1261 		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
1262 			qoslen += sizeof(struct ieee80211_htc);
1263 
1264 		/* remove QoS filed from header */
1265 		hdrlen -= qoslen;
1266 		qdf_mem_move((uint8_t *) wh + qoslen, wh, hdrlen);
1267 		wh = (struct ieee80211_frame *)qdf_nbuf_pull_head(nbuf,
1268 								  rx_desc_len +
1269 								  qoslen);
1270 		/* clear QoS bit */
1271 		/*
1272 		 * KW# 6154 'qdf_nbuf_pull_head' in turn calls
1273 		 * __qdf_nbuf_pull_head,
1274 		 * which returns NULL if there is not sufficient data to pull.
1275 		 * It's guaranteed that qdf_nbuf_pull_head will succeed rather
1276 		 * than returning NULL, since the entire rx frame is already
1277 		 * present in the rx buffer.
1278 		 * However, to make it obvious to static analyzers that this
1279 		 * code is safe, add an explicit check that qdf_nbuf_pull_head
1280 		 * returns a non-NULL value.
1281 		 * Since this part of the code is not performance-critical,
1282 		 * adding this explicit check is okay.
1283 		 */
1284 		if (wh)
1285 			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;
1286 
1287 		ol_rx_defrag_push_rx_desc(nbuf, rx_desc_old_position,
1288 					  ind_old_position, rx_desc_len);
1289 
1290 	}
1291 }
1292