1 /* 2 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved. 3 * 4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 5 * 6 * 7 * Permission to use, copy, modify, and/or distribute this software for 8 * any purpose with or without fee is hereby granted, provided that the 9 * above copyright notice and this permission notice appear in all 10 * copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 19 * PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * This file was originally distributed by Qualcomm Atheros, Inc. 24 * under proprietary terms before Copyright ownership was assigned 25 * to the Linux Foundation. 26 */ 27 28 /*- 29 * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions 34 * are met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce the above copyright 38 * notice, this list of conditions and the following disclaimer in the 39 * documentation and/or other materials provided with the distribution. 40 * 41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 42 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 43 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <ol_htt_api.h>
#include <ol_txrx_api.h>
#include <ol_txrx_htt_api.h>
#include <ol_htt_rx_api.h>
#include <ol_rx_reorder.h>
#include <ol_rx_pn.h>
#include <ol_rx_fwd.h>
#include <ol_rx.h>
#include <ol_txrx_internal.h>
#include <ol_ctrl_txrx_api.h>
#include <ol_txrx_peer_find.h>
#include <qdf_nbuf.h>
#include <qdf_util.h>
#include <athdefs.h>
#include <qdf_mem.h>
#include <ol_rx_defrag.h>
#include <enet.h>
#include <qdf_time.h>           /* qdf_system_time */

/* True when the two 802.11 addresses are equal (qdf_mem_cmp has memcmp
 * semantics: zero means equal).
 */
#define DEFRAG_IEEE80211_ADDR_EQ(a1, a2) \
	(!qdf_mem_cmp(a1, a2, IEEE80211_ADDR_LEN))

#define DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	qdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)

/* True when the frame is a QoS data frame, i.e. carries a QoS control field */
#define DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh)->i_fc[0] & \
	  (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
	 (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))

#define DEFRAG_IEEE80211_QOS_GET_TID(_x) \
	((_x)->i_qos[0] & IEEE80211_QOS_TID)

/*
 * Per-cipher geometry used while stripping crypto headers/trailers:
 * name, header length, trailer length, MIC length.
 */
const struct ol_rx_defrag_cipher f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct ol_rx_defrag_cipher f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct ol_rx_defrag_cipher f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};

#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_rx_frag_get_mac_hdr() - retrieve mac header
 * @htt_pdev: pointer to htt pdev handle
 * @frag: rx fragment
 *
 * In HL the rx descriptor precedes the 802.11 header in the buffer,
 * so skip past it to reach the mac header.
 *
 * Return: pointer to ieee mac header of frag
 */
static struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
	htt_pdev_handle htt_pdev, qdf_nbuf_t frag)
{
	void *rx_desc;
	int rx_desc_len;

	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
	return (struct ieee80211_frame *)(qdf_nbuf_data(frag) + rx_desc_len);
}

/**
 * ol_rx_frag_pull_hdr() - point to payload of rx frag
 * @htt_pdev: pointer to htt pdev handle
 * @frag: rx fragment
 * @hdrsize: header size
 *
 * Advances the buffer past both the HL rx descriptor and the 802.11 header.
 *
 * Return: None
 */
static void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
	qdf_nbuf_t frag, int hdrsize)
{
	void *rx_desc;
	int rx_desc_len;

	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
	qdf_nbuf_pull_head(frag, rx_desc_len + hdrsize);
}

/**
 * ol_rx_frag_desc_adjust() - adjust rx frag descriptor position
 * @pdev: pointer to txrx handle
 * @msdu: msdu
 * @rx_desc_old_position: rx descriptor old position
 * @ind_old_position: index of old position
 * @rx_desc_len: rx descriptor length
 *
 * Return: None
 */
static void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
		       qdf_nbuf_t msdu,
		       void **rx_desc_old_position,
		       void **ind_old_position, int *rx_desc_len)
{
	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(pdev->htt_pdev,
							  msdu);
	/* the rx indication immediately precedes the rx descriptor */
	*ind_old_position = *rx_desc_old_position - HTT_RX_IND_HL_BYTES;
	*rx_desc_len = htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev,
						   *rx_desc_old_position);
}

/**
 * ol_rx_frag_restructure() - point to payload for HL
 * @pdev: physical device object
 * @msdu: the buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor
 * @ind_old_position: rx msdu indication
 * @f_type: pointing to rx defrag cipher
 * @rx_desc_len: length by which rx descriptor to move
 *
 * Return: None
 */
static void
ol_rx_frag_restructure(
	ol_txrx_pdev_handle pdev,
	qdf_nbuf_t msdu,
	void *rx_desc_old_position,
	void *ind_old_position,
	const struct ol_rx_defrag_cipher *f_type,
	int rx_desc_len)
{
	if ((ind_old_position == NULL) || (rx_desc_old_position == NULL)) {
		ol_txrx_err("ind_old_position,rx_desc_old_position is NULL\n");
		ASSERT(0);
		return;
	}
	/* move rx description */
	qdf_mem_move(rx_desc_old_position + f_type->ic_header,
		     rx_desc_old_position, rx_desc_len);
	/* move rx indication */
	qdf_mem_move(ind_old_position + f_type->ic_header, ind_old_position,
		     HTT_RX_IND_HL_BYTES);
}

/**
 * ol_rx_get_desc_len() - point to payload for HL
 * @htt_pdev: the HTT instance the rx data was received on
 * @wbuf: buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor
 *
 * Return: Return the HL rx desc size
 */
static
int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
			qdf_nbuf_t wbuf,
			void **rx_desc_old_position)
{
	int rx_desc_len = 0;
	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(htt_pdev, wbuf);
	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev,
						  *rx_desc_old_position);

	return rx_desc_len;
}

/**
 * ol_rx_defrag_push_rx_desc() - point to payload for HL
 * @nbuf: buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor
 * @ind_old_position: rx msdu indication
 * @rx_desc_len: HL rx desc size
 *
 * Re-prepends the saved rx descriptor and rx indication in front of the
 * (re-assembled) payload.
 *
 * Return: None
 */
static
void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
			       void *rx_desc_old_position,
			       void *ind_old_position,
			       int rx_desc_len)
{
	qdf_nbuf_push_head(nbuf, rx_desc_len);
	qdf_mem_move(
		qdf_nbuf_data(nbuf), rx_desc_old_position, rx_desc_len);
	qdf_mem_move(
		qdf_nbuf_data(nbuf) - HTT_RX_IND_HL_BYTES, ind_old_position,
		HTT_RX_IND_HL_BYTES);
}
#else

/* LL: the 802.11 header is at the start of the buffer, no rx desc to skip */
static inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
	htt_pdev_handle htt_pdev,
	qdf_nbuf_t frag)
{
	return
		(struct ieee80211_frame *) qdf_nbuf_data(frag);
}

/* LL: only the 802.11 header needs to be stripped */
static inline void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
	qdf_nbuf_t frag, int hdrsize)
{
	qdf_nbuf_pull_head(frag, hdrsize);
}

/* LL: no in-buffer rx descriptor; report zero length */
static inline void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
		       qdf_nbuf_t msdu,
		       void **rx_desc_old_position,
		       void **ind_old_position, int *rx_desc_len)
{
	*rx_desc_old_position = NULL;
	*ind_old_position = NULL;
	*rx_desc_len = 0;
}

/* LL: nothing to restructure */
static inline void
ol_rx_frag_restructure(
	ol_txrx_pdev_handle pdev,
	qdf_nbuf_t msdu,
	void *rx_desc_old_position,
	void *ind_old_position,
	const struct ol_rx_defrag_cipher *f_type,
	int rx_desc_len)
{
	/* no op */
}

/* LL: no in-buffer rx descriptor */
static inline
int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
			qdf_nbuf_t wbuf,
			void **rx_desc_old_position)
{
	return 0;
}

/* LL: nothing to push back */
static inline
void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
			       void *rx_desc_old_position,
			       void *ind_old_position,
			       int rx_desc_len)
{
	return;
}
#endif /* CONFIG_HL_SUPPORT */

#ifdef WDI_EVENT_ENABLE
/**
 * ol_rx_frag_send_pktlog_event() - forward the fragment to the pktlog module
 * @pdev: txrx physical device
 * @peer: peer the fragment came from (may be NULL)
 * @msdu: the fragment buffer
 * @pktlog_bit: pktlog flag extracted from the rx indication
 */
static inline
void ol_rx_frag_send_pktlog_event(struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, uint8_t pktlog_bit)
{
	ol_rx_send_pktlog_event(pdev, peer, msdu, pktlog_bit);
}

#else
/* pktlog disabled: no-op stub with the same signature */
static inline
void ol_rx_frag_send_pktlog_event(struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, uint8_t pktlog_bit)
{
}

#endif
/*
 * Process incoming fragments
 */
void
ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
			      qdf_nbuf_t rx_frag_ind_msg,
			      uint16_t peer_id, uint8_t tid)
{
	uint16_t seq_num;
	int seq_num_start, seq_num_end;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	qdf_nbuf_t head_msdu, tail_msdu;
	void *rx_mpdu_desc;
	uint8_t pktlog_bit;
	uint32_t msdu_count = 0;
	int ret;

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
	    htt_rx_ind_flush(pdev->htt_pdev, rx_frag_ind_msg) && peer) {
		htt_rx_frag_ind_flush_seq_num_range(pdev->htt_pdev,
						    rx_frag_ind_msg,
						    &seq_num_start,
						    &seq_num_end);
		/*
		 * Assuming flush indication for frags sent from target is
		 * separate from normal frames
		 */
		ol_rx_reorder_flush_frag(htt_pdev, peer, tid, seq_num_start);
	}
	pktlog_bit =
		(htt_rx_amsdu_rx_in_order_get_pktlog(rx_frag_ind_msg) == 0x01);
	ret = htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
			      &tail_msdu, &msdu_count);
	/* Return if msdu pop fails from rx hash table, as recovery
	 * is triggered and we exit gracefully.
	 */
	if (!ret)
		return;
	if (peer) {
		/* fragments are always delivered one MPDU at a time */
		qdf_assert(head_msdu == tail_msdu);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev, head_msdu);
		} else {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev,
							   rx_frag_ind_msg);
		}
		seq_num = htt_rx_mpdu_desc_seq_num(htt_pdev, rx_mpdu_desc);
		OL_RX_ERR_STATISTICS_1(pdev, peer->vdev, peer, rx_mpdu_desc,
				       OL_RX_ERR_NONE_FRAG);
		ol_rx_frag_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		ol_rx_reorder_store_frag(pdev, peer, tid, seq_num, head_msdu);
	} else {
		/* invalid frame - discard it */
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu);
		else
			htt_rx_mpdu_desc_list_next(htt_pdev, rx_frag_ind_msg);

		ol_rx_frag_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
	}
	/* request HTT to provide new rx MSDU buffers for the target to fill. */
	htt_rx_msdu_buff_replenish(htt_pdev);
}

/*
 * Flushing fragments: drop any partially-reassembled MPDU held for this
 * peer/tid.  NOTE(review): seq_num is masked by the reorder window, and
 * fragments are always stored at index 0 (see ol_rx_reorder_store_frag).
 */
void
ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned int tid, int seq_num)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	int seq;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];
	if (rx_reorder_array_elem->head) {
		ol_rx_frames_free(htt_pdev, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
	}
}

/*
 * Reorder and store fragments
 */
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned int tid, uint16_t seq_num, qdf_nbuf_t frag)
{
	struct ieee80211_frame *fmac_hdr, *mac_hdr;
	uint8_t fragno, more_frag, all_frag_present = 0;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	uint16_t frxseq, rxseq, seq;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	/* fragments are expected to land at reorder index 0 */
	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	qdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	rxseq = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
	fragno = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;

	/* fast path: a lone, unfragmented frame with nothing pending */
	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		rx_reorder_array_elem->head = frag;
		rx_reorder_array_elem->tail = frag;
		qdf_nbuf_set_next(frag, NULL);
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		return;
	}
	if (rx_reorder_array_elem->head) {
		/*
		 * A partial MPDU is pending; if the new fragment belongs to
		 * a different MPDU (different seq num or addresses), drop
		 * the stale partial reassembly.
		 */
		fmac_hdr = (struct ieee80211_frame *)
			ol_rx_frag_get_mac_hdr(htt_pdev,
					       rx_reorder_array_elem->head);
		frxseq = qdf_le16_to_cpu(*(uint16_t *) fmac_hdr->i_seq) >>
			IEEE80211_SEQ_SEQ_SHIFT;
		if (rxseq != frxseq
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr1,
						 fmac_hdr->i_addr1)
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr2,
						 fmac_hdr->i_addr2)) {
			ol_rx_frames_free(htt_pdev,
					  rx_reorder_array_elem->head);
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
			ol_txrx_err("\n ol_rx_reorder_store:%s mismatch\n",
				    (rxseq == frxseq)
				    ? "address"
				    : "seq number");
		}
	}

	ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head,
			      &rx_reorder_array_elem->tail, frag,
			      &all_frag_present);

	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_remove(peer, tid);

	if (all_frag_present) {
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		peer->tids_rx_reorder[tid].defrag_timeout_ms = 0;
		peer->tids_last_seq[tid] = seq_num;
	} else if (pdev->rx.flags.defrag_timeout_check) {
		/* arm the defrag timeout for the still-incomplete MPDU */
		uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

		peer->tids_rx_reorder[tid].defrag_timeout_ms =
			now_ms + pdev->rx.defrag.timeout_ms;
		ol_rx_defrag_waitlist_add(peer, tid);
	}
}
/*
 * Insert and store fragments
 */
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
		      qdf_nbuf_t *head_addr,
		      qdf_nbuf_t *tail_addr,
		      qdf_nbuf_t frag, uint8_t *all_frag_present)
{
	qdf_nbuf_t next, prev = NULL, cur = *head_addr;
	struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
	uint8_t fragno, cur_fragno, lfragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;

	qdf_assert(frag);

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	fragno = qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;

	/* empty list: the new fragment becomes head and tail */
	if (!(*head_addr)) {
		*head_addr = frag;
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		return;
	}
	/* For efficiency, compare with tail first */
	lmac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, *tail_addr);
	lfragno = qdf_le16_to_cpu(*(uint16_t *) lmac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	if (fragno > lfragno) {
		/* beyond the tail: append */
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
	} else {
		/*
		 * Walk forward to the first entry whose fragment number is
		 * >= the new one.  Termination is guaranteed because
		 * fragno <= lfragno (the tail's number) here.
		 */
		do {
			cmac_hdr = (struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, cur);
			cur_fragno =
				qdf_le16_to_cpu(*(uint16_t *) cmac_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			prev = cur;
			cur = qdf_nbuf_next(cur);
		} while (fragno > cur_fragno);

		if (fragno == cur_fragno) {
			/* duplicate fragment: drop it */
			htt_rx_desc_frame_free(htt_pdev, frag);
			*all_frag_present = 0;
			return;
		}

		/* splice the new fragment between prev and cur */
		qdf_nbuf_set_next(prev, frag);
		qdf_nbuf_set_next(frag, cur);
	}
	/*
	 * Completeness check: only possible once the last fragment (the one
	 * without the more-frag bit) has been stored at the tail; then the
	 * fragment numbers must run 0..N without gaps.
	 */
	next = qdf_nbuf_next(*head_addr);
	lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
								    *tail_addr);
	last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
	if (!last_morefrag) {
		do {
			next_hdr =
				(struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, next);
			next_fragno =
				qdf_le16_to_cpu(*(uint16_t *) next_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			count++;
			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return;
		}
	}
	*all_frag_present = 0;
}

/*
 * add tid to pending fragment wait list
 */
void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned int tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	TAILQ_INSERT_TAIL(&pdev->rx.defrag.waitlist, rx_reorder,
			  defrag_waitlist_elem);
}
rx_reorder, 580 defrag_waitlist_elem); 581 } 582 583 /* 584 * remove tid from pending fragment wait list 585 */ 586 void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned int tid) 587 { 588 struct ol_txrx_pdev_t *pdev = peer->vdev->pdev; 589 struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid]; 590 591 if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) { 592 593 TAILQ_REMOVE(&pdev->rx.defrag.waitlist, rx_reorder, 594 defrag_waitlist_elem); 595 596 rx_reorder->defrag_waitlist_elem.tqe_next = NULL; 597 rx_reorder->defrag_waitlist_elem.tqe_prev = NULL; 598 } else if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) { 599 ol_txrx_alert("waitlist->tqe_prv = NULL\n"); 600 QDF_ASSERT(0); 601 rx_reorder->defrag_waitlist_elem.tqe_next = NULL; 602 } 603 } 604 605 #ifndef container_of 606 #define container_of(ptr, type, member) \ 607 ((type *)((char *)(ptr) - (char *)(&((type *)0)->member))) 608 #endif 609 610 /* 611 * flush stale fragments from the waitlist 612 */ 613 void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev) 614 { 615 struct ol_rx_reorder_t *rx_reorder, *tmp; 616 uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()); 617 618 TAILQ_FOREACH_SAFE(rx_reorder, &pdev->rx.defrag.waitlist, 619 defrag_waitlist_elem, tmp) { 620 struct ol_txrx_peer_t *peer; 621 struct ol_rx_reorder_t *rx_reorder_base; 622 unsigned int tid; 623 624 if (rx_reorder->defrag_timeout_ms > now_ms) 625 break; 626 627 tid = rx_reorder->tid; 628 /* get index 0 of the rx_reorder array */ 629 rx_reorder_base = rx_reorder - tid; 630 peer = 631 container_of(rx_reorder_base, struct ol_txrx_peer_t, 632 tids_rx_reorder[0]); 633 634 ol_rx_defrag_waitlist_remove(peer, tid); 635 ol_rx_reorder_flush_frag(pdev->htt_pdev, peer, tid, 636 0 /* frags always stored at seq 0 */); 637 } 638 } 639 640 /* 641 * Handling security checking and processing fragments 642 */ 643 void 644 ol_rx_defrag(ol_txrx_pdev_handle pdev, 645 struct ol_txrx_peer_t *peer, unsigned int 
tid, 646 qdf_nbuf_t frag_list) 647 { 648 struct ol_txrx_vdev_t *vdev = NULL; 649 qdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list; 650 uint8_t index, tkip_demic = 0; 651 uint16_t hdr_space; 652 void *rx_desc; 653 struct ieee80211_frame *wh; 654 uint8_t key[DEFRAG_IEEE80211_KEY_LEN]; 655 htt_pdev_handle htt_pdev = pdev->htt_pdev; 656 657 vdev = peer->vdev; 658 659 /* bypass defrag for safe mode */ 660 if (vdev->safemode) { 661 if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) 662 ol_rx_in_order_deliver(vdev, peer, tid, frag_list); 663 else 664 ol_rx_deliver(vdev, peer, tid, frag_list); 665 return; 666 } 667 668 while (cur) { 669 tmp_next = qdf_nbuf_next(cur); 670 qdf_nbuf_set_next(cur, NULL); 671 if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) { 672 /* PN check failed,discard frags */ 673 if (prev) { 674 qdf_nbuf_set_next(prev, NULL); 675 ol_rx_frames_free(htt_pdev, frag_list); 676 } 677 ol_rx_frames_free(htt_pdev, tmp_next); 678 ol_txrx_err("ol_rx_defrag: PN Check failed\n"); 679 return; 680 } 681 /* remove FCS from each fragment */ 682 qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN); 683 prev = cur; 684 qdf_nbuf_set_next(cur, tmp_next); 685 cur = tmp_next; 686 } 687 cur = frag_list; 688 wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, cur); 689 hdr_space = ol_rx_frag_hdrsize(wh); 690 rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag_list); 691 qdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc)); 692 index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ? 
693 txrx_sec_mcast : txrx_sec_ucast; 694 695 switch (peer->security[index].sec_type) { 696 case htt_sec_type_tkip: 697 tkip_demic = 1; 698 /* fall-through to rest of tkip ops */ 699 case htt_sec_type_tkip_nomic: 700 while (cur) { 701 tmp_next = qdf_nbuf_next(cur); 702 if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) { 703 /* TKIP decap failed, discard frags */ 704 ol_rx_frames_free(htt_pdev, frag_list); 705 ol_txrx_err("\n ol_rx_defrag: TKIP decap failed\n"); 706 return; 707 } 708 cur = tmp_next; 709 } 710 break; 711 712 case htt_sec_type_aes_ccmp: 713 while (cur) { 714 tmp_next = qdf_nbuf_next(cur); 715 if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) { 716 /* CCMP demic failed, discard frags */ 717 ol_rx_frames_free(htt_pdev, frag_list); 718 ol_txrx_err("\n ol_rx_defrag: CCMP demic failed\n"); 719 return; 720 } 721 if (!ol_rx_frag_ccmp_decap(pdev, cur, hdr_space)) { 722 /* CCMP decap failed, discard frags */ 723 ol_rx_frames_free(htt_pdev, frag_list); 724 ol_txrx_err("\n ol_rx_defrag: CCMP decap failed\n"); 725 return; 726 } 727 cur = tmp_next; 728 } 729 break; 730 731 case htt_sec_type_wep40: 732 case htt_sec_type_wep104: 733 case htt_sec_type_wep128: 734 while (cur) { 735 tmp_next = qdf_nbuf_next(cur); 736 if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) { 737 /* wep decap failed, discard frags */ 738 ol_rx_frames_free(htt_pdev, frag_list); 739 ol_txrx_err("\n ol_rx_defrag: wep decap failed\n"); 740 return; 741 } 742 cur = tmp_next; 743 } 744 break; 745 746 default: 747 break; 748 } 749 750 msdu = ol_rx_defrag_decap_recombine(htt_pdev, frag_list, hdr_space); 751 if (!msdu) 752 return; 753 754 if (tkip_demic) { 755 qdf_mem_copy(key, 756 peer->security[index].michael_key, 757 sizeof(peer->security[index].michael_key)); 758 if (!ol_rx_frag_tkip_demic(pdev, key, msdu, hdr_space)) { 759 htt_rx_desc_frame_free(htt_pdev, msdu); 760 ol_rx_err(pdev->ctrl_pdev, 761 vdev->vdev_id, peer->mac_addr.raw, tid, 0, 762 OL_RX_DEFRAG_ERR, msdu, NULL, 0); 763 ol_txrx_err("\n 
ol_rx_defrag: TKIP demic failed\n"); 764 return; 765 } 766 } 767 wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, msdu); 768 if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) 769 ol_rx_defrag_qos_decap(pdev, msdu, hdr_space); 770 if (ol_cfg_frame_type(pdev->ctrl_pdev) == wlan_frm_fmt_802_3) 771 ol_rx_defrag_nwifi_to_8023(pdev, msdu); 772 773 ol_rx_fwd_check(vdev, peer, tid, msdu); 774 } 775 776 /* 777 * Handling TKIP processing for defragmentation 778 */ 779 int 780 ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev, 781 qdf_nbuf_t msdu, uint16_t hdrlen) 782 { 783 uint8_t *ivp, *origHdr; 784 785 void *rx_desc_old_position = NULL; 786 void *ind_old_position = NULL; 787 int rx_desc_len = 0; 788 789 ol_rx_frag_desc_adjust(pdev, 790 msdu, 791 &rx_desc_old_position, 792 &ind_old_position, &rx_desc_len); 793 /* Header should have extended IV */ 794 origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); 795 796 ivp = origHdr + hdrlen; 797 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 798 return OL_RX_DEFRAG_ERR; 799 800 qdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen); 801 ol_rx_frag_restructure( 802 pdev, 803 msdu, 804 rx_desc_old_position, 805 ind_old_position, 806 &f_tkip, 807 rx_desc_len); 808 qdf_nbuf_pull_head(msdu, f_tkip.ic_header); 809 qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer); 810 return OL_RX_DEFRAG_OK; 811 } 812 813 /* 814 * Handling WEP processing for defragmentation 815 */ 816 int 817 ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t hdrlen) 818 { 819 uint8_t *origHdr; 820 void *rx_desc_old_position = NULL; 821 void *ind_old_position = NULL; 822 int rx_desc_len = 0; 823 824 ol_rx_frag_desc_adjust(pdev, 825 msdu, 826 &rx_desc_old_position, 827 &ind_old_position, &rx_desc_len); 828 origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); 829 qdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen); 830 ol_rx_frag_restructure( 831 pdev, 832 msdu, 833 rx_desc_old_position, 834 ind_old_position, 835 &f_wep, 836 
rx_desc_len); 837 qdf_nbuf_pull_head(msdu, f_wep.ic_header); 838 qdf_nbuf_trim_tail(msdu, f_wep.ic_trailer); 839 return OL_RX_DEFRAG_OK; 840 } 841 842 /* 843 * Verify and strip MIC from the frame. 844 */ 845 int 846 ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key, 847 qdf_nbuf_t msdu, uint16_t hdrlen) 848 { 849 int status; 850 uint32_t pktlen; 851 uint8_t mic[IEEE80211_WEP_MICLEN]; 852 uint8_t mic0[IEEE80211_WEP_MICLEN]; 853 void *rx_desc_old_position = NULL; 854 void *ind_old_position = NULL; 855 int rx_desc_len = 0; 856 857 ol_rx_frag_desc_adjust(pdev, 858 msdu, 859 &rx_desc_old_position, 860 &ind_old_position, &rx_desc_len); 861 862 pktlen = ol_rx_defrag_len(msdu) - rx_desc_len; 863 864 status = ol_rx_defrag_mic(pdev, key, msdu, hdrlen, 865 pktlen - (hdrlen + f_tkip.ic_miclen), mic); 866 if (status != OL_RX_DEFRAG_OK) 867 return OL_RX_DEFRAG_ERR; 868 869 ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len, 870 f_tkip.ic_miclen, (caddr_t) mic0); 871 if (!qdf_mem_cmp(mic, mic0, f_tkip.ic_miclen)) 872 return OL_RX_DEFRAG_ERR; 873 874 qdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen); 875 return OL_RX_DEFRAG_OK; 876 } 877 878 /* 879 * Handling CCMP processing for defragmentation 880 */ 881 int 882 ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev, 883 qdf_nbuf_t nbuf, uint16_t hdrlen) 884 { 885 uint8_t *ivp, *origHdr; 886 void *rx_desc_old_position = NULL; 887 void *ind_old_position = NULL; 888 int rx_desc_len = 0; 889 890 ol_rx_frag_desc_adjust(pdev, 891 nbuf, 892 &rx_desc_old_position, 893 &ind_old_position, &rx_desc_len); 894 895 origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len); 896 ivp = origHdr + hdrlen; 897 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 898 return OL_RX_DEFRAG_ERR; 899 900 qdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen); 901 ol_rx_frag_restructure( 902 pdev, 903 nbuf, 904 rx_desc_old_position, 905 ind_old_position, 906 &f_ccmp, 907 rx_desc_len); 908 qdf_nbuf_pull_head(nbuf, f_ccmp.ic_header); 
/*
 * Verify and strip MIC from the frame.
 */
int
ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
		      qdf_nbuf_t wbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	origHdr = (uint8_t *) (qdf_nbuf_data(wbuf) + rx_desc_len);

	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	/* the CCMP MIC is verified by hardware; only strip the trailer here */
	qdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);

	return OL_RX_DEFRAG_OK;
}

/*
 * Craft pseudo header used to calculate the MIC.
 * Layout: DA (6) | SA (6) | priority (1) | 3 reserved zero bytes.
 */
void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	/* DA/SA positions depend on the ToDS/FromDS direction bits */
	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
					   wh->i_addr4);
		break;
	}
	/*
	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but
	 * it could also be set for deauth, disassoc, action, etc. for
	 * a mgt type frame. It comes into picture for MFP.
	 */
	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
		const struct ieee80211_qosframe *qwh =
			(const struct ieee80211_qosframe *)wh;
		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
	} else {
		hdr[12] = 0;
	}
	hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
}

/*
 * Michael_mic for defragmentation
 *
 * Computes the TKIP Michael MIC over the pseudo header plus @data_len
 * bytes of payload starting @off bytes into @wbuf, following the nbuf
 * chain (blocks may straddle buffer boundaries).  The 8-byte result is
 * written to @mic.
 */
int
ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
		 const uint8_t *key,
		 qdf_nbuf_t wbuf,
		 uint16_t off, uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	ol_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) +
						       rx_desc_len), hdr);
	/* (l, r) is the 64-bit Michael key */
	l = get_le32(key);
	r = get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	l ^= get_le32(hdr);
	michael_block(l, r);
	l ^= get_le32(&hdr[4]);
	michael_block(l, r);
	l ^= get_le32(&hdr[8]);
	michael_block(l, r);
	l ^= get_le32(&hdr[12]);
	michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len + off;
	space = ol_rx_defrag_len(wbuf) - rx_desc_len - off;
	for (;; ) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= get_le32(data);
			michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = qdf_nbuf_next(wbuf);
		if (wbuf == NULL)
			return OL_RX_DEFRAG_ERR;

		rx_desc_len = ol_rx_get_desc_len(htt_pdev, wbuf,
						 &rx_desc_old_position);

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
			if ((ol_rx_defrag_len(wbuf) - rx_desc_len) <
			    sizeof(uint32_t) - space) {
				return OL_RX_DEFRAG_ERR;
			}
			/* assemble one block from the tail of the previous
			 * buffer and the head of this one
			 */
			switch (space) {
			case 1:
				l ^= get_le32_split(data[0], data_next[0],
						    data_next[1], data_next[2]);
				data = data_next + 3;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 3;
				break;
			case 2:
				l ^= get_le32_split(data[0], data[1],
						    data_next[0], data_next[1]);
				data = data_next + 2;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 2;
				break;
			case 3:
				l ^= get_le32_split(data[0], data[1], data[2],
						    data_next[0]);
				data = data_next + 1;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 1;
				break;
			}
			michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
			space = ol_rx_defrag_len(wbuf) - rx_desc_len;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	michael_block(l, r);
	michael_block(l, r);
	put_le32(mic, l);
	put_le32(mic + 4, r);

	return OL_RX_DEFRAG_OK;
}

/*
 * Calculate headersize
 */
uint16_t ol_rx_frag_hdrsize(const void *data)
{
	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
	uint16_t size = sizeof(struct ieee80211_frame);

	/* 4-address (WDS) frames carry an extra address field */
	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
		size += IEEE80211_ADDR_LEN;

	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		/* QoS control field */
		size += sizeof(uint16_t);
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			size += sizeof(struct ieee80211_htc);
	}
	return size;
}

/*
 * Recombine and decap fragments
 */
qdf_nbuf_t
ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
			     qdf_nbuf_t frag_list, uint16_t hdrsize)
{
	qdf_nbuf_t tmp;
	qdf_nbuf_t msdu = frag_list;
	qdf_nbuf_t rx_nbuf = frag_list;
	struct ieee80211_frame *wh;

	/* keep the first fragment's header; strip it from the rest and
	 * concatenate their payloads onto the first buffer
	 */
	msdu = qdf_nbuf_next(msdu);
	qdf_nbuf_set_next(rx_nbuf, NULL);
	while (msdu) {
		htt_rx_msdu_desc_free(htt_pdev, msdu);
		tmp = qdf_nbuf_next(msdu);
		qdf_nbuf_set_next(msdu, NULL);
		ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize);
		if (!ol_rx_defrag_concat(rx_nbuf, msdu)) {
			ol_rx_frames_free(htt_pdev, tmp);
			htt_rx_desc_frame_free(htt_pdev, rx_nbuf);
			qdf_nbuf_free(msdu);
			/* msdu rx desc already freed above */
			return
NULL;
		}
		msdu = tmp;
	}
	/* single defragmented frame now: clear More-Frag and the fragment
	 * number so it looks like an ordinary unfragmented frame
	 */
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
							      rx_nbuf);
	wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
	*(uint16_t *) wh->i_seq &= ~IEEE80211_SEQ_FRAG_MASK;

	return rx_nbuf;
}

/*
 * ol_rx_defrag_nwifi_to_8023() - convert a native-wifi (802.11 + LLC/SNAP)
 * frame into an 802.3 Ethernet frame in place.
 * @pdev: txrx physical device handle
 * @msdu: netbuf holding rx descriptor + 802.11 header + LLC/SNAP + payload
 *
 * Saves the 802.11 header and LLC/SNAP header, pulls the buffer head so
 * the payload is preceded by exactly one Ethernet header, then fills in
 * DA/SA from the appropriate 802.11 address fields and the ethertype
 * from the SNAP header.
 */
void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu)
{
	struct ieee80211_frame wh;	/* local copy; buffer is shifted below */
	uint32_t hdrsize;
	struct llc_snap_hdr_t llchdr;
	struct ethernet_hdr_t *eth_hdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;
	struct ieee80211_frame *wh_ptr;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh_ptr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_copy(&wh, wh_ptr, sizeof(wh));
	/* NOTE(review): a fixed 3-address header size is assumed here, with
	 * no allowance for a QoS/HTC field or a 4th address — presumably
	 * those were already stripped / are not expected; confirm against
	 * the defrag callers.
	 */
	hdrsize = sizeof(struct ieee80211_frame);
	qdf_mem_copy(&llchdr, ((uint8_t *) (qdf_nbuf_data(msdu) +
					    rx_desc_len)) + hdrsize,
		     sizeof(struct llc_snap_hdr_t));

	/*
	 * Now move the data pointer to the beginning of the mac header :
	 * new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
	 */
	qdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));
	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(msdu));
	/* select DA/SA per the To-DS/From-DS bits (802.11 address mapping) */
	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_TODS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
			     IEEE80211_ADDR_LEN);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     IEEE80211_ADDR_LEN);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr3,
			     IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		/* NOTE(review): 4-address (WDS) frames copy no addresses,
		 * leaving whatever bytes were in the buffer — confirm
		 * DS-to-DS fragments cannot reach this path.
		 */
		break;
	}

	qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
		     sizeof(llchdr.ethertype));

	ol_rx_defrag_push_rx_desc(msdu, rx_desc_old_position,
				  ind_old_position, rx_desc_len);
}

/*
 * ol_rx_defrag_qos_decap() - strip the QoS control (and HT control, if
 * present) field from a QoS data frame's 802.11 header, in place.
 * @pdev: txrx physical device handle
 * @nbuf: netbuf holding rx descriptor + 802.11 header
 * @hdrlen: total 802.11 header length including the QoS field(s)
 *
 * Shifts the header forward over the QoS field(s), pulls the buffer
 * head accordingly, and clears the QoS subtype bit in the frame control.
 * No-op for non-QoS frames.
 */
void
ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
		       qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	struct ieee80211_frame *wh;
	uint16_t qoslen;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		qoslen = sizeof(struct ieee80211_qoscntl);
		/* Qos frame with Order bit set indicates a HTC frame */
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			qoslen += sizeof(struct ieee80211_htc);

		/* remove QoS field from header: slide the fixed part of the
		 * header forward by qoslen (regions overlap, hence mem_move)
		 */
		hdrlen -= qoslen;
		qdf_mem_move((uint8_t *) wh + qoslen, wh, hdrlen);
		wh = (struct ieee80211_frame *)qdf_nbuf_pull_head(nbuf,
								  rx_desc_len +
								  qoslen);
		/* clear QoS bit */
		/*
		 * KW# 6154 'qdf_nbuf_pull_head' in turn calls
		 * __qdf_nbuf_pull_head,
		 * which returns NULL if there is not sufficient data to pull.
		 * It's guaranteed that qdf_nbuf_pull_head will succeed rather
		 * than returning NULL, since the entire rx frame is already
		 * present in the rx buffer.
		 * However, to make it obvious to static analyzers that this
		 * code is safe, add an explicit check that qdf_nbuf_pull_head
		 * returns a non-NULL value.
		 * Since this part of the code is not performance-critical,
		 * adding this explicit check is okay.
		 */
		if (wh)
			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;

		ol_rx_defrag_push_rx_desc(nbuf, rx_desc_old_position,
					  ind_old_position, rx_desc_len);

	}
}