1 /* 2 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "dp_types.h" 21 #include "dp_rx.h" 22 #include "dp_peer.h" 23 #include "hal_api.h" 24 #include "qdf_trace.h" 25 #include "qdf_nbuf.h" 26 #include "dp_rx_defrag.h" 27 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 28 #include "dp_rx_defrag.h" 29 30 const struct dp_rx_defrag_cipher dp_f_ccmp = { 31 "AES-CCM", 32 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, 33 IEEE80211_WEP_MICLEN, 34 0, 35 }; 36 37 const struct dp_rx_defrag_cipher dp_f_tkip = { 38 "TKIP", 39 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, 40 IEEE80211_WEP_CRCLEN, 41 IEEE80211_WEP_MICLEN, 42 }; 43 44 const struct dp_rx_defrag_cipher dp_f_wep = { 45 "WEP", 46 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN, 47 IEEE80211_WEP_CRCLEN, 48 0, 49 }; 50 51 /* 52 * dp_rx_defrag_frames_free(): Free fragment chain 53 * @frames: Fragment chain 54 * 55 * Iterates through the fragment chain and frees them 56 * Returns: None 57 */ 58 static void dp_rx_defrag_frames_free(qdf_nbuf_t frames) 59 { 60 qdf_nbuf_t next, frag = frames; 61 62 while (frag) { 63 next = qdf_nbuf_next(frag); 64 qdf_nbuf_free(frag); 65 frag = next; 66 } 67 } 68 69 /* 70 * dp_rx_clear_saved_desc_info(): Clears descriptor info 71 * @peer: Pointer to the peer data structure 72 * @tid: Transmit ID (TID) 73 * 74 * Saves MPDU descriptor info and MSDU link pointer from REO 75 * ring descriptor. The cache is created per peer, per TID 76 * 77 * Returns: None 78 */ 79 static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid) 80 { 81 if (peer->rx_tid[tid].dst_ring_desc) 82 qdf_mem_free(peer->rx_tid[tid].dst_ring_desc); 83 84 peer->rx_tid[tid].dst_ring_desc = NULL; 85 } 86 87 static void dp_rx_return_head_frag_desc(struct dp_peer *peer, 88 unsigned int tid) 89 { 90 struct dp_soc *soc; 91 struct dp_pdev *pdev; 92 struct dp_srng *dp_rxdma_srng; 93 struct rx_desc_pool *rx_desc_pool; 94 union dp_rx_desc_list_elem_t *head = NULL; 95 union dp_rx_desc_list_elem_t *tail = NULL; 96 97 if (peer->rx_tid[tid].head_frag_desc) { 98 pdev = peer->vdev->pdev; 99 soc = pdev->soc; 100 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 101 rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id]; 102 103 dp_rx_add_to_free_desc_list(&head, &tail, 104 peer->rx_tid[tid].head_frag_desc); 105 dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, 106 1, &head, &tail); 107 } 108 } 109 110 /* 111 * dp_rx_reorder_flush_frag(): Flush the frag list 112 * @peer: Pointer to the peer data structure 113 * @tid: Transmit ID (TID) 114 * 115 * Flush the per-TID frag list 116 * 117 * Returns: None 118 */ 119 void dp_rx_reorder_flush_frag(struct dp_peer *peer, 120 unsigned int tid) 121 { 122 struct dp_soc *soc; 123 124 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, 125 FL("Flushing TID %d"), tid); 126 127 if (!peer) { 128 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 129 "%s: NULL peer", __func__); 130 return; 131 } 132 133 soc = peer->vdev->pdev->soc; 134 135 if (peer->rx_tid[tid].dst_ring_desc) { 136 if (dp_rx_link_desc_return(soc, 137 peer->rx_tid[tid].dst_ring_desc, 138 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 139 QDF_STATUS_SUCCESS) 140 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 141 "%s: Failed to return link desc", 142 __func__); 143 } 144 145 dp_rx_return_head_frag_desc(peer, tid); 146 dp_rx_defrag_cleanup(peer, tid); 147 } 148 149 /* 150 * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list 151 * @soc: DP SOC 152 * 153 * Flush fragments of all waitlisted TID's 154 * 155 * Returns: None 156 */ 157 void dp_rx_defrag_waitlist_flush(struct dp_soc *soc) 158 { 159 struct dp_rx_tid *rx_reorder; 160 struct dp_rx_tid *tmp; 161 uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()); 162 TAILQ_HEAD(, dp_rx_tid) temp_list; 163 164 TAILQ_INIT(&temp_list); 165 166 qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); 167 TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist, 168 defrag_waitlist_elem, tmp) { 169 unsigned int tid; 170 171 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 172 FL("Current time %u"), now_ms); 173 174 if (rx_reorder->defrag_timeout_ms > now_ms) 175 break; 176 177 tid = rx_reorder->tid; 178 if (tid >= DP_MAX_TIDS) { 179 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 180 "%s: TID out of bounds: %d", __func__, tid); 181 qdf_assert(0); 182 continue; 183 } 184 185 TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder, 186 defrag_waitlist_elem); 187 188 /* Move to temp list and clean-up later */ 189 TAILQ_INSERT_TAIL(&temp_list, rx_reorder, 190 defrag_waitlist_elem); 191 } 192 qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); 193 194 TAILQ_FOREACH_SAFE(rx_reorder, &temp_list, 195 defrag_waitlist_elem, tmp) { 196 struct dp_peer *peer; 197 198 /* get address of current peer */ 199 peer = 200 container_of(rx_reorder, struct dp_peer, 201 rx_tid[rx_reorder->tid]); 202 dp_rx_reorder_flush_frag(peer, rx_reorder->tid); 203 } 204 } 205 206 /* 207 * dp_rx_defrag_waitlist_add(): Update per-PDEV defrag wait list 208 * @peer: Pointer to the peer data structure 209 * @tid: Transmit ID (TID) 210 * 211 * Appends per-tid fragments to global fragment wait list 212 * 213 * Returns: None 214 */ 215 static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid) 216 { 217 struct dp_soc *psoc = peer->vdev->pdev->soc; 218 struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid]; 219 220 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, 221 FL("Adding TID %u to waitlist for peer %pK"), 222 tid, peer); 223 224 /* TODO: use LIST macros instead of TAIL macros */ 225 qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock); 226 TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder, 227 defrag_waitlist_elem); 228 qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock); 229 } 230 231 /* 232 * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist 233 * @peer: Pointer to the peer data structure 234 * @tid: Transmit ID (TID) 235 * 236 * Remove fragments from waitlist 237 * 238 * Returns: None 239 */ 240 void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid) 241 { 242 struct dp_pdev *pdev = peer->vdev->pdev; 243 struct dp_soc *soc = pdev->soc; 244 struct dp_rx_tid *rx_reorder; 245 246 if (tid > DP_MAX_TIDS) { 247 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 248 "TID out of bounds: %d", tid); 249 qdf_assert(0); 250 return; 251 } 252 253 qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); 254 TAILQ_FOREACH(rx_reorder, &soc->rx.defrag.waitlist, 255 defrag_waitlist_elem) { 256 struct dp_peer *peer_on_waitlist; 257 258 /* get address of current peer */ 259 peer_on_waitlist = 260 container_of(rx_reorder, struct dp_peer, 261 rx_tid[rx_reorder->tid]); 262 263 /* Ensure it is TID for same peer */ 264 if (peer_on_waitlist == peer && rx_reorder->tid == tid) 265 TAILQ_REMOVE(&soc->rx.defrag.waitlist, 266 rx_reorder, defrag_waitlist_elem); 267 } 268 qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); 269 } 270 271 /* 272 * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list 273 * @peer: Pointer to the peer data structure 274 * @tid: Transmit ID (TID) 275 * @head_addr: Pointer to head list 276 * @tail_addr: Pointer to tail list 277 * @frag: Incoming fragment 278 * @all_frag_present: Flag to indicate whether all fragments are received 279 * 280 * Build a per-tid, per-sequence fragment list. 281 * 282 * Returns: Success, if inserted 283 */ 284 static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid, 285 qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag, 286 uint8_t *all_frag_present) 287 { 288 qdf_nbuf_t next; 289 qdf_nbuf_t prev = NULL; 290 qdf_nbuf_t cur; 291 uint16_t head_fragno, cur_fragno, next_fragno; 292 uint8_t last_morefrag = 1, count = 0; 293 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 294 uint8_t *rx_desc_info; 295 296 297 qdf_assert(frag); 298 qdf_assert(head_addr); 299 qdf_assert(tail_addr); 300 301 *all_frag_present = 0; 302 rx_desc_info = qdf_nbuf_data(frag); 303 cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 304 305 /* If this is the first fragment */ 306 if (!(*head_addr)) { 307 *head_addr = *tail_addr = frag; 308 qdf_nbuf_set_next(*tail_addr, NULL); 309 rx_tid->curr_frag_num = cur_fragno; 310 311 goto insert_done; 312 } 313 314 /* In sequence fragment */ 315 if (cur_fragno > rx_tid->curr_frag_num) { 316 qdf_nbuf_set_next(*tail_addr, frag); 317 *tail_addr = frag; 318 qdf_nbuf_set_next(*tail_addr, NULL); 319 rx_tid->curr_frag_num = cur_fragno; 320 } else { 321 /* Out of sequence fragment */ 322 cur = *head_addr; 323 rx_desc_info = qdf_nbuf_data(cur); 324 head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 325 326 if (cur_fragno == head_fragno) { 327 qdf_nbuf_free(frag); 328 goto insert_fail; 329 } else if (head_fragno > cur_fragno) { 330 qdf_nbuf_set_next(frag, cur); 331 cur = frag; 332 *head_addr = frag; /* head pointer to be updated */ 333 } else { 334 while ((cur_fragno > head_fragno) && cur != NULL) { 335 prev = cur; 336 cur = qdf_nbuf_next(cur); 337 rx_desc_info = qdf_nbuf_data(cur); 338 head_fragno = 339 dp_rx_frag_get_mpdu_frag_number( 340 rx_desc_info); 341 } 342 343 if (cur_fragno == head_fragno) { 344 qdf_nbuf_free(frag); 345 goto insert_fail; 346 } 347 348 qdf_nbuf_set_next(prev, frag); 349 qdf_nbuf_set_next(frag, cur); 350 } 351 } 352 353 next = qdf_nbuf_next(*head_addr); 354 355 rx_desc_info = qdf_nbuf_data(*tail_addr); 356 last_morefrag = dp_rx_frag_get_more_frag_bit(rx_desc_info); 357 358 /* TODO: optimize the loop */ 359 if (!last_morefrag) { 360 /* Check if all fragments are present */ 361 do { 362 rx_desc_info = qdf_nbuf_data(next); 363 next_fragno = 364 dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 365 count++; 366 367 if (next_fragno != count) 368 break; 369 370 next = qdf_nbuf_next(next); 371 } while (next); 372 373 if (!next) { 374 *all_frag_present = 1; 375 return QDF_STATUS_SUCCESS; 376 } 377 } 378 379 insert_done: 380 return QDF_STATUS_SUCCESS; 381 382 insert_fail: 383 return QDF_STATUS_E_FAILURE; 384 } 385 386 387 /* 388 * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment 389 * @msdu: Pointer to the fragment 390 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) 391 * 392 * decap tkip encrypted fragment 393 * 394 * Returns: QDF_STATUS 395 */ 396 static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen) 397 { 398 uint8_t *ivp, *orig_hdr; 399 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 400 401 /* start of 802.11 header info */ 402 orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len); 403 404 /* TKIP header is located post 802.11 header */ 405 ivp = orig_hdr + hdrlen; 406 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) { 407 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 408 "IEEE80211_WEP_EXTIV is missing in TKIP fragment"); 409 return QDF_STATUS_E_DEFRAG_ERROR; 410 } 411 412 qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer); 413 414 return QDF_STATUS_SUCCESS; 415 } 416 417 /* 418 * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment 419 * @nbuf: Pointer to the fragment buffer 420 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) 421 * 422 * Remove MIC information from CCMP fragment 423 * 424 * Returns: QDF_STATUS 425 */ 426 static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen) 427 { 428 uint8_t *ivp, *orig_hdr; 429 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 430 431 /* start of the 802.11 header */ 432 orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); 433 434 /* CCMP header is located after 802.11 header */ 435 ivp = orig_hdr + hdrlen; 436 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 437 return QDF_STATUS_E_DEFRAG_ERROR; 438 439 qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer); 440 441 return QDF_STATUS_SUCCESS; 442 } 443 444 /* 445 * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment 446 * @nbuf: Pointer to the fragment 447 * @hdrlen: length of the header information 448 * 449 * decap CCMP encrypted fragment 450 * 451 * Returns: QDF_STATUS 452 */ 453 static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen) 454 { 455 uint8_t *ivp, *origHdr; 456 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 457 458 origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len); 459 ivp = origHdr + hdrlen; 460 461 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 462 return QDF_STATUS_E_DEFRAG_ERROR; 463 464 /* Let's pull the header later */ 465 466 return QDF_STATUS_SUCCESS; 467 } 468 469 /* 470 * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment 471 * @msdu: Pointer to the fragment 472 * @hdrlen: length of the header information 473 * 474 * decap WEP encrypted fragment 475 * 476 * Returns: QDF_STATUS 477 */ 478 static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen) 479 { 480 uint8_t *origHdr; 481 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 482 483 origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); 484 qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen); 485 486 qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer); 487 488 return QDF_STATUS_SUCCESS; 489 } 490 491 /* 492 * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment 493 * @nbuf: Pointer to the fragment 494 * 495 * Calculate the header size of the received fragment 496 * 497 * Returns: header size (uint16_t) 498 */ 499 static uint16_t dp_rx_defrag_hdrsize(qdf_nbuf_t nbuf) 500 { 501 uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf); 502 uint16_t size = sizeof(struct ieee80211_frame); 503 uint16_t fc = 0; 504 uint32_t to_ds, fr_ds; 505 uint8_t frm_ctrl_valid; 506 uint16_t frm_ctrl_field; 507 508 to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr); 509 fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr); 510 frm_ctrl_valid = hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr); 511 frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr); 512 513 if (to_ds && fr_ds) 514 size += IEEE80211_ADDR_LEN; 515 516 if (frm_ctrl_valid) { 517 fc = frm_ctrl_field; 518 519 /* use 1-st byte for validation */ 520 if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) { 521 size += sizeof(uint16_t); 522 /* use 2-nd byte for validation */ 523 if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER) 524 size += sizeof(struct ieee80211_htc); 525 } 526 } 527 528 return size; 529 } 530 531 /* 532 * dp_rx_defrag_michdr(): Calculate a pseudo MIC header 533 * @wh0: Pointer to the wireless header of the fragment 534 * @hdr: Array to hold the pseudo header 535 * 536 * Calculate a pseudo MIC header 537 * 538 * Returns: None 539 */ 540 static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0, 541 uint8_t hdr[]) 542 { 543 const struct ieee80211_frame_addr4 *wh = 544 (const struct ieee80211_frame_addr4 *)wh0; 545 546 switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { 547 case IEEE80211_FC1_DIR_NODS: 548 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ 549 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, 550 wh->i_addr2); 551 break; 552 case IEEE80211_FC1_DIR_TODS: 553 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ 554 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, 555 wh->i_addr2); 556 break; 557 case IEEE80211_FC1_DIR_FROMDS: 558 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ 559 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, 560 wh->i_addr3); 561 break; 562 case IEEE80211_FC1_DIR_DSTODS: 563 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ 564 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, 565 wh->i_addr4); 566 break; 567 } 568 569 /* 570 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but 571 * it could also be set for deauth, disassoc, action, etc. for 572 * a mgt type frame. It comes into picture for MFP. 573 */ 574 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { 575 const struct ieee80211_qosframe *qwh = 576 (const struct ieee80211_qosframe *)wh; 577 hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID; 578 } else { 579 hdr[12] = 0; 580 } 581 582 hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ 583 } 584 585 /* 586 * dp_rx_defrag_mic(): Calculate MIC header 587 * @key: Pointer to the key 588 * @wbuf: fragment buffer 589 * @off: Offset 590 * @data_len: Data length 591 * @mic: Array to hold MIC 592 * 593 * Calculate a pseudo MIC header 594 * 595 * Returns: QDF_STATUS 596 */ 597 static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf, 598 uint16_t off, uint16_t data_len, uint8_t mic[]) 599 { 600 uint8_t hdr[16] = { 0, }; 601 uint32_t l, r; 602 const uint8_t *data; 603 uint32_t space; 604 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 605 606 dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) 607 + rx_desc_len), hdr); 608 609 l = dp_rx_get_le32(key); 610 r = dp_rx_get_le32(key + 4); 611 612 /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */ 613 l ^= dp_rx_get_le32(hdr); 614 dp_rx_michael_block(l, r); 615 l ^= dp_rx_get_le32(&hdr[4]); 616 dp_rx_michael_block(l, r); 617 l ^= dp_rx_get_le32(&hdr[8]); 618 dp_rx_michael_block(l, r); 619 l ^= dp_rx_get_le32(&hdr[12]); 620 dp_rx_michael_block(l, r); 621 622 /* first buffer has special handling */ 623 data = (uint8_t *)qdf_nbuf_data(wbuf) + off; 624 space = qdf_nbuf_len(wbuf) - off; 625 626 for (;; ) { 627 if (space > data_len) 628 space = data_len; 629 630 /* collect 32-bit blocks from current buffer */ 631 while (space >= sizeof(uint32_t)) { 632 l ^= dp_rx_get_le32(data); 633 dp_rx_michael_block(l, r); 634 data += sizeof(uint32_t); 635 space -= sizeof(uint32_t); 636 data_len -= sizeof(uint32_t); 637 } 638 if (data_len < sizeof(uint32_t)) 639 break; 640 641 wbuf = qdf_nbuf_next(wbuf); 642 if (wbuf == NULL) 643 return QDF_STATUS_E_DEFRAG_ERROR; 644 645 if (space != 0) { 646 const uint8_t *data_next; 647 /* 648 * Block straddles buffers, split references. 649 */ 650 data_next = 651 (uint8_t *)qdf_nbuf_data(wbuf) + off; 652 if ((qdf_nbuf_len(wbuf)) < 653 sizeof(uint32_t) - space) { 654 return QDF_STATUS_E_DEFRAG_ERROR; 655 } 656 switch (space) { 657 case 1: 658 l ^= dp_rx_get_le32_split(data[0], 659 data_next[0], data_next[1], 660 data_next[2]); 661 data = data_next + 3; 662 space = (qdf_nbuf_len(wbuf) - off) - 3; 663 break; 664 case 2: 665 l ^= dp_rx_get_le32_split(data[0], data[1], 666 data_next[0], data_next[1]); 667 data = data_next + 2; 668 space = (qdf_nbuf_len(wbuf) - off) - 2; 669 break; 670 case 3: 671 l ^= dp_rx_get_le32_split(data[0], data[1], 672 data[2], data_next[0]); 673 data = data_next + 1; 674 space = (qdf_nbuf_len(wbuf) - off) - 1; 675 break; 676 } 677 dp_rx_michael_block(l, r); 678 data_len -= sizeof(uint32_t); 679 } else { 680 /* 681 * Setup for next buffer. 682 */ 683 data = (uint8_t *)qdf_nbuf_data(wbuf) + off; 684 space = qdf_nbuf_len(wbuf) - off; 685 } 686 } 687 /* Last block and padding (0x5a, 4..7 x 0) */ 688 switch (data_len) { 689 case 0: 690 l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0); 691 break; 692 case 1: 693 l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0); 694 break; 695 case 2: 696 l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0); 697 break; 698 case 3: 699 l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a); 700 break; 701 } 702 dp_rx_michael_block(l, r); 703 dp_rx_michael_block(l, r); 704 dp_rx_put_le32(mic, l); 705 dp_rx_put_le32(mic + 4, r); 706 707 return QDF_STATUS_SUCCESS; 708 } 709 710 /* 711 * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame 712 * @key: Pointer to the key 713 * @msdu: fragment buffer 714 * @hdrlen: Length of the header information 715 * 716 * Remove MIC information from the TKIP frame 717 * 718 * Returns: QDF_STATUS 719 */ 720 static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key, 721 qdf_nbuf_t msdu, uint16_t hdrlen) 722 { 723 QDF_STATUS status; 724 uint32_t pktlen = 0; 725 uint8_t mic[IEEE80211_WEP_MICLEN]; 726 uint8_t mic0[IEEE80211_WEP_MICLEN]; 727 qdf_nbuf_t prev = NULL, next; 728 729 next = msdu; 730 while (next) { 731 pktlen += (qdf_nbuf_len(next) - hdrlen); 732 prev = next; 733 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 734 "%s pktlen %ld", __func__, 735 qdf_nbuf_len(next) - hdrlen); 736 next = qdf_nbuf_next(next); 737 } 738 739 if (!prev) { 740 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 741 "%s Defrag chaining failed !\n", __func__); 742 return QDF_STATUS_E_DEFRAG_ERROR; 743 } 744 745 qdf_nbuf_copy_bits(prev, qdf_nbuf_len(prev) - dp_f_tkip.ic_miclen, 746 dp_f_tkip.ic_miclen, (caddr_t)mic0); 747 qdf_nbuf_trim_tail(prev, dp_f_tkip.ic_miclen); 748 pktlen -= dp_f_tkip.ic_miclen; 749 750 status = dp_rx_defrag_mic(key, msdu, hdrlen, 751 pktlen, mic); 752 753 if (QDF_IS_STATUS_ERROR(status)) 754 return status; 755 756 if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen)) 757 return QDF_STATUS_E_DEFRAG_ERROR; 758 759 return QDF_STATUS_SUCCESS; 760 } 761 762 /* 763 * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers 764 * @nbuf: buffer pointer 765 * @hdrsize: size of the header to be pulled 766 * 767 * Pull the RXTLV & the 802.11 headers 768 * 769 * Returns: None 770 */ 771 static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize) 772 { 773 qdf_nbuf_pull_head(nbuf, 774 RX_PKT_TLVS_LEN + hdrsize); 775 776 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 777 "%s: final pktlen %d .11len %d", 778 __func__, 779 (uint32_t)qdf_nbuf_len(nbuf), hdrsize); 780 } 781 782 /* 783 * dp_rx_construct_fraglist(): Construct a nbuf fraglist 784 * @peer: Pointer to the peer 785 * @head: Pointer to list of fragments 786 * @hdrsize: Size of the header to be pulled 787 * 788 * Construct a nbuf fraglist 789 * 790 * Returns: None 791 */ 792 static void 793 dp_rx_construct_fraglist(struct dp_peer *peer, 794 qdf_nbuf_t head, uint16_t hdrsize) 795 { 796 qdf_nbuf_t msdu = qdf_nbuf_next(head); 797 qdf_nbuf_t rx_nbuf = msdu; 798 uint32_t len = 0; 799 800 while (msdu) { 801 dp_rx_frag_pull_hdr(msdu, hdrsize); 802 len += qdf_nbuf_len(msdu); 803 msdu = qdf_nbuf_next(msdu); 804 } 805 806 qdf_nbuf_append_ext_list(head, rx_nbuf, len); 807 qdf_nbuf_set_next(head, NULL); 808 809 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 810 "%s: head len %d ext len %d data len %d ", 811 __func__, 812 (uint32_t)qdf_nbuf_len(head), 813 (uint32_t)qdf_nbuf_len(rx_nbuf), 814 (uint32_t)(head->data_len)); 815 } 816 817 /** 818 * dp_rx_defrag_err() - rx err handler 819 * @pdev: handle to pdev object 820 * @vdev_id: vdev id 821 * @peer_mac_addr: peer mac address 822 * @tid: TID 823 * @tsf32: TSF 824 * @err_type: error type 825 * @rx_frame: rx frame 826 * @pn: PN Number 827 * @key_id: key id 828 * 829 * This function handles rx error and send MIC error notification 830 * 831 * Return: None 832 */ 833 static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 834 { 835 struct ol_if_ops *tops = NULL; 836 struct dp_pdev *pdev = vdev->pdev; 837 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 838 uint8_t *orig_hdr; 839 struct ieee80211_frame *wh; 840 841 orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); 842 wh = (struct ieee80211_frame *)orig_hdr; 843 844 tops = pdev->soc->cdp_soc.ol_ops; 845 if (tops->rx_mic_error) 846 tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh); 847 } 848 849 850 /* 851 * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3 852 * @nbuf: Pointer to the fragment buffer 853 * @hdrsize: Size of headers 854 * 855 * Transcap the fragment from 802.11 to 802.3 856 * 857 * Returns: None 858 */ 859 static void 860 dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize) 861 { 862 struct llc_snap_hdr_t *llchdr; 863 struct ethernet_hdr_t *eth_hdr; 864 uint8_t ether_type[2]; 865 uint16_t fc = 0; 866 union dp_align_mac_addr mac_addr; 867 uint8_t *rx_desc_info = qdf_mem_malloc(RX_PKT_TLVS_LEN); 868 869 if (rx_desc_info == NULL) { 870 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 871 "%s: Memory alloc failed ! ", __func__); 872 QDF_ASSERT(0); 873 return; 874 } 875 876 qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN); 877 878 llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) + 879 RX_PKT_TLVS_LEN + hdrsize); 880 qdf_mem_copy(ether_type, llchdr->ethertype, 2); 881 882 qdf_nbuf_pull_head(nbuf, (RX_PKT_TLVS_LEN + hdrsize + 883 sizeof(struct llc_snap_hdr_t) - 884 sizeof(struct ethernet_hdr_t))); 885 886 eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf)); 887 888 if (hal_rx_get_mpdu_frame_control_valid(rx_desc_info)) 889 fc = hal_rx_get_frame_ctrl_field(rx_desc_info); 890 891 switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) { 892 893 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 894 "%s: frame control type: 0x%x", __func__, fc); 895 896 case IEEE80211_FC1_DIR_NODS: 897 hal_rx_mpdu_get_addr1(rx_desc_info, 898 &mac_addr.raw[0]); 899 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 900 IEEE80211_ADDR_LEN); 901 hal_rx_mpdu_get_addr2(rx_desc_info, 902 &mac_addr.raw[0]); 903 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 904 IEEE80211_ADDR_LEN); 905 break; 906 case IEEE80211_FC1_DIR_TODS: 907 hal_rx_mpdu_get_addr3(rx_desc_info, 908 &mac_addr.raw[0]); 909 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 910 IEEE80211_ADDR_LEN); 911 hal_rx_mpdu_get_addr2(rx_desc_info, 912 &mac_addr.raw[0]); 913 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 914 IEEE80211_ADDR_LEN); 915 break; 916 case IEEE80211_FC1_DIR_FROMDS: 917 hal_rx_mpdu_get_addr1(rx_desc_info, 918 &mac_addr.raw[0]); 919 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 920 IEEE80211_ADDR_LEN); 921 hal_rx_mpdu_get_addr3(rx_desc_info, 922 &mac_addr.raw[0]); 923 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 924 IEEE80211_ADDR_LEN); 925 break; 926 927 case IEEE80211_FC1_DIR_DSTODS: 928 hal_rx_mpdu_get_addr3(rx_desc_info, 929 &mac_addr.raw[0]); 930 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 931 IEEE80211_ADDR_LEN); 932 hal_rx_mpdu_get_addr4(rx_desc_info, 933 &mac_addr.raw[0]); 934 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 935 IEEE80211_ADDR_LEN); 936 break; 937 938 default: 939 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 940 "%s: Unknown frame control type: 0x%x", __func__, fc); 941 } 942 943 qdf_mem_copy(eth_hdr->ethertype, ether_type, 944 sizeof(ether_type)); 945 946 qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN); 947 qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, RX_PKT_TLVS_LEN); 948 qdf_mem_free(rx_desc_info); 949 } 950 951 /* 952 * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO 953 * @peer: Pointer to the peer 954 * @tid: Transmit Identifier 955 * @head: Buffer to be reinjected back 956 * 957 * Reinject the fragment chain back into REO 958 * 959 * Returns: QDF_STATUS 960 */ 961 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer, 962 unsigned tid, qdf_nbuf_t head) 963 { 964 struct dp_pdev *pdev = peer->vdev->pdev; 965 struct dp_soc *soc = pdev->soc; 966 struct hal_buf_info buf_info; 967 void *link_desc_va; 968 void *msdu0, *msdu_desc_info; 969 void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr; 970 void *dst_mpdu_desc_info, *dst_qdesc_addr; 971 qdf_dma_addr_t paddr; 972 uint32_t nbuf_len, seq_no, dst_ind; 973 uint32_t *mpdu_wrd; 974 uint32_t ret, cookie; 975 976 void *dst_ring_desc = 977 peer->rx_tid[tid].dst_ring_desc; 978 void *hal_srng = soc->reo_reinject_ring.hal_srng; 979 980 hal_rx_reo_buf_paddr_get(dst_ring_desc, &buf_info); 981 982 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 983 984 qdf_assert(link_desc_va); 985 986 msdu0 = (uint8_t *)link_desc_va + 987 RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET; 988 989 nbuf_len = qdf_nbuf_len(head) - RX_PKT_TLVS_LEN; 990 991 HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW); 992 HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE, 993 UNI_DESC_BUF_TYPE_RX_MSDU_LINK); 994 995 /* msdu reconfig */ 996 msdu_desc_info = (uint8_t *)msdu0 + 997 RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET; 998 999 dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va); 1000 1001 qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info)); 1002 1003 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1004 FIRST_MSDU_IN_MPDU_FLAG, 1); 1005 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1006 LAST_MSDU_IN_MPDU_FLAG, 1); 1007 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1008 MSDU_CONTINUATION, 0x0); 1009 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1010 REO_DESTINATION_INDICATION, dst_ind); 1011 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1012 MSDU_LENGTH, nbuf_len); 1013 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1014 SA_IS_VALID, 1); 1015 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1016 DA_IS_VALID, 1); 1017 1018 /* change RX TLV's */ 1019 hal_rx_msdu_start_msdu_len_set( 1020 qdf_nbuf_data(head), nbuf_len); 1021 1022 cookie = HAL_RX_BUF_COOKIE_GET(msdu0); 1023 1024 /* map the nbuf before reinject it into HW */ 1025 ret = qdf_nbuf_map_single(soc->osdev, head, 1026 QDF_DMA_BIDIRECTIONAL); 1027 1028 if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { 1029 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1030 "%s: nbuf map failed !", __func__); 1031 qdf_nbuf_free(head); 1032 return QDF_STATUS_E_FAILURE; 1033 } 1034 1035 paddr = qdf_nbuf_get_frag_paddr(head, 0); 1036 1037 ret = check_x86_paddr(soc, &head, &paddr, pdev); 1038 1039 if (ret == QDF_STATUS_E_FAILURE) { 1040 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1041 "%s: x86 check failed !", __func__); 1042 return QDF_STATUS_E_FAILURE; 1043 } 1044 1045 hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_WBM2SW_RBM); 1046 1047 /* Lets fill entrance ring now !!! */ 1048 if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) { 1049 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1050 "HAL RING Access For REO entrance SRNG Failed: %pK", 1051 hal_srng); 1052 1053 return QDF_STATUS_E_FAILURE; 1054 } 1055 1056 ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng); 1057 1058 qdf_assert(ent_ring_desc); 1059 1060 paddr = (uint64_t)buf_info.paddr; 1061 /* buf addr */ 1062 hal_rxdma_buff_addr_info_set(ent_ring_desc, paddr, 1063 buf_info.sw_cookie, 1064 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 1065 /* mpdu desc info */ 1066 ent_mpdu_desc_info = (uint8_t *)ent_ring_desc + 1067 RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET; 1068 1069 dst_mpdu_desc_info = (uint8_t *)dst_ring_desc + 1070 REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET; 1071 1072 qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info, 1073 sizeof(struct rx_mpdu_desc_info)); 1074 qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t)); 1075 1076 mpdu_wrd = (uint32_t *)dst_mpdu_desc_info; 1077 seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd); 1078 1079 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1080 MSDU_COUNT, 0x1); 1081 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1082 MPDU_SEQUENCE_NUMBER, seq_no); 1083 1084 /* unset frag bit */ 1085 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1086 FRAGMENT_FLAG, 0x0); 1087 1088 /* set sa/da valid bits */ 1089 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1090 SA_IS_VALID, 0x1); 1091 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1092 DA_IS_VALID, 0x1); 1093 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1094 RAW_MPDU, 0x0); 1095 1096 /* qdesc addr */ 1097 ent_qdesc_addr = (uint8_t *)ent_ring_desc + 1098 REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; 1099 1100 dst_qdesc_addr = (uint8_t *)dst_ring_desc + 1101 REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; 1102 1103 qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8); 1104 1105 HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5, 1106 REO_DESTINATION_INDICATION, dst_ind); 1107 1108 hal_srng_access_end(soc->hal_soc, hal_srng); 1109 1110 DP_STATS_INC(soc, rx.reo_reinject, 1); 1111 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 1112 "%s: reinjection done !", __func__); 1113 return QDF_STATUS_SUCCESS; 1114 } 1115 1116 /* 1117 * dp_rx_defrag(): Defragment the fragment chain 1118 * @peer: Pointer to the peer 1119 * @tid: Transmit Identifier 1120 * @frag_list_head: Pointer to head list 1121 * @frag_list_tail: Pointer to tail list 1122 * 1123 * Defragment the fragment chain 1124 * 1125 * Returns: QDF_STATUS 1126 */ 1127 static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid, 1128 qdf_nbuf_t frag_list_head, qdf_nbuf_t frag_list_tail) 1129 { 1130 qdf_nbuf_t tmp_next, prev; 1131 qdf_nbuf_t cur = frag_list_head, msdu; 1132 uint32_t index, tkip_demic = 0; 1133 uint16_t hdr_space; 1134 uint8_t key[DEFRAG_IEEE80211_KEY_LEN]; 1135 struct dp_vdev *vdev = peer->vdev; 1136 struct dp_soc *soc = vdev->pdev->soc; 1137 uint8_t status = 0; 1138 1139 hdr_space = dp_rx_defrag_hdrsize(cur); 1140 index = hal_rx_msdu_is_wlan_mcast(cur) ? 1141 dp_sec_mcast : dp_sec_ucast; 1142 1143 /* Remove FCS from all fragments */ 1144 while (cur) { 1145 tmp_next = qdf_nbuf_next(cur); 1146 qdf_nbuf_set_next(cur, NULL); 1147 qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN); 1148 prev = cur; 1149 qdf_nbuf_set_next(cur, tmp_next); 1150 cur = tmp_next; 1151 } 1152 cur = frag_list_head; 1153 1154 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1155 "%s: index %d Security type: %d", __func__, 1156 index, peer->security[index].sec_type); 1157 1158 switch (peer->security[index].sec_type) { 1159 case htt_sec_type_tkip: 1160 tkip_demic = 1; 1161 1162 case htt_sec_type_tkip_nomic: 1163 while (cur) { 1164 tmp_next = qdf_nbuf_next(cur); 1165 if (dp_rx_defrag_tkip_decap(cur, hdr_space)) { 1166 1167 QDF_TRACE(QDF_MODULE_ID_TXRX, 1168 QDF_TRACE_LEVEL_ERROR, 1169 "dp_rx_defrag: TKIP decap failed"); 1170 1171 return QDF_STATUS_E_DEFRAG_ERROR; 1172 } 1173 cur = tmp_next; 1174 } 1175 1176 /* If success, increment header to be stripped later */ 1177 hdr_space += dp_f_tkip.ic_header; 1178 break; 1179 1180 case htt_sec_type_aes_ccmp: 1181 while (cur) { 1182 tmp_next = qdf_nbuf_next(cur); 1183 if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) { 1184 1185 QDF_TRACE(QDF_MODULE_ID_TXRX, 1186 QDF_TRACE_LEVEL_ERROR, 1187 "dp_rx_defrag: CCMP demic failed"); 1188 1189 return QDF_STATUS_E_DEFRAG_ERROR; 1190 } 1191 if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) { 1192 1193 QDF_TRACE(QDF_MODULE_ID_TXRX, 1194 QDF_TRACE_LEVEL_ERROR, 1195 "dp_rx_defrag: CCMP decap failed"); 1196 1197 return QDF_STATUS_E_DEFRAG_ERROR; 1198 } 1199 cur = tmp_next; 1200 } 1201 1202 /* If success, increment header to be stripped later */ 1203 hdr_space += dp_f_ccmp.ic_header; 1204 break; 1205 1206 case htt_sec_type_wep40: 1207 case htt_sec_type_wep104: 1208 case htt_sec_type_wep128: 1209 while (cur) { 1210 tmp_next = qdf_nbuf_next(cur); 1211 if (dp_rx_defrag_wep_decap(cur, hdr_space)) { 1212 1213 QDF_TRACE(QDF_MODULE_ID_TXRX, 1214 QDF_TRACE_LEVEL_ERROR, 1215 "dp_rx_defrag: WEP decap failed"); 1216 1217 return QDF_STATUS_E_DEFRAG_ERROR; 1218 } 1219 cur = tmp_next; 1220 } 1221 1222 /* If success, increment header to be stripped later */ 1223 hdr_space += dp_f_wep.ic_header; 1224 break; 1225 default: 1226 QDF_TRACE(QDF_MODULE_ID_TXRX, 1227 QDF_TRACE_LEVEL_ERROR, 1228 "dp_rx_defrag: Did not match any security type"); 1229 break; 1230 } 1231 1232 if (tkip_demic) { 1233 msdu = frag_list_head; 1234 if (soc->cdp_soc.ol_ops->rx_frag_tkip_demic) { 1235 status = soc->cdp_soc.ol_ops->rx_frag_tkip_demic( 1236 (void *)peer->ctrl_peer, msdu, hdr_space); 1237 } else { 1238 qdf_mem_copy(key, 1239 &peer->security[index].michael_key[0], 1240 IEEE80211_WEP_MICLEN); 1241 status = dp_rx_defrag_tkip_demic(key, msdu, 1242 RX_PKT_TLVS_LEN + 1243 hdr_space); 1244 1245 if (status) { 1246 dp_rx_defrag_err(vdev, frag_list_head); 1247 1248 QDF_TRACE(QDF_MODULE_ID_TXRX, 1249 QDF_TRACE_LEVEL_ERROR, 1250 "%s: TKIP demic failed status %d", 1251 __func__, status); 1252 1253 return QDF_STATUS_E_DEFRAG_ERROR; 1254 } 1255 } 1256 } 1257 1258 /* Convert the header to 802.3 header */ 1259 dp_rx_defrag_nwifi_to_8023(frag_list_head, hdr_space); 1260 dp_rx_construct_fraglist(peer, frag_list_head, hdr_space); 1261 1262 return QDF_STATUS_SUCCESS; 1263 } 1264 1265 /* 1266 * dp_rx_defrag_cleanup(): Clean up activities 1267 * @peer: Pointer to the peer 1268 * @tid: Transmit Identifier 1269 * 1270 * Returns: None 1271 */ 1272 void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid) 1273 { 1274 struct dp_rx_reorder_array_elem *rx_reorder_array_elem = 1275 peer->rx_tid[tid].array; 1276 1277 if (!rx_reorder_array_elem) { 1278 /* 1279 * if this condition is hit then somebody 1280 * must have reset this pointer to NULL. 1281 * array pointer usually points to base variable 1282 * of TID queue structure: "struct dp_rx_tid" 1283 */ 1284 QDF_ASSERT(0); 1285 return; 1286 } 1287 /* Free up nbufs */ 1288 dp_rx_defrag_frames_free(rx_reorder_array_elem->head); 1289 1290 /* Free up saved ring descriptors */ 1291 dp_rx_clear_saved_desc_info(peer, tid); 1292 1293 rx_reorder_array_elem->head = NULL; 1294 rx_reorder_array_elem->tail = NULL; 1295 peer->rx_tid[tid].defrag_timeout_ms = 0; 1296 peer->rx_tid[tid].curr_frag_num = 0; 1297 peer->rx_tid[tid].curr_seq_num = 0; 1298 peer->rx_tid[tid].head_frag_desc = NULL; 1299 } 1300 1301 /* 1302 * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor 1303 * @ring_desc: Pointer to the dst ring descriptor 1304 * @peer: Pointer to the peer 1305 * @tid: Transmit Identifier 1306 * 1307 * Returns: None 1308 */ 1309 static QDF_STATUS dp_rx_defrag_save_info_from_ring_desc(void *ring_desc, 1310 struct dp_rx_desc *rx_desc, struct dp_peer *peer, unsigned tid) 1311 { 1312 void *dst_ring_desc = qdf_mem_malloc( 1313 sizeof(struct reo_destination_ring)); 1314 1315 if (dst_ring_desc == NULL) { 1316 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1317 "%s: Memory alloc failed !", __func__); 1318 QDF_ASSERT(0); 1319 return QDF_STATUS_E_NOMEM; 1320 } 1321 1322 qdf_mem_copy(dst_ring_desc, ring_desc, 1323 sizeof(struct reo_destination_ring)); 1324 1325 peer->rx_tid[tid].dst_ring_desc = dst_ring_desc; 1326 peer->rx_tid[tid].head_frag_desc = rx_desc; 1327 1328 return QDF_STATUS_SUCCESS; 1329 } 1330 1331 /* 1332 * dp_rx_defrag_store_fragment(): Store incoming fragments 1333 * @soc: Pointer to the SOC data structure 1334 * @ring_desc: Pointer to the ring descriptor 1335 * @mpdu_desc_info: MPDU descriptor info 1336 * @tid: Traffic Identifier 1337 * @rx_desc: Pointer to rx descriptor 1338 * @rx_bfs: Number of bfs consumed 1339 * 1340 * Returns: QDF_STATUS 1341 */ 1342 static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc, 1343 void *ring_desc, 1344 union dp_rx_desc_list_elem_t **head, 1345 union dp_rx_desc_list_elem_t **tail, 1346 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 1347 unsigned tid, struct dp_rx_desc *rx_desc, 1348 uint32_t *rx_bfs) 1349 { 1350 struct dp_rx_reorder_array_elem *rx_reorder_array_elem; 1351 struct dp_pdev *pdev; 1352 struct dp_peer *peer; 1353 uint16_t peer_id; 1354 uint8_t fragno, more_frag, all_frag_present = 0; 1355 uint16_t rxseq = mpdu_desc_info->mpdu_seq; 1356 QDF_STATUS status; 1357 struct dp_rx_tid *rx_tid; 1358 uint8_t mpdu_sequence_control_valid; 1359 uint8_t mpdu_frame_control_valid; 1360 qdf_nbuf_t frag = rx_desc->nbuf; 1361 1362 /* Check if the packet is from a valid peer */ 1363 peer_id = DP_PEER_METADATA_PEER_ID_GET( 1364 mpdu_desc_info->peer_meta_data); 1365 peer = dp_peer_find_by_id(soc, peer_id); 1366 1367 if (!peer) { 1368 /* We should not receive anything from unknown peer 1369 * however, that might happen while we are in the monitor mode. 1370 * We don't need to handle that here 1371 */ 1372 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1373 "Unknown peer, dropping the fragment"); 1374 1375 qdf_nbuf_free(frag); 1376 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1377 *rx_bfs = 1; 1378 1379 return QDF_STATUS_E_DEFRAG_ERROR; 1380 } 1381 1382 pdev = peer->vdev->pdev; 1383 rx_tid = &peer->rx_tid[tid]; 1384 1385 rx_reorder_array_elem = peer->rx_tid[tid].array; 1386 1387 mpdu_sequence_control_valid = 1388 hal_rx_get_mpdu_sequence_control_valid(rx_desc->rx_buf_start); 1389 1390 /* Invalid MPDU sequence control field, MPDU is of no use */ 1391 if (!mpdu_sequence_control_valid) { 1392 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1393 "Invalid MPDU seq control field, dropping MPDU"); 1394 qdf_nbuf_free(frag); 1395 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1396 *rx_bfs = 1; 1397 1398 qdf_assert(0); 1399 goto end; 1400 } 1401 1402 mpdu_frame_control_valid = 1403 hal_rx_get_mpdu_frame_control_valid(rx_desc->rx_buf_start); 1404 1405 /* Invalid frame control field */ 1406 if (!mpdu_frame_control_valid) { 1407 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1408 "Invalid frame control field, dropping MPDU"); 1409 qdf_nbuf_free(frag); 1410 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1411 *rx_bfs = 1; 1412 1413 qdf_assert(0); 1414 goto end; 1415 } 1416 1417 /* Current mpdu sequence */ 1418 more_frag = dp_rx_frag_get_more_frag_bit(rx_desc->rx_buf_start); 1419 1420 /* HW does not populate the fragment number as of now 1421 * need to get from the 802.11 header 1422 */ 1423 fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc->rx_buf_start); 1424 1425 /* 1426 * !more_frag: no more fragments to be delivered 1427 * !frag_no: packet is not fragmented 1428 * !rx_reorder_array_elem->head: no saved fragments so far 1429 */ 1430 if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) { 1431 /* We should not get into this situation here. 1432 * It means an unfragmented packet with fragment flag 1433 * is delivered over the REO exception ring. 1434 * Typically it follows normal rx path. 1435 */ 1436 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1437 "Rcvd unfragmented pkt on REO Err srng, dropping"); 1438 qdf_nbuf_free(frag); 1439 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1440 *rx_bfs = 1; 1441 1442 qdf_assert(0); 1443 goto end; 1444 } 1445 1446 /* Check if the fragment is for the same sequence or a different one */ 1447 if (rx_reorder_array_elem->head) { 1448 if (rxseq != rx_tid->curr_seq_num) { 1449 1450 /* Drop stored fragments if out of sequence 1451 * fragment is received 1452 */ 1453 dp_rx_defrag_frames_free(rx_reorder_array_elem->head); 1454 1455 rx_reorder_array_elem->head = NULL; 1456 rx_reorder_array_elem->tail = NULL; 1457 1458 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1459 "%s mismatch, dropping earlier sequence ", 1460 (rxseq == rx_tid->curr_seq_num) 1461 ? "address" 1462 : "seq number"); 1463 1464 /* 1465 * The sequence number for this fragment becomes the 1466 * new sequence number to be processed 1467 */ 1468 rx_tid->curr_seq_num = rxseq; 1469 1470 } 1471 } else { 1472 /* Start of a new sequence */ 1473 dp_rx_defrag_cleanup(peer, tid); 1474 rx_tid->curr_seq_num = rxseq; 1475 } 1476 1477 /* 1478 * If the earlier sequence was dropped, this will be the fresh start. 1479 * Else, continue with next fragment in a given sequence 1480 */ 1481 status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head, 1482 &rx_reorder_array_elem->tail, frag, 1483 &all_frag_present); 1484 1485 /* 1486 * Currently, we can have only 6 MSDUs per-MPDU, if the current 1487 * packet sequence has more than 6 MSDUs for some reason, we will 1488 * have to use the next MSDU link descriptor and chain them together 1489 * before reinjection 1490 */ 1491 if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) && 1492 (rx_reorder_array_elem->head == frag)) { 1493 1494 status = dp_rx_defrag_save_info_from_ring_desc(ring_desc, 1495 rx_desc, peer, tid); 1496 1497 if (status != QDF_STATUS_SUCCESS) { 1498 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1499 "%s: Unable to store ring desc !", __func__); 1500 goto end; 1501 } 1502 } else { 1503 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1504 *rx_bfs = 1; 1505 1506 /* Return the non-head link desc */ 1507 if (dp_rx_link_desc_return(soc, ring_desc, 1508 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 1509 QDF_STATUS_SUCCESS) 1510 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1511 "%s: Failed to return link desc", 1512 __func__); 1513 1514 } 1515 1516 if (pdev->soc->rx.flags.defrag_timeout_check) 1517 dp_rx_defrag_waitlist_remove(peer, tid); 1518 1519 /* Yet to receive more fragments for this sequence number */ 1520 if (!all_frag_present) { 1521 uint32_t now_ms = 1522 qdf_system_ticks_to_msecs(qdf_system_ticks()); 1523 1524 peer->rx_tid[tid].defrag_timeout_ms = 1525 now_ms + pdev->soc->rx.defrag.timeout_ms; 1526 1527 dp_rx_defrag_waitlist_add(peer, tid); 1528 dp_peer_unref_del_find_by_id(peer); 1529 1530 return QDF_STATUS_SUCCESS; 1531 } 1532 1533 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1534 "All fragments received for sequence: %d", rxseq); 1535 1536 /* Process the fragments */ 1537 status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head, 1538 rx_reorder_array_elem->tail); 1539 if (QDF_IS_STATUS_ERROR(status)) { 1540 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1541 "Fragment processing failed"); 1542 1543 dp_rx_add_to_free_desc_list(head, tail, 1544 peer->rx_tid[tid].head_frag_desc); 1545 *rx_bfs = 1; 1546 1547 if (dp_rx_link_desc_return(soc, 1548 peer->rx_tid[tid].dst_ring_desc, 1549 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 1550 QDF_STATUS_SUCCESS) 1551 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1552 "%s: Failed to return link desc", 1553 __func__); 1554 dp_rx_defrag_cleanup(peer, tid); 1555 goto end; 1556 } 1557 1558 /* Re-inject the fragments back to REO for further processing */ 1559 status = dp_rx_defrag_reo_reinject(peer, tid, 1560 rx_reorder_array_elem->head); 1561 if (QDF_IS_STATUS_SUCCESS(status)) { 1562 rx_reorder_array_elem->head = NULL; 1563 rx_reorder_array_elem->tail = NULL; 1564 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1565 "Fragmented sequence successfully reinjected"); 1566 } else { 1567 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1568 "Fragmented sequence reinjection failed"); 1569 dp_rx_return_head_frag_desc(peer, tid); 1570 } 1571 1572 dp_rx_defrag_cleanup(peer, tid); 1573 1574 dp_peer_unref_del_find_by_id(peer); 1575 1576 return QDF_STATUS_SUCCESS; 1577 1578 end: 1579 dp_peer_unref_del_find_by_id(peer); 1580 1581 return QDF_STATUS_E_DEFRAG_ERROR; 1582 } 1583 1584 /** 1585 * dp_rx_frag_handle() - Handles fragmented Rx frames 1586 * 1587 * @soc: core txrx main context 1588 * @ring_desc: opaque pointer to the REO error ring descriptor 1589 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 1590 * @head: head of the local descriptor free-list 1591 * @tail: tail of the local descriptor free-list 1592 * @quota: No. of units (packets) that can be serviced in one shot. 1593 * 1594 * This function implements RX 802.11 fragmentation handling 1595 * The handling is mostly same as legacy fragmentation handling. 1596 * If required, this function can re-inject the frames back to 1597 * REO ring (with proper setting to by-pass fragmentation check 1598 * but use duplicate detection / re-ordering and routing these frames 1599 * to a different core. 1600 * 1601 * Return: uint32_t: No. of elements processed 1602 */ 1603 uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc, 1604 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 1605 union dp_rx_desc_list_elem_t **head, 1606 union dp_rx_desc_list_elem_t **tail, 1607 uint32_t quota) 1608 { 1609 uint32_t rx_bufs_used = 0; 1610 void *link_desc_va; 1611 struct hal_buf_info buf_info; 1612 struct hal_rx_msdu_list msdu_list; /* per MPDU list of MSDUs */ 1613 qdf_nbuf_t msdu = NULL; 1614 uint32_t tid, msdu_len; 1615 int idx, rx_bfs = 0; 1616 QDF_STATUS status; 1617 1618 qdf_assert(soc); 1619 qdf_assert(mpdu_desc_info); 1620 1621 /* Fragment from a valid peer */ 1622 hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); 1623 1624 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 1625 1626 qdf_assert(link_desc_va); 1627 1628 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 1629 "Number of MSDUs to process, num_msdus: %d", 1630 mpdu_desc_info->msdu_count); 1631 1632 1633 if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) { 1634 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1635 "Not sufficient MSDUs to process"); 1636 return rx_bufs_used; 1637 } 1638 1639 /* Get msdu_list for the given MPDU */ 1640 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list, 1641 &mpdu_desc_info->msdu_count); 1642 1643 /* Process all MSDUs in the current MPDU */ 1644 for (idx = 0; (idx < mpdu_desc_info->msdu_count) && quota--; idx++) { 1645 struct dp_rx_desc *rx_desc = 1646 dp_rx_cookie_2_va_rxdma_buf(soc, 1647 msdu_list.sw_cookie[idx]); 1648 1649 qdf_assert(rx_desc); 1650 1651 msdu = rx_desc->nbuf; 1652 1653 qdf_nbuf_unmap_single(soc->osdev, msdu, 1654 QDF_DMA_BIDIRECTIONAL); 1655 1656 rx_desc->rx_buf_start = qdf_nbuf_data(msdu); 1657 1658 msdu_len = hal_rx_msdu_start_msdu_len_get( 1659 rx_desc->rx_buf_start); 1660 1661 qdf_nbuf_set_pktlen(msdu, (msdu_len + RX_PKT_TLVS_LEN)); 1662 qdf_nbuf_append_ext_list(msdu, NULL, 0); 1663 1664 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 1665 rx_desc->rx_buf_start); 1666 1667 /* Process fragment-by-fragment */ 1668 status = dp_rx_defrag_store_fragment(soc, ring_desc, 1669 head, tail, mpdu_desc_info, 1670 tid, rx_desc, &rx_bfs); 1671 1672 if (rx_bfs) 1673 rx_bufs_used++; 1674 1675 if (!QDF_IS_STATUS_SUCCESS(status)) { 1676 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1677 "Rx Defrag err seq#:0x%x msdu_count:%d flags:%d", 1678 mpdu_desc_info->mpdu_seq, 1679 mpdu_desc_info->msdu_count, 1680 mpdu_desc_info->mpdu_flags); 1681 1682 /* No point in processing rest of the fragments */ 1683 break; 1684 } 1685 } 1686 1687 return rx_bufs_used; 1688 } 1689 1690 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc, 1691 struct dp_peer *peer, uint16_t tid, 1692 uint16_t rxseq, qdf_nbuf_t nbuf) 1693 { 1694 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 1695 struct dp_rx_reorder_array_elem *rx_reorder_array_elem; 1696 uint8_t all_frag_present; 1697 uint32_t msdu_len; 1698 QDF_STATUS status; 1699 1700 rx_reorder_array_elem = peer->rx_tid[tid].array; 1701 1702 if (rx_reorder_array_elem->head && 1703 rxseq != rx_tid->curr_seq_num) { 1704 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1705 "%s: No list found for TID %d Seq# %d", 1706 __func__, tid, rxseq); 1707 qdf_nbuf_free(nbuf); 1708 goto fail; 1709 } 1710 1711 msdu_len = hal_rx_msdu_start_msdu_len_get(qdf_nbuf_data(nbuf)); 1712 1713 qdf_nbuf_set_pktlen(nbuf, (msdu_len + RX_PKT_TLVS_LEN)); 1714 1715 status = dp_rx_defrag_fraglist_insert(peer, tid, 1716 &rx_reorder_array_elem->head, 1717 &rx_reorder_array_elem->tail, nbuf, 1718 &all_frag_present); 1719 1720 if (QDF_IS_STATUS_ERROR(status)) { 1721 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1722 "%s Fragment insert failed", __func__); 1723 1724 goto fail; 1725 } 1726 1727 if (soc->rx.flags.defrag_timeout_check) 1728 dp_rx_defrag_waitlist_remove(peer, tid); 1729 1730 if (!all_frag_present) { 1731 uint32_t now_ms = 1732 qdf_system_ticks_to_msecs(qdf_system_ticks()); 1733 1734 peer->rx_tid[tid].defrag_timeout_ms = 1735 now_ms + soc->rx.defrag.timeout_ms; 1736 1737 dp_rx_defrag_waitlist_add(peer, tid); 1738 1739 return QDF_STATUS_SUCCESS; 1740 } 1741 1742 status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head, 1743 rx_reorder_array_elem->tail); 1744 1745 if (QDF_IS_STATUS_ERROR(status)) { 1746 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1747 "%s Fragment processing failed", __func__); 1748 1749 dp_rx_return_head_frag_desc(peer, tid); 1750 dp_rx_defrag_cleanup(peer, tid); 1751 1752 goto fail; 1753 } 1754 1755 /* Re-inject the fragments back to REO for further processing */ 1756 status = dp_rx_defrag_reo_reinject(peer, tid, 1757 rx_reorder_array_elem->head); 1758 if (QDF_IS_STATUS_SUCCESS(status)) { 1759 rx_reorder_array_elem->head = NULL; 1760 rx_reorder_array_elem->tail = NULL; 1761 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1762 "%s: Frag seq successfully reinjected", 1763 __func__); 1764 } else { 1765 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1766 "%s: Frag seq reinjection failed", 1767 __func__); 1768 dp_rx_return_head_frag_desc(peer, tid); 1769 } 1770 1771 dp_rx_defrag_cleanup(peer, tid); 1772 return QDF_STATUS_SUCCESS; 1773 1774 fail: 1775 return QDF_STATUS_E_DEFRAG_ERROR; 1776 } 1777