1 /* 2 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "dp_types.h" 21 #include "dp_rx.h" 22 #include "dp_peer.h" 23 #include "hal_api.h" 24 #include "qdf_trace.h" 25 #include "qdf_nbuf.h" 26 #include "dp_rx_defrag.h" 27 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 28 #include "dp_rx_defrag.h" 29 30 const struct dp_rx_defrag_cipher dp_f_ccmp = { 31 "AES-CCM", 32 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, 33 IEEE80211_WEP_MICLEN, 34 0, 35 }; 36 37 const struct dp_rx_defrag_cipher dp_f_tkip = { 38 "TKIP", 39 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, 40 IEEE80211_WEP_CRCLEN, 41 IEEE80211_WEP_MICLEN, 42 }; 43 44 const struct dp_rx_defrag_cipher dp_f_wep = { 45 "WEP", 46 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN, 47 IEEE80211_WEP_CRCLEN, 48 0, 49 }; 50 51 /* 52 * dp_rx_defrag_frames_free(): Free fragment chain 53 * @frames: Fragment chain 54 * 55 * Iterates through the fragment chain and frees them 56 * Returns: None 57 */ 58 static void dp_rx_defrag_frames_free(qdf_nbuf_t frames) 59 { 60 qdf_nbuf_t next, frag = frames; 61 62 while (frag) { 63 next = qdf_nbuf_next(frag); 64 qdf_nbuf_free(frag); 65 frag = next; 66 } 67 } 68 69 /* 70 * dp_rx_clear_saved_desc_info(): Clears descriptor info 71 * @peer: Pointer to the peer data structure 72 * @tid: Transmit ID (TID) 73 * 74 * Saves MPDU descriptor info and MSDU link pointer from REO 75 * ring descriptor. The cache is created per peer, per TID 76 * 77 * Returns: None 78 */ 79 static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid) 80 { 81 if (peer->rx_tid[tid].dst_ring_desc) 82 qdf_mem_free(peer->rx_tid[tid].dst_ring_desc); 83 84 peer->rx_tid[tid].dst_ring_desc = NULL; 85 } 86 87 static void dp_rx_return_head_frag_desc(struct dp_peer *peer, 88 unsigned int tid) 89 { 90 struct dp_soc *soc; 91 struct dp_pdev *pdev; 92 struct dp_srng *dp_rxdma_srng; 93 struct rx_desc_pool *rx_desc_pool; 94 union dp_rx_desc_list_elem_t *head = NULL; 95 union dp_rx_desc_list_elem_t *tail = NULL; 96 97 if (peer->rx_tid[tid].head_frag_desc) { 98 pdev = peer->vdev->pdev; 99 soc = pdev->soc; 100 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 101 rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id]; 102 103 dp_rx_add_to_free_desc_list(&head, &tail, 104 peer->rx_tid[tid].head_frag_desc); 105 dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, 106 1, &head, &tail); 107 } 108 } 109 110 /* 111 * dp_rx_reorder_flush_frag(): Flush the frag list 112 * @peer: Pointer to the peer data structure 113 * @tid: Transmit ID (TID) 114 * 115 * Flush the per-TID frag list 116 * 117 * Returns: None 118 */ 119 void dp_rx_reorder_flush_frag(struct dp_peer *peer, 120 unsigned int tid) 121 { 122 struct dp_soc *soc; 123 124 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 125 FL("Flushing TID %d"), tid); 126 127 if (!peer) { 128 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 129 "%s: NULL peer", __func__); 130 return; 131 } 132 133 soc = peer->vdev->pdev->soc; 134 135 if (peer->rx_tid[tid].dst_ring_desc) { 136 if (dp_rx_link_desc_return(soc, 137 peer->rx_tid[tid].dst_ring_desc, 138 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 139 QDF_STATUS_SUCCESS) 140 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 141 "%s: Failed to return link desc", 142 __func__); 143 } 144 145 dp_rx_return_head_frag_desc(peer, tid); 146 dp_rx_defrag_cleanup(peer, tid); 147 } 148 149 /* 150 * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list 151 * @soc: DP SOC 152 * 153 * Flush fragments of all waitlisted TID's 154 * 155 * Returns: None 156 */ 157 void dp_rx_defrag_waitlist_flush(struct dp_soc *soc) 158 { 159 struct dp_rx_tid *rx_reorder; 160 struct dp_rx_tid *tmp; 161 uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()); 162 TAILQ_HEAD(, dp_rx_tid) temp_list; 163 164 TAILQ_INIT(&temp_list); 165 166 qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); 167 TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist, 168 defrag_waitlist_elem, tmp) { 169 unsigned int tid; 170 171 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 172 FL("Current time %u"), now_ms); 173 174 if (rx_reorder->defrag_timeout_ms > now_ms) 175 break; 176 177 tid = rx_reorder->tid; 178 if (tid >= DP_MAX_TIDS) { 179 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 180 "%s: TID out of bounds: %d", __func__, tid); 181 qdf_assert(0); 182 continue; 183 } 184 185 TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder, 186 defrag_waitlist_elem); 187 188 /* Move to temp list and clean-up later */ 189 TAILQ_INSERT_TAIL(&temp_list, rx_reorder, 190 defrag_waitlist_elem); 191 } 192 qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); 193 194 TAILQ_FOREACH_SAFE(rx_reorder, &temp_list, 195 defrag_waitlist_elem, tmp) { 196 struct dp_peer *peer; 197 198 /* get address of current peer */ 199 peer = 200 container_of(rx_reorder, struct dp_peer, 201 rx_tid[rx_reorder->tid]); 202 dp_rx_reorder_flush_frag(peer, rx_reorder->tid); 203 } 204 } 205 206 /* 207 * dp_rx_defrag_waitlist_add(): Update per-PDEV defrag wait list 208 * @peer: Pointer to the peer data structure 209 * @tid: Transmit ID (TID) 210 * 211 * Appends per-tid fragments to global fragment wait list 212 * 213 * Returns: None 214 */ 215 static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid) 216 { 217 struct dp_soc *psoc = peer->vdev->pdev->soc; 218 struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid]; 219 220 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 221 FL("Adding TID %u to waitlist for peer %pK"), 222 tid, peer); 223 224 /* TODO: use LIST macros instead of TAIL macros */ 225 qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock); 226 TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder, 227 defrag_waitlist_elem); 228 qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock); 229 } 230 231 /* 232 * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist 233 * @peer: Pointer to the peer data structure 234 * @tid: Transmit ID (TID) 235 * 236 * Remove fragments from waitlist 237 * 238 * Returns: None 239 */ 240 void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid) 241 { 242 struct dp_pdev *pdev = peer->vdev->pdev; 243 struct dp_soc *soc = pdev->soc; 244 struct dp_rx_tid *rx_reorder; 245 246 if (tid > DP_MAX_TIDS) { 247 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 248 "TID out of bounds: %d", tid); 249 qdf_assert(0); 250 return; 251 } 252 253 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 254 FL("Remove TID %u from waitlist for peer %pK"), 255 tid, peer); 256 257 qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); 258 TAILQ_FOREACH(rx_reorder, &soc->rx.defrag.waitlist, 259 defrag_waitlist_elem) { 260 struct dp_peer *peer_on_waitlist; 261 262 /* get address of current peer */ 263 peer_on_waitlist = 264 container_of(rx_reorder, struct dp_peer, 265 rx_tid[rx_reorder->tid]); 266 267 /* Ensure it is TID for same peer */ 268 if (peer_on_waitlist == peer && rx_reorder->tid == tid) 269 TAILQ_REMOVE(&soc->rx.defrag.waitlist, 270 rx_reorder, defrag_waitlist_elem); 271 } 272 qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); 273 } 274 275 /* 276 * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list 277 * @peer: Pointer to the peer data structure 278 * @tid: Transmit ID (TID) 279 * @head_addr: Pointer to head list 280 * @tail_addr: Pointer to tail list 281 * @frag: Incoming fragment 282 * @all_frag_present: Flag to indicate whether all fragments are received 283 * 284 * Build a per-tid, per-sequence fragment list. 285 * 286 * Returns: Success, if inserted 287 */ 288 static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid, 289 qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag, 290 uint8_t *all_frag_present) 291 { 292 qdf_nbuf_t next; 293 qdf_nbuf_t prev = NULL; 294 qdf_nbuf_t cur; 295 uint16_t head_fragno, cur_fragno, next_fragno; 296 uint8_t last_morefrag = 1, count = 0; 297 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 298 uint8_t *rx_desc_info; 299 300 301 qdf_assert(frag); 302 qdf_assert(head_addr); 303 qdf_assert(tail_addr); 304 305 *all_frag_present = 0; 306 rx_desc_info = qdf_nbuf_data(frag); 307 cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 308 309 /* If this is the first fragment */ 310 if (!(*head_addr)) { 311 *head_addr = *tail_addr = frag; 312 qdf_nbuf_set_next(*tail_addr, NULL); 313 rx_tid->curr_frag_num = cur_fragno; 314 315 goto insert_done; 316 } 317 318 /* In sequence fragment */ 319 if (cur_fragno > rx_tid->curr_frag_num) { 320 qdf_nbuf_set_next(*tail_addr, frag); 321 *tail_addr = frag; 322 qdf_nbuf_set_next(*tail_addr, NULL); 323 rx_tid->curr_frag_num = cur_fragno; 324 } else { 325 /* Out of sequence fragment */ 326 cur = *head_addr; 327 rx_desc_info = qdf_nbuf_data(cur); 328 head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 329 330 if (cur_fragno == head_fragno) { 331 qdf_nbuf_free(frag); 332 goto insert_fail; 333 } else if (head_fragno > cur_fragno) { 334 qdf_nbuf_set_next(frag, cur); 335 cur = frag; 336 *head_addr = frag; /* head pointer to be updated */ 337 } else { 338 while ((cur_fragno > head_fragno) && cur != NULL) { 339 prev = cur; 340 cur = qdf_nbuf_next(cur); 341 rx_desc_info = qdf_nbuf_data(cur); 342 head_fragno = 343 dp_rx_frag_get_mpdu_frag_number( 344 rx_desc_info); 345 } 346 347 if (cur_fragno == head_fragno) { 348 qdf_nbuf_free(frag); 349 goto insert_fail; 350 } 351 352 qdf_nbuf_set_next(prev, frag); 353 qdf_nbuf_set_next(frag, cur); 354 } 355 } 356 357 next = qdf_nbuf_next(*head_addr); 358 359 rx_desc_info = qdf_nbuf_data(*tail_addr); 360 last_morefrag = dp_rx_frag_get_more_frag_bit(rx_desc_info); 361 362 /* TODO: optimize the loop */ 363 if (!last_morefrag) { 364 /* Check if all fragments are present */ 365 do { 366 rx_desc_info = qdf_nbuf_data(next); 367 next_fragno = 368 dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 369 count++; 370 371 if (next_fragno != count) 372 break; 373 374 next = qdf_nbuf_next(next); 375 } while (next); 376 377 if (!next) { 378 *all_frag_present = 1; 379 return QDF_STATUS_SUCCESS; 380 } 381 } 382 383 insert_done: 384 return QDF_STATUS_SUCCESS; 385 386 insert_fail: 387 return QDF_STATUS_E_FAILURE; 388 } 389 390 391 /* 392 * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment 393 * @msdu: Pointer to the fragment 394 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) 395 * 396 * decap tkip encrypted fragment 397 * 398 * Returns: QDF_STATUS 399 */ 400 static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen) 401 { 402 uint8_t *ivp, *orig_hdr; 403 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 404 405 /* start of 802.11 header info */ 406 orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len); 407 408 /* TKIP header is located post 802.11 header */ 409 ivp = orig_hdr + hdrlen; 410 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) { 411 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 412 "IEEE80211_WEP_EXTIV is missing in TKIP fragment"); 413 return QDF_STATUS_E_DEFRAG_ERROR; 414 } 415 416 qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer); 417 418 return QDF_STATUS_SUCCESS; 419 } 420 421 /* 422 * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment 423 * @nbuf: Pointer to the fragment buffer 424 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) 425 * 426 * Remove MIC information from CCMP fragment 427 * 428 * Returns: QDF_STATUS 429 */ 430 static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen) 431 { 432 uint8_t *ivp, *orig_hdr; 433 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 434 435 /* start of the 802.11 header */ 436 orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); 437 438 /* CCMP header is located after 802.11 header */ 439 ivp = orig_hdr + hdrlen; 440 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 441 return QDF_STATUS_E_DEFRAG_ERROR; 442 443 qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer); 444 445 return QDF_STATUS_SUCCESS; 446 } 447 448 /* 449 * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment 450 * @nbuf: Pointer to the fragment 451 * @hdrlen: length of the header information 452 * 453 * decap CCMP encrypted fragment 454 * 455 * Returns: QDF_STATUS 456 */ 457 static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen) 458 { 459 uint8_t *ivp, *origHdr; 460 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 461 462 origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len); 463 ivp = origHdr + hdrlen; 464 465 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 466 return QDF_STATUS_E_DEFRAG_ERROR; 467 468 /* Let's pull the header later */ 469 470 return QDF_STATUS_SUCCESS; 471 } 472 473 /* 474 * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment 475 * @msdu: Pointer to the fragment 476 * @hdrlen: length of the header information 477 * 478 * decap WEP encrypted fragment 479 * 480 * Returns: QDF_STATUS 481 */ 482 static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen) 483 { 484 uint8_t *origHdr; 485 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 486 487 origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); 488 qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen); 489 490 qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer); 491 492 return QDF_STATUS_SUCCESS; 493 } 494 495 /* 496 * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment 497 * @nbuf: Pointer to the fragment 498 * 499 * Calculate the header size of the received fragment 500 * 501 * Returns: header size (uint16_t) 502 */ 503 static uint16_t dp_rx_defrag_hdrsize(qdf_nbuf_t nbuf) 504 { 505 uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf); 506 uint16_t size = sizeof(struct ieee80211_frame); 507 uint16_t fc = 0; 508 uint32_t to_ds, fr_ds; 509 uint8_t frm_ctrl_valid; 510 uint16_t frm_ctrl_field; 511 512 to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr); 513 fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr); 514 frm_ctrl_valid = hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr); 515 frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr); 516 517 if (to_ds && fr_ds) 518 size += IEEE80211_ADDR_LEN; 519 520 if (frm_ctrl_valid) { 521 fc = frm_ctrl_field; 522 523 /* use 1-st byte for validation */ 524 if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) { 525 size += sizeof(uint16_t); 526 /* use 2-nd byte for validation */ 527 if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER) 528 size += sizeof(struct ieee80211_htc); 529 } 530 } 531 532 return size; 533 } 534 535 /* 536 * dp_rx_defrag_michdr(): Calculate a pseudo MIC header 537 * @wh0: Pointer to the wireless header of the fragment 538 * @hdr: Array to hold the pseudo header 539 * 540 * Calculate a pseudo MIC header 541 * 542 * Returns: None 543 */ 544 static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0, 545 uint8_t hdr[]) 546 { 547 const struct ieee80211_frame_addr4 *wh = 548 (const struct ieee80211_frame_addr4 *)wh0; 549 550 switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { 551 case IEEE80211_FC1_DIR_NODS: 552 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ 553 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, 554 wh->i_addr2); 555 break; 556 case IEEE80211_FC1_DIR_TODS: 557 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ 558 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, 559 wh->i_addr2); 560 break; 561 case IEEE80211_FC1_DIR_FROMDS: 562 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ 563 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, 564 wh->i_addr3); 565 break; 566 case IEEE80211_FC1_DIR_DSTODS: 567 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ 568 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, 569 wh->i_addr4); 570 break; 571 } 572 573 /* 574 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but 575 * it could also be set for deauth, disassoc, action, etc. for 576 * a mgt type frame. It comes into picture for MFP. 577 */ 578 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { 579 const struct ieee80211_qosframe *qwh = 580 (const struct ieee80211_qosframe *)wh; 581 hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID; 582 } else { 583 hdr[12] = 0; 584 } 585 586 hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ 587 } 588 589 /* 590 * dp_rx_defrag_mic(): Calculate MIC header 591 * @key: Pointer to the key 592 * @wbuf: fragment buffer 593 * @off: Offset 594 * @data_len: Data length 595 * @mic: Array to hold MIC 596 * 597 * Calculate a pseudo MIC header 598 * 599 * Returns: QDF_STATUS 600 */ 601 static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf, 602 uint16_t off, uint16_t data_len, uint8_t mic[]) 603 { 604 uint8_t hdr[16] = { 0, }; 605 uint32_t l, r; 606 const uint8_t *data; 607 uint32_t space; 608 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 609 610 dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) 611 + rx_desc_len), hdr); 612 613 l = dp_rx_get_le32(key); 614 r = dp_rx_get_le32(key + 4); 615 616 /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */ 617 l ^= dp_rx_get_le32(hdr); 618 dp_rx_michael_block(l, r); 619 l ^= dp_rx_get_le32(&hdr[4]); 620 dp_rx_michael_block(l, r); 621 l ^= dp_rx_get_le32(&hdr[8]); 622 dp_rx_michael_block(l, r); 623 l ^= dp_rx_get_le32(&hdr[12]); 624 dp_rx_michael_block(l, r); 625 626 /* first buffer has special handling */ 627 data = (uint8_t *)qdf_nbuf_data(wbuf) + off; 628 space = qdf_nbuf_len(wbuf) - off; 629 630 for (;; ) { 631 if (space > data_len) 632 space = data_len; 633 634 /* collect 32-bit blocks from current buffer */ 635 while (space >= sizeof(uint32_t)) { 636 l ^= dp_rx_get_le32(data); 637 dp_rx_michael_block(l, r); 638 data += sizeof(uint32_t); 639 space -= sizeof(uint32_t); 640 data_len -= sizeof(uint32_t); 641 } 642 if (data_len < sizeof(uint32_t)) 643 break; 644 645 wbuf = qdf_nbuf_next(wbuf); 646 if (wbuf == NULL) 647 return QDF_STATUS_E_DEFRAG_ERROR; 648 649 if (space != 0) { 650 const uint8_t *data_next; 651 /* 652 * Block straddles buffers, split references. 653 */ 654 data_next = 655 (uint8_t *)qdf_nbuf_data(wbuf) + off; 656 if ((qdf_nbuf_len(wbuf)) < 657 sizeof(uint32_t) - space) { 658 return QDF_STATUS_E_DEFRAG_ERROR; 659 } 660 switch (space) { 661 case 1: 662 l ^= dp_rx_get_le32_split(data[0], 663 data_next[0], data_next[1], 664 data_next[2]); 665 data = data_next + 3; 666 space = (qdf_nbuf_len(wbuf) - off) - 3; 667 break; 668 case 2: 669 l ^= dp_rx_get_le32_split(data[0], data[1], 670 data_next[0], data_next[1]); 671 data = data_next + 2; 672 space = (qdf_nbuf_len(wbuf) - off) - 2; 673 break; 674 case 3: 675 l ^= dp_rx_get_le32_split(data[0], data[1], 676 data[2], data_next[0]); 677 data = data_next + 1; 678 space = (qdf_nbuf_len(wbuf) - off) - 1; 679 break; 680 } 681 dp_rx_michael_block(l, r); 682 data_len -= sizeof(uint32_t); 683 } else { 684 /* 685 * Setup for next buffer. 686 */ 687 data = (uint8_t *)qdf_nbuf_data(wbuf) + off; 688 space = qdf_nbuf_len(wbuf) - off; 689 } 690 } 691 /* Last block and padding (0x5a, 4..7 x 0) */ 692 switch (data_len) { 693 case 0: 694 l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0); 695 break; 696 case 1: 697 l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0); 698 break; 699 case 2: 700 l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0); 701 break; 702 case 3: 703 l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a); 704 break; 705 } 706 dp_rx_michael_block(l, r); 707 dp_rx_michael_block(l, r); 708 dp_rx_put_le32(mic, l); 709 dp_rx_put_le32(mic + 4, r); 710 711 return QDF_STATUS_SUCCESS; 712 } 713 714 /* 715 * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame 716 * @key: Pointer to the key 717 * @msdu: fragment buffer 718 * @hdrlen: Length of the header information 719 * 720 * Remove MIC information from the TKIP frame 721 * 722 * Returns: QDF_STATUS 723 */ 724 static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key, 725 qdf_nbuf_t msdu, uint16_t hdrlen) 726 { 727 QDF_STATUS status; 728 uint32_t pktlen = 0; 729 uint8_t mic[IEEE80211_WEP_MICLEN]; 730 uint8_t mic0[IEEE80211_WEP_MICLEN]; 731 qdf_nbuf_t prev = NULL, next; 732 733 next = msdu; 734 while (next) { 735 pktlen += (qdf_nbuf_len(next) - hdrlen); 736 prev = next; 737 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 738 "%s pktlen %ld", __func__, 739 qdf_nbuf_len(next) - hdrlen); 740 next = qdf_nbuf_next(next); 741 } 742 743 if (!prev) { 744 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 745 "%s Defrag chaining failed !\n", __func__); 746 return QDF_STATUS_E_DEFRAG_ERROR; 747 } 748 749 qdf_nbuf_copy_bits(prev, qdf_nbuf_len(prev) - dp_f_tkip.ic_miclen, 750 dp_f_tkip.ic_miclen, (caddr_t)mic0); 751 qdf_nbuf_trim_tail(prev, dp_f_tkip.ic_miclen); 752 pktlen -= dp_f_tkip.ic_miclen; 753 754 status = dp_rx_defrag_mic(key, msdu, hdrlen, 755 pktlen, mic); 756 757 if (QDF_IS_STATUS_ERROR(status)) 758 return status; 759 760 if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen)) 761 return QDF_STATUS_E_DEFRAG_ERROR; 762 763 return QDF_STATUS_SUCCESS; 764 } 765 766 /* 767 * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers 768 * @nbuf: buffer pointer 769 * @hdrsize: size of the header to be pulled 770 * 771 * Pull the RXTLV & the 802.11 headers 772 * 773 * Returns: None 774 */ 775 static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize) 776 { 777 qdf_nbuf_pull_head(nbuf, 778 RX_PKT_TLVS_LEN + hdrsize); 779 780 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 781 "%s: final pktlen %d .11len %d", 782 __func__, 783 (uint32_t)qdf_nbuf_len(nbuf), hdrsize); 784 } 785 786 /* 787 * dp_rx_construct_fraglist(): Construct a nbuf fraglist 788 * @peer: Pointer to the peer 789 * @head: Pointer to list of fragments 790 * @hdrsize: Size of the header to be pulled 791 * 792 * Construct a nbuf fraglist 793 * 794 * Returns: None 795 */ 796 static void 797 dp_rx_construct_fraglist(struct dp_peer *peer, 798 qdf_nbuf_t head, uint16_t hdrsize) 799 { 800 qdf_nbuf_t msdu = qdf_nbuf_next(head); 801 qdf_nbuf_t rx_nbuf = msdu; 802 uint32_t len = 0; 803 804 while (msdu) { 805 dp_rx_frag_pull_hdr(msdu, hdrsize); 806 len += qdf_nbuf_len(msdu); 807 msdu = qdf_nbuf_next(msdu); 808 } 809 810 qdf_nbuf_append_ext_list(head, rx_nbuf, len); 811 qdf_nbuf_set_next(head, NULL); 812 813 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 814 "%s: head len %d ext len %d data len %d ", 815 __func__, 816 (uint32_t)qdf_nbuf_len(head), 817 (uint32_t)qdf_nbuf_len(rx_nbuf), 818 (uint32_t)(head->data_len)); 819 } 820 821 /** 822 * dp_rx_defrag_err() - rx err handler 823 * @pdev: handle to pdev object 824 * @vdev_id: vdev id 825 * @peer_mac_addr: peer mac address 826 * @tid: TID 827 * @tsf32: TSF 828 * @err_type: error type 829 * @rx_frame: rx frame 830 * @pn: PN Number 831 * @key_id: key id 832 * 833 * This function handles rx error and send MIC error notification 834 * 835 * Return: None 836 */ 837 static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 838 { 839 struct ol_if_ops *tops = NULL; 840 struct dp_pdev *pdev = vdev->pdev; 841 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 842 uint8_t *orig_hdr; 843 struct ieee80211_frame *wh; 844 845 orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); 846 wh = (struct ieee80211_frame *)orig_hdr; 847 848 tops = pdev->soc->cdp_soc.ol_ops; 849 if (tops->rx_mic_error) 850 tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh); 851 } 852 853 854 /* 855 * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3 856 * @nbuf: Pointer to the fragment buffer 857 * @hdrsize: Size of headers 858 * 859 * Transcap the fragment from 802.11 to 802.3 860 * 861 * Returns: None 862 */ 863 static void 864 dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize) 865 { 866 struct llc_snap_hdr_t *llchdr; 867 struct ethernet_hdr_t *eth_hdr; 868 uint8_t ether_type[2]; 869 uint16_t fc = 0; 870 union dp_align_mac_addr mac_addr; 871 uint8_t *rx_desc_info = qdf_mem_malloc(RX_PKT_TLVS_LEN); 872 873 if (rx_desc_info == NULL) { 874 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 875 "%s: Memory alloc failed ! ", __func__); 876 QDF_ASSERT(0); 877 return; 878 } 879 880 qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN); 881 882 llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) + 883 RX_PKT_TLVS_LEN + hdrsize); 884 qdf_mem_copy(ether_type, llchdr->ethertype, 2); 885 886 qdf_nbuf_pull_head(nbuf, (RX_PKT_TLVS_LEN + hdrsize + 887 sizeof(struct llc_snap_hdr_t) - 888 sizeof(struct ethernet_hdr_t))); 889 890 eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf)); 891 892 if (hal_rx_get_mpdu_frame_control_valid(rx_desc_info)) 893 fc = hal_rx_get_frame_ctrl_field(rx_desc_info); 894 895 switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) { 896 897 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 898 "%s: frame control type: 0x%x", __func__, fc); 899 900 case IEEE80211_FC1_DIR_NODS: 901 hal_rx_mpdu_get_addr1(rx_desc_info, 902 &mac_addr.raw[0]); 903 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 904 IEEE80211_ADDR_LEN); 905 hal_rx_mpdu_get_addr2(rx_desc_info, 906 &mac_addr.raw[0]); 907 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 908 IEEE80211_ADDR_LEN); 909 break; 910 case IEEE80211_FC1_DIR_TODS: 911 hal_rx_mpdu_get_addr3(rx_desc_info, 912 &mac_addr.raw[0]); 913 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 914 IEEE80211_ADDR_LEN); 915 hal_rx_mpdu_get_addr2(rx_desc_info, 916 &mac_addr.raw[0]); 917 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 918 IEEE80211_ADDR_LEN); 919 break; 920 case IEEE80211_FC1_DIR_FROMDS: 921 hal_rx_mpdu_get_addr1(rx_desc_info, 922 &mac_addr.raw[0]); 923 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 924 IEEE80211_ADDR_LEN); 925 hal_rx_mpdu_get_addr3(rx_desc_info, 926 &mac_addr.raw[0]); 927 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 928 IEEE80211_ADDR_LEN); 929 break; 930 931 case IEEE80211_FC1_DIR_DSTODS: 932 hal_rx_mpdu_get_addr3(rx_desc_info, 933 &mac_addr.raw[0]); 934 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 935 IEEE80211_ADDR_LEN); 936 hal_rx_mpdu_get_addr4(rx_desc_info, 937 &mac_addr.raw[0]); 938 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 939 IEEE80211_ADDR_LEN); 940 break; 941 942 default: 943 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 944 "%s: Unknown frame control type: 0x%x", __func__, fc); 945 } 946 947 qdf_mem_copy(eth_hdr->ethertype, ether_type, 948 sizeof(ether_type)); 949 950 qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN); 951 qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, RX_PKT_TLVS_LEN); 952 qdf_mem_free(rx_desc_info); 953 } 954 955 /* 956 * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO 957 * @peer: Pointer to the peer 958 * @tid: Transmit Identifier 959 * @head: Buffer to be reinjected back 960 * 961 * Reinject the fragment chain back into REO 962 * 963 * Returns: QDF_STATUS 964 */ 965 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer, 966 unsigned tid, qdf_nbuf_t head) 967 { 968 struct dp_pdev *pdev = peer->vdev->pdev; 969 struct dp_soc *soc = pdev->soc; 970 struct hal_buf_info buf_info; 971 void *link_desc_va; 972 void *msdu0, *msdu_desc_info; 973 void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr; 974 void *dst_mpdu_desc_info, *dst_qdesc_addr; 975 qdf_dma_addr_t paddr; 976 uint32_t nbuf_len, seq_no, dst_ind; 977 uint32_t *mpdu_wrd; 978 uint32_t ret, cookie; 979 980 void *dst_ring_desc = 981 peer->rx_tid[tid].dst_ring_desc; 982 void *hal_srng = soc->reo_reinject_ring.hal_srng; 983 984 hal_rx_reo_buf_paddr_get(dst_ring_desc, &buf_info); 985 986 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 987 988 qdf_assert(link_desc_va); 989 990 msdu0 = (uint8_t *)link_desc_va + 991 RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET; 992 993 nbuf_len = qdf_nbuf_len(head) - RX_PKT_TLVS_LEN; 994 995 HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW); 996 HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE, 997 UNI_DESC_BUF_TYPE_RX_MSDU_LINK); 998 999 /* msdu reconfig */ 1000 msdu_desc_info = (uint8_t *)msdu0 + 1001 RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET; 1002 1003 dst_ind = hal_rx_msdu_reo_dst_ind_get(link_desc_va); 1004 1005 qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info)); 1006 1007 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1008 FIRST_MSDU_IN_MPDU_FLAG, 1); 1009 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1010 LAST_MSDU_IN_MPDU_FLAG, 1); 1011 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1012 MSDU_CONTINUATION, 0x0); 1013 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1014 REO_DESTINATION_INDICATION, dst_ind); 1015 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1016 MSDU_LENGTH, nbuf_len); 1017 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1018 SA_IS_VALID, 1); 1019 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1020 DA_IS_VALID, 1); 1021 1022 /* change RX TLV's */ 1023 hal_rx_msdu_start_msdu_len_set( 1024 qdf_nbuf_data(head), nbuf_len); 1025 1026 cookie = HAL_RX_BUF_COOKIE_GET(msdu0); 1027 1028 /* map the nbuf before reinject it into HW */ 1029 ret = qdf_nbuf_map_single(soc->osdev, head, 1030 QDF_DMA_BIDIRECTIONAL); 1031 1032 if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { 1033 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1034 "%s: nbuf map failed !", __func__); 1035 qdf_nbuf_free(head); 1036 return QDF_STATUS_E_FAILURE; 1037 } 1038 1039 paddr = qdf_nbuf_get_frag_paddr(head, 0); 1040 1041 ret = check_x86_paddr(soc, &head, &paddr, pdev); 1042 1043 if (ret == QDF_STATUS_E_FAILURE) { 1044 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1045 "%s: x86 check failed !", __func__); 1046 return QDF_STATUS_E_FAILURE; 1047 } 1048 1049 hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_WBM2SW_RBM); 1050 1051 /* Lets fill entrance ring now !!! */ 1052 if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) { 1053 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1054 "HAL RING Access For REO entrance SRNG Failed: %pK", 1055 hal_srng); 1056 1057 return QDF_STATUS_E_FAILURE; 1058 } 1059 1060 ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng); 1061 1062 qdf_assert(ent_ring_desc); 1063 1064 paddr = (uint64_t)buf_info.paddr; 1065 /* buf addr */ 1066 hal_rxdma_buff_addr_info_set(ent_ring_desc, paddr, 1067 buf_info.sw_cookie, 1068 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 1069 /* mpdu desc info */ 1070 ent_mpdu_desc_info = (uint8_t *)ent_ring_desc + 1071 RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET; 1072 1073 dst_mpdu_desc_info = (uint8_t *)dst_ring_desc + 1074 REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET; 1075 1076 qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info, 1077 sizeof(struct rx_mpdu_desc_info)); 1078 qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t)); 1079 1080 mpdu_wrd = (uint32_t *)dst_mpdu_desc_info; 1081 seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd); 1082 1083 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1084 MSDU_COUNT, 0x1); 1085 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1086 MPDU_SEQUENCE_NUMBER, seq_no); 1087 1088 /* unset frag bit */ 1089 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1090 FRAGMENT_FLAG, 0x0); 1091 1092 /* set sa/da valid bits */ 1093 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1094 SA_IS_VALID, 0x1); 1095 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1096 DA_IS_VALID, 0x1); 1097 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1098 RAW_MPDU, 0x0); 1099 1100 /* qdesc addr */ 1101 ent_qdesc_addr = (uint8_t *)ent_ring_desc + 1102 REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; 1103 1104 dst_qdesc_addr = (uint8_t *)dst_ring_desc + 1105 REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; 1106 1107 qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8); 1108 1109 HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5, 1110 REO_DESTINATION_INDICATION, dst_ind); 1111 1112 hal_srng_access_end(soc->hal_soc, hal_srng); 1113 1114 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, 1115 "%s: reinjection done !", __func__); 1116 return QDF_STATUS_SUCCESS; 1117 } 1118 1119 /* 1120 * dp_rx_defrag(): Defragment the fragment chain 1121 * @peer: Pointer to the peer 1122 * @tid: Transmit Identifier 1123 * @frag_list_head: Pointer to head list 1124 * @frag_list_tail: Pointer to tail list 1125 * 1126 * Defragment the fragment chain 1127 * 1128 * Returns: QDF_STATUS 1129 */ 1130 static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid, 1131 qdf_nbuf_t frag_list_head, qdf_nbuf_t frag_list_tail) 1132 { 1133 qdf_nbuf_t tmp_next, prev; 1134 qdf_nbuf_t cur = frag_list_head, msdu; 1135 uint32_t index, tkip_demic = 0; 1136 uint16_t hdr_space; 1137 uint8_t key[DEFRAG_IEEE80211_KEY_LEN]; 1138 struct dp_vdev *vdev = peer->vdev; 1139 struct dp_soc *soc = vdev->pdev->soc; 1140 uint8_t status = 0; 1141 1142 hdr_space = dp_rx_defrag_hdrsize(cur); 1143 index = hal_rx_msdu_is_wlan_mcast(cur) ? 1144 dp_sec_mcast : dp_sec_ucast; 1145 1146 /* Remove FCS from all fragments */ 1147 while (cur) { 1148 tmp_next = qdf_nbuf_next(cur); 1149 qdf_nbuf_set_next(cur, NULL); 1150 qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN); 1151 prev = cur; 1152 qdf_nbuf_set_next(cur, tmp_next); 1153 cur = tmp_next; 1154 } 1155 cur = frag_list_head; 1156 1157 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1158 "%s: index %d Security type: %d", __func__, 1159 index, peer->security[index].sec_type); 1160 1161 switch (peer->security[index].sec_type) { 1162 case htt_sec_type_tkip: 1163 tkip_demic = 1; 1164 1165 case htt_sec_type_tkip_nomic: 1166 while (cur) { 1167 tmp_next = qdf_nbuf_next(cur); 1168 if (dp_rx_defrag_tkip_decap(cur, hdr_space)) { 1169 1170 QDF_TRACE(QDF_MODULE_ID_TXRX, 1171 QDF_TRACE_LEVEL_ERROR, 1172 "dp_rx_defrag: TKIP decap failed"); 1173 1174 return QDF_STATUS_E_DEFRAG_ERROR; 1175 } 1176 cur = tmp_next; 1177 } 1178 1179 /* If success, increment header to be stripped later */ 1180 hdr_space += dp_f_tkip.ic_header; 1181 break; 1182 1183 case htt_sec_type_aes_ccmp: 1184 while (cur) { 1185 tmp_next = qdf_nbuf_next(cur); 1186 if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) { 1187 1188 QDF_TRACE(QDF_MODULE_ID_TXRX, 1189 QDF_TRACE_LEVEL_ERROR, 1190 "dp_rx_defrag: CCMP demic failed"); 1191 1192 return QDF_STATUS_E_DEFRAG_ERROR; 1193 } 1194 if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) { 1195 1196 QDF_TRACE(QDF_MODULE_ID_TXRX, 1197 QDF_TRACE_LEVEL_ERROR, 1198 "dp_rx_defrag: CCMP decap failed"); 1199 1200 return QDF_STATUS_E_DEFRAG_ERROR; 1201 } 1202 cur = tmp_next; 1203 } 1204 1205 /* If success, increment header to be stripped later */ 1206 hdr_space += dp_f_ccmp.ic_header; 1207 break; 1208 1209 case htt_sec_type_wep40: 1210 case htt_sec_type_wep104: 1211 case htt_sec_type_wep128: 1212 while (cur) { 1213 tmp_next = qdf_nbuf_next(cur); 1214 if (dp_rx_defrag_wep_decap(cur, hdr_space)) { 1215 1216 QDF_TRACE(QDF_MODULE_ID_TXRX, 1217 QDF_TRACE_LEVEL_ERROR, 1218 "dp_rx_defrag: WEP decap failed"); 1219 1220 return QDF_STATUS_E_DEFRAG_ERROR; 1221 } 1222 cur = tmp_next; 1223 } 1224 1225 /* If success, increment header to be stripped later */ 1226 hdr_space += dp_f_wep.ic_header; 1227 break; 1228 default: 1229 QDF_TRACE(QDF_MODULE_ID_TXRX, 1230 QDF_TRACE_LEVEL_ERROR, 1231 "dp_rx_defrag: Did not match any security type"); 1232 break; 1233 } 1234 1235 if (tkip_demic) { 1236 msdu = frag_list_head; 1237 if (soc->cdp_soc.ol_ops->rx_frag_tkip_demic) { 1238 status = soc->cdp_soc.ol_ops->rx_frag_tkip_demic( 1239 (void *)peer->ctrl_peer, msdu, hdr_space); 1240 } else { 1241 qdf_mem_copy(key, 1242 &peer->security[index].michael_key[0], 1243 IEEE80211_WEP_MICLEN); 1244 status = dp_rx_defrag_tkip_demic(key, msdu, 1245 RX_PKT_TLVS_LEN + 1246 hdr_space); 1247 1248 if (status) { 1249 dp_rx_defrag_err(vdev, frag_list_head); 1250 1251 QDF_TRACE(QDF_MODULE_ID_TXRX, 1252 QDF_TRACE_LEVEL_ERROR, 1253 "%s: TKIP demic failed status %d", 1254 __func__, status); 1255 1256 return QDF_STATUS_E_DEFRAG_ERROR; 1257 } 1258 } 1259 } 1260 1261 /* Convert the header to 802.3 header */ 1262 dp_rx_defrag_nwifi_to_8023(frag_list_head, hdr_space); 1263 dp_rx_construct_fraglist(peer, frag_list_head, hdr_space); 1264 1265 return QDF_STATUS_SUCCESS; 1266 } 1267 1268 /* 1269 * dp_rx_defrag_cleanup(): Clean up activities 1270 * @peer: Pointer to the peer 1271 * @tid: Transmit Identifier 1272 * 1273 * Returns: None 1274 */ 1275 void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid) 1276 { 1277 struct dp_rx_reorder_array_elem *rx_reorder_array_elem = 1278 peer->rx_tid[tid].array; 1279 1280 if (!rx_reorder_array_elem) { 1281 /* 1282 * if this condition is hit then somebody 1283 * must have reset this pointer to NULL. 1284 * array pointer usually points to base variable 1285 * of TID queue structure: "struct dp_rx_tid" 1286 */ 1287 QDF_ASSERT(0); 1288 return; 1289 } 1290 /* Free up nbufs */ 1291 dp_rx_defrag_frames_free(rx_reorder_array_elem->head); 1292 1293 /* Free up saved ring descriptors */ 1294 dp_rx_clear_saved_desc_info(peer, tid); 1295 1296 rx_reorder_array_elem->head = NULL; 1297 rx_reorder_array_elem->tail = NULL; 1298 peer->rx_tid[tid].defrag_timeout_ms = 0; 1299 peer->rx_tid[tid].curr_frag_num = 0; 1300 peer->rx_tid[tid].curr_seq_num = 0; 1301 peer->rx_tid[tid].head_frag_desc = NULL; 1302 } 1303 1304 /* 1305 * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor 1306 * @ring_desc: Pointer to the dst ring descriptor 1307 * @peer: Pointer to the peer 1308 * @tid: Transmit Identifier 1309 * 1310 * Returns: None 1311 */ 1312 static QDF_STATUS dp_rx_defrag_save_info_from_ring_desc(void *ring_desc, 1313 struct dp_rx_desc *rx_desc, struct dp_peer *peer, unsigned tid) 1314 { 1315 void *dst_ring_desc = qdf_mem_malloc( 1316 sizeof(struct reo_destination_ring)); 1317 1318 if (dst_ring_desc == NULL) { 1319 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1320 "%s: Memory alloc failed !", __func__); 1321 QDF_ASSERT(0); 1322 return QDF_STATUS_E_NOMEM; 1323 } 1324 1325 qdf_mem_copy(dst_ring_desc, ring_desc, 1326 sizeof(struct reo_destination_ring)); 1327 1328 peer->rx_tid[tid].dst_ring_desc = dst_ring_desc; 1329 peer->rx_tid[tid].head_frag_desc = rx_desc; 1330 1331 return QDF_STATUS_SUCCESS; 1332 } 1333 1334 /* 1335 * dp_rx_defrag_store_fragment(): Store incoming fragments 1336 * @soc: Pointer to the SOC data structure 1337 * @ring_desc: Pointer to the ring descriptor 1338 * @mpdu_desc_info: MPDU descriptor info 1339 * @tid: Traffic Identifier 1340 * @rx_desc: Pointer to rx descriptor 1341 * @rx_bfs: Number of bfs consumed 1342 * 1343 * Returns: QDF_STATUS 1344 */ 1345 static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc, 1346 void *ring_desc, 1347 union dp_rx_desc_list_elem_t **head, 1348 union dp_rx_desc_list_elem_t **tail, 1349 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 1350 unsigned tid, struct dp_rx_desc *rx_desc, 1351 uint32_t *rx_bfs) 1352 { 1353 struct dp_rx_reorder_array_elem *rx_reorder_array_elem; 1354 struct dp_pdev *pdev; 1355 struct dp_peer *peer; 1356 uint16_t peer_id; 1357 uint8_t fragno, more_frag, all_frag_present = 0; 1358 uint16_t rxseq = mpdu_desc_info->mpdu_seq; 1359 QDF_STATUS status; 1360 struct dp_rx_tid *rx_tid; 1361 uint8_t mpdu_sequence_control_valid; 1362 uint8_t mpdu_frame_control_valid; 1363 qdf_nbuf_t frag = rx_desc->nbuf; 1364 1365 /* Check if the packet is from a valid peer */ 1366 peer_id = DP_PEER_METADATA_PEER_ID_GET( 1367 mpdu_desc_info->peer_meta_data); 1368 peer = dp_peer_find_by_id(soc, peer_id); 1369 1370 if (!peer) { 1371 /* We should not receive anything from unknown peer 1372 * however, that might happen while we are in the monitor mode. 1373 * We don't need to handle that here 1374 */ 1375 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1376 "Unknown peer, dropping the fragment"); 1377 1378 qdf_nbuf_free(frag); 1379 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1380 *rx_bfs = 1; 1381 1382 return QDF_STATUS_E_DEFRAG_ERROR; 1383 } 1384 1385 pdev = peer->vdev->pdev; 1386 rx_tid = &peer->rx_tid[tid]; 1387 1388 rx_reorder_array_elem = peer->rx_tid[tid].array; 1389 1390 mpdu_sequence_control_valid = 1391 hal_rx_get_mpdu_sequence_control_valid(rx_desc->rx_buf_start); 1392 1393 /* Invalid MPDU sequence control field, MPDU is of no use */ 1394 if (!mpdu_sequence_control_valid) { 1395 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1396 "Invalid MPDU seq control field, dropping MPDU"); 1397 qdf_nbuf_free(frag); 1398 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1399 *rx_bfs = 1; 1400 1401 qdf_assert(0); 1402 goto end; 1403 } 1404 1405 mpdu_frame_control_valid = 1406 hal_rx_get_mpdu_frame_control_valid(rx_desc->rx_buf_start); 1407 1408 /* Invalid frame control field */ 1409 if (!mpdu_frame_control_valid) { 1410 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1411 "Invalid frame control field, dropping MPDU"); 1412 qdf_nbuf_free(frag); 1413 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1414 *rx_bfs = 1; 1415 1416 qdf_assert(0); 1417 goto end; 1418 } 1419 1420 /* Current mpdu sequence */ 1421 more_frag = dp_rx_frag_get_more_frag_bit(rx_desc->rx_buf_start); 1422 1423 /* HW does not populate the fragment number as of now 1424 * need to get from the 802.11 header 1425 */ 1426 fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc->rx_buf_start); 1427 1428 /* 1429 * !more_frag: no more fragments to be delivered 1430 * !frag_no: packet is not fragmented 1431 * !rx_reorder_array_elem->head: no saved fragments so far 1432 */ 1433 if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) { 1434 /* We should not get into this situation here. 1435 * It means an unfragmented packet with fragment flag 1436 * is delivered over the REO exception ring. 1437 * Typically it follows normal rx path. 1438 */ 1439 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1440 "Rcvd unfragmented pkt on REO Err srng, dropping"); 1441 qdf_nbuf_free(frag); 1442 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1443 *rx_bfs = 1; 1444 1445 qdf_assert(0); 1446 goto end; 1447 } 1448 1449 /* Check if the fragment is for the same sequence or a different one */ 1450 if (rx_reorder_array_elem->head) { 1451 if (rxseq != rx_tid->curr_seq_num) { 1452 1453 /* Drop stored fragments if out of sequence 1454 * fragment is received 1455 */ 1456 dp_rx_defrag_frames_free(rx_reorder_array_elem->head); 1457 1458 rx_reorder_array_elem->head = NULL; 1459 rx_reorder_array_elem->tail = NULL; 1460 1461 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1462 "%s mismatch, dropping earlier sequence ", 1463 (rxseq == rx_tid->curr_seq_num) 1464 ? "address" 1465 : "seq number"); 1466 1467 /* 1468 * The sequence number for this fragment becomes the 1469 * new sequence number to be processed 1470 */ 1471 rx_tid->curr_seq_num = rxseq; 1472 1473 } 1474 } else { 1475 /* Start of a new sequence */ 1476 dp_rx_defrag_cleanup(peer, tid); 1477 rx_tid->curr_seq_num = rxseq; 1478 } 1479 1480 /* 1481 * If the earlier sequence was dropped, this will be the fresh start. 1482 * Else, continue with next fragment in a given sequence 1483 */ 1484 status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head, 1485 &rx_reorder_array_elem->tail, frag, 1486 &all_frag_present); 1487 1488 /* 1489 * Currently, we can have only 6 MSDUs per-MPDU, if the current 1490 * packet sequence has more than 6 MSDUs for some reason, we will 1491 * have to use the next MSDU link descriptor and chain them together 1492 * before reinjection 1493 */ 1494 if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) && 1495 (rx_reorder_array_elem->head == frag)) { 1496 1497 status = dp_rx_defrag_save_info_from_ring_desc(ring_desc, 1498 rx_desc, peer, tid); 1499 1500 if (status != QDF_STATUS_SUCCESS) { 1501 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1502 "%s: Unable to store ring desc !", __func__); 1503 goto end; 1504 } 1505 } else { 1506 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1507 *rx_bfs = 1; 1508 1509 /* Return the non-head link desc */ 1510 if (dp_rx_link_desc_return(soc, ring_desc, 1511 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 1512 QDF_STATUS_SUCCESS) 1513 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1514 "%s: Failed to return link desc", 1515 __func__); 1516 1517 } 1518 1519 if (pdev->soc->rx.flags.defrag_timeout_check) 1520 dp_rx_defrag_waitlist_remove(peer, tid); 1521 1522 /* Yet to receive more fragments for this sequence number */ 1523 if (!all_frag_present) { 1524 uint32_t now_ms = 1525 qdf_system_ticks_to_msecs(qdf_system_ticks()); 1526 1527 peer->rx_tid[tid].defrag_timeout_ms = 1528 now_ms + pdev->soc->rx.defrag.timeout_ms; 1529 1530 dp_rx_defrag_waitlist_add(peer, tid); 1531 1532 return QDF_STATUS_SUCCESS; 1533 } 1534 1535 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1536 "All fragments received for sequence: %d", rxseq); 1537 1538 /* Process the fragments */ 1539 status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head, 1540 rx_reorder_array_elem->tail); 1541 if (QDF_IS_STATUS_ERROR(status)) { 1542 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1543 "Fragment processing failed"); 1544 1545 dp_rx_add_to_free_desc_list(head, tail, 1546 peer->rx_tid[tid].head_frag_desc); 1547 *rx_bfs = 1; 1548 1549 if (dp_rx_link_desc_return(soc, 1550 peer->rx_tid[tid].dst_ring_desc, 1551 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 1552 QDF_STATUS_SUCCESS) 1553 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1554 "%s: Failed to return link desc", 1555 __func__); 1556 dp_rx_defrag_cleanup(peer, tid); 1557 goto end; 1558 } 1559 1560 /* Re-inject the fragments back to REO for further processing */ 1561 status = dp_rx_defrag_reo_reinject(peer, tid, 1562 rx_reorder_array_elem->head); 1563 if (QDF_IS_STATUS_SUCCESS(status)) { 1564 rx_reorder_array_elem->head = NULL; 1565 rx_reorder_array_elem->tail = NULL; 1566 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1567 "Fragmented sequence successfully reinjected"); 1568 } else { 1569 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1570 "Fragmented sequence reinjection failed"); 1571 dp_rx_return_head_frag_desc(peer, tid); 1572 } 1573 1574 dp_rx_defrag_cleanup(peer, tid); 1575 return QDF_STATUS_SUCCESS; 1576 1577 end: 1578 return QDF_STATUS_E_DEFRAG_ERROR; 1579 } 1580 1581 /** 1582 * dp_rx_frag_handle() - Handles fragmented Rx frames 1583 * 1584 * @soc: core txrx main context 1585 * @ring_desc: opaque pointer to the REO error ring descriptor 1586 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 1587 * @head: head of the local descriptor free-list 1588 * @tail: tail of the local descriptor free-list 1589 * @quota: No. of units (packets) that can be serviced in one shot. 1590 * 1591 * This function implements RX 802.11 fragmentation handling 1592 * The handling is mostly same as legacy fragmentation handling. 1593 * If required, this function can re-inject the frames back to 1594 * REO ring (with proper setting to by-pass fragmentation check 1595 * but use duplicate detection / re-ordering and routing these frames 1596 * to a different core. 1597 * 1598 * Return: uint32_t: No. of elements processed 1599 */ 1600 uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc, 1601 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 1602 union dp_rx_desc_list_elem_t **head, 1603 union dp_rx_desc_list_elem_t **tail, 1604 uint32_t quota) 1605 { 1606 uint32_t rx_bufs_used = 0; 1607 void *link_desc_va; 1608 struct hal_buf_info buf_info; 1609 struct hal_rx_msdu_list msdu_list; /* per MPDU list of MSDUs */ 1610 qdf_nbuf_t msdu = NULL; 1611 uint32_t tid, msdu_len; 1612 int idx, rx_bfs = 0; 1613 QDF_STATUS status; 1614 1615 qdf_assert(soc); 1616 qdf_assert(mpdu_desc_info); 1617 1618 /* Fragment from a valid peer */ 1619 hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); 1620 1621 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 1622 1623 qdf_assert(link_desc_va); 1624 1625 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 1626 "Number of MSDUs to process, num_msdus: %d", 1627 mpdu_desc_info->msdu_count); 1628 1629 1630 if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) { 1631 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1632 "Not sufficient MSDUs to process"); 1633 return rx_bufs_used; 1634 } 1635 1636 /* Get msdu_list for the given MPDU */ 1637 hal_rx_msdu_list_get(link_desc_va, &msdu_list, 1638 &mpdu_desc_info->msdu_count); 1639 1640 /* Process all MSDUs in the current MPDU */ 1641 for (idx = 0; (idx < mpdu_desc_info->msdu_count) && quota--; idx++) { 1642 struct dp_rx_desc *rx_desc = 1643 dp_rx_cookie_2_va_rxdma_buf(soc, 1644 msdu_list.sw_cookie[idx]); 1645 1646 qdf_assert(rx_desc); 1647 1648 msdu = rx_desc->nbuf; 1649 1650 qdf_nbuf_unmap_single(soc->osdev, msdu, 1651 QDF_DMA_BIDIRECTIONAL); 1652 1653 rx_desc->rx_buf_start = qdf_nbuf_data(msdu); 1654 1655 msdu_len = hal_rx_msdu_start_msdu_len_get( 1656 rx_desc->rx_buf_start); 1657 1658 qdf_nbuf_set_pktlen(msdu, (msdu_len + RX_PKT_TLVS_LEN)); 1659 1660 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, 1661 rx_desc->rx_buf_start); 1662 1663 /* Process fragment-by-fragment */ 1664 status = dp_rx_defrag_store_fragment(soc, ring_desc, 1665 head, tail, mpdu_desc_info, 1666 tid, rx_desc, &rx_bfs); 1667 1668 if (rx_bfs) 1669 rx_bufs_used++; 1670 1671 if (!QDF_IS_STATUS_SUCCESS(status)) { 1672 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1673 "Rx Defrag err seq#:0x%x msdu_count:%d flags:%d", 1674 mpdu_desc_info->mpdu_seq, 1675 mpdu_desc_info->msdu_count, 1676 mpdu_desc_info->mpdu_flags); 1677 1678 /* No point in processing rest of the fragments */ 1679 break; 1680 } 1681 } 1682 1683 return rx_bufs_used; 1684 } 1685 1686 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc, 1687 struct dp_peer *peer, uint16_t tid, 1688 uint16_t rxseq, qdf_nbuf_t nbuf) 1689 { 1690 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 1691 struct dp_rx_reorder_array_elem *rx_reorder_array_elem; 1692 uint8_t all_frag_present; 1693 uint32_t msdu_len; 1694 QDF_STATUS status; 1695 1696 rx_reorder_array_elem = peer->rx_tid[tid].array; 1697 1698 if (rx_reorder_array_elem->head && 1699 rxseq != rx_tid->curr_seq_num) { 1700 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1701 "%s: No list found for TID %d Seq# %d", 1702 __func__, tid, rxseq); 1703 qdf_nbuf_free(nbuf); 1704 goto fail; 1705 } 1706 1707 msdu_len = hal_rx_msdu_start_msdu_len_get(qdf_nbuf_data(nbuf)); 1708 1709 qdf_nbuf_set_pktlen(nbuf, (msdu_len + RX_PKT_TLVS_LEN)); 1710 1711 status = dp_rx_defrag_fraglist_insert(peer, tid, 1712 &rx_reorder_array_elem->head, 1713 &rx_reorder_array_elem->tail, nbuf, 1714 &all_frag_present); 1715 1716 if (QDF_IS_STATUS_ERROR(status)) { 1717 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1718 "%s Fragment insert failed", __func__); 1719 1720 goto fail; 1721 } 1722 1723 if (soc->rx.flags.defrag_timeout_check) 1724 dp_rx_defrag_waitlist_remove(peer, tid); 1725 1726 if (!all_frag_present) { 1727 uint32_t now_ms = 1728 qdf_system_ticks_to_msecs(qdf_system_ticks()); 1729 1730 peer->rx_tid[tid].defrag_timeout_ms = 1731 now_ms + soc->rx.defrag.timeout_ms; 1732 1733 dp_rx_defrag_waitlist_add(peer, tid); 1734 1735 return QDF_STATUS_SUCCESS; 1736 } 1737 1738 status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head, 1739 rx_reorder_array_elem->tail); 1740 1741 if (QDF_IS_STATUS_ERROR(status)) { 1742 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1743 "%s Fragment processing failed", __func__); 1744 1745 dp_rx_return_head_frag_desc(peer, tid); 1746 dp_rx_defrag_cleanup(peer, tid); 1747 1748 goto fail; 1749 } 1750 1751 /* Re-inject the fragments back to REO for further processing */ 1752 status = dp_rx_defrag_reo_reinject(peer, tid, 1753 rx_reorder_array_elem->head); 1754 if (QDF_IS_STATUS_SUCCESS(status)) { 1755 rx_reorder_array_elem->head = NULL; 1756 rx_reorder_array_elem->tail = NULL; 1757 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1758 "%s: Frag seq successfully reinjected", 1759 __func__); 1760 } else { 1761 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1762 "%s: Frag seq reinjection failed", 1763 __func__); 1764 dp_rx_return_head_frag_desc(peer, tid); 1765 } 1766 1767 dp_rx_defrag_cleanup(peer, tid); 1768 return QDF_STATUS_SUCCESS; 1769 1770 fail: 1771 return QDF_STATUS_E_DEFRAG_ERROR; 1772 } 1773