1 /* 2 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "hal_hw_headers.h" 20 #include "dp_types.h" 21 #include "dp_rx.h" 22 #include "dp_peer.h" 23 #include "hal_api.h" 24 #include "qdf_trace.h" 25 #include "qdf_nbuf.h" 26 #include "dp_internal.h" 27 #include "dp_rx_defrag.h" 28 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 29 #include "dp_rx_defrag.h" 30 31 const struct dp_rx_defrag_cipher dp_f_ccmp = { 32 "AES-CCM", 33 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, 34 IEEE80211_WEP_MICLEN, 35 0, 36 }; 37 38 const struct dp_rx_defrag_cipher dp_f_tkip = { 39 "TKIP", 40 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, 41 IEEE80211_WEP_CRCLEN, 42 IEEE80211_WEP_MICLEN, 43 }; 44 45 const struct dp_rx_defrag_cipher dp_f_wep = { 46 "WEP", 47 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN, 48 IEEE80211_WEP_CRCLEN, 49 0, 50 }; 51 52 /* 53 * dp_rx_defrag_frames_free(): Free fragment chain 54 * @frames: Fragment chain 55 * 56 * Iterates through the fragment chain and frees them 57 * Returns: None 58 */ 59 static void dp_rx_defrag_frames_free(qdf_nbuf_t frames) 60 { 61 qdf_nbuf_t next, frag = frames; 62 63 while (frag) { 64 next = qdf_nbuf_next(frag); 65 qdf_nbuf_free(frag); 66 frag = next; 67 } 68 } 69 70 /* 71 * dp_rx_clear_saved_desc_info(): Clears descriptor info 72 * @peer: Pointer to the peer data structure 73 * @tid: Transmit ID (TID) 74 * 75 * Saves MPDU descriptor info and MSDU link pointer from REO 76 * ring descriptor. The cache is created per peer, per TID 77 * 78 * Returns: None 79 */ 80 static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid) 81 { 82 if (peer->rx_tid[tid].dst_ring_desc) 83 qdf_mem_free(peer->rx_tid[tid].dst_ring_desc); 84 85 peer->rx_tid[tid].dst_ring_desc = NULL; 86 } 87 88 static void dp_rx_return_head_frag_desc(struct dp_peer *peer, 89 unsigned int tid) 90 { 91 struct dp_soc *soc; 92 struct dp_pdev *pdev; 93 struct dp_srng *dp_rxdma_srng; 94 struct rx_desc_pool *rx_desc_pool; 95 union dp_rx_desc_list_elem_t *head = NULL; 96 union dp_rx_desc_list_elem_t *tail = NULL; 97 98 pdev = peer->vdev->pdev; 99 soc = pdev->soc; 100 101 if (peer->rx_tid[tid].head_frag_desc) { 102 dp_rxdma_srng = &pdev->rx_refill_buf_ring; 103 rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id]; 104 105 dp_rx_add_to_free_desc_list(&head, &tail, 106 peer->rx_tid[tid].head_frag_desc); 107 dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, 108 1, &head, &tail); 109 } 110 111 if (peer->rx_tid[tid].dst_ring_desc) { 112 if (dp_rx_link_desc_return(soc, 113 peer->rx_tid[tid].dst_ring_desc, 114 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 115 QDF_STATUS_SUCCESS) 116 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 117 "%s: Failed to return link desc", __func__); 118 } 119 } 120 121 /* 122 * dp_rx_reorder_flush_frag(): Flush the frag list 123 * @peer: Pointer to the peer data structure 124 * @tid: Transmit ID (TID) 125 * 126 * Flush the per-TID frag list 127 * 128 * Returns: None 129 */ 130 void dp_rx_reorder_flush_frag(struct dp_peer *peer, 131 unsigned int tid) 132 { 133 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, 134 FL("Flushing TID %d"), tid); 135 136 if (!peer) { 137 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 138 "%s: NULL peer", __func__); 139 return; 140 } 141 142 dp_rx_return_head_frag_desc(peer, tid); 143 dp_rx_defrag_cleanup(peer, tid); 144 } 145 146 /* 147 * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list 148 * @soc: DP SOC 149 * 150 * Flush fragments of all waitlisted TID's 151 * 152 * Returns: None 153 */ 154 void dp_rx_defrag_waitlist_flush(struct dp_soc *soc) 155 { 156 struct dp_rx_tid *rx_reorder = NULL; 157 struct dp_rx_tid *tmp; 158 uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()); 159 TAILQ_HEAD(, dp_rx_tid) temp_list; 160 161 TAILQ_INIT(&temp_list); 162 163 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 164 FL("Current time %u"), now_ms); 165 166 qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); 167 TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist, 168 defrag_waitlist_elem, tmp) { 169 uint32_t tid; 170 171 if (rx_reorder->defrag_timeout_ms > now_ms) 172 break; 173 174 tid = rx_reorder->tid; 175 if (tid >= DP_MAX_TIDS) { 176 qdf_assert(0); 177 continue; 178 } 179 180 TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder, 181 defrag_waitlist_elem); 182 DP_STATS_DEC(soc, rx.rx_frag_wait, 1); 183 184 /* Move to temp list and clean-up later */ 185 TAILQ_INSERT_TAIL(&temp_list, rx_reorder, 186 defrag_waitlist_elem); 187 } 188 if (rx_reorder) { 189 soc->rx.defrag.next_flush_ms = 190 rx_reorder->defrag_timeout_ms; 191 } else { 192 soc->rx.defrag.next_flush_ms = 193 now_ms + soc->rx.defrag.timeout_ms; 194 } 195 196 qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); 197 198 TAILQ_FOREACH_SAFE(rx_reorder, &temp_list, 199 defrag_waitlist_elem, tmp) { 200 struct dp_peer *peer, *temp_peer = NULL; 201 202 qdf_spin_lock_bh(&rx_reorder->tid_lock); 203 TAILQ_REMOVE(&temp_list, rx_reorder, 204 defrag_waitlist_elem); 205 /* get address of current peer */ 206 peer = 207 container_of(rx_reorder, struct dp_peer, 208 rx_tid[rx_reorder->tid]); 209 qdf_spin_unlock_bh(&rx_reorder->tid_lock); 210 211 temp_peer = dp_peer_find_by_id(soc, peer->peer_ids[0]); 212 if (temp_peer == peer) { 213 qdf_spin_lock_bh(&rx_reorder->tid_lock); 214 dp_rx_reorder_flush_frag(peer, rx_reorder->tid); 215 qdf_spin_unlock_bh(&rx_reorder->tid_lock); 216 } 217 218 if (temp_peer) 219 dp_peer_unref_del_find_by_id(temp_peer); 220 221 } 222 } 223 224 /* 225 * dp_rx_defrag_waitlist_add(): Update per-PDEV defrag wait list 226 * @peer: Pointer to the peer data structure 227 * @tid: Transmit ID (TID) 228 * 229 * Appends per-tid fragments to global fragment wait list 230 * 231 * Returns: None 232 */ 233 static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid) 234 { 235 struct dp_soc *psoc = peer->vdev->pdev->soc; 236 struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid]; 237 238 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, 239 FL("Adding TID %u to waitlist for peer %pK"), 240 tid, peer); 241 242 /* TODO: use LIST macros instead of TAIL macros */ 243 qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock); 244 if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist)) 245 psoc->rx.defrag.next_flush_ms = rx_reorder->defrag_timeout_ms; 246 TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder, 247 defrag_waitlist_elem); 248 DP_STATS_INC(psoc, rx.rx_frag_wait, 1); 249 qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock); 250 } 251 252 /* 253 * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist 254 * @peer: Pointer to the peer data structure 255 * @tid: Transmit ID (TID) 256 * 257 * Remove fragments from waitlist 258 * 259 * Returns: None 260 */ 261 void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid) 262 { 263 struct dp_pdev *pdev = peer->vdev->pdev; 264 struct dp_soc *soc = pdev->soc; 265 struct dp_rx_tid *rx_reorder; 266 struct dp_rx_tid *tmp; 267 268 if (tid > DP_MAX_TIDS) { 269 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 270 "TID out of bounds: %d", tid); 271 qdf_assert(0); 272 return; 273 } 274 275 qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); 276 TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist, 277 defrag_waitlist_elem, tmp) { 278 struct dp_peer *peer_on_waitlist; 279 280 /* get address of current peer */ 281 peer_on_waitlist = 282 container_of(rx_reorder, struct dp_peer, 283 rx_tid[rx_reorder->tid]); 284 285 /* Ensure it is TID for same peer */ 286 if (peer_on_waitlist == peer && rx_reorder->tid == tid) { 287 TAILQ_REMOVE(&soc->rx.defrag.waitlist, 288 rx_reorder, defrag_waitlist_elem); 289 DP_STATS_DEC(soc, rx.rx_frag_wait, 1); 290 } 291 } 292 qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); 293 } 294 295 /* 296 * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list 297 * @peer: Pointer to the peer data structure 298 * @tid: Transmit ID (TID) 299 * @head_addr: Pointer to head list 300 * @tail_addr: Pointer to tail list 301 * @frag: Incoming fragment 302 * @all_frag_present: Flag to indicate whether all fragments are received 303 * 304 * Build a per-tid, per-sequence fragment list. 305 * 306 * Returns: Success, if inserted 307 */ 308 static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid, 309 qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag, 310 uint8_t *all_frag_present) 311 { 312 qdf_nbuf_t next; 313 qdf_nbuf_t prev = NULL; 314 qdf_nbuf_t cur; 315 uint16_t head_fragno, cur_fragno, next_fragno; 316 uint8_t last_morefrag = 1, count = 0; 317 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 318 uint8_t *rx_desc_info; 319 320 321 qdf_assert(frag); 322 qdf_assert(head_addr); 323 qdf_assert(tail_addr); 324 325 *all_frag_present = 0; 326 rx_desc_info = qdf_nbuf_data(frag); 327 cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 328 329 /* If this is the first fragment */ 330 if (!(*head_addr)) { 331 *head_addr = *tail_addr = frag; 332 qdf_nbuf_set_next(*tail_addr, NULL); 333 rx_tid->curr_frag_num = cur_fragno; 334 335 goto insert_done; 336 } 337 338 /* In sequence fragment */ 339 if (cur_fragno > rx_tid->curr_frag_num) { 340 qdf_nbuf_set_next(*tail_addr, frag); 341 *tail_addr = frag; 342 qdf_nbuf_set_next(*tail_addr, NULL); 343 rx_tid->curr_frag_num = cur_fragno; 344 } else { 345 /* Out of sequence fragment */ 346 cur = *head_addr; 347 rx_desc_info = qdf_nbuf_data(cur); 348 head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 349 350 if (cur_fragno == head_fragno) { 351 qdf_nbuf_free(frag); 352 goto insert_fail; 353 } else if (head_fragno > cur_fragno) { 354 qdf_nbuf_set_next(frag, cur); 355 cur = frag; 356 *head_addr = frag; /* head pointer to be updated */ 357 } else { 358 while ((cur_fragno > head_fragno) && cur) { 359 prev = cur; 360 cur = qdf_nbuf_next(cur); 361 rx_desc_info = qdf_nbuf_data(cur); 362 head_fragno = 363 dp_rx_frag_get_mpdu_frag_number( 364 rx_desc_info); 365 } 366 367 if (cur_fragno == head_fragno) { 368 qdf_nbuf_free(frag); 369 goto insert_fail; 370 } 371 372 qdf_nbuf_set_next(prev, frag); 373 qdf_nbuf_set_next(frag, cur); 374 } 375 } 376 377 next = qdf_nbuf_next(*head_addr); 378 379 rx_desc_info = qdf_nbuf_data(*tail_addr); 380 last_morefrag = dp_rx_frag_get_more_frag_bit(rx_desc_info); 381 382 /* TODO: optimize the loop */ 383 if (!last_morefrag) { 384 /* Check if all fragments are present */ 385 do { 386 rx_desc_info = qdf_nbuf_data(next); 387 next_fragno = 388 dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 389 count++; 390 391 if (next_fragno != count) 392 break; 393 394 next = qdf_nbuf_next(next); 395 } while (next); 396 397 if (!next) { 398 *all_frag_present = 1; 399 return QDF_STATUS_SUCCESS; 400 } 401 } 402 403 insert_done: 404 return QDF_STATUS_SUCCESS; 405 406 insert_fail: 407 return QDF_STATUS_E_FAILURE; 408 } 409 410 411 /* 412 * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment 413 * @msdu: Pointer to the fragment 414 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) 415 * 416 * decap tkip encrypted fragment 417 * 418 * Returns: QDF_STATUS 419 */ 420 static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen) 421 { 422 uint8_t *ivp, *orig_hdr; 423 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 424 425 /* start of 802.11 header info */ 426 orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len); 427 428 /* TKIP header is located post 802.11 header */ 429 ivp = orig_hdr + hdrlen; 430 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) { 431 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 432 "IEEE80211_WEP_EXTIV is missing in TKIP fragment"); 433 return QDF_STATUS_E_DEFRAG_ERROR; 434 } 435 436 qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer); 437 438 return QDF_STATUS_SUCCESS; 439 } 440 441 /* 442 * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment 443 * @nbuf: Pointer to the fragment buffer 444 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) 445 * 446 * Remove MIC information from CCMP fragment 447 * 448 * Returns: QDF_STATUS 449 */ 450 static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen) 451 { 452 uint8_t *ivp, *orig_hdr; 453 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 454 455 /* start of the 802.11 header */ 456 orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); 457 458 /* CCMP header is located after 802.11 header */ 459 ivp = orig_hdr + hdrlen; 460 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 461 return QDF_STATUS_E_DEFRAG_ERROR; 462 463 qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer); 464 465 return QDF_STATUS_SUCCESS; 466 } 467 468 /* 469 * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment 470 * @nbuf: Pointer to the fragment 471 * @hdrlen: length of the header information 472 * 473 * decap CCMP encrypted fragment 474 * 475 * Returns: QDF_STATUS 476 */ 477 static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen) 478 { 479 uint8_t *ivp, *origHdr; 480 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 481 482 origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len); 483 ivp = origHdr + hdrlen; 484 485 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 486 return QDF_STATUS_E_DEFRAG_ERROR; 487 488 /* Let's pull the header later */ 489 490 return QDF_STATUS_SUCCESS; 491 } 492 493 /* 494 * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment 495 * @msdu: Pointer to the fragment 496 * @hdrlen: length of the header information 497 * 498 * decap WEP encrypted fragment 499 * 500 * Returns: QDF_STATUS 501 */ 502 static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen) 503 { 504 uint8_t *origHdr; 505 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 506 507 origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); 508 qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen); 509 510 qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer); 511 512 return QDF_STATUS_SUCCESS; 513 } 514 515 /* 516 * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment 517 * @nbuf: Pointer to the fragment 518 * 519 * Calculate the header size of the received fragment 520 * 521 * Returns: header size (uint16_t) 522 */ 523 static uint16_t dp_rx_defrag_hdrsize(qdf_nbuf_t nbuf) 524 { 525 uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf); 526 uint16_t size = sizeof(struct ieee80211_frame); 527 uint16_t fc = 0; 528 uint32_t to_ds, fr_ds; 529 uint8_t frm_ctrl_valid; 530 uint16_t frm_ctrl_field; 531 532 to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr); 533 fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr); 534 frm_ctrl_valid = hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr); 535 frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr); 536 537 if (to_ds && fr_ds) 538 size += QDF_MAC_ADDR_SIZE; 539 540 if (frm_ctrl_valid) { 541 fc = frm_ctrl_field; 542 543 /* use 1-st byte for validation */ 544 if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) { 545 size += sizeof(uint16_t); 546 /* use 2-nd byte for validation */ 547 if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER) 548 size += sizeof(struct ieee80211_htc); 549 } 550 } 551 552 return size; 553 } 554 555 /* 556 * dp_rx_defrag_michdr(): Calculate a pseudo MIC header 557 * @wh0: Pointer to the wireless header of the fragment 558 * @hdr: Array to hold the pseudo header 559 * 560 * Calculate a pseudo MIC header 561 * 562 * Returns: None 563 */ 564 static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0, 565 uint8_t hdr[]) 566 { 567 const struct ieee80211_frame_addr4 *wh = 568 (const struct ieee80211_frame_addr4 *)wh0; 569 570 switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { 571 case IEEE80211_FC1_DIR_NODS: 572 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ 573 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE, 574 wh->i_addr2); 575 break; 576 case IEEE80211_FC1_DIR_TODS: 577 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ 578 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE, 579 wh->i_addr2); 580 break; 581 case IEEE80211_FC1_DIR_FROMDS: 582 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ 583 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE, 584 wh->i_addr3); 585 break; 586 case IEEE80211_FC1_DIR_DSTODS: 587 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ 588 DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE, 589 wh->i_addr4); 590 break; 591 } 592 593 /* 594 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for data frame, but 595 * it could also be set for deauth, disassoc, action, etc. for 596 * a mgt type frame. It comes into picture for MFP. 597 */ 598 if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) { 599 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == 600 IEEE80211_FC1_DIR_DSTODS) { 601 const struct ieee80211_qosframe_addr4 *qwh = 602 (const struct ieee80211_qosframe_addr4 *)wh; 603 hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID; 604 } else { 605 const struct ieee80211_qosframe *qwh = 606 (const struct ieee80211_qosframe *)wh; 607 hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID; 608 } 609 } else { 610 hdr[12] = 0; 611 } 612 613 hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ 614 } 615 616 /* 617 * dp_rx_defrag_mic(): Calculate MIC header 618 * @key: Pointer to the key 619 * @wbuf: fragment buffer 620 * @off: Offset 621 * @data_len: Data length 622 * @mic: Array to hold MIC 623 * 624 * Calculate a pseudo MIC header 625 * 626 * Returns: QDF_STATUS 627 */ 628 static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf, 629 uint16_t off, uint16_t data_len, uint8_t mic[]) 630 { 631 uint8_t hdr[16] = { 0, }; 632 uint32_t l, r; 633 const uint8_t *data; 634 uint32_t space; 635 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 636 637 dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) 638 + rx_desc_len), hdr); 639 640 l = dp_rx_get_le32(key); 641 r = dp_rx_get_le32(key + 4); 642 643 /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */ 644 l ^= dp_rx_get_le32(hdr); 645 dp_rx_michael_block(l, r); 646 l ^= dp_rx_get_le32(&hdr[4]); 647 dp_rx_michael_block(l, r); 648 l ^= dp_rx_get_le32(&hdr[8]); 649 dp_rx_michael_block(l, r); 650 l ^= dp_rx_get_le32(&hdr[12]); 651 dp_rx_michael_block(l, r); 652 653 /* first buffer has special handling */ 654 data = (uint8_t *)qdf_nbuf_data(wbuf) + off; 655 space = qdf_nbuf_len(wbuf) - off; 656 657 for (;; ) { 658 if (space > data_len) 659 space = data_len; 660 661 /* collect 32-bit blocks from current buffer */ 662 while (space >= sizeof(uint32_t)) { 663 l ^= dp_rx_get_le32(data); 664 dp_rx_michael_block(l, r); 665 data += sizeof(uint32_t); 666 space -= sizeof(uint32_t); 667 data_len -= sizeof(uint32_t); 668 } 669 if (data_len < sizeof(uint32_t)) 670 break; 671 672 wbuf = qdf_nbuf_next(wbuf); 673 if (!wbuf) 674 return QDF_STATUS_E_DEFRAG_ERROR; 675 676 if (space != 0) { 677 const uint8_t *data_next; 678 /* 679 * Block straddles buffers, split references. 680 */ 681 data_next = 682 (uint8_t *)qdf_nbuf_data(wbuf) + off; 683 if ((qdf_nbuf_len(wbuf)) < 684 sizeof(uint32_t) - space) { 685 return QDF_STATUS_E_DEFRAG_ERROR; 686 } 687 switch (space) { 688 case 1: 689 l ^= dp_rx_get_le32_split(data[0], 690 data_next[0], data_next[1], 691 data_next[2]); 692 data = data_next + 3; 693 space = (qdf_nbuf_len(wbuf) - off) - 3; 694 break; 695 case 2: 696 l ^= dp_rx_get_le32_split(data[0], data[1], 697 data_next[0], data_next[1]); 698 data = data_next + 2; 699 space = (qdf_nbuf_len(wbuf) - off) - 2; 700 break; 701 case 3: 702 l ^= dp_rx_get_le32_split(data[0], data[1], 703 data[2], data_next[0]); 704 data = data_next + 1; 705 space = (qdf_nbuf_len(wbuf) - off) - 1; 706 break; 707 } 708 dp_rx_michael_block(l, r); 709 data_len -= sizeof(uint32_t); 710 } else { 711 /* 712 * Setup for next buffer. 713 */ 714 data = (uint8_t *)qdf_nbuf_data(wbuf) + off; 715 space = qdf_nbuf_len(wbuf) - off; 716 } 717 } 718 /* Last block and padding (0x5a, 4..7 x 0) */ 719 switch (data_len) { 720 case 0: 721 l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0); 722 break; 723 case 1: 724 l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0); 725 break; 726 case 2: 727 l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0); 728 break; 729 case 3: 730 l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a); 731 break; 732 } 733 dp_rx_michael_block(l, r); 734 dp_rx_michael_block(l, r); 735 dp_rx_put_le32(mic, l); 736 dp_rx_put_le32(mic + 4, r); 737 738 return QDF_STATUS_SUCCESS; 739 } 740 741 /* 742 * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame 743 * @key: Pointer to the key 744 * @msdu: fragment buffer 745 * @hdrlen: Length of the header information 746 * 747 * Remove MIC information from the TKIP frame 748 * 749 * Returns: QDF_STATUS 750 */ 751 static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key, 752 qdf_nbuf_t msdu, uint16_t hdrlen) 753 { 754 QDF_STATUS status; 755 uint32_t pktlen = 0; 756 uint8_t mic[IEEE80211_WEP_MICLEN]; 757 uint8_t mic0[IEEE80211_WEP_MICLEN]; 758 qdf_nbuf_t prev = NULL, next; 759 760 next = msdu; 761 while (next) { 762 pktlen += (qdf_nbuf_len(next) - hdrlen); 763 prev = next; 764 dp_debug("%s pktlen %u", __func__, 765 (uint32_t)(qdf_nbuf_len(next) - hdrlen)); 766 next = qdf_nbuf_next(next); 767 } 768 769 if (!prev) { 770 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 771 "%s Defrag chaining failed !\n", __func__); 772 return QDF_STATUS_E_DEFRAG_ERROR; 773 } 774 775 qdf_nbuf_copy_bits(prev, qdf_nbuf_len(prev) - dp_f_tkip.ic_miclen, 776 dp_f_tkip.ic_miclen, (caddr_t)mic0); 777 qdf_nbuf_trim_tail(prev, dp_f_tkip.ic_miclen); 778 pktlen -= dp_f_tkip.ic_miclen; 779 780 status = dp_rx_defrag_mic(key, msdu, hdrlen, 781 pktlen, mic); 782 783 if (QDF_IS_STATUS_ERROR(status)) 784 return status; 785 786 if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen)) 787 return QDF_STATUS_E_DEFRAG_ERROR; 788 789 return QDF_STATUS_SUCCESS; 790 } 791 792 /* 793 * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers 794 * @nbuf: buffer pointer 795 * @hdrsize: size of the header to be pulled 796 * 797 * Pull the RXTLV & the 802.11 headers 798 * 799 * Returns: None 800 */ 801 static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize) 802 { 803 qdf_nbuf_pull_head(nbuf, 804 RX_PKT_TLVS_LEN + hdrsize); 805 806 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 807 "%s: final pktlen %d .11len %d", 808 __func__, (uint32_t)qdf_nbuf_len(nbuf), hdrsize); 809 } 810 811 /* 812 * dp_rx_construct_fraglist(): Construct a nbuf fraglist 813 * @peer: Pointer to the peer 814 * @head: Pointer to list of fragments 815 * @hdrsize: Size of the header to be pulled 816 * 817 * Construct a nbuf fraglist 818 * 819 * Returns: None 820 */ 821 static void 822 dp_rx_construct_fraglist(struct dp_peer *peer, 823 qdf_nbuf_t head, uint16_t hdrsize) 824 { 825 qdf_nbuf_t msdu = qdf_nbuf_next(head); 826 qdf_nbuf_t rx_nbuf = msdu; 827 uint32_t len = 0; 828 829 while (msdu) { 830 dp_rx_frag_pull_hdr(msdu, hdrsize); 831 len += qdf_nbuf_len(msdu); 832 msdu = qdf_nbuf_next(msdu); 833 } 834 835 qdf_nbuf_append_ext_list(head, rx_nbuf, len); 836 qdf_nbuf_set_next(head, NULL); 837 qdf_nbuf_set_is_frag(head, 1); 838 839 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 840 "%s: head len %d ext len %d data len %d ", 841 __func__, 842 (uint32_t)qdf_nbuf_len(head), 843 (uint32_t)qdf_nbuf_len(rx_nbuf), 844 (uint32_t)(head->data_len)); 845 } 846 847 /** 848 * dp_rx_defrag_err() - rx err handler 849 * @pdev: handle to pdev object 850 * @vdev_id: vdev id 851 * @peer_mac_addr: peer mac address 852 * @tid: TID 853 * @tsf32: TSF 854 * @err_type: error type 855 * @rx_frame: rx frame 856 * @pn: PN Number 857 * @key_id: key id 858 * 859 * This function handles rx error and send MIC error notification 860 * 861 * Return: None 862 */ 863 static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf) 864 { 865 struct ol_if_ops *tops = NULL; 866 struct dp_pdev *pdev = vdev->pdev; 867 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 868 uint8_t *orig_hdr; 869 struct ieee80211_frame *wh; 870 871 orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); 872 wh = (struct ieee80211_frame *)orig_hdr; 873 874 tops = pdev->soc->cdp_soc.ol_ops; 875 if (tops->rx_mic_error) 876 tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh); 877 } 878 879 880 /* 881 * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3 882 * @nbuf: Pointer to the fragment buffer 883 * @hdrsize: Size of headers 884 * 885 * Transcap the fragment from 802.11 to 802.3 886 * 887 * Returns: None 888 */ 889 static void 890 dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize) 891 { 892 struct llc_snap_hdr_t *llchdr; 893 struct ethernet_hdr_t *eth_hdr; 894 uint8_t ether_type[2]; 895 uint16_t fc = 0; 896 union dp_align_mac_addr mac_addr; 897 uint8_t *rx_desc_info = qdf_mem_malloc(RX_PKT_TLVS_LEN); 898 899 if (!rx_desc_info) { 900 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 901 "%s: Memory alloc failed ! ", __func__); 902 QDF_ASSERT(0); 903 return; 904 } 905 906 qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN); 907 908 llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) + 909 RX_PKT_TLVS_LEN + hdrsize); 910 qdf_mem_copy(ether_type, llchdr->ethertype, 2); 911 912 qdf_nbuf_pull_head(nbuf, (RX_PKT_TLVS_LEN + hdrsize + 913 sizeof(struct llc_snap_hdr_t) - 914 sizeof(struct ethernet_hdr_t))); 915 916 eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf)); 917 918 if (hal_rx_get_mpdu_frame_control_valid(rx_desc_info)) 919 fc = hal_rx_get_frame_ctrl_field(rx_desc_info); 920 921 dp_debug("%s: frame control type: 0x%x", __func__, fc); 922 923 switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) { 924 case IEEE80211_FC1_DIR_NODS: 925 hal_rx_mpdu_get_addr1(rx_desc_info, 926 &mac_addr.raw[0]); 927 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 928 QDF_MAC_ADDR_SIZE); 929 hal_rx_mpdu_get_addr2(rx_desc_info, 930 &mac_addr.raw[0]); 931 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 932 QDF_MAC_ADDR_SIZE); 933 break; 934 case IEEE80211_FC1_DIR_TODS: 935 hal_rx_mpdu_get_addr3(rx_desc_info, 936 &mac_addr.raw[0]); 937 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 938 QDF_MAC_ADDR_SIZE); 939 hal_rx_mpdu_get_addr2(rx_desc_info, 940 &mac_addr.raw[0]); 941 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 942 QDF_MAC_ADDR_SIZE); 943 break; 944 case IEEE80211_FC1_DIR_FROMDS: 945 hal_rx_mpdu_get_addr1(rx_desc_info, 946 &mac_addr.raw[0]); 947 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 948 QDF_MAC_ADDR_SIZE); 949 hal_rx_mpdu_get_addr3(rx_desc_info, 950 &mac_addr.raw[0]); 951 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 952 QDF_MAC_ADDR_SIZE); 953 break; 954 955 case IEEE80211_FC1_DIR_DSTODS: 956 hal_rx_mpdu_get_addr3(rx_desc_info, 957 &mac_addr.raw[0]); 958 qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], 959 QDF_MAC_ADDR_SIZE); 960 hal_rx_mpdu_get_addr4(rx_desc_info, 961 &mac_addr.raw[0]); 962 qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], 963 QDF_MAC_ADDR_SIZE); 964 break; 965 966 default: 967 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 968 "%s: Unknown frame control type: 0x%x", __func__, fc); 969 } 970 971 qdf_mem_copy(eth_hdr->ethertype, ether_type, 972 sizeof(ether_type)); 973 974 qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN); 975 qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, RX_PKT_TLVS_LEN); 976 qdf_mem_free(rx_desc_info); 977 } 978 979 /* 980 * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO 981 * @peer: Pointer to the peer 982 * @tid: Transmit Identifier 983 * @head: Buffer to be reinjected back 984 * 985 * Reinject the fragment chain back into REO 986 * 987 * Returns: QDF_STATUS 988 */ 989 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer, 990 unsigned tid, qdf_nbuf_t head) 991 { 992 struct dp_pdev *pdev = peer->vdev->pdev; 993 struct dp_soc *soc = pdev->soc; 994 struct hal_buf_info buf_info; 995 void *link_desc_va; 996 void *msdu0, *msdu_desc_info; 997 void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr; 998 void *dst_mpdu_desc_info, *dst_qdesc_addr; 999 qdf_dma_addr_t paddr; 1000 uint32_t nbuf_len, seq_no, dst_ind; 1001 uint32_t *mpdu_wrd; 1002 uint32_t ret, cookie; 1003 1004 void *dst_ring_desc = 1005 peer->rx_tid[tid].dst_ring_desc; 1006 void *hal_srng = soc->reo_reinject_ring.hal_srng; 1007 1008 ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng); 1009 if (!ent_ring_desc) { 1010 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1011 "HAL src ring next entry NULL"); 1012 return QDF_STATUS_E_FAILURE; 1013 } 1014 1015 hal_rx_reo_buf_paddr_get(dst_ring_desc, &buf_info); 1016 1017 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 1018 1019 qdf_assert(link_desc_va); 1020 1021 msdu0 = (uint8_t *)link_desc_va + 1022 RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET; 1023 1024 nbuf_len = qdf_nbuf_len(head) - RX_PKT_TLVS_LEN; 1025 1026 HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW); 1027 HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE, 1028 UNI_DESC_BUF_TYPE_RX_MSDU_LINK); 1029 1030 /* msdu reconfig */ 1031 msdu_desc_info = (uint8_t *)msdu0 + 1032 RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET; 1033 1034 dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va); 1035 1036 qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info)); 1037 1038 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1039 FIRST_MSDU_IN_MPDU_FLAG, 1); 1040 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1041 LAST_MSDU_IN_MPDU_FLAG, 1); 1042 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1043 MSDU_CONTINUATION, 0x0); 1044 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1045 REO_DESTINATION_INDICATION, dst_ind); 1046 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1047 MSDU_LENGTH, nbuf_len); 1048 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1049 SA_IS_VALID, 1); 1050 HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, 1051 DA_IS_VALID, 1); 1052 1053 /* change RX TLV's */ 1054 hal_rx_msdu_start_msdu_len_set( 1055 qdf_nbuf_data(head), nbuf_len); 1056 1057 cookie = HAL_RX_BUF_COOKIE_GET(msdu0); 1058 1059 /* map the nbuf before reinject it into HW */ 1060 ret = qdf_nbuf_map_single(soc->osdev, head, 1061 QDF_DMA_FROM_DEVICE); 1062 1063 if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { 1064 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1065 "%s: nbuf map failed !", __func__); 1066 return QDF_STATUS_E_FAILURE; 1067 } 1068 1069 paddr = qdf_nbuf_get_frag_paddr(head, 0); 1070 1071 ret = check_x86_paddr(soc, &head, &paddr, pdev); 1072 1073 if (ret == QDF_STATUS_E_FAILURE) { 1074 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1075 "%s: x86 check failed !", __func__); 1076 return QDF_STATUS_E_FAILURE; 1077 } 1078 1079 hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_WBM2SW_RBM); 1080 1081 /* Lets fill entrance ring now !!! */ 1082 if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) { 1083 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1084 "HAL RING Access For REO entrance SRNG Failed: %pK", 1085 hal_srng); 1086 1087 return QDF_STATUS_E_FAILURE; 1088 } 1089 1090 paddr = (uint64_t)buf_info.paddr; 1091 /* buf addr */ 1092 hal_rxdma_buff_addr_info_set(ent_ring_desc, paddr, 1093 buf_info.sw_cookie, 1094 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 1095 /* mpdu desc info */ 1096 ent_mpdu_desc_info = (uint8_t *)ent_ring_desc + 1097 RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET; 1098 1099 dst_mpdu_desc_info = (uint8_t *)dst_ring_desc + 1100 REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET; 1101 1102 qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info, 1103 sizeof(struct rx_mpdu_desc_info)); 1104 qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t)); 1105 1106 mpdu_wrd = (uint32_t *)dst_mpdu_desc_info; 1107 seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd); 1108 1109 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1110 MSDU_COUNT, 0x1); 1111 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1112 MPDU_SEQUENCE_NUMBER, seq_no); 1113 1114 /* unset frag bit */ 1115 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1116 FRAGMENT_FLAG, 0x0); 1117 1118 /* set sa/da valid bits */ 1119 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1120 SA_IS_VALID, 0x1); 1121 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1122 DA_IS_VALID, 0x1); 1123 HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, 1124 RAW_MPDU, 0x0); 1125 1126 /* qdesc addr */ 1127 ent_qdesc_addr = (uint8_t *)ent_ring_desc + 1128 REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; 1129 1130 dst_qdesc_addr = (uint8_t *)dst_ring_desc + 1131 REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; 1132 1133 qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8); 1134 1135 HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5, 1136 REO_DESTINATION_INDICATION, dst_ind); 1137 1138 hal_srng_access_end(soc->hal_soc, hal_srng); 1139 1140 DP_STATS_INC(soc, rx.reo_reinject, 1); 1141 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, 1142 "%s: reinjection done !", __func__); 1143 return QDF_STATUS_SUCCESS; 1144 } 1145 1146 /* 1147 * dp_rx_defrag(): Defragment the fragment chain 1148 * @peer: Pointer to the peer 1149 * @tid: Transmit Identifier 1150 * @frag_list_head: Pointer to head list 1151 * @frag_list_tail: Pointer to tail list 1152 * 1153 * Defragment the fragment chain 1154 * 1155 * Returns: QDF_STATUS 1156 */ 1157 static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid, 1158 qdf_nbuf_t frag_list_head, qdf_nbuf_t frag_list_tail) 1159 { 1160 qdf_nbuf_t tmp_next, prev; 1161 qdf_nbuf_t cur = frag_list_head, msdu; 1162 uint32_t index, tkip_demic = 0; 1163 uint16_t hdr_space; 1164 uint8_t key[DEFRAG_IEEE80211_KEY_LEN]; 1165 struct dp_vdev *vdev = peer->vdev; 1166 struct dp_soc *soc = vdev->pdev->soc; 1167 uint8_t status = 0; 1168 1169 hdr_space = dp_rx_defrag_hdrsize(cur); 1170 index = hal_rx_msdu_is_wlan_mcast(cur) ? 1171 dp_sec_mcast : dp_sec_ucast; 1172 1173 /* Remove FCS from all fragments */ 1174 while (cur) { 1175 tmp_next = qdf_nbuf_next(cur); 1176 qdf_nbuf_set_next(cur, NULL); 1177 qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN); 1178 prev = cur; 1179 qdf_nbuf_set_next(cur, tmp_next); 1180 cur = tmp_next; 1181 } 1182 cur = frag_list_head; 1183 1184 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 1185 "%s: index %d Security type: %d", __func__, 1186 index, peer->security[index].sec_type); 1187 1188 switch (peer->security[index].sec_type) { 1189 case cdp_sec_type_tkip: 1190 tkip_demic = 1; 1191 1192 case cdp_sec_type_tkip_nomic: 1193 while (cur) { 1194 tmp_next = qdf_nbuf_next(cur); 1195 if (dp_rx_defrag_tkip_decap(cur, hdr_space)) { 1196 1197 QDF_TRACE(QDF_MODULE_ID_TXRX, 1198 QDF_TRACE_LEVEL_ERROR, 1199 "dp_rx_defrag: TKIP decap failed"); 1200 1201 return QDF_STATUS_E_DEFRAG_ERROR; 1202 } 1203 cur = tmp_next; 1204 } 1205 1206 /* If success, increment header to be stripped later */ 1207 hdr_space += dp_f_tkip.ic_header; 1208 break; 1209 1210 case cdp_sec_type_aes_ccmp: 1211 while (cur) { 1212 tmp_next = qdf_nbuf_next(cur); 1213 if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) { 1214 1215 QDF_TRACE(QDF_MODULE_ID_TXRX, 1216 QDF_TRACE_LEVEL_ERROR, 1217 "dp_rx_defrag: CCMP demic failed"); 1218 1219 return QDF_STATUS_E_DEFRAG_ERROR; 1220 } 1221 if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) { 1222 1223 QDF_TRACE(QDF_MODULE_ID_TXRX, 1224 QDF_TRACE_LEVEL_ERROR, 1225 "dp_rx_defrag: CCMP decap failed"); 1226 1227 return QDF_STATUS_E_DEFRAG_ERROR; 1228 } 1229 cur = tmp_next; 1230 } 1231 1232 /* If success, increment header to be stripped later */ 1233 hdr_space += dp_f_ccmp.ic_header; 1234 break; 1235 1236 case cdp_sec_type_wep40: 1237 case cdp_sec_type_wep104: 1238 case cdp_sec_type_wep128: 1239 while (cur) { 1240 tmp_next = qdf_nbuf_next(cur); 1241 if (dp_rx_defrag_wep_decap(cur, hdr_space)) { 1242 1243 QDF_TRACE(QDF_MODULE_ID_TXRX, 1244 QDF_TRACE_LEVEL_ERROR, 1245 "dp_rx_defrag: WEP decap failed"); 1246 1247 return QDF_STATUS_E_DEFRAG_ERROR; 1248 } 1249 cur = tmp_next; 1250 } 1251 1252 /* If success, increment header to be stripped later */ 1253 hdr_space += dp_f_wep.ic_header; 1254 break; 1255 default: 1256 QDF_TRACE(QDF_MODULE_ID_TXRX, 1257 QDF_TRACE_LEVEL_ERROR, 1258 "dp_rx_defrag: Did not match any security type"); 1259 break; 1260 } 1261 1262 if (tkip_demic) { 1263 msdu = frag_list_head; 1264 if (soc->cdp_soc.ol_ops->rx_frag_tkip_demic) { 1265 status = soc->cdp_soc.ol_ops->rx_frag_tkip_demic( 1266 (void *)peer->ctrl_peer, msdu, hdr_space); 1267 } else { 1268 qdf_mem_copy(key, 1269 &peer->security[index].michael_key[0], 1270 IEEE80211_WEP_MICLEN); 1271 status = dp_rx_defrag_tkip_demic(key, msdu, 1272 RX_PKT_TLVS_LEN + 1273 hdr_space); 1274 1275 if (status) { 1276 dp_rx_defrag_err(vdev, frag_list_head); 1277 1278 QDF_TRACE(QDF_MODULE_ID_TXRX, 1279 QDF_TRACE_LEVEL_ERROR, 1280 "%s: TKIP demic failed status %d", 1281 __func__, status); 1282 1283 return QDF_STATUS_E_DEFRAG_ERROR; 1284 } 1285 } 1286 } 1287 1288 /* Convert the header to 802.3 header */ 1289 dp_rx_defrag_nwifi_to_8023(frag_list_head, hdr_space); 1290 dp_rx_construct_fraglist(peer, frag_list_head, hdr_space); 1291 1292 return QDF_STATUS_SUCCESS; 1293 } 1294 1295 /* 1296 * dp_rx_defrag_cleanup(): Clean up activities 1297 * @peer: Pointer to the peer 1298 * @tid: Transmit Identifier 1299 * 1300 * Returns: None 1301 */ 1302 void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid) 1303 { 1304 struct dp_rx_reorder_array_elem *rx_reorder_array_elem = 1305 peer->rx_tid[tid].array; 1306 1307 if (!rx_reorder_array_elem) { 1308 /* 1309 * if this condition is hit then somebody 1310 * must have reset this pointer to NULL. 1311 * array pointer usually points to base variable 1312 * of TID queue structure: "struct dp_rx_tid" 1313 */ 1314 QDF_ASSERT(0); 1315 return; 1316 } 1317 /* Free up nbufs */ 1318 dp_rx_defrag_frames_free(rx_reorder_array_elem->head); 1319 1320 /* Free up saved ring descriptors */ 1321 dp_rx_clear_saved_desc_info(peer, tid); 1322 1323 rx_reorder_array_elem->head = NULL; 1324 rx_reorder_array_elem->tail = NULL; 1325 peer->rx_tid[tid].defrag_timeout_ms = 0; 1326 peer->rx_tid[tid].curr_frag_num = 0; 1327 peer->rx_tid[tid].curr_seq_num = 0; 1328 peer->rx_tid[tid].head_frag_desc = NULL; 1329 } 1330 1331 /* 1332 * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor 1333 * @ring_desc: Pointer to the dst ring descriptor 1334 * @peer: Pointer to the peer 1335 * @tid: Transmit Identifier 1336 * 1337 * Returns: None 1338 */ 1339 static QDF_STATUS dp_rx_defrag_save_info_from_ring_desc(void *ring_desc, 1340 struct dp_rx_desc *rx_desc, struct dp_peer *peer, unsigned tid) 1341 { 1342 void *dst_ring_desc = qdf_mem_malloc( 1343 sizeof(struct reo_destination_ring)); 1344 1345 if (!dst_ring_desc) { 1346 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1347 "%s: Memory alloc failed !", __func__); 1348 QDF_ASSERT(0); 1349 return QDF_STATUS_E_NOMEM; 1350 } 1351 1352 qdf_mem_copy(dst_ring_desc, ring_desc, 1353 sizeof(struct reo_destination_ring)); 1354 1355 peer->rx_tid[tid].dst_ring_desc = dst_ring_desc; 1356 peer->rx_tid[tid].head_frag_desc = rx_desc; 1357 1358 return QDF_STATUS_SUCCESS; 1359 } 1360 1361 /* 1362 * dp_rx_defrag_store_fragment(): Store incoming fragments 1363 * @soc: Pointer to the SOC data structure 1364 * @ring_desc: Pointer to the ring descriptor 1365 * @mpdu_desc_info: MPDU descriptor info 1366 * @tid: Traffic Identifier 1367 * @rx_desc: Pointer to rx descriptor 1368 * @rx_bfs: Number of bfs consumed 1369 * 1370 * Returns: QDF_STATUS 1371 */ 1372 static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc, 1373 void *ring_desc, 1374 union dp_rx_desc_list_elem_t **head, 1375 union dp_rx_desc_list_elem_t **tail, 1376 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 1377 unsigned tid, struct dp_rx_desc *rx_desc, 1378 uint32_t *rx_bfs) 1379 { 1380 struct dp_rx_reorder_array_elem *rx_reorder_array_elem; 1381 struct dp_pdev *pdev; 1382 struct dp_peer *peer; 1383 uint16_t peer_id; 1384 uint8_t fragno, more_frag, all_frag_present = 0; 1385 uint16_t rxseq = mpdu_desc_info->mpdu_seq; 1386 QDF_STATUS status; 1387 struct dp_rx_tid *rx_tid; 1388 uint8_t mpdu_sequence_control_valid; 1389 uint8_t mpdu_frame_control_valid; 1390 qdf_nbuf_t frag = rx_desc->nbuf; 1391 1392 /* Check if the packet is from a valid peer */ 1393 peer_id = DP_PEER_METADATA_PEER_ID_GET( 1394 mpdu_desc_info->peer_meta_data); 1395 peer = dp_peer_find_by_id(soc, peer_id); 1396 1397 if (!peer) { 1398 /* We should not receive anything from unknown peer 1399 * however, that might happen while we are in the monitor mode. 1400 * We don't need to handle that here 1401 */ 1402 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1403 "Unknown peer, dropping the fragment"); 1404 1405 goto discard_frag; 1406 } 1407 1408 pdev = peer->vdev->pdev; 1409 rx_tid = &peer->rx_tid[tid]; 1410 1411 mpdu_sequence_control_valid = 1412 hal_rx_get_mpdu_sequence_control_valid(rx_desc->rx_buf_start); 1413 1414 /* Invalid MPDU sequence control field, MPDU is of no use */ 1415 if (!mpdu_sequence_control_valid) { 1416 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1417 "Invalid MPDU seq control field, dropping MPDU"); 1418 1419 qdf_assert(0); 1420 goto discard_frag; 1421 } 1422 1423 mpdu_frame_control_valid = 1424 hal_rx_get_mpdu_frame_control_valid(rx_desc->rx_buf_start); 1425 1426 /* Invalid frame control field */ 1427 if (!mpdu_frame_control_valid) { 1428 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1429 "Invalid frame control field, dropping MPDU"); 1430 1431 qdf_assert(0); 1432 goto discard_frag; 1433 } 1434 1435 /* Current mpdu sequence */ 1436 more_frag = dp_rx_frag_get_more_frag_bit(rx_desc->rx_buf_start); 1437 1438 /* HW does not populate the fragment number as of now 1439 * need to get from the 802.11 header 1440 */ 1441 fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc->rx_buf_start); 1442 1443 rx_reorder_array_elem = peer->rx_tid[tid].array; 1444 if (!rx_reorder_array_elem) { 1445 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1446 "Rcvd Fragmented pkt before peer_tid is setup"); 1447 goto discard_frag; 1448 } 1449 1450 /* 1451 * !more_frag: no more fragments to be delivered 1452 * !frag_no: packet is not fragmented 1453 * !rx_reorder_array_elem->head: no saved fragments so far 1454 */ 1455 if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) { 1456 /* We should not get into this situation here. 1457 * It means an unfragmented packet with fragment flag 1458 * is delivered over the REO exception ring. 1459 * Typically it follows normal rx path. 1460 */ 1461 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1462 "Rcvd unfragmented pkt on REO Err srng, dropping"); 1463 1464 qdf_assert(0); 1465 goto discard_frag; 1466 } 1467 1468 /* Check if the fragment is for the same sequence or a different one */ 1469 if (rx_reorder_array_elem->head) { 1470 if (rxseq != rx_tid->curr_seq_num) { 1471 1472 /* Drop stored fragments if out of sequence 1473 * fragment is received 1474 */ 1475 dp_rx_reorder_flush_frag(peer, tid); 1476 1477 DP_STATS_INC(soc, rx.rx_frag_err, 1); 1478 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1479 "%s mismatch, dropping earlier sequence ", 1480 (rxseq == rx_tid->curr_seq_num) 1481 ? "address" 1482 : "seq number"); 1483 1484 /* 1485 * The sequence number for this fragment becomes the 1486 * new sequence number to be processed 1487 */ 1488 rx_tid->curr_seq_num = rxseq; 1489 } 1490 } else { 1491 /* Start of a new sequence */ 1492 dp_rx_defrag_cleanup(peer, tid); 1493 rx_tid->curr_seq_num = rxseq; 1494 } 1495 1496 /* 1497 * If the earlier sequence was dropped, this will be the fresh start. 1498 * Else, continue with next fragment in a given sequence 1499 */ 1500 status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head, 1501 &rx_reorder_array_elem->tail, frag, 1502 &all_frag_present); 1503 1504 /* 1505 * Currently, we can have only 6 MSDUs per-MPDU, if the current 1506 * packet sequence has more than 6 MSDUs for some reason, we will 1507 * have to use the next MSDU link descriptor and chain them together 1508 * before reinjection 1509 */ 1510 if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) && 1511 (rx_reorder_array_elem->head == frag)) { 1512 1513 qdf_assert_always(ring_desc); 1514 status = dp_rx_defrag_save_info_from_ring_desc(ring_desc, 1515 rx_desc, peer, tid); 1516 1517 if (status != QDF_STATUS_SUCCESS) { 1518 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1519 "%s: Unable to store ring desc !", __func__); 1520 goto discard_frag; 1521 } 1522 } else { 1523 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1524 *rx_bfs = 1; 1525 1526 /* Return the non-head link desc */ 1527 if (ring_desc && 1528 dp_rx_link_desc_return(soc, ring_desc, 1529 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 1530 QDF_STATUS_SUCCESS) 1531 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1532 "%s: Failed to return link desc", __func__); 1533 1534 } 1535 1536 if (pdev->soc->rx.flags.defrag_timeout_check) 1537 dp_rx_defrag_waitlist_remove(peer, tid); 1538 1539 /* Yet to receive more fragments for this sequence number */ 1540 if (!all_frag_present) { 1541 uint32_t now_ms = 1542 qdf_system_ticks_to_msecs(qdf_system_ticks()); 1543 1544 peer->rx_tid[tid].defrag_timeout_ms = 1545 now_ms + pdev->soc->rx.defrag.timeout_ms; 1546 1547 dp_rx_defrag_waitlist_add(peer, tid); 1548 dp_peer_unref_del_find_by_id(peer); 1549 1550 return QDF_STATUS_SUCCESS; 1551 } 1552 1553 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 1554 "All fragments received for sequence: %d", rxseq); 1555 1556 /* Process the fragments */ 1557 status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head, 1558 rx_reorder_array_elem->tail); 1559 if (QDF_IS_STATUS_ERROR(status)) { 1560 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1561 "Fragment processing failed"); 1562 1563 dp_rx_add_to_free_desc_list(head, tail, 1564 peer->rx_tid[tid].head_frag_desc); 1565 *rx_bfs = 1; 1566 1567 if (dp_rx_link_desc_return(soc, 1568 peer->rx_tid[tid].dst_ring_desc, 1569 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 1570 QDF_STATUS_SUCCESS) 1571 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1572 "%s: Failed to return link desc", 1573 __func__); 1574 dp_rx_defrag_cleanup(peer, tid); 1575 goto end; 1576 } 1577 1578 /* Re-inject the fragments back to REO for further processing */ 1579 status = dp_rx_defrag_reo_reinject(peer, tid, 1580 rx_reorder_array_elem->head); 1581 if (QDF_IS_STATUS_SUCCESS(status)) { 1582 rx_reorder_array_elem->head = NULL; 1583 rx_reorder_array_elem->tail = NULL; 1584 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, 1585 "Fragmented sequence successfully reinjected"); 1586 } else { 1587 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1588 "Fragmented sequence reinjection failed"); 1589 dp_rx_return_head_frag_desc(peer, tid); 1590 } 1591 1592 dp_rx_defrag_cleanup(peer, tid); 1593 1594 dp_peer_unref_del_find_by_id(peer); 1595 1596 return QDF_STATUS_SUCCESS; 1597 1598 discard_frag: 1599 qdf_nbuf_free(frag); 1600 dp_rx_add_to_free_desc_list(head, tail, rx_desc); 1601 if (dp_rx_link_desc_return(soc, ring_desc, 1602 HAL_BM_ACTION_PUT_IN_IDLE_LIST) != 1603 QDF_STATUS_SUCCESS) 1604 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1605 "%s: Failed to return link desc", __func__); 1606 *rx_bfs = 1; 1607 1608 end: 1609 if (peer) 1610 dp_peer_unref_del_find_by_id(peer); 1611 1612 DP_STATS_INC(soc, rx.rx_frag_err, 1); 1613 return QDF_STATUS_E_DEFRAG_ERROR; 1614 } 1615 1616 /** 1617 * dp_rx_frag_handle() - Handles fragmented Rx frames 1618 * 1619 * @soc: core txrx main context 1620 * @ring_desc: opaque pointer to the REO error ring descriptor 1621 * @mpdu_desc_info: MPDU descriptor information from ring descriptor 1622 * @head: head of the local descriptor free-list 1623 * @tail: tail of the local descriptor free-list 1624 * @quota: No. of units (packets) that can be serviced in one shot. 1625 * 1626 * This function implements RX 802.11 fragmentation handling 1627 * The handling is mostly same as legacy fragmentation handling. 1628 * If required, this function can re-inject the frames back to 1629 * REO ring (with proper setting to by-pass fragmentation check 1630 * but use duplicate detection / re-ordering and routing these frames 1631 * to a different core. 1632 * 1633 * Return: uint32_t: No. of elements processed 1634 */ 1635 uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc, 1636 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 1637 struct dp_rx_desc *rx_desc, 1638 uint8_t *mac_id, 1639 uint32_t quota) 1640 { 1641 uint32_t rx_bufs_used = 0; 1642 qdf_nbuf_t msdu = NULL; 1643 uint32_t tid, msdu_len; 1644 int rx_bfs = 0; 1645 struct dp_pdev *pdev; 1646 QDF_STATUS status = QDF_STATUS_SUCCESS; 1647 1648 qdf_assert(soc); 1649 qdf_assert(mpdu_desc_info); 1650 qdf_assert(rx_desc); 1651 1652 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 1653 "Number of MSDUs to process, num_msdus: %d", 1654 mpdu_desc_info->msdu_count); 1655 1656 1657 if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) { 1658 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1659 "Not sufficient MSDUs to process"); 1660 return rx_bufs_used; 1661 } 1662 1663 /* all buffers in MSDU link belong to same pdev */ 1664 pdev = soc->pdev_list[rx_desc->pool_id]; 1665 *mac_id = rx_desc->pool_id; 1666 1667 msdu = rx_desc->nbuf; 1668 1669 qdf_nbuf_unmap_single(soc->osdev, msdu, QDF_DMA_FROM_DEVICE); 1670 1671 rx_desc->rx_buf_start = qdf_nbuf_data(msdu); 1672 1673 msdu_len = hal_rx_msdu_start_msdu_len_get(rx_desc->rx_buf_start); 1674 1675 qdf_nbuf_set_pktlen(msdu, (msdu_len + RX_PKT_TLVS_LEN)); 1676 qdf_nbuf_append_ext_list(msdu, NULL, 0); 1677 1678 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start); 1679 1680 /* Process fragment-by-fragment */ 1681 status = dp_rx_defrag_store_fragment(soc, ring_desc, 1682 &pdev->free_list_head, 1683 &pdev->free_list_tail, 1684 mpdu_desc_info, 1685 tid, rx_desc, &rx_bfs); 1686 1687 if (rx_bfs) 1688 rx_bufs_used++; 1689 1690 if (!QDF_IS_STATUS_SUCCESS(status)) 1691 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1692 "Rx Defrag err seq#:0x%x msdu_count:%d flags:%d", 1693 mpdu_desc_info->mpdu_seq, 1694 mpdu_desc_info->msdu_count, 1695 mpdu_desc_info->mpdu_flags); 1696 1697 return rx_bufs_used; 1698 } 1699 1700 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc, 1701 struct dp_peer *peer, uint16_t tid, 1702 uint16_t rxseq, qdf_nbuf_t nbuf) 1703 { 1704 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 1705 struct dp_rx_reorder_array_elem *rx_reorder_array_elem; 1706 uint8_t all_frag_present; 1707 uint32_t msdu_len; 1708 QDF_STATUS status; 1709 1710 rx_reorder_array_elem = peer->rx_tid[tid].array; 1711 1712 if (rx_reorder_array_elem->head && 1713 rxseq != rx_tid->curr_seq_num) { 1714 /* Drop stored fragments if out of sequence 1715 * fragment is received 1716 */ 1717 dp_rx_reorder_flush_frag(peer, tid); 1718 1719 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1720 "%s: No list found for TID %d Seq# %d", 1721 __func__, tid, rxseq); 1722 qdf_nbuf_free(nbuf); 1723 goto fail; 1724 } 1725 1726 msdu_len = hal_rx_msdu_start_msdu_len_get(qdf_nbuf_data(nbuf)); 1727 1728 qdf_nbuf_set_pktlen(nbuf, (msdu_len + RX_PKT_TLVS_LEN)); 1729 1730 status = dp_rx_defrag_fraglist_insert(peer, tid, 1731 &rx_reorder_array_elem->head, 1732 &rx_reorder_array_elem->tail, nbuf, 1733 &all_frag_present); 1734 1735 if (QDF_IS_STATUS_ERROR(status)) { 1736 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1737 "%s Fragment insert failed", __func__); 1738 1739 goto fail; 1740 } 1741 1742 if (soc->rx.flags.defrag_timeout_check) 1743 dp_rx_defrag_waitlist_remove(peer, tid); 1744 1745 if (!all_frag_present) { 1746 uint32_t now_ms = 1747 qdf_system_ticks_to_msecs(qdf_system_ticks()); 1748 1749 peer->rx_tid[tid].defrag_timeout_ms = 1750 now_ms + soc->rx.defrag.timeout_ms; 1751 1752 dp_rx_defrag_waitlist_add(peer, tid); 1753 1754 return QDF_STATUS_SUCCESS; 1755 } 1756 1757 status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head, 1758 rx_reorder_array_elem->tail); 1759 1760 if (QDF_IS_STATUS_ERROR(status)) { 1761 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1762 "%s Fragment processing failed", __func__); 1763 1764 dp_rx_return_head_frag_desc(peer, tid); 1765 dp_rx_defrag_cleanup(peer, tid); 1766 1767 goto fail; 1768 } 1769 1770 /* Re-inject the fragments back to REO for further processing */ 1771 status = dp_rx_defrag_reo_reinject(peer, tid, 1772 rx_reorder_array_elem->head); 1773 if (QDF_IS_STATUS_SUCCESS(status)) { 1774 rx_reorder_array_elem->head = NULL; 1775 rx_reorder_array_elem->tail = NULL; 1776 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1777 "%s: Frag seq successfully reinjected", 1778 __func__); 1779 } else { 1780 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1781 "%s: Frag seq reinjection failed", __func__); 1782 dp_rx_return_head_frag_desc(peer, tid); 1783 } 1784 1785 dp_rx_defrag_cleanup(peer, tid); 1786 return QDF_STATUS_SUCCESS; 1787 1788 fail: 1789 return QDF_STATUS_E_DEFRAG_ERROR; 1790 } 1791