/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "dp_ipa.h"

const struct dp_rx_defrag_cipher dp_f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct dp_rx_defrag_cipher dp_f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct dp_rx_defrag_cipher dp_f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};

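/*
 * Note on the cipher tables above: ic_header is the number of cipher
 * header bytes that precede the payload (stripped before reassembly),
 * ic_trailer is the number of bytes trimmed from the tail of each
 * fragment, and ic_miclen is the Michael MIC length (used only by TKIP).
 * For reference, with the usual values behind the IEEE80211_WEP_* macros
 * (IV 3 + key-id 1 + extended IV 4, ICV 4, MIC 8) this gives TKIP an
 * 8-byte header, 4-byte trailer and 8-byte MIC; the exact numbers come
 * from those macros, so treat the figures here as illustrative only.
 */
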
/*
 * dp_rx_defrag_frames_free(): Free fragment chain
 * @frames: Fragment chain
 *
 * Iterates through the fragment chain and frees them
 * Returns: None
 */
static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
{
	qdf_nbuf_t next, frag = frames;

	while (frag) {
		next = qdf_nbuf_next(frag);
		qdf_nbuf_free(frag);
		frag = next;
	}
}

/*
 * dp_rx_clear_saved_desc_info(): Clears saved descriptor info
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Frees the MPDU descriptor info and MSDU link pointer that were saved
 * from the REO destination ring descriptor. The cache is maintained
 * per peer, per TID.
 *
 * Returns: None
 */
static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid)
{
	if (peer->rx_tid[tid].dst_ring_desc)
		qdf_mem_free(peer->rx_tid[tid].dst_ring_desc);

	peer->rx_tid[tid].dst_ring_desc = NULL;
	peer->rx_tid[tid].head_frag_desc = NULL;
}

static void dp_rx_return_head_frag_desc(struct dp_peer *peer,
					unsigned int tid)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	pdev = peer->vdev->pdev;
	soc = pdev->soc;

	if (peer->rx_tid[tid].head_frag_desc) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];

		dp_rx_add_to_free_desc_list(&head, &tail,
					    peer->rx_tid[tid].head_frag_desc);
		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					1, &head, &tail);
	}

	if (peer->rx_tid[tid].dst_ring_desc) {
		if (dp_rx_link_desc_return(soc,
					   peer->rx_tid[tid].dst_ring_desc,
					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
		    QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to return link desc", __func__);
	}
}

/*
 * dp_rx_reorder_flush_frag(): Flush the frag list
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Flush the per-TID frag list
 *
 * Returns: None
 */
void dp_rx_reorder_flush_frag(struct dp_peer *peer,
			      unsigned int tid)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("Flushing TID %d"), tid);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL peer", __func__);
		return;
	}

	dp_rx_return_head_frag_desc(peer, tid);
	dp_rx_defrag_cleanup(peer, tid);
}

/*
 * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list
 * @soc: DP SOC
 *
 * Flush fragments of all waitlisted TIDs
 *
 * Returns: None
 */
void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
{
	struct dp_rx_tid *rx_reorder = NULL;
	struct dp_rx_tid *tmp;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
	TAILQ_HEAD(, dp_rx_tid) temp_list;

	TAILQ_INIT(&temp_list);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("Current time %u"), now_ms);

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		uint32_t tid;

		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		if (tid >= DP_MAX_TIDS) {
			qdf_assert(0);
			continue;
		}

		TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder,
			     defrag_waitlist_elem);
		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);

		/* Move to temp list and clean-up later */
		TAILQ_INSERT_TAIL(&temp_list, rx_reorder,
				  defrag_waitlist_elem);
	}
	if (rx_reorder) {
		soc->rx.defrag.next_flush_ms =
			rx_reorder->defrag_timeout_ms;
	} else {
		soc->rx.defrag.next_flush_ms =
			now_ms + soc->rx.defrag.timeout_ms;
	}

	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);

	TAILQ_FOREACH_SAFE(rx_reorder, &temp_list,
			   defrag_waitlist_elem, tmp) {
		struct dp_peer *peer, *temp_peer = NULL;

		qdf_spin_lock_bh(&rx_reorder->tid_lock);
		TAILQ_REMOVE(&temp_list, rx_reorder,
			     defrag_waitlist_elem);
		/* get address of current peer */
		peer =
			container_of(rx_reorder, struct dp_peer,
				     rx_tid[rx_reorder->tid]);
		qdf_spin_unlock_bh(&rx_reorder->tid_lock);

		temp_peer = dp_peer_find_by_id(soc, peer->peer_ids[0]);
		if (temp_peer == peer) {
			qdf_spin_lock_bh(&rx_reorder->tid_lock);
			dp_rx_reorder_flush_frag(peer, rx_reorder->tid);
			qdf_spin_unlock_bh(&rx_reorder->tid_lock);
		}

		if (temp_peer)
			dp_peer_unref_del_find_by_id(temp_peer);
	}
}

/*
 * dp_rx_defrag_waitlist_add(): Update per-SOC defrag wait list
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Appends per-tid fragments to global fragment wait list
 *
 * Returns: None
 */
static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid)
{
	struct dp_soc *psoc = peer->vdev->pdev->soc;
	struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid];

	dp_debug("Adding TID %u to waitlist for peer %pK at MAC address %pM",
		 tid, peer, peer->mac_addr.raw);

	/* TODO: use LIST macros instead of TAIL macros */
	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
	if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist))
		psoc->rx.defrag.next_flush_ms = rx_reorder->defrag_timeout_ms;
	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder,
			  defrag_waitlist_elem);
	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
}

/*
 * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Remove fragments from waitlist
 *
 * Returns: None
 */
void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_rx_tid *rx_reorder;
	struct dp_rx_tid *tmp;

	dp_debug("Removing TID %u from waitlist for peer %pK at MAC address %pM",
		 tid, peer, peer->mac_addr.raw);

	if (tid >= DP_MAX_TIDS) {
		dp_err("TID out of bounds: %d", tid);
		qdf_assert_always(0);
	}

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct dp_peer *peer_on_waitlist;

		/* get address of current peer */
		peer_on_waitlist =
			container_of(rx_reorder, struct dp_peer,
				     rx_tid[rx_reorder->tid]);

		/* Ensure it is TID for same peer */
		if (peer_on_waitlist == peer && rx_reorder->tid == tid) {
			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
				     rx_reorder, defrag_waitlist_elem);
			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
		}
	}
	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
}

/*
 * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 * @head_addr: Pointer to head of the fragment list
 * @tail_addr: Pointer to tail of the fragment list
 * @frag: Incoming fragment
 * @all_frag_present: Flag to indicate whether all fragments are received
 *
 * Build a per-tid, per-sequence fragment list.
 *
 * Returns: Success, if inserted
 */
static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid,
	qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag,
	uint8_t *all_frag_present)
{
	qdf_nbuf_t next;
	qdf_nbuf_t prev = NULL;
	qdf_nbuf_t cur;
	uint16_t head_fragno, cur_fragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	uint8_t *rx_desc_info;

	qdf_assert(frag);
	qdf_assert(head_addr);
	qdf_assert(tail_addr);

	*all_frag_present = 0;
	rx_desc_info = qdf_nbuf_data(frag);
	cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);

	/* If this is the first fragment */
	if (!(*head_addr)) {
		*head_addr = *tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;

		goto insert_done;
	}

	/* In sequence fragment */
	if (cur_fragno > rx_tid->curr_frag_num) {
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;
	} else {
		/* Out of sequence fragment */
		cur = *head_addr;
		rx_desc_info = qdf_nbuf_data(cur);
		head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);

		if (cur_fragno == head_fragno) {
			qdf_nbuf_free(frag);
			goto insert_fail;
		} else if (head_fragno > cur_fragno) {
			qdf_nbuf_set_next(frag, cur);
			cur = frag;
			*head_addr = frag; /* head pointer to be updated */
		} else {
			while ((cur_fragno > head_fragno) && cur) {
				prev = cur;
				cur = qdf_nbuf_next(cur);
				/* guard against walking past the tail */
				if (cur) {
					rx_desc_info = qdf_nbuf_data(cur);
					head_fragno =
						dp_rx_frag_get_mpdu_frag_number(
								rx_desc_info);
				}
			}

			if (cur_fragno == head_fragno) {
				qdf_nbuf_free(frag);
				goto insert_fail;
			}

			qdf_nbuf_set_next(prev, frag);
			qdf_nbuf_set_next(frag, cur);
		}
	}

	next = qdf_nbuf_next(*head_addr);

	rx_desc_info = qdf_nbuf_data(*tail_addr);
	last_morefrag = dp_rx_frag_get_more_frag_bit(rx_desc_info);

	/* TODO: optimize the loop */
	if (!last_morefrag) {
		/* Check if all fragments are present */
		do {
			rx_desc_info = qdf_nbuf_data(next);
			next_fragno =
				dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
			count++;

			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return QDF_STATUS_SUCCESS;
		}
	}

insert_done:
	return QDF_STATUS_SUCCESS;

insert_fail:
	return QDF_STATUS_E_FAILURE;
}

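/*
 * Note on the list built above: fragments are kept sorted by fragment
 * number, so receiving frag 0, frag 2, then frag 1 (with the more-frag
 * bit clear on frag 2) yields head -> frag0 -> frag1 -> frag2, duplicates
 * are dropped, and all_frag_present is set only once the fragment numbers
 * form a contiguous 0..N run whose last element has the more-frag bit
 * cleared.
 */
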
/*
 * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment
 * @msdu: Pointer to the fragment
 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
 *
 * decap tkip encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);

	/* start of 802.11 header info */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);

	/* TKIP header is located post 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "IEEE80211_WEP_EXTIV is missing in TKIP fragment");
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment
 * @nbuf: Pointer to the fragment buffer
 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
 *
 * Remove MIC information from CCMP fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);

	/* start of the 802.11 header */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);

	/* CCMP header is located after 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment
 * @nbuf: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap CCMP encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);

	origHdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = origHdr + hdrlen;

	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	/* Let's pull the header later */

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment
 * @msdu: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap WEP encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *origHdr;
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);

	origHdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);

	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment
 * @soc: soc handle
 * @nbuf: Pointer to the fragment
 *
 * Calculate the header size of the received fragment
 *
 * Returns: header size (uint16_t)
 */
static uint16_t dp_rx_defrag_hdrsize(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
	uint16_t size = sizeof(struct ieee80211_frame);
	uint16_t fc = 0;
	uint32_t to_ds, fr_ds;
	uint8_t frm_ctrl_valid;
	uint16_t frm_ctrl_field;

	to_ds = hal_rx_mpdu_get_to_ds(soc->hal_soc, rx_tlv_hdr);
	fr_ds = hal_rx_mpdu_get_fr_ds(soc->hal_soc, rx_tlv_hdr);
	frm_ctrl_valid =
		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						    rx_tlv_hdr);
	frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr);

	if (to_ds && fr_ds)
		size += QDF_MAC_ADDR_SIZE;

	if (frm_ctrl_valid) {
		fc = frm_ctrl_field;

		/* use first byte for validation */
		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
			size += sizeof(uint16_t);
			/* use second byte for validation */
			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
				size += sizeof(struct ieee80211_htc);
		}
	}

	return size;
}

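/*
 * Worked example for dp_rx_defrag_hdrsize() above (illustrative, assuming
 * the usual sizes: struct ieee80211_frame is 24 bytes, a MAC address is
 * 6 bytes, the QoS control field is 2 bytes and the HT control field is
 * 4 bytes):
 *   - 3-address data frame:                24 bytes
 *   - 3-address QoS data frame:            24 + 2 = 26 bytes
 *   - 4-address (WDS) QoS data frame:      24 + 6 + 2 = 32 bytes
 *   - QoS data frame with the FC1 order
 *     bit set (HT control present):        26 + 4 = 30 bytes
 */
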
/*
 * dp_rx_defrag_michdr(): Calculate a pseudo MIC header
 * @wh0: Pointer to the wireless header of the fragment
 * @hdr: Array to hold the pseudo header
 *
 * Calculate a pseudo MIC header
 *
 * Returns: None
 */
static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
				uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
						 wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
						 wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
						 wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
						 wh->i_addr4);
		break;
	}

	/*
	 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for data frame, but
	 * it could also be set for deauth, disassoc, action, etc. for
	 * a mgt type frame. It comes into picture for MFP.
	 */
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
		    IEEE80211_FC1_DIR_DSTODS) {
			const struct ieee80211_qosframe_addr4 *qwh =
				(const struct ieee80211_qosframe_addr4 *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		} else {
			const struct ieee80211_qosframe *qwh =
				(const struct ieee80211_qosframe *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		}
	} else {
		hdr[12] = 0;
	}

	hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
}

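/*
 * Layout of the 16-byte Michael pseudo header produced above and
 * consumed by dp_rx_defrag_mic() below:
 *
 *   hdr[0..5]   destination address (DA)
 *   hdr[6..11]  source address (SA)
 *   hdr[12]     QoS TID (0 for non-QoS frames)
 *   hdr[13..15] reserved, always 0
 */
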
/*
 * dp_rx_defrag_mic(): Calculate the Michael MIC
 * @key: Pointer to the key
 * @wbuf: fragment buffer
 * @off: Offset
 * @data_len: Data length
 * @mic: Array to hold MIC
 *
 * Calculate the Michael MIC over the pseudo header and the fragment data
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf,
				   uint16_t off, uint16_t data_len,
				   uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);

	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
		+ rx_desc_len), hdr);

	l = dp_rx_get_le32(key);
	r = dp_rx_get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, Priority, 3 x 0 */
	l ^= dp_rx_get_le32(hdr);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[4]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[8]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[12]);
	dp_rx_michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
	space = qdf_nbuf_len(wbuf) - off;

	for (;; ) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= dp_rx_get_le32(data);
			dp_rx_michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = qdf_nbuf_next(wbuf);
		if (!wbuf)
			return QDF_STATUS_E_DEFRAG_ERROR;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *)qdf_nbuf_data(wbuf) + off;
			if ((qdf_nbuf_len(wbuf)) <
				sizeof(uint32_t) - space) {
				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			switch (space) {
			case 1:
				l ^= dp_rx_get_le32_split(data[0],
					data_next[0], data_next[1],
					data_next[2]);
				data = data_next + 3;
				space = (qdf_nbuf_len(wbuf) - off) - 3;
				break;
			case 2:
				l ^= dp_rx_get_le32_split(data[0], data[1],
					data_next[0], data_next[1]);
				data = data_next + 2;
				space = (qdf_nbuf_len(wbuf) - off) - 2;
				break;
			case 3:
				l ^= dp_rx_get_le32_split(data[0], data[1],
					data[2], data_next[0]);
				data = data_next + 1;
				space = (qdf_nbuf_len(wbuf) - off) - 1;
				break;
			}
			dp_rx_michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
			space = qdf_nbuf_len(wbuf) - off;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	dp_rx_michael_block(l, r);
	dp_rx_michael_block(l, r);
	dp_rx_put_le32(mic, l);
	dp_rx_put_le32(mic + 4, r);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_tkip_demic(): Verify and remove the MIC from the TKIP frame
 * @key: Pointer to the key
 * @msdu: fragment buffer
 * @hdrlen: Length of the header information
 *
 * Verify the Michael MIC and remove it from the TKIP frame
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key,
					  qdf_nbuf_t msdu, uint16_t hdrlen)
{
	QDF_STATUS status;
	uint32_t pktlen = 0;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	qdf_nbuf_t prev = NULL, next;

	next = msdu;
	while (next) {
		pktlen += (qdf_nbuf_len(next) - hdrlen);
		prev = next;
		dp_debug("%s pktlen %u", __func__,
			 (uint32_t)(qdf_nbuf_len(next) - hdrlen));
		next = qdf_nbuf_next(next);
	}

	if (!prev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Defrag chaining failed !\n", __func__);
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	qdf_nbuf_copy_bits(prev, qdf_nbuf_len(prev) - dp_f_tkip.ic_miclen,
			   dp_f_tkip.ic_miclen, (caddr_t)mic0);
	qdf_nbuf_trim_tail(prev, dp_f_tkip.ic_miclen);
	pktlen -= dp_f_tkip.ic_miclen;

	status = dp_rx_defrag_mic(key, msdu, hdrlen,
				  pktlen, mic);

	if (QDF_IS_STATUS_ERROR(status))
		return status;

	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers
 * @nbuf: buffer pointer
 * @hdrsize: size of the header to be pulled
 *
 * Pull the RXTLV & the 802.11 headers
 *
 * Returns: None
 */
static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	qdf_nbuf_pull_head(nbuf,
			   RX_PKT_TLVS_LEN + hdrsize);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: final pktlen %d .11len %d",
		  __func__, (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
}

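/*
 * dp_rx_construct_fraglist() below leaves the head nbuf with its RX TLVs
 * and 802.11 header intact, pulls the RX TLVs and 802.11 header from every
 * subsequent fragment (dp_rx_frag_pull_hdr() above), and links those
 * fragments to the head as an nbuf extension (frag) list, so the head's
 * data_len accounts for the bytes carried in the extension list (this is
 * what the debug print in that function reports).
 */
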
/*
 * dp_rx_construct_fraglist(): Construct a nbuf fraglist
 * @peer: Pointer to the peer
 * @head: Pointer to list of fragments
 * @hdrsize: Size of the header to be pulled
 *
 * Construct a nbuf fraglist
 *
 * Returns: None
 */
static void
dp_rx_construct_fraglist(struct dp_peer *peer,
			 qdf_nbuf_t head, uint16_t hdrsize)
{
	qdf_nbuf_t msdu = qdf_nbuf_next(head);
	qdf_nbuf_t rx_nbuf = msdu;
	uint32_t len = 0;

	while (msdu) {
		dp_rx_frag_pull_hdr(msdu, hdrsize);
		len += qdf_nbuf_len(msdu);
		msdu = qdf_nbuf_next(msdu);
	}

	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
	qdf_nbuf_set_next(head, NULL);
	qdf_nbuf_set_is_frag(head, 1);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: head len %d ext len %d data len %d ",
		  __func__,
		  (uint32_t)qdf_nbuf_len(head),
		  (uint32_t)qdf_nbuf_len(rx_nbuf),
		  (uint32_t)(head->data_len));
}

/**
 * dp_rx_defrag_err() - rx defragmentation error handler
 * @vdev: handle to vdev object
 * @nbuf: packet buffer on which the MIC failure was detected
 *
 * This function handles rx error and sends a MIC error notification
 *
 * Return: None
 */
static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct ol_if_ops *tops = NULL;
	struct dp_pdev *pdev = vdev->pdev;
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
	uint8_t *orig_hdr;
	struct ieee80211_frame *wh;
	struct cdp_rx_mic_err_info mic_failure_info;

	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	wh = (struct ieee80211_frame *)orig_hdr;

	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr1);
	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr2);
	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(wh->i_addr1);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = (uint8_t *)wh;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);
}

/*
 * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
 * @soc: dp soc handle
 * @nbuf: Pointer to the fragment buffer
 * @hdrsize: Size of headers
 *
 * Transcap the fragment from 802.11 to 802.3
 *
 * Returns: None
 */
static void
dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc,
			   qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	struct llc_snap_hdr_t *llchdr;
	struct ethernet_hdr_t *eth_hdr;
	uint8_t ether_type[2];
	uint16_t fc = 0;
	union dp_align_mac_addr mac_addr;
	uint8_t *rx_desc_info = qdf_mem_malloc(RX_PKT_TLVS_LEN);

	if (!rx_desc_info) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Memory alloc failed !", __func__);
		QDF_ASSERT(0);
		return;
	}

	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN);

	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
		RX_PKT_TLVS_LEN + hdrsize);
	qdf_mem_copy(ether_type, llchdr->ethertype, 2);

	qdf_nbuf_pull_head(nbuf, (RX_PKT_TLVS_LEN + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));

	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));

	if (hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						rx_desc_info))
		fc = hal_rx_get_frame_ctrl_field(rx_desc_info);

	dp_debug("%s: frame control type: 0x%x", __func__, fc);

	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_TODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		break;

	case IEEE80211_FC1_DIR_DSTODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr4(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Unknown frame control type: 0x%x", __func__, fc);
	}

	qdf_mem_copy(eth_hdr->ethertype, ether_type,
		     sizeof(ether_type));

	qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN);
	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, RX_PKT_TLVS_LEN);
	qdf_mem_free(rx_desc_info);
}

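/*
 * Buffer layout handled by dp_rx_defrag_nwifi_to_8023() above for the head
 * fragment, before and after the conversion:
 *
 *   before: [RX pkt TLVs][802.11 header][LLC/SNAP][payload]
 *   after:  [RX pkt TLVs][802.3 header][payload]
 *
 * The saved RX TLVs are copied back in front of the newly built Ethernet
 * header so that later stages can still parse them.
 */
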
/*
 * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @head: Buffer to be reinjected back
 *
 * Reinject the fragment chain back into REO
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
					    unsigned int tid, qdf_nbuf_t head)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct hal_buf_info buf_info;
	void *link_desc_va;
	void *msdu0, *msdu_desc_info;
	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
	void *dst_mpdu_desc_info, *dst_qdesc_addr;
	qdf_dma_addr_t paddr;
	uint32_t nbuf_len, seq_no, dst_ind;
	uint32_t *mpdu_wrd;
	uint32_t ret, cookie;
	hal_ring_desc_t dst_ring_desc =
		peer->rx_tid[tid].dst_ring_desc;
	hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng;
	struct dp_rx_desc *rx_desc = peer->rx_tid[tid].head_frag_desc;
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
		peer->rx_tid[tid].array;
	qdf_nbuf_t nbuf_head;

	nbuf_head = dp_ipa_handle_rx_reo_reinject(soc, head);
	if (qdf_unlikely(!nbuf_head)) {
		dp_err_rl("IPA RX REO reinject failed");
		return QDF_STATUS_E_FAILURE;
	}

	/* update new allocated skb in case IPA is enabled */
	if (nbuf_head != head) {
		head = nbuf_head;
		rx_desc->nbuf = head;
		rx_reorder_array_elem->head = head;
	}

	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
	if (!ent_ring_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HAL src ring next entry NULL");
		return QDF_STATUS_E_FAILURE;
	}

	hal_rx_reo_buf_paddr_get(dst_ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	qdf_assert(link_desc_va);

	msdu0 = hal_rx_msdu0_buffer_addr_lsb(soc->hal_soc, link_desc_va);
	nbuf_len = qdf_nbuf_len(head) - RX_PKT_TLVS_LEN;

	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
			       UNI_DESC_BUF_TYPE_RX_MSDU_LINK);

	/* msdu reconfig */
	msdu_desc_info = hal_rx_msdu_desc_info_ptr_get(soc->hal_soc, msdu0);

	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);

	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));

	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  FIRST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  LAST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_CONTINUATION, 0x0);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  REO_DESTINATION_INDICATION, dst_ind);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_LENGTH, nbuf_len);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  SA_IS_VALID, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  DA_IS_VALID, 1);

	/* change RX TLV's */
	hal_rx_msdu_start_msdu_len_set(
			qdf_nbuf_data(head), nbuf_len);

	cookie = HAL_RX_BUF_COOKIE_GET(msdu0);

	/* map the nbuf before reinject it into HW */
	ret = qdf_nbuf_map_single(soc->osdev, head,
				  QDF_DMA_FROM_DEVICE);
	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: nbuf map failed !", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * As part of the rx frag handler the buffer was unmapped and the
	 * rx desc unmapped flag was set to 1. For the defrag reinject
	 * frame, reset it back to 0.
	 */
	rx_desc->unmapped = 0;

	dp_ipa_handle_rx_buf_smmu_mapping(soc, head, true);

	paddr = qdf_nbuf_get_frag_paddr(head, 0);

	ret = check_x86_paddr(soc, &head, &paddr, pdev);

	if (ret == QDF_STATUS_E_FAILURE) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: x86 check failed !", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_DEFRAG_RBM);

	/* Now fill the entrance ring */
	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HAL RING Access For REO entrance SRNG Failed: %pK",
			  hal_srng);

		return QDF_STATUS_E_FAILURE;
	}

	paddr = (uint64_t)buf_info.paddr;
	/* buf addr */
	hal_rxdma_buff_addr_info_set(ent_ring_desc, paddr,
				     buf_info.sw_cookie,
				     HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
	/* mpdu desc info */
	ent_mpdu_desc_info = hal_ent_mpdu_desc_info(soc->hal_soc,
						    ent_ring_desc);
	dst_mpdu_desc_info = hal_dst_mpdu_desc_info(soc->hal_soc,
						    dst_ring_desc);

	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
		     sizeof(struct rx_mpdu_desc_info));
	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));

	mpdu_wrd = (uint32_t *)dst_mpdu_desc_info;
	seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd);

	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  MSDU_COUNT, 0x1);
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  MPDU_SEQUENCE_NUMBER, seq_no);
	/* unset frag bit */
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  FRAGMENT_FLAG, 0x0);
	/* set sa/da valid bits */
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  SA_IS_VALID, 0x1);
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  DA_IS_VALID, 0x1);
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  RAW_MPDU, 0x0);

	/* qdesc addr */
	ent_qdesc_addr = (uint8_t *)ent_ring_desc +
		REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;

	dst_qdesc_addr = (uint8_t *)dst_ring_desc +
		REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;

	qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8);

	HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5,
		       REO_DESTINATION_INDICATION, dst_ind);

	hal_srng_access_end(soc->hal_soc, hal_srng);

	DP_STATS_INC(soc, rx.reo_reinject, 1);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: reinjection done !", __func__);
	return QDF_STATUS_SUCCESS;
}

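/*
 * Defragmentation proper, as implemented by dp_rx_defrag() below:
 *   1. trim the FCS from every fragment,
 *   2. run the per-cipher decap/demic step (TKIP, CCMP or WEP) on each
 *      fragment and account for the cipher header that will be stripped,
 *   3. for TKIP (but not the no-MIC variant), verify the Michael MIC
 *      across the reassembled chain and raise a MIC failure indication
 *      on mismatch,
 *   4. convert the head fragment from 802.11 to 802.3 and chain the
 *      remaining fragments to it as an extension list.
 */
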
/*
 * dp_rx_defrag(): Defragment the fragment chain
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @frag_list_head: Head of the fragment list
 * @frag_list_tail: Tail of the fragment list
 *
 * Defragment the fragment chain
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
			       qdf_nbuf_t frag_list_head,
			       qdf_nbuf_t frag_list_tail)
{
	qdf_nbuf_t tmp_next, prev;
	qdf_nbuf_t cur = frag_list_head, msdu;
	uint32_t index, tkip_demic = 0;
	uint16_t hdr_space;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint8_t status = 0;

	hdr_space = dp_rx_defrag_hdrsize(soc, cur);
	index = hal_rx_msdu_is_wlan_mcast(cur) ?
		dp_sec_mcast : dp_sec_ucast;

	/* Remove FCS from all fragments */
	while (cur) {
		tmp_next = qdf_nbuf_next(cur);
		qdf_nbuf_set_next(cur, NULL);
		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		prev = cur;
		qdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list_head;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: index %d Security type: %d", __func__,
		  index, peer->security[index].sec_type);

	switch (peer->security[index].sec_type) {
	case cdp_sec_type_tkip:
		tkip_demic = 1;
		/* fall through to the common TKIP decap */

	case cdp_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_tkip_decap(cur, hdr_space)) {

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: TKIP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_tkip.ic_header;
		break;

	case cdp_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) {

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: CCMP demic failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) {

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: CCMP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_ccmp.ic_header;
		break;

	case cdp_sec_type_wep40:
	case cdp_sec_type_wep104:
	case cdp_sec_type_wep128:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_wep_decap(cur, hdr_space)) {

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: WEP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_wep.ic_header;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_ERROR,
			  "dp_rx_defrag: Did not match any security type");
		break;
	}

	if (tkip_demic) {
		msdu = frag_list_head;
		qdf_mem_copy(key,
			     &peer->security[index].michael_key[0],
			     IEEE80211_WEP_MICLEN);
		status = dp_rx_defrag_tkip_demic(key, msdu,
						 RX_PKT_TLVS_LEN +
						 hdr_space);

		if (status) {
			dp_rx_defrag_err(vdev, frag_list_head);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: TKIP demic failed status %d",
				  __func__, status);

			return QDF_STATUS_E_DEFRAG_ERROR;
		}
	}

	/* Convert the header to 802.3 header */
	dp_rx_defrag_nwifi_to_8023(soc, frag_list_head, hdr_space);
	dp_rx_construct_fraglist(peer, frag_list_head, hdr_space);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_cleanup(): Clean up activities
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 *
 * Returns: None
 */
void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
		peer->rx_tid[tid].array;

	if (rx_reorder_array_elem) {
		/* Free up nbufs */
		dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
	} else {
		dp_info("Cleanup self peer %pK and TID %u at MAC address %pM",
			peer, tid, peer->mac_addr.raw);
	}

	/* Free up saved ring descriptors */
	dp_rx_clear_saved_desc_info(peer, tid);

	peer->rx_tid[tid].defrag_timeout_ms = 0;
	peer->rx_tid[tid].curr_frag_num = 0;
	peer->rx_tid[tid].curr_seq_num = 0;
}

/*
 * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor
 * @ring_desc: Pointer to the dst ring descriptor
 * @rx_desc: Pointer to the rx descriptor of the head fragment
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_save_info_from_ring_desc(hal_ring_desc_t ring_desc,
				      struct dp_rx_desc *rx_desc,
				      struct dp_peer *peer,
				      unsigned int tid)
{
	void *dst_ring_desc = qdf_mem_malloc(
			sizeof(struct reo_destination_ring));

	if (!dst_ring_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Memory alloc failed !", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(dst_ring_desc, ring_desc,
		     sizeof(struct reo_destination_ring));

	peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
	peer->rx_tid[tid].head_frag_desc = rx_desc;

	return QDF_STATUS_SUCCESS;
}

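/*
 * High-level flow of dp_rx_defrag_store_fragment() below: validate the
 * fragment and its peer/TID, insert it into the per-TID, per-sequence
 * fragment list, keep the REO destination ring descriptor of the head
 * fragment (other descriptors are released), park the TID on the defrag
 * waitlist while fragments are still outstanding, and once all fragments
 * are present run dp_rx_defrag() and reinject the reassembled frame back
 * to REO.
 */
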
/*
 * dp_rx_defrag_store_fragment(): Store incoming fragments
 * @soc: Pointer to the SOC data structure
 * @ring_desc: Pointer to the ring descriptor
 * @head: Pointer to head of the local descriptor free-list
 * @tail: Pointer to tail of the local descriptor free-list
 * @mpdu_desc_info: MPDU descriptor info
 * @tid: Traffic Identifier
 * @rx_desc: Pointer to rx descriptor
 * @rx_bfs: Pointer to flag indicating whether the rx buffer was consumed
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_store_fragment(struct dp_soc *soc,
			    hal_ring_desc_t ring_desc,
			    union dp_rx_desc_list_elem_t **head,
			    union dp_rx_desc_list_elem_t **tail,
			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			    unsigned int tid, struct dp_rx_desc *rx_desc,
			    uint32_t *rx_bfs)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
	struct dp_pdev *pdev;
	struct dp_peer *peer = NULL;
	uint16_t peer_id;
	uint8_t fragno, more_frag, all_frag_present = 0;
	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
	QDF_STATUS status;
	struct dp_rx_tid *rx_tid;
	uint8_t mpdu_sequence_control_valid;
	uint8_t mpdu_frame_control_valid;
	qdf_nbuf_t frag = rx_desc->nbuf;
	uint32_t msdu_len;

	if (qdf_nbuf_len(frag) > 0) {
		dp_info("Dropping unexpected packet with skb_len: %d, "
			"data len: %d, cookie: %d",
			(uint32_t)qdf_nbuf_len(frag), frag->data_len,
			rx_desc->cookie);
		DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1);
		goto discard_frag;
	}

	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_desc->rx_buf_start);

	qdf_nbuf_set_pktlen(frag, (msdu_len + RX_PKT_TLVS_LEN));
	qdf_nbuf_append_ext_list(frag, NULL, 0);

	/* Check if the packet is from a valid peer */
	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			mpdu_desc_info->peer_meta_data);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		/* We should not receive anything from an unknown peer;
		 * however, that might happen while we are in monitor mode.
		 * We don't need to handle that here.
		 */
		dp_info_rl("Unknown peer with peer_id %d, dropping fragment",
			   peer_id);
		DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1);
		goto discard_frag;
	}

	if (tid >= DP_MAX_TIDS) {
		dp_info("TID out of bounds: %d", tid);
		qdf_assert_always(0);
	}

	pdev = peer->vdev->pdev;
	rx_tid = &peer->rx_tid[tid];

	mpdu_sequence_control_valid =
		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
						       rx_desc->rx_buf_start);

	/* Invalid MPDU sequence control field, MPDU is of no use */
	if (!mpdu_sequence_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid MPDU seq control field, dropping MPDU");

		qdf_assert(0);
		goto discard_frag;
	}

	mpdu_frame_control_valid =
		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						    rx_desc->rx_buf_start);

	/* Invalid frame control field */
	if (!mpdu_frame_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid frame control field, dropping MPDU");

		qdf_assert(0);
		goto discard_frag;
	}

	/* Current mpdu sequence */
	more_frag = dp_rx_frag_get_more_frag_bit(rx_desc->rx_buf_start);

	/* HW does not populate the fragment number as of now,
	 * need to get it from the 802.11 header
	 */
	fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc->rx_buf_start);

	rx_reorder_array_elem = peer->rx_tid[tid].array;
	if (!rx_reorder_array_elem) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Rcvd Fragmented pkt before peer_tid is set up");
		goto discard_frag;
	}

	/*
	 * !more_frag: no more fragments to be delivered
	 * !frag_no: packet is not fragmented
	 * !rx_reorder_array_elem->head: no saved fragments so far
	 */
	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		/* We should not get into this situation here.
		 * It means an unfragmented packet with fragment flag
		 * is delivered over the REO exception ring.
		 * Typically it follows normal rx path.
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Rcvd unfragmented pkt on REO Err srng, dropping");

		qdf_assert(0);
		goto discard_frag;
	}

	/* Check if the fragment is for the same sequence or a different one */
	if (rx_reorder_array_elem->head) {
		if (rxseq != rx_tid->curr_seq_num) {

			/* Drop stored fragments if out of sequence
			 * fragment is received
			 */
			dp_rx_reorder_flush_frag(peer, tid);

			DP_STATS_INC(soc, rx.rx_frag_err, 1);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s mismatch, dropping earlier sequence ",
				  (rxseq == rx_tid->curr_seq_num)
				  ? "address"
				  : "seq number");

			/*
			 * The sequence number for this fragment becomes the
			 * new sequence number to be processed
			 */
			rx_tid->curr_seq_num = rxseq;
		}
	} else {
		/* Start of a new sequence */
		dp_rx_defrag_cleanup(peer, tid);
		rx_tid->curr_seq_num = rxseq;
	}

	/*
	 * If the earlier sequence was dropped, this will be the fresh start.
	 * Else, continue with the next fragment in a given sequence
	 */
	status = dp_rx_defrag_fraglist_insert(peer, tid,
					      &rx_reorder_array_elem->head,
					      &rx_reorder_array_elem->tail,
					      frag, &all_frag_present);

	/*
	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
	 * packet sequence has more than 6 MSDUs for some reason, we will
	 * have to use the next MSDU link descriptor and chain them together
	 * before reinjection
	 */
	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
	    (rx_reorder_array_elem->head == frag)) {

		qdf_assert_always(ring_desc);
		status = dp_rx_defrag_save_info_from_ring_desc(ring_desc,
							       rx_desc,
							       peer, tid);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Unable to store ring desc !", __func__);
			goto discard_frag;
		}
	} else {
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		*rx_bfs = 1;

		/* Return the non-head link desc */
		if (ring_desc &&
		    dp_rx_link_desc_return(soc, ring_desc,
					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
		    QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to return link desc", __func__);

	}

	if (pdev->soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_remove(peer, tid);

	/* Yet to receive more fragments for this sequence number */
	if (!all_frag_present) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		peer->rx_tid[tid].defrag_timeout_ms =
			now_ms + pdev->soc->rx.defrag.timeout_ms;

		dp_rx_defrag_waitlist_add(peer, tid);
		dp_peer_unref_del_find_by_id(peer);

		return QDF_STATUS_SUCCESS;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "All fragments received for sequence: %d", rxseq);

	/* Process the fragments */
	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
			      rx_reorder_array_elem->tail);
	if (QDF_IS_STATUS_ERROR(status)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Fragment processing failed");

		dp_rx_add_to_free_desc_list(head, tail,
					    peer->rx_tid[tid].head_frag_desc);
		*rx_bfs = 1;

		if (dp_rx_link_desc_return(soc,
					   peer->rx_tid[tid].dst_ring_desc,
					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
		    QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to return link desc",
				  __func__);
		dp_rx_defrag_cleanup(peer, tid);
		goto end;
	}

	/* Re-inject the fragments back to REO for further processing */
	status = dp_rx_defrag_reo_reinject(peer, tid,
					   rx_reorder_array_elem->head);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "Fragmented sequence successfully reinjected");
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Fragmented sequence reinjection failed");
		dp_rx_return_head_frag_desc(peer, tid);
	}

	dp_rx_defrag_cleanup(peer, tid);

	dp_peer_unref_del_find_by_id(peer);

	return QDF_STATUS_SUCCESS;

discard_frag:
	qdf_nbuf_free(frag);
	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	if (dp_rx_link_desc_return(soc, ring_desc,
				   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
	    QDF_STATUS_SUCCESS)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to return link desc", __func__);
	*rx_bfs = 1;

end:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);

	DP_STATS_INC(soc, rx.rx_frag_err, 1);
	return QDF_STATUS_E_DEFRAG_ERROR;
}

/**
 * dp_rx_frag_handle() - Handles fragmented Rx frames
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @rx_desc: Rx descriptor of the fragment
 * @mac_id: Filled with the mac id (pool id) the fragment belongs to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements RX 802.11 fragmentation handling.
 * The handling is mostly the same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to the
 * REO ring (with proper settings to bypass the fragmentation check
 * but use duplicate detection / re-ordering) and route these frames
 * to a different core.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			   struct dp_rx_desc *rx_desc,
			   uint8_t *mac_id,
			   uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	qdf_nbuf_t msdu = NULL;
	uint32_t tid;
	uint32_t rx_bfs = 0;
	struct dp_pdev *pdev;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	qdf_assert(soc);
	qdf_assert(mpdu_desc_info);
	qdf_assert(rx_desc);

	dp_debug("Number of MSDUs to process, num_msdus: %d",
		 mpdu_desc_info->msdu_count);

	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Not sufficient MSDUs to process");
		return rx_bufs_used;
	}

	/* all buffers in MSDU link belong to same pdev */
	pdev = soc->pdev_list[rx_desc->pool_id];
	*mac_id = rx_desc->pool_id;

	msdu = rx_desc->nbuf;

	qdf_nbuf_unmap_single(soc->osdev, msdu, QDF_DMA_FROM_DEVICE);
	rx_desc->unmapped = 1;

	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start);

	/* Process fragment-by-fragment */
	status = dp_rx_defrag_store_fragment(soc, ring_desc,
					     &pdev->free_list_head,
					     &pdev->free_list_tail,
					     mpdu_desc_info,
					     tid, rx_desc, &rx_bfs);

	if (rx_bfs)
		rx_bufs_used++;

	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_info_rl("Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
			   mpdu_desc_info->mpdu_seq,
			   mpdu_desc_info->msdu_count,
			   mpdu_desc_info->mpdu_flags);

	return rx_bufs_used;
}

1766 */ 1767 if (!rx_reorder_array_elem) { 1768 dp_verbose_debug( 1769 "peer id:%d mac:" QDF_MAC_ADDR_STR "drop rx frame!", 1770 peer->peer_ids[0], 1771 QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw)); 1772 DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1); 1773 qdf_nbuf_free(nbuf); 1774 goto fail; 1775 } 1776 1777 if (rx_reorder_array_elem->head && 1778 rxseq != rx_tid->curr_seq_num) { 1779 /* Drop stored fragments if out of sequence 1780 * fragment is received 1781 */ 1782 dp_rx_reorder_flush_frag(peer, tid); 1783 1784 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, 1785 "%s: No list found for TID %d Seq# %d", 1786 __func__, tid, rxseq); 1787 qdf_nbuf_free(nbuf); 1788 goto fail; 1789 } 1790 1791 msdu_len = hal_rx_msdu_start_msdu_len_get(qdf_nbuf_data(nbuf)); 1792 1793 qdf_nbuf_set_pktlen(nbuf, (msdu_len + RX_PKT_TLVS_LEN)); 1794 1795 status = dp_rx_defrag_fraglist_insert(peer, tid, 1796 &rx_reorder_array_elem->head, 1797 &rx_reorder_array_elem->tail, nbuf, 1798 &all_frag_present); 1799 1800 if (QDF_IS_STATUS_ERROR(status)) { 1801 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1802 "%s Fragment insert failed", __func__); 1803 1804 goto fail; 1805 } 1806 1807 if (soc->rx.flags.defrag_timeout_check) 1808 dp_rx_defrag_waitlist_remove(peer, tid); 1809 1810 if (!all_frag_present) { 1811 uint32_t now_ms = 1812 qdf_system_ticks_to_msecs(qdf_system_ticks()); 1813 1814 peer->rx_tid[tid].defrag_timeout_ms = 1815 now_ms + soc->rx.defrag.timeout_ms; 1816 1817 dp_rx_defrag_waitlist_add(peer, tid); 1818 1819 return QDF_STATUS_SUCCESS; 1820 } 1821 1822 status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head, 1823 rx_reorder_array_elem->tail); 1824 1825 if (QDF_IS_STATUS_ERROR(status)) { 1826 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1827 "%s Fragment processing failed", __func__); 1828 1829 dp_rx_return_head_frag_desc(peer, tid); 1830 dp_rx_defrag_cleanup(peer, tid); 1831 1832 goto fail; 1833 } 1834 1835 /* Re-inject the fragments back to REO for further processing */ 1836 status = dp_rx_defrag_reo_reinject(peer, tid, 1837 rx_reorder_array_elem->head); 1838 if (QDF_IS_STATUS_SUCCESS(status)) { 1839 rx_reorder_array_elem->head = NULL; 1840 rx_reorder_array_elem->tail = NULL; 1841 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 1842 "%s: Frag seq successfully reinjected", 1843 __func__); 1844 } else { 1845 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1846 "%s: Frag seq reinjection failed", __func__); 1847 dp_rx_return_head_frag_desc(peer, tid); 1848 } 1849 1850 dp_rx_defrag_cleanup(peer, tid); 1851 return QDF_STATUS_SUCCESS; 1852 1853 fail: 1854 return QDF_STATUS_E_DEFRAG_ERROR; 1855 } 1856