/*
 * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "dp_ipa.h"

const struct dp_rx_defrag_cipher dp_f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct dp_rx_defrag_cipher dp_f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct dp_rx_defrag_cipher dp_f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};
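
/*
 * Each dp_rx_defrag_cipher entry above lists, in order: the cipher name,
 * the per-fragment crypto header length (IV / key ID / extended IV), the
 * trailer length trimmed from each fragment (the WEP/TKIP ICV, or the
 * MIC in the CCMP case), and the Michael MIC length (TKIP only). For
 * TKIP this works out to an 8-byte header (3-byte IV + 1-byte key ID +
 * 4-byte extended IV), a 4-byte ICV trailer and an 8-byte Michael MIC.
 */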

/*
 * dp_rx_defrag_frames_free(): Free fragment chain
 * @frames: Fragment chain
 *
 * Iterates through the fragment chain and frees them
 * Returns: None
 */
static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
{
	qdf_nbuf_t next, frag = frames;

	while (frag) {
		next = qdf_nbuf_next(frag);
		qdf_nbuf_free(frag);
		frag = next;
	}
}

/*
 * dp_rx_clear_saved_desc_info(): Clears descriptor info
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Clears the saved MPDU descriptor info and MSDU link pointer that were
 * cached from the REO ring descriptor. The cache is created per peer,
 * per TID.
 *
 * Returns: None
 */
static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid)
{
	if (peer->rx_tid[tid].dst_ring_desc)
		qdf_mem_free(peer->rx_tid[tid].dst_ring_desc);

	peer->rx_tid[tid].dst_ring_desc = NULL;
	peer->rx_tid[tid].head_frag_desc = NULL;
}

static void dp_rx_return_head_frag_desc(struct dp_peer *peer,
					unsigned int tid)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	pdev = peer->vdev->pdev;
	soc = pdev->soc;

	if (peer->rx_tid[tid].head_frag_desc) {
		pool_id = peer->rx_tid[tid].head_frag_desc->pool_id;
		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_add_to_free_desc_list(&head, &tail,
					    peer->rx_tid[tid].head_frag_desc);
		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					1, &head, &tail);
	}

	if (peer->rx_tid[tid].dst_ring_desc) {
		if (dp_rx_link_desc_return(soc,
					   peer->rx_tid[tid].dst_ring_desc,
					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
		    QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to return link desc", __func__);
	}
}

/*
 * dp_rx_reorder_flush_frag(): Flush the frag list
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Flush the per-TID frag list
 *
 * Returns: None
 */
void dp_rx_reorder_flush_frag(struct dp_peer *peer,
			      unsigned int tid)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("Flushing TID %d"), tid);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL peer", __func__);
		return;
	}

	dp_rx_return_head_frag_desc(peer, tid);
	dp_rx_defrag_cleanup(peer, tid);
}

/*
 * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list
 * @soc: DP SOC
 *
 * Flush fragments of all waitlisted TIDs
 *
 * Returns: None
 */
void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
{
	struct dp_rx_tid *rx_reorder = NULL;
	struct dp_rx_tid *tmp;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
	TAILQ_HEAD(, dp_rx_tid) temp_list;

	TAILQ_INIT(&temp_list);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  FL("Current time %u"), now_ms);

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		uint32_t tid;

		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		if (tid >= DP_MAX_TIDS) {
			qdf_assert(0);
			continue;
		}

		TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder,
			     defrag_waitlist_elem);
		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);

		/* Move to temp list and clean-up later */
		TAILQ_INSERT_TAIL(&temp_list, rx_reorder,
				  defrag_waitlist_elem);
	}
	if (rx_reorder) {
		soc->rx.defrag.next_flush_ms =
			rx_reorder->defrag_timeout_ms;
	} else {
		soc->rx.defrag.next_flush_ms =
			now_ms + soc->rx.defrag.timeout_ms;
	}

	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
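
	/*
	 * Two-phase flush: expired TIDs were detached onto temp_list while
	 * defrag_lock was held; the per-TID flush below then runs with only
	 * the respective tid_lock, so the heavier cleanup work is never done
	 * while holding the global defrag_lock.
	 */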
	TAILQ_FOREACH_SAFE(rx_reorder, &temp_list,
			   defrag_waitlist_elem, tmp) {
		struct dp_peer *peer, *temp_peer = NULL;

		qdf_spin_lock_bh(&rx_reorder->tid_lock);
		TAILQ_REMOVE(&temp_list, rx_reorder,
			     defrag_waitlist_elem);
		/* get address of current peer */
		peer = container_of(rx_reorder, struct dp_peer,
				    rx_tid[rx_reorder->tid]);
		qdf_spin_unlock_bh(&rx_reorder->tid_lock);

		temp_peer = dp_peer_find_by_id(soc, peer->peer_ids[0]);
		if (temp_peer == peer) {
			qdf_spin_lock_bh(&rx_reorder->tid_lock);
			dp_rx_reorder_flush_frag(peer, rx_reorder->tid);
			qdf_spin_unlock_bh(&rx_reorder->tid_lock);
		}

		if (temp_peer)
			dp_peer_unref_del_find_by_id(temp_peer);
	}
}

/*
 * dp_rx_defrag_waitlist_add(): Update SOC defrag wait list
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Appends the per-TID fragment context to the global fragment wait list
 *
 * Returns: None
 */
static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid)
{
	struct dp_soc *psoc = peer->vdev->pdev->soc;
	struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid];

	dp_debug("Adding TID %u to waitlist for peer %pK at MAC address %pM",
		 tid, peer, peer->mac_addr.raw);

	/* TODO: use LIST macros instead of TAIL macros */
	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
	if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist))
		psoc->rx.defrag.next_flush_ms = rx_reorder->defrag_timeout_ms;
	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder,
			  defrag_waitlist_elem);
	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
}

/*
 * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 *
 * Remove fragments from waitlist
 *
 * Returns: None
 */
void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_rx_tid *rx_reorder;
	struct dp_rx_tid *tmp;

	dp_debug("Removing TID %u from waitlist for peer %pK at MAC address %pM",
		 tid, peer, peer->mac_addr.raw);

	if (tid >= DP_MAX_TIDS) {
		dp_err("TID out of bounds: %d", tid);
		qdf_assert_always(0);
	}

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct dp_peer *peer_on_waitlist;

		/* get address of current peer */
		peer_on_waitlist =
			container_of(rx_reorder, struct dp_peer,
				     rx_tid[rx_reorder->tid]);

		/* Ensure it is TID for same peer */
		if (peer_on_waitlist == peer && rx_reorder->tid == tid) {
			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
				     rx_reorder, defrag_waitlist_elem);
			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
		}
	}
	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
}
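
/*
 * Note on waitlist ordering: entries are appended with monotonically
 * increasing defrag_timeout_ms values, so the list is implicitly sorted
 * by expiry. This is what allows dp_rx_defrag_waitlist_flush() to stop
 * scanning at the first entry whose timeout is still in the future.
 */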

/*
 * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list
 * @peer: Pointer to the peer data structure
 * @tid: Traffic Identifier (TID)
 * @head_addr: Pointer to head list
 * @tail_addr: Pointer to tail list
 * @frag: Incoming fragment
 * @all_frag_present: Flag to indicate whether all fragments are received
 *
 * Build a per-TID, per-sequence fragment list, kept sorted by fragment
 * number.
 *
 * Returns: Success, if inserted
 */
static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer,
	unsigned tid, qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr,
	qdf_nbuf_t frag, uint8_t *all_frag_present)
{
	qdf_nbuf_t next;
	qdf_nbuf_t prev = NULL;
	qdf_nbuf_t cur;
	uint16_t head_fragno, cur_fragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	uint8_t *rx_desc_info;

	qdf_assert(frag);
	qdf_assert(head_addr);
	qdf_assert(tail_addr);

	*all_frag_present = 0;
	rx_desc_info = qdf_nbuf_data(frag);
	cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);

	/* If this is the first fragment */
	if (!(*head_addr)) {
		*head_addr = *tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;

		goto insert_done;
	}

	/* In sequence fragment */
	if (cur_fragno > rx_tid->curr_frag_num) {
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;
	} else {
		/* Out of sequence fragment */
		cur = *head_addr;
		rx_desc_info = qdf_nbuf_data(cur);
		head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);

		if (cur_fragno == head_fragno) {
			/* Duplicate of the head fragment: drop it */
			qdf_nbuf_free(frag);
			goto insert_fail;
		} else if (head_fragno > cur_fragno) {
			qdf_nbuf_set_next(frag, cur);
			cur = frag;
			*head_addr = frag; /* head pointer to be updated */
		} else {
			while ((cur_fragno > head_fragno) && cur) {
				prev = cur;
				cur = qdf_nbuf_next(cur);
				/*
				 * Defensive: the tail holds the highest
				 * fragment number, so the walk should stop
				 * before running off the list. Guard the
				 * dereference anyway.
				 */
				if (!cur)
					break;
				rx_desc_info = qdf_nbuf_data(cur);
				head_fragno =
					dp_rx_frag_get_mpdu_frag_number(
								rx_desc_info);
			}

			if (cur_fragno == head_fragno) {
				/* Duplicate fragment: drop it */
				qdf_nbuf_free(frag);
				goto insert_fail;
			}

			qdf_nbuf_set_next(prev, frag);
			qdf_nbuf_set_next(frag, cur);
		}
	}

	next = qdf_nbuf_next(*head_addr);

	rx_desc_info = qdf_nbuf_data(*tail_addr);
	last_morefrag = dp_rx_frag_get_more_frag_bit(rx_desc_info);

	/* TODO: optimize the loop */
	if (!last_morefrag) {
		/* Check if all fragments are present */
		do {
			rx_desc_info = qdf_nbuf_data(next);
			next_fragno =
				dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
			count++;

			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return QDF_STATUS_SUCCESS;
		}
	}

insert_done:
	return QDF_STATUS_SUCCESS;

insert_fail:
	return QDF_STATUS_E_FAILURE;
}
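
/*
 * Naming convention for the per-cipher helpers below: "decap" validates
 * and strips cipher framing from one fragment, while "demic" removes
 * (and, for TKIP, verifies) the MIC. Each helper receives hdrlen, the
 * 802.11 header length computed by dp_rx_defrag_hdrsize(), and operates
 * on the bytes that follow the RX TLV area of the buffer.
 */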

/*
 * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment
 * @msdu: Pointer to the fragment
 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
 *
 * decap tkip encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = SIZE_OF_DATA_RX_TLV;

	/* start of 802.11 header info */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);

	/* TKIP header is located post 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "IEEE80211_WEP_EXTIV is missing in TKIP fragment");
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment
 * @nbuf: Pointer to the fragment buffer
 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
 *
 * Remove MIC information from CCMP fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = SIZE_OF_DATA_RX_TLV;

	/* start of the 802.11 header */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);

	/* CCMP header is located after 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment
 * @nbuf: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap CCMP encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	int rx_desc_len = SIZE_OF_DATA_RX_TLV;

	origHdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = origHdr + hdrlen;

	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	/* Let's pull the header later */

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment
 * @msdu: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap WEP encrypted fragment
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *origHdr;
	int rx_desc_len = SIZE_OF_DATA_RX_TLV;

	origHdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);

	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment
 * @soc: soc handle
 * @nbuf: Pointer to the fragment
 *
 * Calculate the header size of the received fragment
 *
 * Returns: header size (uint16_t)
 */
static uint16_t dp_rx_defrag_hdrsize(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
	uint16_t size = sizeof(struct ieee80211_frame);
	uint16_t fc = 0;
	uint32_t to_ds, fr_ds;
	uint8_t frm_ctrl_valid;
	uint16_t frm_ctrl_field;

	to_ds = hal_rx_mpdu_get_to_ds(soc->hal_soc, rx_tlv_hdr);
	fr_ds = hal_rx_mpdu_get_fr_ds(soc->hal_soc, rx_tlv_hdr);
	frm_ctrl_valid =
		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						    rx_tlv_hdr);
	frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr);

	if (to_ds && fr_ds)
		size += QDF_MAC_ADDR_SIZE;

	if (frm_ctrl_valid) {
		fc = frm_ctrl_field;

		/* use first byte for validation */
		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
			size += sizeof(uint16_t);
			/* use second byte for validation */
			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
				size += sizeof(struct ieee80211_htc);
		}
	}

	return size;
}
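
/*
 * Worked example for dp_rx_defrag_hdrsize(), assuming the usual 24-byte
 * ieee80211_frame base header: a QoS data frame on a 4-address (WDS)
 * path with both To-DS and From-DS set adds 6 bytes for the fourth
 * address and 2 bytes for the QoS control field; if the Order bit is
 * also set in frame control byte 1, 4 more bytes of HT control follow,
 * giving 24 + 6 + 2 + 4 = 36 bytes.
 */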

/*
 * dp_rx_defrag_michdr(): Calculate a pseudo MIC header
 * @wh0: Pointer to the wireless header of the fragment
 * @hdr: Array to hold the pseudo header
 *
 * Calculate a pseudo MIC header
 *
 * Returns: None
 */
static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
				uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
						 wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
						 wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
						 wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
						 wh->i_addr4);
		break;
	}

	/*
	 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for data frame, but
	 * it could also be set for deauth, disassoc, action, etc. for
	 * a mgt type frame. It comes into picture for MFP.
	 */
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
		    IEEE80211_FC1_DIR_DSTODS) {
			const struct ieee80211_qosframe_addr4 *qwh =
				(const struct ieee80211_qosframe_addr4 *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		} else {
			const struct ieee80211_qosframe *qwh =
				(const struct ieee80211_qosframe *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		}
	} else {
		hdr[12] = 0;
	}

	hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
}
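
/*
 * Resulting Michael pseudo-header layout (16 bytes):
 *
 *   hdr[0..5]   destination address (DA)
 *   hdr[6..11]  source address (SA)
 *   hdr[12]     QoS TID (priority), 0 for non-QoS frames
 *   hdr[13..15] zero (reserved)
 *
 * This block is mixed into the Michael state ahead of the payload by
 * dp_rx_defrag_mic() below.
 */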

/*
 * dp_rx_defrag_mic(): Calculate the Michael MIC
 * @key: Pointer to the key
 * @wbuf: fragment buffer
 * @off: Offset
 * @data_len: Data length
 * @mic: Array to hold MIC
 *
 * Calculate the Michael MIC over the fragment chain
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf,
				   uint16_t off, uint16_t data_len,
				   uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	int rx_desc_len = SIZE_OF_DATA_RX_TLV;

	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
							+ rx_desc_len), hdr);

	l = dp_rx_get_le32(key);
	r = dp_rx_get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	l ^= dp_rx_get_le32(hdr);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[4]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[8]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[12]);
	dp_rx_michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
	space = qdf_nbuf_len(wbuf) - off;

	for (;;) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= dp_rx_get_le32(data);
			dp_rx_michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = qdf_nbuf_next(wbuf);
		if (!wbuf)
			return QDF_STATUS_E_DEFRAG_ERROR;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *)qdf_nbuf_data(wbuf) + off;
			if ((qdf_nbuf_len(wbuf)) <
			    sizeof(uint32_t) - space) {
				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			switch (space) {
			case 1:
				l ^= dp_rx_get_le32_split(data[0],
							  data_next[0],
							  data_next[1],
							  data_next[2]);
				data = data_next + 3;
				space = (qdf_nbuf_len(wbuf) - off) - 3;
				break;
			case 2:
				l ^= dp_rx_get_le32_split(data[0], data[1],
							  data_next[0],
							  data_next[1]);
				data = data_next + 2;
				space = (qdf_nbuf_len(wbuf) - off) - 2;
				break;
			case 3:
				l ^= dp_rx_get_le32_split(data[0], data[1],
							  data[2],
							  data_next[0]);
				data = data_next + 1;
				space = (qdf_nbuf_len(wbuf) - off) - 1;
				break;
			}
			dp_rx_michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
			space = qdf_nbuf_len(wbuf) - off;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	dp_rx_michael_block(l, r);
	dp_rx_michael_block(l, r);
	dp_rx_put_le32(mic, l);
	dp_rx_put_le32(mic + 4, r);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame
 * @key: Pointer to the key
 * @msdu: fragment buffer
 * @hdrlen: Length of the header information
 *
 * Verify and remove the MIC from the TKIP frame
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key,
					  qdf_nbuf_t msdu, uint16_t hdrlen)
{
	QDF_STATUS status;
	uint32_t pktlen = 0;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	qdf_nbuf_t prev = NULL, next;

	next = msdu;
	while (next) {
		pktlen += (qdf_nbuf_len(next) - hdrlen);
		prev = next;
		dp_debug("%s pktlen %u", __func__,
			 (uint32_t)(qdf_nbuf_len(next) - hdrlen));
		next = qdf_nbuf_next(next);
	}

	if (!prev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Defrag chaining failed !\n", __func__);
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	qdf_nbuf_copy_bits(prev, qdf_nbuf_len(prev) - dp_f_tkip.ic_miclen,
			   dp_f_tkip.ic_miclen, (caddr_t)mic0);
	qdf_nbuf_trim_tail(prev, dp_f_tkip.ic_miclen);
	pktlen -= dp_f_tkip.ic_miclen;

	status = dp_rx_defrag_mic(key, msdu, hdrlen,
				  pktlen, mic);

	if (QDF_IS_STATUS_ERROR(status))
		return status;

	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}
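
/*
 * The two helpers below turn the sorted fragment chain into one logical
 * MPDU: each continuation fragment has its RX TLVs and 802.11 header
 * pulled, and the fragments are then hung off the head buffer as an
 * extension (frag) list, so upper layers see a single contiguous MSDU.
 */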

/*
 * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers
 * @nbuf: buffer pointer
 * @hdrsize: size of the header to be pulled
 *
 * Pull the RXTLV & the 802.11 headers
 *
 * Returns: None
 */
static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	qdf_nbuf_pull_head(nbuf,
			   RX_PKT_TLVS_LEN + hdrsize);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: final pktlen %d .11len %d",
		  __func__, (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
}

/*
 * dp_rx_construct_fraglist(): Construct a nbuf fraglist
 * @peer: Pointer to the peer
 * @head: Pointer to list of fragments
 * @hdrsize: Size of the header to be pulled
 *
 * Construct a nbuf fraglist
 *
 * Returns: None
 */
static void
dp_rx_construct_fraglist(struct dp_peer *peer,
			 qdf_nbuf_t head, uint16_t hdrsize)
{
	qdf_nbuf_t msdu = qdf_nbuf_next(head);
	qdf_nbuf_t rx_nbuf = msdu;
	uint32_t len = 0;

	while (msdu) {
		dp_rx_frag_pull_hdr(msdu, hdrsize);
		len += qdf_nbuf_len(msdu);
		msdu = qdf_nbuf_next(msdu);
	}

	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
	qdf_nbuf_set_next(head, NULL);
	qdf_nbuf_set_is_frag(head, 1);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: head len %d ext len %d data len %d ",
		  __func__,
		  (uint32_t)qdf_nbuf_len(head),
		  (uint32_t)qdf_nbuf_len(rx_nbuf),
		  (uint32_t)(head->data_len));
}

/**
 * dp_rx_defrag_err() - rx defragmentation error handler
 * @vdev: handle to vdev object
 * @nbuf: frame on which the MIC failure was detected
 *
 * This function handles the rx error and sends a MIC error notification
 *
 * Return: None
 */
static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct ol_if_ops *tops = NULL;
	struct dp_pdev *pdev = vdev->pdev;
	int rx_desc_len = SIZE_OF_DATA_RX_TLV;
	uint8_t *orig_hdr;
	struct ieee80211_frame *wh;
	struct cdp_rx_mic_err_info mic_failure_info;

	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	wh = (struct ieee80211_frame *)orig_hdr;

	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr1);
	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr2);
	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(wh->i_addr1);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = (uint8_t *)wh;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);
}
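
/*
 * The 802.11 -> 802.3 conversion below works in five steps: save the RX
 * TLVs aside, read the ethertype out of the LLC/SNAP header, pull enough
 * bytes so that an ethernet_hdr_t lands exactly where the 802.11 and
 * LLC/SNAP headers used to be, rebuild DA/SA from the address fields
 * selected by the To-DS/From-DS bits, and push the saved TLVs back in
 * front of the new ethernet header.
 */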

/*
 * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
 * @soc: dp soc handle
 * @nbuf: Pointer to the fragment buffer
 * @hdrsize: Size of headers
 *
 * Transcap the fragment from 802.11 to 802.3
 *
 * Returns: None
 */
static void
dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc,
			   qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	struct llc_snap_hdr_t *llchdr;
	struct ethernet_hdr_t *eth_hdr;
	uint8_t ether_type[2];
	uint16_t fc = 0;
	union dp_align_mac_addr mac_addr;
	uint8_t *rx_desc_info = qdf_mem_malloc(RX_PKT_TLVS_LEN);

	if (!rx_desc_info) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Memory alloc failed !", __func__);
		QDF_ASSERT(0);
		return;
	}

	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN);

	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
					   RX_PKT_TLVS_LEN + hdrsize);
	qdf_mem_copy(ether_type, llchdr->ethertype, 2);

	qdf_nbuf_pull_head(nbuf, (RX_PKT_TLVS_LEN + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));

	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));

	if (hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						rx_desc_info))
		fc = hal_rx_get_frame_ctrl_field(rx_desc_info);

	dp_debug("%s: frame control type: 0x%x", __func__, fc);

	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_TODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		break;

	case IEEE80211_FC1_DIR_DSTODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr4(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Unknown frame control type: 0x%x",
			  __func__, fc);
	}

	qdf_mem_copy(eth_hdr->ethertype, ether_type,
		     sizeof(ether_type));

	qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN);
	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, RX_PKT_TLVS_LEN);
	qdf_mem_free(rx_desc_info);
}
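
/*
 * Reinjection overview: once a sequence is fully reassembled, the head
 * buffer is handed back to hardware through the REO entrance ring. The
 * destination ring descriptor saved with the head fragment supplies the
 * MPDU descriptor info and REO queue descriptor address, the MSDU link
 * descriptor is rewritten to describe the single reassembled buffer, and
 * the fragment flag is cleared so the MPDU takes the normal REO path on
 * its second pass.
 */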

/*
 * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @head: Buffer to be reinjected back
 *
 * Reinject the fragment chain back into REO
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
					    unsigned int tid, qdf_nbuf_t head)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct hal_buf_info buf_info;
	void *link_desc_va;
	void *msdu0, *msdu_desc_info;
	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
	void *dst_mpdu_desc_info, *dst_qdesc_addr;
	qdf_dma_addr_t paddr;
	uint32_t nbuf_len, seq_no, dst_ind;
	uint32_t *mpdu_wrd;
	uint32_t ret, cookie;
	hal_ring_desc_t dst_ring_desc =
		peer->rx_tid[tid].dst_ring_desc;
	hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng;
	struct dp_rx_desc *rx_desc = peer->rx_tid[tid].head_frag_desc;
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
		peer->rx_tid[tid].array;
	qdf_nbuf_t nbuf_head;
	struct rx_desc_pool *rx_desc_pool = NULL;

	nbuf_head = dp_ipa_handle_rx_reo_reinject(soc, head);
	if (qdf_unlikely(!nbuf_head)) {
		dp_err_rl("IPA RX REO reinject failed");
		return QDF_STATUS_E_FAILURE;
	}

	/* update new allocated skb in case IPA is enabled */
	if (nbuf_head != head) {
		head = nbuf_head;
		rx_desc->nbuf = head;
		rx_reorder_array_elem->head = head;
	}

	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
	if (!ent_ring_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HAL src ring next entry NULL");
		return QDF_STATUS_E_FAILURE;
	}

	hal_rx_reo_buf_paddr_get(dst_ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	qdf_assert(link_desc_va);

	msdu0 = hal_rx_msdu0_buffer_addr_lsb(soc->hal_soc, link_desc_va);
	nbuf_len = qdf_nbuf_len(head) - RX_PKT_TLVS_LEN;

	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
			       UNI_DESC_BUF_TYPE_RX_MSDU_LINK);

	/* msdu reconfig */
	msdu_desc_info = hal_rx_msdu_desc_info_ptr_get(soc->hal_soc, msdu0);

	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);

	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));

	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  FIRST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  LAST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_CONTINUATION, 0x0);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  REO_DESTINATION_INDICATION, dst_ind);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  SA_IS_VALID, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  DA_IS_VALID, 1);

	/* change RX TLV's */
	hal_rx_msdu_start_msdu_len_set(
			qdf_nbuf_data(head), nbuf_len);

	cookie = HAL_RX_BUF_COOKIE_GET(msdu0);

	/* map the nbuf before reinjecting it into HW */
	ret = qdf_nbuf_map_single(soc->osdev, head,
				  QDF_DMA_FROM_DEVICE);
	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: nbuf map failed !", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * The rx frag handler unmapped this buffer and set
	 * rx_desc->unmapped to 1. It was mapped again above for the
	 * defrag reinject, so reset the flag back to 0.
	 */
	rx_desc->unmapped = 0;

	dp_ipa_handle_rx_buf_smmu_mapping(soc, head, true);

	paddr = qdf_nbuf_get_frag_paddr(head, 0);
	rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];

	ret = check_x86_paddr(soc, &head, &paddr, rx_desc_pool);

	if (ret == QDF_STATUS_E_FAILURE) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: x86 check failed !", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_DEFRAG_RBM);
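
	/*
	 * Entrance ring programming: the buffer address info points REO at
	 * the MSDU link descriptor, the MPDU descriptor info is copied from
	 * the saved destination ring entry (with MSDU_COUNT forced to 1 and
	 * FRAGMENT_FLAG cleared), and the REO queue descriptor address is
	 * carried over so the MPDU returns to the same reorder queue.
	 */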
	/* Now fill the entrance ring */
	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HAL RING Access For REO entrance SRNG Failed: %pK",
			  hal_srng);

		return QDF_STATUS_E_FAILURE;
	}

	paddr = (uint64_t)buf_info.paddr;
	/* buf addr */
	hal_rxdma_buff_addr_info_set(ent_ring_desc, paddr,
				     buf_info.sw_cookie,
				     HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
	/* mpdu desc info */
	ent_mpdu_desc_info = hal_ent_mpdu_desc_info(soc->hal_soc,
						    ent_ring_desc);
	dst_mpdu_desc_info = hal_dst_mpdu_desc_info(soc->hal_soc,
						    dst_ring_desc);

	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
		     sizeof(struct rx_mpdu_desc_info));
	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));

	mpdu_wrd = (uint32_t *)dst_mpdu_desc_info;
	seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd);

	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  MSDU_COUNT, 0x1);
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  MPDU_SEQUENCE_NUMBER, seq_no);
	/* unset frag bit */
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  FRAGMENT_FLAG, 0x0);
	/* set sa/da valid bits */
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  SA_IS_VALID, 0x1);
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  DA_IS_VALID, 0x1);
	HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
				  RAW_MPDU, 0x0);

	/* qdesc addr */
	ent_qdesc_addr = (uint8_t *)ent_ring_desc +
		REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;

	dst_qdesc_addr = (uint8_t *)dst_ring_desc +
		REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;

	qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8);

	HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5,
		       REO_DESTINATION_INDICATION, dst_ind);

	hal_srng_access_end(soc->hal_soc, hal_srng);

	DP_STATS_INC(soc, rx.reo_reinject, 1);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: reinjection done !", __func__);
	return QDF_STATUS_SUCCESS;
}
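
/*
 * dp_rx_defrag() below runs the per-cipher pipeline in a fixed order:
 * trim the FCS from every fragment, run decap (plus demic, for CCMP) on
 * each fragment, verify the Michael MIC across the whole chain for TKIP,
 * convert the head fragment from 802.11 to 802.3 framing, and finally
 * chain the remaining fragments onto the head as an ext list.
 */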

/*
 * dp_rx_defrag(): Defragment the fragment chain
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 * @frag_list_head: Head of the fragment list
 * @frag_list_tail: Tail of the fragment list
 *
 * Defragment the fragment chain
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
			       qdf_nbuf_t frag_list_head,
			       qdf_nbuf_t frag_list_tail)
{
	qdf_nbuf_t tmp_next, prev;
	qdf_nbuf_t cur = frag_list_head, msdu;
	uint32_t index, tkip_demic = 0;
	uint16_t hdr_space;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint8_t status = 0;

	hdr_space = dp_rx_defrag_hdrsize(soc, cur);
	index = hal_rx_msdu_is_wlan_mcast(cur) ?
		dp_sec_mcast : dp_sec_ucast;

	/* Remove FCS from all fragments */
	while (cur) {
		tmp_next = qdf_nbuf_next(cur);
		qdf_nbuf_set_next(cur, NULL);
		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		prev = cur;
		qdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list_head;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: index %d Security type: %d", __func__,
		  index, peer->security[index].sec_type);

	switch (peer->security[index].sec_type) {
	case cdp_sec_type_tkip:
		tkip_demic = 1;
		/* fall through: the decap step is shared with TKIP-no-MIC */

	case cdp_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_tkip_decap(cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: TKIP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_tkip.ic_header;
		break;

	case cdp_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: CCMP demic failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: CCMP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_ccmp.ic_header;
		break;

	case cdp_sec_type_wep40:
	case cdp_sec_type_wep104:
	case cdp_sec_type_wep128:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_wep_decap(cur, hdr_space)) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "dp_rx_defrag: WEP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_wep.ic_header;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_ERROR,
			  "dp_rx_defrag: Did not match any security type");
		break;
	}

	if (tkip_demic) {
		msdu = frag_list_head;
		qdf_mem_copy(key,
			     &peer->security[index].michael_key[0],
			     IEEE80211_WEP_MICLEN);
		status = dp_rx_defrag_tkip_demic(key, msdu,
						 RX_PKT_TLVS_LEN +
						 hdr_space);

		if (status) {
			dp_rx_defrag_err(vdev, frag_list_head);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: TKIP demic failed status %d",
				  __func__, status);

			return QDF_STATUS_E_DEFRAG_ERROR;
		}
	}

	/* Convert the header to 802.3 header */
	dp_rx_defrag_nwifi_to_8023(soc, frag_list_head, hdr_space);
	dp_rx_construct_fraglist(peer, frag_list_head, hdr_space);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_cleanup(): Clean up activities
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 *
 * Returns: None
 */
void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
		peer->rx_tid[tid].array;

	if (rx_reorder_array_elem) {
		/* Free up nbufs */
		dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
	} else {
		dp_info("Cleanup self peer %pK and TID %u at MAC address %pM",
			peer, tid, peer->mac_addr.raw);
	}

	/* Free up saved ring descriptors */
	dp_rx_clear_saved_desc_info(peer, tid);

	peer->rx_tid[tid].defrag_timeout_ms = 0;
	peer->rx_tid[tid].curr_frag_num = 0;
	peer->rx_tid[tid].curr_seq_num = 0;
}

/*
 * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor
 * @ring_desc: Pointer to the dst ring descriptor
 * @rx_desc: Pointer to the rx descriptor of the head fragment
 * @peer: Pointer to the peer
 * @tid: Traffic Identifier
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_save_info_from_ring_desc(hal_ring_desc_t ring_desc,
				      struct dp_rx_desc *rx_desc,
				      struct dp_peer *peer,
				      unsigned int tid)
{
	void *dst_ring_desc = qdf_mem_malloc(
			sizeof(struct reo_destination_ring));

	if (!dst_ring_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Memory alloc failed !", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(dst_ring_desc, ring_desc,
		     sizeof(struct reo_destination_ring));

	peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
	peer->rx_tid[tid].head_frag_desc = rx_desc;

	return QDF_STATUS_SUCCESS;
}
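
/*
 * dp_rx_defrag_store_fragment() is the per-fragment state machine: it
 * validates the peer, TID and the MPDU sequence/frame control fields,
 * starts a new sequence (or drops a stale one) on a sequence-number
 * change, inserts the fragment into the sorted per-TID list, caches the
 * REO descriptor of the head fragment for the eventual reinjection, and
 * either arms the waitlist timer or, once all fragments are present,
 * defragments and reinjects the MPDU.
 */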

/*
 * dp_rx_defrag_store_fragment(): Store incoming fragments
 * @soc: Pointer to the SOC data structure
 * @ring_desc: Pointer to the ring descriptor
 * @head: Pointer to the head of the local descriptor free-list
 * @tail: Pointer to the tail of the local descriptor free-list
 * @mpdu_desc_info: MPDU descriptor info
 * @tid: Traffic Identifier
 * @rx_desc: Pointer to rx descriptor
 * @rx_bfs: Number of buffers consumed; updated by this function
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_store_fragment(struct dp_soc *soc,
			    hal_ring_desc_t ring_desc,
			    union dp_rx_desc_list_elem_t **head,
			    union dp_rx_desc_list_elem_t **tail,
			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			    unsigned int tid, struct dp_rx_desc *rx_desc,
			    uint32_t *rx_bfs)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
	struct dp_pdev *pdev;
	struct dp_peer *peer = NULL;
	uint16_t peer_id;
	uint8_t fragno, more_frag, all_frag_present = 0;
	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
	QDF_STATUS status;
	struct dp_rx_tid *rx_tid;
	uint8_t mpdu_sequence_control_valid;
	uint8_t mpdu_frame_control_valid;
	qdf_nbuf_t frag = rx_desc->nbuf;
	uint32_t msdu_len;

	if (qdf_nbuf_len(frag) > 0) {
		dp_info("Dropping unexpected packet with skb_len: %d, "
			"data len: %d, cookie: %d",
			(uint32_t)qdf_nbuf_len(frag), frag->data_len,
			rx_desc->cookie);
		DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1);
		goto discard_frag;
	}

	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_desc->rx_buf_start);

	qdf_nbuf_set_pktlen(frag, (msdu_len + RX_PKT_TLVS_LEN));
	qdf_nbuf_append_ext_list(frag, NULL, 0);

	/* Check if the packet is from a valid peer */
	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			mpdu_desc_info->peer_meta_data);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		/* We should not receive anything from an unknown peer;
		 * however, that might happen while we are in monitor mode.
		 * We don't need to handle that here.
		 */
		dp_info_rl("Unknown peer with peer_id %d, dropping fragment",
			   peer_id);
		DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1);
		goto discard_frag;
	}

	if (tid >= DP_MAX_TIDS) {
		dp_info("TID out of bounds: %d", tid);
		qdf_assert_always(0);
	}

	pdev = peer->vdev->pdev;
	rx_tid = &peer->rx_tid[tid];

	mpdu_sequence_control_valid =
		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
						       rx_desc->rx_buf_start);

	/* Invalid MPDU sequence control field, MPDU is of no use */
	if (!mpdu_sequence_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid MPDU seq control field, dropping MPDU");

		qdf_assert(0);
		goto discard_frag;
	}

	mpdu_frame_control_valid =
		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						    rx_desc->rx_buf_start);

	/* Invalid frame control field */
	if (!mpdu_frame_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid frame control field, dropping MPDU");

		qdf_assert(0);
		goto discard_frag;
	}

	/* Current mpdu sequence */
	more_frag = dp_rx_frag_get_more_frag_bit(rx_desc->rx_buf_start);

	/* HW does not populate the fragment number as of now;
	 * need to get it from the 802.11 header
	 */
	fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc->rx_buf_start);

	rx_reorder_array_elem = peer->rx_tid[tid].array;
	if (!rx_reorder_array_elem) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Rcvd Fragmented pkt before peer_tid is setup");
		goto discard_frag;
	}

	/*
	 * !more_frag: no more fragments to be delivered
	 * !frag_no: packet is not fragmented
	 * !rx_reorder_array_elem->head: no saved fragments so far
	 */
	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		/* We should not get into this situation here.
		 * It means an unfragmented packet with fragment flag
		 * is delivered over the REO exception ring.
		 * Typically it follows normal rx path.
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Rcvd unfragmented pkt on REO Err srng, dropping");

		qdf_assert(0);
		goto discard_frag;
	}

	/* Check if the fragment is for the same sequence or a different one */
	if (rx_reorder_array_elem->head) {
		if (rxseq != rx_tid->curr_seq_num) {
			/* Drop stored fragments if an out of sequence
			 * fragment is received
			 */
			dp_rx_reorder_flush_frag(peer, tid);

			DP_STATS_INC(soc, rx.rx_frag_err, 1);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "seq number mismatch, dropping earlier sequence");

			/*
			 * The sequence number for this fragment becomes the
			 * new sequence number to be processed
			 */
			rx_tid->curr_seq_num = rxseq;
		}
	} else {
		/* Start of a new sequence */
		dp_rx_defrag_cleanup(peer, tid);
		rx_tid->curr_seq_num = rxseq;
	}

	/*
	 * If the earlier sequence was dropped, this will be a fresh start.
	 * Else, continue with the next fragment in the given sequence.
	 */
	status = dp_rx_defrag_fraglist_insert(peer, tid,
					      &rx_reorder_array_elem->head,
					      &rx_reorder_array_elem->tail,
					      frag, &all_frag_present);
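
	/*
	 * Only the head fragment (fragno 0 sitting at the list head) keeps
	 * its REO destination ring descriptor and rx descriptor, saved below
	 * for the eventual reinjection; the descriptors of every other
	 * fragment are released to the free list and the idle link-desc
	 * pool right away.
	 */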

	/*
	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
	 * packet sequence has more than 6 MSDUs for some reason, we will
	 * have to use the next MSDU link descriptor and chain them together
	 * before reinjection
	 */
	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
	    (rx_reorder_array_elem->head == frag)) {
		qdf_assert_always(ring_desc);
		status = dp_rx_defrag_save_info_from_ring_desc(ring_desc,
							       rx_desc,
							       peer, tid);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Unable to store ring desc !",
				  __func__);
			goto discard_frag;
		}
	} else {
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		(*rx_bfs)++;

		/* Return the non-head link desc */
		if (ring_desc &&
		    dp_rx_link_desc_return(soc, ring_desc,
					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
		    QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to return link desc", __func__);
	}

	if (pdev->soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_remove(peer, tid);

	/* Yet to receive more fragments for this sequence number */
	if (!all_frag_present) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		peer->rx_tid[tid].defrag_timeout_ms =
			now_ms + pdev->soc->rx.defrag.timeout_ms;

		dp_rx_defrag_waitlist_add(peer, tid);
		dp_peer_unref_del_find_by_id(peer);

		return QDF_STATUS_SUCCESS;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "All fragments received for sequence: %d", rxseq);

	/* Process the fragments */
	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
			      rx_reorder_array_elem->tail);
	if (QDF_IS_STATUS_ERROR(status)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Fragment processing failed");

		dp_rx_add_to_free_desc_list(head, tail,
					    peer->rx_tid[tid].head_frag_desc);
		(*rx_bfs)++;

		if (dp_rx_link_desc_return(soc,
					   peer->rx_tid[tid].dst_ring_desc,
					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
		    QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to return link desc",
				  __func__);
		dp_rx_defrag_cleanup(peer, tid);
		goto end;
	}

	/* Re-inject the fragments back to REO for further processing */
	status = dp_rx_defrag_reo_reinject(peer, tid,
					   rx_reorder_array_elem->head);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "Fragmented sequence successfully reinjected");
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Fragmented sequence reinjection failed");
		dp_rx_return_head_frag_desc(peer, tid);
	}

	dp_rx_defrag_cleanup(peer, tid);

	dp_peer_unref_del_find_by_id(peer);

	return QDF_STATUS_SUCCESS;

discard_frag:
	qdf_nbuf_free(frag);
	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	if (dp_rx_link_desc_return(soc, ring_desc,
				   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
	    QDF_STATUS_SUCCESS)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to return link desc", __func__);
	(*rx_bfs)++;

end:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);

	DP_STATS_INC(soc, rx.rx_frag_err, 1);
	return QDF_STATUS_E_DEFRAG_ERROR;
}

/**
 * dp_rx_frag_handle() - Handles fragmented Rx frames
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @rx_desc: Pointer to the rx descriptor of the fragment
 * @mac_id: Filled with the mac id of the rx descriptor pool
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements RX 802.11 fragmentation handling.
 * The handling is mostly the same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to the
 * REO ring (with proper settings to bypass the fragmentation check
 * but still use duplicate detection / re-ordering) and route these
 * frames to a different core.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			   struct dp_rx_desc *rx_desc,
			   uint8_t *mac_id,
			   uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	qdf_nbuf_t msdu = NULL;
	uint32_t tid;
	uint32_t rx_bfs = 0;
	struct dp_pdev *pdev;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	qdf_assert(soc);
	qdf_assert(mpdu_desc_info);
	qdf_assert(rx_desc);

	dp_debug("Number of MSDUs to process, num_msdus: %d",
		 mpdu_desc_info->msdu_count);

	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Not sufficient MSDUs to process");
		return rx_bufs_used;
	}

	/* all buffers in MSDU link belong to same pdev */
	pdev = soc->pdev_list[rx_desc->pool_id];
	*mac_id = rx_desc->pool_id;

	msdu = rx_desc->nbuf;

	qdf_nbuf_unmap_single(soc->osdev, msdu, QDF_DMA_FROM_DEVICE);
	rx_desc->unmapped = 1;

	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start);

	/* Process fragment-by-fragment */
	status = dp_rx_defrag_store_fragment(soc, ring_desc,
					     &pdev->free_list_head,
					     &pdev->free_list_tail,
					     mpdu_desc_info,
					     tid, rx_desc, &rx_bfs);

	if (rx_bfs)
		rx_bufs_used += rx_bfs;

	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_info_rl("Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
			   mpdu_desc_info->mpdu_seq,
			   mpdu_desc_info->msdu_count,
			   mpdu_desc_info->mpdu_flags);

	return rx_bufs_used;
}
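
/*
 * dp_rx_defrag_add_last_frag() is an alternate entry point, apparently
 * intended for fragments of an in-progress sequence that arrive outside
 * the REO exception path; the caller hands in an nbuf that still carries
 * its RX TLVs. It reuses the same insert / defrag / reinject pipeline as
 * dp_rx_defrag_store_fragment(), minus the ring-descriptor bookkeeping.
 */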

QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
				      struct dp_peer *peer, uint16_t tid,
				      uint16_t rxseq, qdf_nbuf_t nbuf)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
	uint8_t all_frag_present;
	uint32_t msdu_len;
	QDF_STATUS status;

	rx_reorder_array_elem = peer->rx_tid[tid].array;

	/*
	 * HW may fill in unexpected peer_id in RX PKT TLV,
	 * if this peer_id related peer is valid by coincidence,
	 * but actually this peer won't do dp_peer_rx_init(like SAP vdev
	 * self peer), then invalid access to rx_reorder_array_elem happened.
	 */
	if (!rx_reorder_array_elem) {
		dp_verbose_debug(
			"peer id:%d mac: %pM drop rx frame!",
			peer->peer_ids[0],
			peer->mac_addr.raw);
		DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1);
		qdf_nbuf_free(nbuf);
		goto fail;
	}

	if (rx_reorder_array_elem->head &&
	    rxseq != rx_tid->curr_seq_num) {
		/* Drop stored fragments if out of sequence
		 * fragment is received
		 */
		dp_rx_reorder_flush_frag(peer, tid);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: No list found for TID %d Seq# %d",
			  __func__, tid, rxseq);
		qdf_nbuf_free(nbuf);
		goto fail;
	}

	msdu_len = hal_rx_msdu_start_msdu_len_get(qdf_nbuf_data(nbuf));

	qdf_nbuf_set_pktlen(nbuf, (msdu_len + RX_PKT_TLVS_LEN));

	status = dp_rx_defrag_fraglist_insert(peer, tid,
					      &rx_reorder_array_elem->head,
					      &rx_reorder_array_elem->tail,
					      nbuf, &all_frag_present);

	if (QDF_IS_STATUS_ERROR(status)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s Fragment insert failed", __func__);

		goto fail;
	}

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_remove(peer, tid);

	if (!all_frag_present) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		peer->rx_tid[tid].defrag_timeout_ms =
			now_ms + soc->rx.defrag.timeout_ms;

		dp_rx_defrag_waitlist_add(peer, tid);

		return QDF_STATUS_SUCCESS;
	}

	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
			      rx_reorder_array_elem->tail);

	if (QDF_IS_STATUS_ERROR(status)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s Fragment processing failed", __func__);

		dp_rx_return_head_frag_desc(peer, tid);
		dp_rx_defrag_cleanup(peer, tid);

		goto fail;
	}

	/* Re-inject the fragments back to REO for further processing */
	status = dp_rx_defrag_reo_reinject(peer, tid,
					   rx_reorder_array_elem->head);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: Frag seq successfully reinjected",
			  __func__);
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Frag seq reinjection failed", __func__);
		dp_rx_return_head_frag_desc(peer, tid);
	}

	dp_rx_defrag_cleanup(peer, tid);
	return QDF_STATUS_SUCCESS;

fail:
	return QDF_STATUS_E_DEFRAG_ERROR;
}