1 /* 2 * Copyright (c) 2017 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "dp_types.h" 20 #include "dp_rx.h" 21 #include "dp_peer.h" 22 #include "hal_api.h" 23 #include "qdf_trace.h" 24 #include "qdf_nbuf.h" 25 #include "dp_rx_defrag.h" 26 #include <enet.h> /* LLC_SNAP_HDR_LEN */ 27 #include "dp_rx_defrag.h" 28 29 const struct dp_rx_defrag_cipher dp_f_ccmp = { 30 "AES-CCM", 31 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, 32 IEEE80211_WEP_MICLEN, 33 0, 34 }; 35 36 const struct dp_rx_defrag_cipher dp_f_tkip = { 37 "TKIP", 38 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, 39 IEEE80211_WEP_CRCLEN, 40 IEEE80211_WEP_MICLEN, 41 }; 42 43 const struct dp_rx_defrag_cipher dp_f_wep = { 44 "WEP", 45 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN, 46 IEEE80211_WEP_CRCLEN, 47 0, 48 }; 49 50 /* 51 * dp_rx_defrag_frames_free(): Free fragment chain 52 * @frames: Fragment chain 53 * 54 * Iterates through the fragment chain and frees them 55 * Returns: None 56 */ 57 static void dp_rx_defrag_frames_free(qdf_nbuf_t frames) 58 { 59 qdf_nbuf_t next, frag = frames; 60 61 while (frag) { 62 next = qdf_nbuf_next(frag); 63 qdf_nbuf_free(frag); 64 frag = next; 65 } 66 } 67 68 /* 69 * 
dp_rx_clear_saved_desc_info(): Clears descriptor info 70 * @peer: Pointer to the peer data structure 71 * @tid: Transmit ID (TID) 72 * 73 * Saves MPDU descriptor info and MSDU link pointer from REO 74 * ring descriptor. The cache is created per peer, per TID 75 * 76 * Returns: None 77 */ 78 static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid) 79 { 80 hal_rx_clear_mpdu_desc_info( 81 &peer->rx_tid[tid].transcap_rx_mpdu_desc_info); 82 83 hal_rx_clear_msdu_link_ptr( 84 &peer->rx_tid[tid].transcap_msdu_link_ptr[0], 85 HAL_RX_MAX_SAVED_RING_DESC); 86 } 87 88 /* 89 * dp_rx_defrag_waitlist_add(): Update per-PDEV defrag wait list 90 * @peer: Pointer to the peer data structure 91 * @tid: Transmit ID (TID) 92 * 93 * Appends per-tid fragments to global fragment wait list 94 * 95 * Returns: None 96 */ 97 static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid) 98 { 99 struct dp_soc *psoc = peer->vdev->pdev->soc; 100 struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid]; 101 102 /* TODO: use LIST macros instead of TAIL macros */ 103 TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder, 104 defrag_waitlist_elem); 105 } 106 107 /* 108 * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist 109 * @peer: Pointer to the peer data structure 110 * @tid: Transmit ID (TID) 111 * 112 * Remove fragments from waitlist 113 * 114 * Returns: None 115 */ 116 static void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid) 117 { 118 struct dp_pdev *pdev = peer->vdev->pdev; 119 struct dp_soc *soc = pdev->soc; 120 struct dp_rx_tid *rx_reorder; 121 122 if (tid > DP_MAX_TIDS) { 123 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 124 "TID out of bounds: %d", tid); 125 qdf_assert(0); 126 return; 127 } 128 129 rx_reorder = &peer->rx_tid[tid]; 130 131 if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) { 132 133 TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder, 134 defrag_waitlist_elem); 135 rx_reorder->defrag_waitlist_elem.tqe_next = 
NULL; 136 rx_reorder->defrag_waitlist_elem.tqe_prev = NULL; 137 } else if (rx_reorder->defrag_waitlist_elem.tqe_prev == NULL) { 138 139 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 140 "waitlist->tqe_prev is NULL"); 141 rx_reorder->defrag_waitlist_elem.tqe_next = NULL; 142 qdf_assert(0); 143 } 144 } 145 146 /* 147 * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list 148 * @peer: Pointer to the peer data structure 149 * @tid: Transmit ID (TID) 150 * @head_addr: Pointer to head list 151 * @tail_addr: Pointer to tail list 152 * @frag: Incoming fragment 153 * @all_frag_present: Flag to indicate whether all fragments are received 154 * 155 * Build a per-tid, per-sequence fragment list. 156 * 157 * Returns: None 158 */ 159 static void dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid, 160 qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag, 161 uint8_t *all_frag_present) 162 { 163 qdf_nbuf_t next; 164 qdf_nbuf_t prev = NULL; 165 qdf_nbuf_t cur; 166 uint16_t head_fragno, cur_fragno, next_fragno; 167 uint8_t last_morefrag = 1, count = 0; 168 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 169 uint8_t *rx_desc_info; 170 171 qdf_assert(frag); 172 qdf_assert(head_addr); 173 qdf_assert(tail_addr); 174 175 rx_desc_info = qdf_nbuf_data(frag); 176 cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 177 178 /* If this is the first fragment */ 179 if (!(*head_addr)) { 180 *head_addr = *tail_addr = frag; 181 qdf_nbuf_set_next(*tail_addr, NULL); 182 rx_tid->curr_frag_num = cur_fragno; 183 184 goto end; 185 } 186 187 /* In sequence fragment */ 188 if (cur_fragno > rx_tid->curr_frag_num) { 189 qdf_nbuf_set_next(*tail_addr, frag); 190 *tail_addr = frag; 191 qdf_nbuf_set_next(*tail_addr, NULL); 192 rx_tid->curr_frag_num = cur_fragno; 193 } else { 194 /* Out of sequence fragment */ 195 cur = *head_addr; 196 rx_desc_info = qdf_nbuf_data(cur); 197 head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 198 199 if (cur_fragno == 
head_fragno) { 200 qdf_nbuf_free(frag); 201 *all_frag_present = 0; 202 } else if (head_fragno > cur_fragno) { 203 qdf_nbuf_set_next(frag, cur); 204 cur = frag; 205 *head_addr = frag; /* head pointer to be updated */ 206 } else { 207 while ((cur_fragno > head_fragno) && cur != NULL) { 208 prev = cur; 209 cur = qdf_nbuf_next(cur); 210 rx_desc_info = qdf_nbuf_data(cur); 211 head_fragno = 212 dp_rx_frag_get_mpdu_frag_number( 213 rx_desc_info); 214 } 215 qdf_nbuf_set_next(prev, frag); 216 qdf_nbuf_set_next(frag, cur); 217 } 218 } 219 220 next = qdf_nbuf_next(*head_addr); 221 222 rx_desc_info = qdf_nbuf_data(*tail_addr); 223 last_morefrag = hal_rx_get_rx_more_frag_bit(rx_desc_info); 224 225 /* TODO: optimize the loop */ 226 if (!last_morefrag) { 227 /* Check if all fragments are present */ 228 do { 229 rx_desc_info = qdf_nbuf_data(next); 230 next_fragno = 231 dp_rx_frag_get_mpdu_frag_number(rx_desc_info); 232 count++; 233 234 if (next_fragno != count) 235 break; 236 237 next = qdf_nbuf_next(next); 238 } while (next); 239 240 if (!next) { 241 *all_frag_present = 1; 242 return; 243 } 244 } 245 246 end: 247 *all_frag_present = 0; 248 } 249 250 251 /* 252 * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment 253 * @msdu: Pointer to the fragment 254 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) 255 * 256 * decap tkip encrypted fragment 257 * 258 * Returns: QDF_STATUS 259 */ 260 static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen) 261 { 262 uint8_t *ivp, *orig_hdr; 263 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 264 265 /* start of 802.11 header info */ 266 orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len); 267 268 /* TKIP header is located post 802.11 header */ 269 ivp = orig_hdr + hdrlen; 270 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) { 271 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 272 "IEEE80211_WEP_EXTIV is missing in TKIP fragment"); 273 return QDF_STATUS_E_DEFRAG_ERROR; 274 } 275 276 
qdf_mem_move(orig_hdr + dp_f_tkip.ic_header, orig_hdr, hdrlen); 277 278 qdf_nbuf_pull_head(msdu, dp_f_tkip.ic_header); 279 qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer); 280 281 return QDF_STATUS_SUCCESS; 282 } 283 284 /* 285 * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment 286 * @nbuf: Pointer to the fragment buffer 287 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) 288 * 289 * Remove MIC information from CCMP fragment 290 * 291 * Returns: QDF_STATUS 292 */ 293 static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen) 294 { 295 uint8_t *ivp, *orig_hdr; 296 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 297 298 /* start of the 802.11 header */ 299 orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); 300 301 /* CCMP header is located after 802.11 header */ 302 ivp = orig_hdr + hdrlen; 303 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 304 return QDF_STATUS_E_DEFRAG_ERROR; 305 306 qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer); 307 308 return QDF_STATUS_SUCCESS; 309 } 310 311 /* 312 * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment 313 * @nbuf: Pointer to the fragment 314 * @hdrlen: length of the header information 315 * 316 * decap CCMP encrypted fragment 317 * 318 * Returns: QDF_STATUS 319 */ 320 static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen) 321 { 322 uint8_t *ivp, *origHdr; 323 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 324 325 origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len); 326 ivp = origHdr + hdrlen; 327 328 if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) 329 return QDF_STATUS_E_DEFRAG_ERROR; 330 331 qdf_mem_move(origHdr + dp_f_ccmp.ic_header, origHdr, hdrlen); 332 qdf_nbuf_pull_head(nbuf, dp_f_ccmp.ic_header); 333 334 return QDF_STATUS_SUCCESS; 335 } 336 337 /* 338 * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment 339 * @msdu: Pointer to the fragment 340 * @hdrlen: length of the header information 341 * 342 * decap WEP 
encrypted fragment 343 * 344 * Returns: QDF_STATUS 345 */ 346 static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen) 347 { 348 uint8_t *origHdr; 349 int rx_desc_len = sizeof(struct rx_pkt_tlvs); 350 351 origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); 352 qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen); 353 354 qdf_nbuf_pull_head(msdu, dp_f_wep.ic_header); 355 qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer); 356 357 return QDF_STATUS_SUCCESS; 358 } 359 360 /* 361 * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment 362 * @nbuf: Pointer to the fragment 363 * 364 * Calculate the header size of the received fragment 365 * 366 * Returns: header size (uint16_t) 367 */ 368 static uint16_t dp_rx_defrag_hdrsize(qdf_nbuf_t nbuf) 369 { 370 uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf); 371 uint16_t size = sizeof(struct ieee80211_frame); 372 uint16_t fc = 0; 373 uint32_t to_ds, fr_ds; 374 uint8_t frm_ctrl_valid; 375 uint16_t frm_ctrl_field; 376 377 to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr); 378 fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr); 379 frm_ctrl_valid = hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr); 380 frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr); 381 382 if (to_ds && fr_ds) 383 size += IEEE80211_ADDR_LEN; 384 385 if (frm_ctrl_valid) { 386 fc = frm_ctrl_field; 387 388 /* use 1-st byte for validation */ 389 if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) { 390 size += sizeof(uint16_t); 391 /* use 2-nd byte for validation */ 392 if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER) 393 size += sizeof(struct ieee80211_htc); 394 } 395 } 396 397 return size; 398 } 399 400 /* 401 * dp_rx_defrag_michdr(): Calculate a psuedo MIC header 402 * @wh0: Pointer to the wireless header of the fragment 403 * @hdr: Array to hold the psuedo header 404 * 405 * Calculate a psuedo MIC header 406 * 407 * Returns: None 408 */ 409 static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0, 410 uint8_t hdr[]) 411 { 412 
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	/*
	 * Michael pseudo header starts with DA then SA; which 802.11
	 * address fields hold them depends on the ToDS/FromDS bits.
	 */
	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
			wh->i_addr2); /* SA */
		break;
	case IEEE80211_FC1_DIR_TODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
			wh->i_addr2); /* SA */
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
			wh->i_addr3); /* SA */
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
			wh->i_addr4); /* SA */
		break;
	}

	/*
	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but
	 * it could also be set for deauth, disassoc, action, etc. for
	 * a mgt type frame. It comes into picture for MFP.
	 */
	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
		const struct ieee80211_qosframe *qwh =
			(const struct ieee80211_qosframe *)wh;
		/* priority byte of the pseudo header is the QoS TID */
		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
	} else {
		hdr[12] = 0;
	}

	hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
}

/*
 * dp_rx_defrag_mic(): Calculate MIC header
 * @key: Pointer to the key
 * @wbuf: fragment buffer
 * @off: Offset
 * @data_len: Data length
 * @mic: Array to hold MIC
 *
 * Computes the Michael MIC (used by TKIP) over the pseudo header and
 * the payload, walking the nbuf chain and handling 32-bit blocks that
 * straddle buffer boundaries.
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf,
	uint16_t off, uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);

	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
		+ rx_desc_len), hdr);

	/* Michael key is two little-endian 32-bit words */
	l = dp_rx_get_le32(key);
	r = dp_rx_get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
	l ^= dp_rx_get_le32(hdr);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[4]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[8]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[12]);
	dp_rx_michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len + off;
	space = qdf_nbuf_len(wbuf) - rx_desc_len - off;

	for (;; ) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= dp_rx_get_le32(data);
			dp_rx_michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		/* more full blocks remain: continue in the next buffer */
		wbuf = qdf_nbuf_next(wbuf);
		if (wbuf == NULL)
			return QDF_STATUS_E_DEFRAG_ERROR;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
			/* next buffer must supply the rest of the block */
			if ((qdf_nbuf_len(wbuf) - rx_desc_len) <
				sizeof(uint32_t) - space) {
				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			switch (space) {
			case 1:
				l ^= dp_rx_get_le32_split(data[0],
					data_next[0], data_next[1],
					data_next[2]);
				data = data_next + 3;
				space = (qdf_nbuf_len(wbuf) - rx_desc_len)
					- 3;
				break;
			case 2:
				l ^= dp_rx_get_le32_split(data[0], data[1],
					data_next[0], data_next[1]);
				data = data_next + 2;
				space = (qdf_nbuf_len(wbuf) - rx_desc_len)
					- 2;
				break;
			case 3:
				l ^= dp_rx_get_le32_split(data[0], data[1],
					data[2], data_next[0]);
				data = data_next + 1;
				space = (qdf_nbuf_len(wbuf) - rx_desc_len)
					- 1;
				break;
			}
			dp_rx_michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
			space = qdf_nbuf_len(wbuf) - rx_desc_len;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	dp_rx_michael_block(l, r);
	dp_rx_michael_block(l, r);
	dp_rx_put_le32(mic, l);
	dp_rx_put_le32(mic + 4, r);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame
 * @key: Pointer to the key
 * @msdu: fragment buffer
 * @hdrlen: Length of the header information
 *
 * Remove MIC information from the TKIP frame
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_tkip_demic(const
uint8_t *key,
	qdf_nbuf_t msdu, uint16_t hdrlen)
{
	QDF_STATUS status;
	uint32_t pktlen;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);

	/* frame length excluding the rx TLV prefix */
	pktlen = qdf_nbuf_len(msdu) - rx_desc_len;

	/* compute the expected MIC over the payload (MIC bytes excluded) */
	status = dp_rx_defrag_mic(key, msdu, hdrlen,
		pktlen - (hdrlen + dp_f_tkip.ic_miclen), mic);

	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* read the MIC carried at the tail of the frame */
	qdf_nbuf_copy_bits(msdu, pktlen - dp_f_tkip.ic_miclen + rx_desc_len,
		dp_f_tkip.ic_miclen, (caddr_t)mic0);

	/*
	 * NOTE(review): this returns an error when qdf_mem_cmp() yields
	 * zero for the computed vs received MIC. If qdf_mem_cmp() is
	 * memcmp-like (0 == equal), the polarity here is inverted and
	 * valid frames would be rejected -- verify qdf_mem_cmp()'s
	 * return convention before relying on this check.
	 */
	if (!qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
		return QDF_STATUS_E_DEFRAG_ERROR;

	/* strip the MIC trailer */
	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_miclen);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_decap_recombine(): Recombine the fragments
 * @peer: Pointer to the peer
 * @head_msdu: head of the fragment chain
 * @tid: Transmit identifier
 * @hdrsize: Header size
 *
 * Stitches the fragments back together using the MSDU link pointer
 * info saved earlier (one entry per fragment) from the REO ring
 * descriptors.
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_decap_recombine(struct dp_peer *peer,
	qdf_nbuf_t head_msdu, unsigned tid, uint16_t hdrsize)
{
	qdf_nbuf_t msdu = head_msdu;
	uint8_t i;
	/* count of ring descriptors saved for this TID so far */
	uint8_t num_ring_desc_saved = peer->rx_tid[tid].curr_ring_desc_idx;
	uint8_t num_msdus;

	/* Stitch fragments together */
	for (i = 0; (i < num_ring_desc_saved) && msdu; i++) {

		struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info =
			&peer->rx_tid[tid].transcap_msdu_link_ptr[i];

		struct hal_rx_mpdu_desc_info *mpdu_desc_info =
			&peer->rx_tid[tid].transcap_rx_mpdu_desc_info;

		num_msdus = hal_rx_chain_msdu_links(msdu, msdu_link_ptr_info,
			mpdu_desc_info);

		msdu = qdf_nbuf_next(msdu);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_defrag_err() - rx err handler
 * @vdev_id: vdev id
 * @peer_mac_addr: peer mac address
 * @tid: TID
 * @tsf32: TSF
 *
@err_type: error type
 * @rx_frame: rx frame
 * @pn: PN Number
 * @key_id: key id
 *
 * This function handles rx error and send MIC error notification
 *
 * Return: None
 */
static void dp_rx_defrag_err(uint8_t vdev_id, uint8_t *peer_mac_addr,
	int tid, uint32_t tsf32, uint32_t err_type, qdf_nbuf_t rx_frame,
	uint64_t *pn, uint8_t key_id)
{
	/* TODO: Who needs to know about the TKIP MIC error */
}

/*
 * dp_rx_defrag_qos_decap(): Remove QOS header from the frame
 * @nbuf: Pointer to the frame buffer
 * @hdrlen: Length of the header information
 *
 * Strips the QoS control field (and HT control field, when the Order
 * bit is set) from the 802.11 header in place and clears the QoS
 * subtype bit in the frame control.
 *
 * Returns: None
 */
static void dp_rx_defrag_qos_decap(qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	struct ieee80211_frame *wh;
	uint16_t qoslen;
	int pkt_tlv_size = sizeof(struct rx_pkt_tlvs); /* pkt TLV hdr size */
	uint16_t fc = 0;

	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);

	/* Get the frame control field if it is valid */
	if (hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr))
		fc = hal_rx_get_frame_ctrl_field(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) + pkt_tlv_size);

	/* only QoS data frames carry the QoS control field */
	if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
		qoslen = sizeof(struct ieee80211_qoscntl);

		/* Qos frame with Order bit set indicates a HTC frame */
		if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
			qoslen += sizeof(struct ieee80211_htc);

		/* remove QoS field from header */
		hdrlen -= qoslen;
		/* slide the 802.11 header forward over the QoS field */
		qdf_mem_move((uint8_t *)wh + qoslen, wh, hdrlen);

		wh = (struct ieee80211_frame *)qdf_nbuf_pull_head(nbuf,
				pkt_tlv_size +
				qoslen);
		/* clear QoS bit */
		if (wh)
			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;
	}
}

/*
 * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
 * @msdu: Pointer to the fragment buffer
 *
 * Transcap the fragment from 802.11 to 802.3
 *
 * Returns: None
 */
static void
dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t msdu)
{
	struct ieee80211_frame wh;
	uint32_t hdrsize;
	struct llc_snap_hdr_t llchdr;
	struct ethernet_hdr_t *eth_hdr;
	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
	struct ieee80211_frame *wh_ptr;

	/* snapshot the 802.11 header before the buffer is rewritten */
	wh_ptr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu) +
		rx_desc_len);
	qdf_mem_copy(&wh, wh_ptr, sizeof(wh));
	hdrsize = sizeof(struct ieee80211_frame);
	/* snapshot the LLC/SNAP header that follows the 802.11 header */
	qdf_mem_copy(&llchdr, ((uint8_t *) (qdf_nbuf_data(msdu) +
		rx_desc_len)) + hdrsize,
		sizeof(struct llc_snap_hdr_t));

	/*
	 * Now move the data pointer to the beginning of the mac header :
	 * new-header = old-hdr + (wifihdrsize + llchdrsize - ethhdrsize)
	 */
	qdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
		sizeof(struct llc_snap_hdr_t) -
		sizeof(struct ethernet_hdr_t)));
	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(msdu));

	/* rebuild DA/SA from the saved 802.11 addresses per ToDS/FromDS */
	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			IEEE80211_ADDR_LEN);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2,
			IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_TODS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
			IEEE80211_ADDR_LEN);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2,
			IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			IEEE80211_ADDR_LEN);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr3,
			IEEE80211_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		/* 4-address (WDS) frames: addresses are not rebuilt here */
		break;
	}

	/* TODO: Is it required to copy rx_pkt_tlvs
	 * to the start of data buffer
	 */
	qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
		sizeof(llchdr.ethertype));
}

/*
 * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
 * @peer: Pointer to the peer
 * @tid: Transmit Identifier
 *
 * Writes the saved MPDU descriptor info and first MSDU link pointer
 * into a source ring descriptor of the REO reinject SRNG.
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
	unsigned tid)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *ring_desc;
	enum hal_reo_error_status error;
	struct hal_rx_mpdu_desc_info *saved_mpdu_desc_info;
	void *hal_srng = soc->reo_reinject_ring.hal_srng;
	struct hal_rx_msdu_link_ptr_info *saved_msdu_link_ptr;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {

		/*
		 * NOTE(review): the log text says "WBM Release SRNG" but
		 * this is the REO reinject SRNG -- message looks stale.
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"HAL RING Access For WBM Release SRNG Failed: %pK",
			hal_srng);
		goto done;
	}

	ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);

	qdf_assert(ring_desc);

	error = HAL_RX_ERROR_STATUS_GET(ring_desc);

	if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"HAL RING 0x%pK:error %d", hal_srng, error);

		/* Don't know how to deal with this condition -- assert */
		qdf_assert(0);
		goto done;
	}

	saved_mpdu_desc_info =
		&peer->rx_tid[tid].transcap_rx_mpdu_desc_info;

	/* first msdu link pointer */
	saved_msdu_link_ptr =
		&peer->rx_tid[tid].transcap_msdu_link_ptr[0];

	hal_rx_defrag_update_src_ring_desc(ring_desc,
		saved_mpdu_desc_info, saved_msdu_link_ptr);

	status = QDF_STATUS_SUCCESS;
done:
	hal_srng_access_end(soc->hal_soc, hal_srng);
	return status;
}

/*
 * dp_rx_defrag(): Defragment the fragment chain
 * @peer: Pointer to the peer
 * @tid: Transmit Identifier
 *
@frag_list: Pointer to head list 852 * @frag_list_tail: Pointer to tail list 853 * 854 * Defragment the fragment chain 855 * 856 * Returns: QDF_STATUS 857 */ 858 static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid, 859 qdf_nbuf_t frag_list, qdf_nbuf_t frag_list_tail) 860 { 861 qdf_nbuf_t tmp_next; 862 qdf_nbuf_t cur = frag_list, msdu; 863 864 uint32_t index, tkip_demic = 0; 865 uint16_t hdr_space; 866 QDF_STATUS status; 867 uint8_t key[DEFRAG_IEEE80211_KEY_LEN]; 868 struct dp_vdev *vdev = peer->vdev; 869 870 cur = frag_list; 871 hdr_space = dp_rx_defrag_hdrsize(cur); 872 index = hal_rx_msdu_is_wlan_mcast(cur) ? 873 dp_sec_mcast : dp_sec_ucast; 874 875 switch (peer->security[index].sec_type) { 876 case htt_sec_type_tkip: 877 tkip_demic = 1; 878 879 case htt_sec_type_tkip_nomic: 880 while (cur) { 881 tmp_next = qdf_nbuf_next(cur); 882 if (dp_rx_defrag_tkip_decap(cur, hdr_space)) { 883 884 /* TKIP decap failed, discard frags */ 885 dp_rx_defrag_frames_free(frag_list); 886 887 QDF_TRACE(QDF_MODULE_ID_TXRX, 888 QDF_TRACE_LEVEL_ERROR, 889 "dp_rx_defrag: TKIP decap failed"); 890 891 return QDF_STATUS_E_DEFRAG_ERROR; 892 } 893 cur = tmp_next; 894 } 895 break; 896 897 case htt_sec_type_aes_ccmp: 898 while (cur) { 899 tmp_next = qdf_nbuf_next(cur); 900 if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) { 901 902 /* CCMP demic failed, discard frags */ 903 dp_rx_defrag_frames_free(frag_list); 904 905 QDF_TRACE(QDF_MODULE_ID_TXRX, 906 QDF_TRACE_LEVEL_ERROR, 907 "dp_rx_defrag: CCMP demic failed"); 908 909 return QDF_STATUS_E_DEFRAG_ERROR; 910 } 911 if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) { 912 913 /* CCMP decap failed, discard frags */ 914 dp_rx_defrag_frames_free(frag_list); 915 916 QDF_TRACE(QDF_MODULE_ID_TXRX, 917 QDF_TRACE_LEVEL_ERROR, 918 "dp_rx_defrag: CCMP decap failed"); 919 920 return QDF_STATUS_E_DEFRAG_ERROR; 921 } 922 cur = tmp_next; 923 } 924 break; 925 case htt_sec_type_wep40: 926 case htt_sec_type_wep104: 927 case htt_sec_type_wep128: 928 while (cur) 
{ 929 tmp_next = qdf_nbuf_next(cur); 930 if (dp_rx_defrag_wep_decap(cur, hdr_space)) { 931 932 /* WEP decap failed, discard frags */ 933 dp_rx_defrag_frames_free(frag_list); 934 935 QDF_TRACE(QDF_MODULE_ID_TXRX, 936 QDF_TRACE_LEVEL_ERROR, 937 "dp_rx_defrag: WEP decap failed"); 938 939 return QDF_STATUS_E_DEFRAG_ERROR; 940 } 941 cur = tmp_next; 942 } 943 break; 944 default: 945 QDF_TRACE(QDF_MODULE_ID_TXRX, 946 QDF_TRACE_LEVEL_ERROR, 947 "dp_rx_defrag: Did not match any security type"); 948 break; 949 } 950 951 if (tkip_demic) { 952 msdu = frag_list_tail; /* Only last fragment has the MIC */ 953 954 qdf_mem_copy(key, 955 peer->security[index].michael_key, 956 sizeof(peer->security[index].michael_key)); 957 if (dp_rx_defrag_tkip_demic(key, msdu, hdr_space)) { 958 qdf_nbuf_free(msdu); 959 dp_rx_defrag_err(vdev->vdev_id, peer->mac_addr.raw, 960 tid, 0, QDF_STATUS_E_DEFRAG_ERROR, msdu, 961 NULL, 0); 962 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 963 "dp_rx_defrag: TKIP demic failed"); 964 return QDF_STATUS_E_DEFRAG_ERROR; 965 } 966 } 967 968 dp_rx_defrag_qos_decap(cur, hdr_space); 969 970 /* Convert the header to 802.3 header */ 971 dp_rx_defrag_nwifi_to_8023(cur); 972 973 status = dp_rx_defrag_decap_recombine(peer, cur, tid, hdr_space); 974 975 if (QDF_IS_STATUS_ERROR(status)) { 976 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 977 "dp_rx_defrag_decap_recombine failed"); 978 979 qdf_assert(0); 980 } 981 982 return status; 983 } 984 985 /* 986 * dp_rx_defrag_cleanup(): Clean up activities 987 * @peer: Pointer to the peer 988 * @tid: Transmit Identifier 989 * @seq: Sequence number 990 * 991 * Returns: None 992 */ 993 static void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid, 994 uint16_t seq) 995 { 996 struct dp_rx_reorder_array_elem *rx_reorder_array_elem = 997 &peer->rx_tid[tid].array[seq]; 998 999 /* Free up nbufs */ 1000 dp_rx_defrag_frames_free(rx_reorder_array_elem->head); 1001 1002 /* Free up saved ring descriptors */ 1003 
dp_rx_clear_saved_desc_info(peer, tid); 1004 1005 rx_reorder_array_elem->head = NULL; 1006 rx_reorder_array_elem->tail = NULL; 1007 peer->rx_tid[tid].defrag_timeout_ms = 0; 1008 peer->rx_tid[tid].curr_frag_num = 0; 1009 peer->rx_tid[tid].curr_seq_num = 0; 1010 peer->rx_tid[tid].curr_ring_desc_idx = 0; 1011 } 1012 1013 /* 1014 * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor 1015 * @ring_desc: Pointer to the ring descriptor 1016 * @peer: Pointer to the peer 1017 * @tid: Transmit Identifier 1018 * @mpdu_desc_info: MPDU descriptor info 1019 * 1020 * Returns: None 1021 */ 1022 static void dp_rx_defrag_save_info_from_ring_desc(void *ring_desc, 1023 struct dp_peer *peer, unsigned tid, 1024 struct hal_rx_mpdu_desc_info *mpdu_desc_info) 1025 { 1026 struct dp_pdev *pdev = peer->vdev->pdev; 1027 void *msdu_link_desc_va = NULL; 1028 uint8_t idx = peer->rx_tid[tid].curr_ring_desc_idx; 1029 uint8_t rbm; 1030 1031 struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info = 1032 &peer->rx_tid[tid].transcap_msdu_link_ptr[++idx]; 1033 struct hal_rx_mpdu_desc_info *tmp_mpdu_desc_info = 1034 &peer->rx_tid[tid].transcap_rx_mpdu_desc_info; 1035 struct hal_buf_info hbi; 1036 1037 rbm = hal_rx_ret_buf_manager_get(ring_desc); 1038 if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) { 1039 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1040 "Invalid RBM while chaining frag MSDUs"); 1041 return; 1042 } 1043 1044 hal_rx_reo_buf_paddr_get(ring_desc, &hbi); 1045 1046 msdu_link_desc_va = 1047 dp_rx_cookie_2_link_desc_va(pdev->soc, &hbi); 1048 1049 hal_rx_defrag_save_info_from_ring_desc(msdu_link_desc_va, 1050 msdu_link_ptr_info, &hbi); 1051 1052 qdf_mem_copy(tmp_mpdu_desc_info, mpdu_desc_info, 1053 sizeof(*tmp_mpdu_desc_info)); 1054 } 1055 1056 /* 1057 * dp_rx_defrag_store_fragment(): Store incoming fragments 1058 * @soc: Pointer to the SOC data structure 1059 * @ring_desc: Pointer to the ring descriptor 1060 * @mpdu_desc_info: MPDU descriptor info 1061 * @msdu_info: 
Pointer to MSDU descriptor info
 * @head: Pointer to head of the local rx descriptor free-list
 * @tail: Pointer to tail of the local rx descriptor free-list
 * @tid: Traffic Identifier
 * @rx_desc: Pointer to rx descriptor
 *
 * Stores the incoming fragment in the per-TID reorder array until all
 * fragments of the sequence have arrived, then defragments the chain and
 * re-injects it into REO.
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc,
	void *ring_desc,
	union dp_rx_desc_list_elem_t **head,
	union dp_rx_desc_list_elem_t **tail,
	struct hal_rx_mpdu_desc_info *mpdu_desc_info,
	struct hal_rx_msdu_desc_info *msdu_info,
	unsigned tid, struct dp_rx_desc *rx_desc)
{
	uint8_t idx;
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	uint16_t peer_id;
	uint16_t rxseq, seq;
	uint8_t fragno, more_frag, all_frag_present = 0;
	uint16_t seq_num = mpdu_desc_info->mpdu_seq;
	QDF_STATUS status;
	struct dp_rx_tid *rx_tid;
	uint8_t mpdu_sequence_control_valid;
	uint8_t mpdu_frame_control_valid;
	qdf_nbuf_t frag = rx_desc->nbuf;
	uint8_t *rx_desc_info;

	/* Check if the packet is from a valid peer */
	peer_id = DP_PEER_METADATA_PEER_ID_GET(
			mpdu_desc_info->peer_meta_data);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		/* We should not receive anything from unknown peer
		 * however, that might happen while we are in the monitor mode.
		 * We don't need to handle that here
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Unknown peer, dropping the fragment");

		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);

		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	pdev = peer->vdev->pdev;
	rx_tid = &peer->rx_tid[tid];

	/*
	 * NOTE(review): fragments are assumed to land in slot 0 of the
	 * reorder array (seq masked by the BA window is asserted to be 0)
	 * -- confirm this holds for all BA window sizes.
	 */
	seq = seq_num & (peer->rx_tid[tid].ba_win_size - 1);
	qdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->rx_tid[tid].array[seq];

	rx_desc_info = qdf_nbuf_data(frag);
	mpdu_sequence_control_valid =
		hal_rx_get_mpdu_sequence_control_valid(rx_desc_info);

	/* Invalid MPDU sequence control field, MPDU is of no use */
	if (!mpdu_sequence_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Invalid MPDU seq control field, dropping MPDU");
		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);

		qdf_assert(0);
		goto end;
	}

	mpdu_frame_control_valid =
		hal_rx_get_mpdu_frame_control_valid(rx_desc_info);

	/* Invalid frame control field */
	if (!mpdu_frame_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Invalid frame control field, dropping MPDU");
		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);

		qdf_assert(0);
		goto end;
	}

	/* Current mpdu sequence */
	rxseq = hal_rx_get_rx_sequence(rx_desc_info);
	more_frag = hal_rx_get_rx_more_frag_bit(rx_desc_info);

	/* HW does not populate the fragment number as of now
	 * need to get from the 802.11 header
	 */
	fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);

	/*
	 * !more_frag: no more fragments to be delivered
	 * !frag_no: packet is not fragmented
	 * !rx_reorder_array_elem->head: no saved fragments so far
	 */
	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		/* We should not get into this situation here.
		 * It means an unfragmented packet with fragment flag
		 * is delivered over the REO exception ring.
		 * Typically it follows normal rx path.
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Rcvd unfragmented pkt on REO Err srng, dropping");
		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);

		qdf_assert(0);
		goto end;
	}

	/* Check if the fragment is for the same sequence or a different one */
	if (rx_reorder_array_elem->head) {
		if (rxseq != rx_tid->curr_seq_num) {

			/* Drop stored fragments if out of sequence
			 * fragment is received
			 */
			dp_rx_defrag_frames_free(rx_reorder_array_elem->head);

			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;

			/*
			 * Log the drop reason before updating curr_seq_num:
			 * the comparison previously ran after the update, so
			 * it always matched and always printed "address".
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s mismatch, dropping earlier sequence ",
				(rxseq == rx_tid->curr_seq_num)
					? "address"
					: "seq number");

			/*
			 * The sequence number for this fragment becomes the
			 * new sequence number to be processed
			 */
			rx_tid->curr_seq_num = rxseq;
		}
	} else {
		/* Start of a new sequence */
		rx_tid->curr_seq_num = rxseq;
	}

	/*
	 * If the earlier sequence was dropped, this will be the fresh start.
	 * Else, continue with next fragment in a given sequence
	 */
	dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head,
			&rx_reorder_array_elem->tail, frag,
			&all_frag_present);

	/*
	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
	 * packet sequence has more than 6 MSDUs for some reason, we will
	 * have to use the next MSDU link descriptor and chain them together
	 * before reinjection
	 */
	if (more_frag == 0 || fragno == HAL_RX_NUM_MSDU_DESC) {
		/*
		 * Deep copy of MSDU link pointer and msdu descriptor structs
		 */
		idx = peer->rx_tid[tid].curr_ring_desc_idx;
		if (idx < HAL_RX_MAX_SAVED_RING_DESC) {
			dp_rx_defrag_save_info_from_ring_desc(ring_desc,
					peer, tid, mpdu_desc_info);

			peer->rx_tid[tid].curr_ring_desc_idx++;
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"Max ring descr saved, dropping fragment");
			/*
			 * Free up saved fragments and ring descriptors if any
			 */
			goto end;
		}
	}

	/* TODO: handle fragment timeout gracefully
	 * NOTE(review): when defrag_timeout_check is set this path drops the
	 * whole sequence via the cleanup at 'end' -- confirm that is the
	 * intended interim behavior.
	 */
	if (pdev->soc->rx.flags.defrag_timeout_check) {
		dp_rx_defrag_waitlist_remove(peer, tid);
		goto end;
	}

	/* Yet to receive more fragments for this sequence number */
	if (!all_frag_present) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		/* Arm the per-TID defrag timeout and park on the waitlist */
		peer->rx_tid[tid].defrag_timeout_ms =
			now_ms + pdev->soc->rx.defrag.timeout_ms;

		dp_rx_defrag_waitlist_add(peer, tid);
		goto end;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"All fragments received for sequence: %d", rxseq);

	/* Process the fragments */
	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
			rx_reorder_array_elem->tail);
	if (QDF_IS_STATUS_ERROR(status)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Fragment processing failed");
		goto end;
	}

	/* Re-inject the fragments back to REO for further processing */
	status = dp_rx_defrag_reo_reinject(peer, tid);
	if (QDF_IS_STATUS_SUCCESS(status))
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			"Fragmented sequence successfully reinjected");
	else
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Fragmented sequence reinjection failed");

end:
	/*
	 * NOTE(review): E_DEFRAG_ERROR is returned on every path, including
	 * successful reinjection -- verify against the accounting done by
	 * the caller before changing.
	 */
	dp_rx_defrag_cleanup(peer, tid, seq);
	return QDF_STATUS_E_DEFRAG_ERROR;
}

/**
 * dp_rx_frag_handle() - Handles fragmented Rx frames
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements RX 802.11 fragmentation handling
 * The handling is mostly same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to
 * REO ring (with proper setting to by-pass fragmentation check
 * but use duplicate detection / re-ordering and routing these frames
 * to a different core.
 *
 * Return: uint32_t: No.
of elements processed 1300 */ 1301 uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc, 1302 struct hal_rx_mpdu_desc_info *mpdu_desc_info, 1303 union dp_rx_desc_list_elem_t **head, 1304 union dp_rx_desc_list_elem_t **tail, 1305 uint32_t quota) 1306 { 1307 uint32_t rx_bufs_used = 0; 1308 void *link_desc_va; 1309 struct hal_buf_info buf_info; 1310 struct hal_rx_msdu_list msdu_list; /* per MPDU list of MSDUs */ 1311 uint32_t tid; 1312 int idx; 1313 QDF_STATUS status; 1314 1315 qdf_assert(soc); 1316 qdf_assert(mpdu_desc_info); 1317 1318 /* Fragment from a valid peer */ 1319 hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); 1320 1321 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); 1322 1323 qdf_assert(link_desc_va); 1324 1325 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, 1326 "Number of MSDUs to process, num_msdus: %d", 1327 mpdu_desc_info->msdu_count); 1328 1329 1330 if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) { 1331 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1332 "Not sufficient MSDUs to process"); 1333 return rx_bufs_used; 1334 } 1335 1336 /* Get msdu_list for the given MPDU */ 1337 hal_rx_msdu_list_get(link_desc_va, &msdu_list, 1338 &mpdu_desc_info->msdu_count); 1339 1340 /* Process all MSDUs in the current MPDU */ 1341 for (idx = 0; (idx < mpdu_desc_info->msdu_count) && quota--; idx++) { 1342 struct dp_rx_desc *rx_desc = 1343 dp_rx_cookie_2_va_rxdma_buf(soc, 1344 msdu_list.sw_cookie[idx]); 1345 1346 qdf_assert(rx_desc); 1347 1348 tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start); 1349 1350 /* Process fragment-by-fragment */ 1351 status = dp_rx_defrag_store_fragment(soc, ring_desc, 1352 head, tail, mpdu_desc_info, 1353 &msdu_list.msdu_info[idx], tid, 1354 rx_desc); 1355 if (QDF_IS_STATUS_SUCCESS(status)) 1356 rx_bufs_used++; 1357 else 1358 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 1359 "Rx Defragmentation error. 
mpdu_seq: 0x%x msdu_count: %d mpdu_flags: %d", 1360 mpdu_desc_info->mpdu_seq, mpdu_desc_info->msdu_count, 1361 mpdu_desc_info->mpdu_flags); 1362 } 1363 1364 return rx_bufs_used; 1365 } 1366