/*
 * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <ol_htt_api.h>
#include <ol_txrx_api.h>
#include <ol_txrx_htt_api.h>
#include <ol_htt_rx_api.h>
#include <ol_rx_reorder.h>
#include <ol_rx_pn.h>
#include <ol_rx_fwd.h>
#include <ol_rx.h>
#include <ol_txrx_internal.h>
#include <ol_ctrl_txrx_api.h>
#include <ol_txrx_peer_find.h>
#include <qdf_nbuf.h>
#include <qdf_util.h>
#include <athdefs.h>
#include <qdf_mem.h>
#include <ol_rx_defrag.h>
#include <enet.h>
#include <qdf_time.h>		/* qdf_system_time */
#include <wlan_pkt_capture_ucfg_api.h>

#define DEFRAG_IEEE80211_ADDR_EQ(a1, a2) \
	(!qdf_mem_cmp(a1, a2, QDF_MAC_ADDR_SIZE))

#define DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	qdf_mem_copy(dst, src, QDF_MAC_ADDR_SIZE)

#define DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh)->i_fc[0] & \
	  (IEEE80211_FC0_TYPE_MASK | QDF_IEEE80211_FC0_SUBTYPE_QOS)) == \
	 (IEEE80211_FC0_TYPE_DATA | QDF_IEEE80211_FC0_SUBTYPE_QOS))

#define DEFRAG_IEEE80211_QOS_GET_TID(_x) \
	((_x)->i_qos[0] & IEEE80211_QOS_TID)

const struct ol_rx_defrag_cipher f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct ol_rx_defrag_cipher f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct ol_rx_defrag_cipher f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};

const struct ol_rx_defrag_cipher f_gcmp = {
	"AES-GCMP",
	WLAN_IEEE80211_GCMP_HEADERLEN,
	WLAN_IEEE80211_GCMP_MICLEN,
	WLAN_IEEE80211_GCMP_MICLEN,
};
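/*
 * Each ol_rx_defrag_cipher entry lists, in order, the cipher name, the
 * per-fragment header (IV/PN) length stripped from the front, the
 * trailer (ICV/MIC) length trimmed from the tail, and any separate MIC
 * length handled during de-MIC.  Illustrative arithmetic, assuming the
 * standard IEEE 802.11 field sizes (IV 3, key ID 1, extended IV 4,
 * WEP ICV 4, Michael MIC 8):
 *
 *	f_tkip.ic_header  = 3 + 1 + 4 = 8 bytes stripped per fragment
 *	f_tkip.ic_trailer = 4 bytes (WEP ICV) trimmed per fragment
 *	f_tkip.ic_miclen  = 8 bytes (Michael MIC) verified once, after
 *			    the fragments have been recombined
 */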
#if defined(CONFIG_HL_SUPPORT)

/**
 * ol_rx_frag_get_mac_hdr() - retrieve mac header
 * @htt_pdev: pointer to htt pdev handle
 * @frag: rx fragment
 *
 * Return: pointer to ieee mac header of frag
 */
static struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
	htt_pdev_handle htt_pdev, qdf_nbuf_t frag)
{
	void *rx_desc;
	int rx_desc_len;

	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
	return (struct ieee80211_frame *)(qdf_nbuf_data(frag) + rx_desc_len);
}

/**
 * ol_rx_frag_pull_hdr() - point to payload of rx frag
 * @htt_pdev: pointer to htt pdev handle
 * @frag: rx fragment
 * @hdrsize: header size
 *
 * Return: None
 */
static void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
				qdf_nbuf_t frag, int hdrsize)
{
	void *rx_desc;
	int rx_desc_len;

	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
	qdf_nbuf_pull_head(frag, rx_desc_len + hdrsize);
}

/**
 * ol_rx_frag_desc_adjust() - adjust rx frag descriptor position
 * @pdev: pointer to txrx handle
 * @msdu: msdu
 * @rx_desc_old_position: rx descriptor old position
 * @ind_old_position: index of old position
 * @rx_desc_len: rx descriptor length
 *
 * Return: None
 */
static void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
		       qdf_nbuf_t msdu,
		       void **rx_desc_old_position,
		       void **ind_old_position, int *rx_desc_len)
{
	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(pdev->htt_pdev,
							  msdu);
	*ind_old_position = *rx_desc_old_position - HTT_RX_IND_HL_BYTES;
	*rx_desc_len = htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev,
						   *rx_desc_old_position);
}

/**
 * ol_rx_frag_restructure() - point to payload for HL
 * @pdev: physical device object
 * @msdu: the buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor
 * @ind_old_position: rx msdu indication
 * @f_type: pointer to the rx defrag cipher
 * @rx_desc_len: length by which rx descriptor to move
 *
 * Return: None
 */
static void
ol_rx_frag_restructure(
	ol_txrx_pdev_handle pdev,
	qdf_nbuf_t msdu,
	void *rx_desc_old_position,
	void *ind_old_position,
	const struct ol_rx_defrag_cipher *f_type,
	int rx_desc_len)
{
	if ((!ind_old_position) || (!rx_desc_old_position)) {
		ol_txrx_err("ind_old_position, rx_desc_old_position is NULL");
		ASSERT(0);
		return;
	}
	/* move rx descriptor */
	qdf_mem_move(rx_desc_old_position + f_type->ic_header,
		     rx_desc_old_position, rx_desc_len);
	/* move rx indication */
	qdf_mem_move(ind_old_position + f_type->ic_header, ind_old_position,
		     HTT_RX_IND_HL_BYTES);
}

/**
 * ol_rx_get_desc_len() - get the HL rx descriptor size
 * @htt_pdev: the HTT instance the rx data was received on
 * @wbuf: buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor
 *
 * Return: the HL rx desc size
 */
static
int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
		       qdf_nbuf_t wbuf,
		       void **rx_desc_old_position)
{
	int rx_desc_len = 0;

	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(htt_pdev, wbuf);
	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev,
						  *rx_desc_old_position);

	return rx_desc_len;
}

/**
 * ol_rx_defrag_push_rx_desc() - push the rx descriptor back onto the frame
 * @nbuf: buffer containing the MSDU payload
 * @rx_desc_old_position: rx MSDU descriptor
 * @ind_old_position: rx msdu indication
 * @rx_desc_len: HL rx desc size
 *
 * Return: None
 */
static
void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
			       void *rx_desc_old_position,
			       void *ind_old_position,
			       int rx_desc_len)
{
	qdf_nbuf_push_head(nbuf, rx_desc_len);
	qdf_mem_move(
		qdf_nbuf_data(nbuf), rx_desc_old_position, rx_desc_len);
	qdf_mem_move(
		qdf_nbuf_data(nbuf) - HTT_RX_IND_HL_BYTES, ind_old_position,
		HTT_RX_IND_HL_BYTES);
}
#else

static inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
	htt_pdev_handle htt_pdev,
	qdf_nbuf_t frag)
{
	return
		(struct ieee80211_frame *)qdf_nbuf_data(frag);
}

static inline void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
				       qdf_nbuf_t frag, int hdrsize)
{
	qdf_nbuf_pull_head(frag, hdrsize);
}
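/*
 * Buffer layout note (illustrative): in the low-latency (LL) path the
 * 802.11 header starts directly at qdf_nbuf_data(frag), so the helpers
 * here are trivial.  In the high-latency (HL) path each network buffer
 * still carries the HTT rx descriptor (and, HTT_RX_IND_HL_BYTES before
 * it, the rx indication) in front of the frame, roughly:
 *
 *	[rx ind][rx descriptor][802.11 header][payload...]
 *
 * which is why the HL variants above offset every access by the
 * descriptor size returned by htt_rx_msdu_rx_desc_size_hl().
 */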
static inline void
ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
		       qdf_nbuf_t msdu,
		       void **rx_desc_old_position,
		       void **ind_old_position, int *rx_desc_len)
{
	*rx_desc_old_position = NULL;
	*ind_old_position = NULL;
	*rx_desc_len = 0;
}

static inline void
ol_rx_frag_restructure(
	ol_txrx_pdev_handle pdev,
	qdf_nbuf_t msdu,
	void *rx_desc_old_position,
	void *ind_old_position,
	const struct ol_rx_defrag_cipher *f_type,
	int rx_desc_len)
{
	/* no op */
}

static inline
int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
		       qdf_nbuf_t wbuf,
		       void **rx_desc_old_position)
{
	return 0;
}

static inline
void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
			       void *rx_desc_old_position,
			       void *ind_old_position,
			       int rx_desc_len)
{
}
#endif /* CONFIG_HL_SUPPORT */

/*
 * Process incoming fragments
 */
void
ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
			      qdf_nbuf_t rx_frag_ind_msg,
			      uint16_t peer_id, uint8_t tid)
{
	uint16_t seq_num;
	uint16_t seq_num_start, seq_num_end;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	qdf_nbuf_t head_msdu, tail_msdu;
	void *rx_mpdu_desc;
	uint8_t pktlog_bit;
	uint32_t msdu_count = 0;
	int ret;
	void *rx_desc;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("Invalid tid: %u", tid);
		return;
	}

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
	    htt_rx_ind_flush(pdev->htt_pdev, rx_frag_ind_msg) && peer) {
		htt_rx_frag_ind_flush_seq_num_range(pdev->htt_pdev,
						    rx_frag_ind_msg,
						    &seq_num_start,
						    &seq_num_end);
		/*
		 * Assuming the flush indication for frags sent from the
		 * target is separate from that for normal frames
		 */
		ol_rx_reorder_flush_frag(htt_pdev, peer, tid, seq_num_start);
	} else {
		uint32_t *msg_word;
		uint8_t *rx_ind_data;

		rx_ind_data = qdf_nbuf_data(rx_frag_ind_msg);
		msg_word = (uint32_t *)rx_ind_data;
		msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word +
								    1));
	}

	pktlog_bit =
		(htt_rx_amsdu_rx_in_order_get_pktlog(rx_frag_ind_msg) == 0x01);
	ret = htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
			      &tail_msdu, &msdu_count);
	/* Return if the MSDU pop from the rx hash table fails: recovery
	 * has been triggered, so exit gracefully.
	 */
	if (!ret)
		return;

	if (peer) {
		qdf_assert(head_msdu == tail_msdu);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev, head_msdu);
		} else {
			rx_mpdu_desc =
				htt_rx_mpdu_desc_list_next(htt_pdev,
							   rx_frag_ind_msg);
		}
		seq_num = htt_rx_mpdu_desc_seq_num(htt_pdev,
						   rx_mpdu_desc, true);
		OL_RX_ERR_STATISTICS_1(pdev, peer->vdev, peer, rx_mpdu_desc,
				       OL_RX_ERR_NONE_FRAG);
		ol_rx_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, head_msdu);
		ol_rx_timestamp(pdev->ctrl_pdev, rx_desc, head_msdu);
		ol_rx_reorder_store_frag(pdev, peer, tid, seq_num, head_msdu);
	} else {
		/* invalid frame - discard it */
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu);
		else
			htt_rx_mpdu_desc_list_next(htt_pdev, rx_frag_ind_msg);

		ol_rx_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
	}
	/* request HTT to provide new rx MSDU buffers for the target to fill. */
	if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
	    !pdev->cfg.is_high_latency)
		htt_rx_msdu_buff_in_order_replenish(htt_pdev, msdu_count);
	else
		htt_rx_msdu_buff_replenish(htt_pdev);
}

/*
 * Flushing fragments
 */
void
ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned int tid, uint16_t seq_num)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	int seq;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];
	if (rx_reorder_array_elem->head) {
		ol_rx_frames_free(htt_pdev, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
	}
}

/*
 * Reorder and store fragments
 */
void
ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
			 struct ol_txrx_peer_t *peer,
			 unsigned int tid, uint16_t seq_num, qdf_nbuf_t frag)
{
	struct ieee80211_frame *fmac_hdr, *mac_hdr;
	uint8_t fragno, more_frag, all_frag_present = 0;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	uint16_t frxseq, rxseq, seq;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	void *rx_desc;
	uint8_t index;

	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
	qdf_assert(seq == 0);
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	rxseq = qdf_le16_to_cpu(*(uint16_t *)mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
	fragno = qdf_le16_to_cpu(*(uint16_t *)mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
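	/*
	 * Illustrative decode of the little-endian sequence-control field,
	 * assuming the usual IEEE 802.11 layout (4-bit fragment number in
	 * the low bits, 12-bit sequence number above it):
	 *
	 *	i_seq = {0x63, 0x05}  ->  seq_ctrl = 0x0563
	 *	rxseq  = 0x0563 >> 4  = 0x56  (sequence number)
	 *	fragno = 0x0563 & 0xf = 3     (fragment number)
	 */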
	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
	qdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;

	/*
	 * Multicast/Broadcast frames should not be fragmented, so drop
	 * such frames.
	 */
	if (index != txrx_sec_ucast) {
		ol_rx_frames_free(htt_pdev, frag);
		return;
	}

	if (peer->security[index].sec_type != htt_sec_type_none &&
	    !htt_rx_mpdu_is_encrypted(htt_pdev, rx_desc)) {
		ol_txrx_err("Unencrypted fragment received in security mode %d",
			    peer->security[index].sec_type);
		ol_rx_frames_free(htt_pdev, frag);
		return;
	}

	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		rx_reorder_array_elem->head = frag;
		rx_reorder_array_elem->tail = frag;
		qdf_nbuf_set_next(frag, NULL);
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		return;
	}
	if (rx_reorder_array_elem->head) {
		fmac_hdr = (struct ieee80211_frame *)
			ol_rx_frag_get_mac_hdr(htt_pdev,
					       rx_reorder_array_elem->head);
		frxseq = qdf_le16_to_cpu(*(uint16_t *)fmac_hdr->i_seq) >>
			IEEE80211_SEQ_SEQ_SHIFT;
		if (rxseq != frxseq
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr1,
						 fmac_hdr->i_addr1)
		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr2,
						 fmac_hdr->i_addr2)) {
			ol_rx_frames_free(htt_pdev,
					  rx_reorder_array_elem->head);
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
			ol_txrx_err("ol_rx_reorder_store:%s mismatch",
				    (rxseq == frxseq)
				    ? "address"
				    : "seq number");
		}
	}

	ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head,
			      &rx_reorder_array_elem->tail, frag,
			      &all_frag_present);

	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_remove(peer, tid);

	if (all_frag_present) {
		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		peer->tids_rx_reorder[tid].defrag_timeout_ms = 0;
		peer->tids_last_seq[tid] = seq_num;
	} else if (pdev->rx.flags.defrag_timeout_check) {
		uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

		peer->tids_rx_reorder[tid].defrag_timeout_ms =
			now_ms + pdev->rx.defrag.timeout_ms;
		ol_rx_defrag_waitlist_add(peer, tid);
	}
}
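/*
 * Sketch of the fraglist invariant maintained below (illustrative):
 * fragments are kept on a singly linked list sorted by ascending
 * fragment number, duplicates are dropped, and the set is complete
 * once fragment 0 is present, the fragment numbers are consecutive,
 * and the last fragment has the More-Fragments bit clear.  E.g.
 * arrivals 2, 0, 3(last), 1 build the list 0->1->2->3, and only the
 * final insertion reports all_frag_present.
 */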
/*
 * Insert and store fragments
 */
void
ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
		      qdf_nbuf_t *head_addr,
		      qdf_nbuf_t *tail_addr,
		      qdf_nbuf_t frag, uint8_t *all_frag_present)
{
	qdf_nbuf_t next, prev = NULL, cur = *head_addr;
	struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
	uint8_t fragno, cur_fragno, lfragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;

	qdf_assert(frag);

	mac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
	fragno = qdf_le16_to_cpu(*(uint16_t *)mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;

	if (!(*head_addr)) {
		*head_addr = frag;
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		return;
	}
	/* For efficiency, compare with tail first */
	lmac_hdr = (struct ieee80211_frame *)
		ol_rx_frag_get_mac_hdr(htt_pdev, *tail_addr);
	lfragno = qdf_le16_to_cpu(*(uint16_t *)lmac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
	if (fragno > lfragno) {
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
	} else {
		do {
			cmac_hdr = (struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, cur);
			cur_fragno =
				qdf_le16_to_cpu(*(uint16_t *)cmac_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			prev = cur;
			cur = qdf_nbuf_next(cur);
		} while (fragno > cur_fragno);

		if (fragno == cur_fragno) {
			/* duplicate fragment: free it and keep the list */
			htt_rx_desc_frame_free(htt_pdev, frag);
			*all_frag_present = 0;
			return;
		}

		qdf_nbuf_set_next(prev, frag);
		qdf_nbuf_set_next(frag, cur);
	}
	next = qdf_nbuf_next(*head_addr);
	lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
								    *tail_addr);
	last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
	if (!last_morefrag) {
		do {
			next_hdr =
				(struct ieee80211_frame *)
				ol_rx_frag_get_mac_hdr(htt_pdev, next);
			next_fragno =
				qdf_le16_to_cpu(*(uint16_t *)next_hdr->i_seq) &
				IEEE80211_SEQ_FRAG_MASK;
			count++;
			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return;
		}
	}
	*all_frag_present = 0;
}

/*
 * add tid to pending fragment wait list
 */
void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned int tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	TAILQ_INSERT_TAIL(&pdev->rx.defrag.waitlist, rx_reorder,
			  defrag_waitlist_elem);
}

/*
 * remove tid from pending fragment wait list
 */
void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned int tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];

	if (rx_reorder->defrag_waitlist_elem.tqe_prev) {
		TAILQ_REMOVE(&pdev->rx.defrag.waitlist, rx_reorder,
			     defrag_waitlist_elem);

		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
		rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
	} else if (rx_reorder->defrag_waitlist_elem.tqe_next) {
		/*
		 * Inconsistent queue state: tqe_next is set while tqe_prev
		 * is NULL, which should never happen for a queued element.
		 * Flag it and clear the stale forward link.
		 */
		ol_txrx_alert("waitlist->tqe_prev = NULL");
		QDF_ASSERT(0);
		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	}
}

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
#endif

/*
 * flush stale fragments from the waitlist
 */
void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
{
	struct ol_rx_reorder_t *rx_reorder, *tmp;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

	TAILQ_FOREACH_SAFE(rx_reorder, &pdev->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct ol_txrx_peer_t *peer;
		struct ol_rx_reorder_t *rx_reorder_base;
		unsigned int tid;

		if (rx_reorder->defrag_timeout_ms > now_ms)
			break;

		tid = rx_reorder->tid;
		if (tid >= OL_TXRX_NUM_EXT_TIDS) {
			ol_txrx_err("Invalid tid: %u", tid);
			WARN_ON(1);
			continue;
		}
		/* get index 0 of the rx_reorder array */
		rx_reorder_base = rx_reorder - tid;
		peer =
			container_of(rx_reorder_base, struct ol_txrx_peer_t,
				     tids_rx_reorder[0]);

		ol_rx_defrag_waitlist_remove(peer, tid);
		ol_rx_reorder_flush_frag(pdev->htt_pdev, peer, tid,
					 0 /* frags always stored at seq 0 */);
	}
}
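/*
 * Pointer arithmetic above, by example (illustrative): if the waitlist
 * element is &peer->tids_rx_reorder[5], then rx_reorder - 5 yields
 * &peer->tids_rx_reorder[0], and container_of() subtracts the offset of
 * that member from the address to recover the enclosing peer:
 *
 *	peer = (struct ol_txrx_peer_t *)((char *)rx_reorder_base -
 *		offsetof(struct ol_txrx_peer_t, tids_rx_reorder[0]));
 */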
/**
 * ol_rx_frag_gcmp_decap() - remove the GCMP header from a fragment
 * @pdev: data path pdev handle
 * @nbuf: network buffer
 * @hdrlen: MAC header len
 *
 * Return: OL_RX_DEFRAG_OK on success else failure code
 */
static int
ol_rx_frag_gcmp_decap(ol_txrx_pdev_handle pdev,
		      qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	qdf_mem_move(orig_hdr + f_gcmp.ic_header, orig_hdr, hdrlen);
	ol_rx_frag_restructure(
		pdev,
		nbuf,
		rx_desc_old_position,
		ind_old_position,
		&f_gcmp,
		rx_desc_len);
	qdf_nbuf_pull_head(nbuf, f_gcmp.ic_header);

	return OL_RX_DEFRAG_OK;
}

/**
 * ol_rx_frag_gcmp_demic() - remove MIC info from a GCMP fragment
 * @pdev: data path pdev handle
 * @wbuf: network buffer
 * @hdrlen: MAC header len
 *
 * Return: OL_RX_DEFRAG_OK on success else failure code
 */
static int
ol_rx_frag_gcmp_demic(ol_txrx_pdev_handle pdev,
		      qdf_nbuf_t wbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	orig_hdr = (uint8_t *)(qdf_nbuf_data(wbuf) + rx_desc_len);

	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	qdf_nbuf_trim_tail(wbuf, f_gcmp.ic_trailer);

	return OL_RX_DEFRAG_OK;
}

/*
 * Handle security checking and processing of fragments
 */
void
ol_rx_defrag(ol_txrx_pdev_handle pdev,
	     struct ol_txrx_peer_t *peer, unsigned int tid,
	     qdf_nbuf_t frag_list)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	qdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list;
	uint8_t index, tkip_demic = 0;
	uint16_t hdr_space;
	void *rx_desc;
	struct ieee80211_frame *wh;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	struct ol_txrx_peer_t *peer_head = NULL;
	uint8_t bssid[QDF_MAC_ADDR_SIZE];
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (qdf_unlikely(!soc))
		return;

	vdev = peer->vdev;

	/* bypass defrag for safe mode */
	if (vdev->safemode) {
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
			ol_rx_in_order_deliver(vdev, peer, tid, frag_list);
		else
			ol_rx_deliver(vdev, peer, tid, frag_list);
		return;
	}

	while (cur) {
		tmp_next = qdf_nbuf_next(cur);
		qdf_nbuf_set_next(cur, NULL);
		/*
		 * Strict PN check between the first fragment of the current
		 * frame and the last fragment of the previous frame is not
		 * necessary.
		 */
		if (!ol_rx_pn_check_base(vdev, peer, tid, cur,
					 (cur == frag_list) ?
					 false : true)) {
			/* PN check failed, discard frags */
			if (prev) {
				qdf_nbuf_set_next(prev, NULL);
				ol_rx_frames_free(htt_pdev, frag_list);
			}
			ol_rx_frames_free(htt_pdev, tmp_next);
			ol_txrx_err("PN Check failed");
			return;
		}
		/* remove FCS from each fragment */
		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		prev = cur;
		qdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list;
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, cur);
	hdr_space = ol_rx_frag_hdrsize(wh);
	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag_list);
	qdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;

	switch (peer->security[index].sec_type) {
	case htt_sec_type_tkip:
		tkip_demic = 1;
		fallthrough;
		/* fall-through to the rest of the TKIP ops */
	case htt_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) {
				/* TKIP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("TKIP decap failed");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) {
				/* CCMP demic failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("CCMP demic failed");
				return;
			}
			if (!ol_rx_frag_ccmp_decap(pdev, cur, hdr_space)) {
				/* CCMP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("CCMP decap failed");
				return;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_wep40:
	case htt_sec_type_wep104:
	case htt_sec_type_wep128:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) {
				/* WEP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("wep decap failed");
				return;
			}
			cur = tmp_next;
		}
		break;
	case htt_sec_type_aes_gcmp:
	case htt_sec_type_aes_gcmp_256:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (!ol_rx_frag_gcmp_demic(pdev, cur, hdr_space)) {
				/* GCMP demic failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("GCMP demic failed");
				return;
			}
			if (!ol_rx_frag_gcmp_decap(pdev, cur, hdr_space)) {
				/* GCMP decap failed, discard frags */
				ol_rx_frames_free(htt_pdev, frag_list);
				ol_txrx_err("GCMP decap failed");
				return;
			}
			cur = tmp_next;
		}
		break;
	default:
		break;
	}

	msdu = ol_rx_defrag_decap_recombine(htt_pdev, frag_list, hdr_space);
	if (!msdu)
		return;

	if (tkip_demic) {
		qdf_mem_copy(key,
			     peer->security[index].michael_key,
			     sizeof(peer->security[index].michael_key));
		if (!ol_rx_frag_tkip_demic(pdev, key, msdu, hdr_space)) {
			uint64_t pn = 0;

			ol_rx_err(pdev->ctrl_pdev,
				  vdev->vdev_id, peer->mac_addr.raw, tid, 0,
				  OL_RX_ERR_TKIP_MIC, msdu, &pn, 0);
			htt_rx_desc_frame_free(htt_pdev, msdu);
			ol_txrx_err("TKIP demic failed");
			return;
		}
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, msdu);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh))
		ol_rx_defrag_qos_decap(pdev, msdu, hdr_space);
	if (ol_cfg_frame_type(pdev->ctrl_pdev) == wlan_frm_fmt_802_3)
		ol_rx_defrag_nwifi_to_8023(pdev, msdu);
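	/*
	 * At this point the fragments have been PN-checked, stripped of
	 * their per-fragment crypto headers and trailers, recombined into
	 * a single MSDU, MIC-verified (for TKIP), QoS-decapped, and, if
	 * the configured rx format calls for it, converted to an 802.3
	 * header.
	 */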
	/* Packet Capture Mode */
	if ((ucfg_pkt_capture_get_pktcap_mode((void *)soc->psoc) &
	     PKT_CAPTURE_MODE_DATA_ONLY)) {
		if (peer) {
			if (peer->vdev) {
				qdf_spin_lock_bh(&pdev->peer_ref_mutex);
				peer_head = TAILQ_FIRST(&vdev->peer_list);
				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
				if (peer_head) {
					qdf_spin_lock_bh(
						&peer_head->peer_info_lock);
					qdf_mem_copy(bssid,
						     &peer_head->mac_addr.raw,
						     QDF_MAC_ADDR_SIZE);
					qdf_spin_unlock_bh(
						&peer_head->peer_info_lock);

					ucfg_pkt_capture_rx_msdu_process(
								bssid, msdu,
								vdev->vdev_id,
								htt_pdev);
				}
			}
		}
	}

	ol_rx_fwd_check(vdev, peer, tid, msdu);
}

/*
 * Handling TKIP processing for defragmentation
 */
int
ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
		      qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	/* Header should have extended IV */
	origHdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);

	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	qdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
	ol_rx_frag_restructure(
		pdev,
		msdu,
		rx_desc_old_position,
		ind_old_position,
		&f_tkip,
		rx_desc_len);
	qdf_nbuf_pull_head(msdu, f_tkip.ic_header);
	qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
	return OL_RX_DEFRAG_OK;
}

/*
 * Handling WEP processing for defragmentation
 */
int
ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	origHdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen);
	ol_rx_frag_restructure(
		pdev,
		msdu,
		rx_desc_old_position,
		ind_old_position,
		&f_wep,
		rx_desc_len);
	qdf_nbuf_pull_head(msdu, f_wep.ic_header);
	qdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
	return OL_RX_DEFRAG_OK;
}
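/*
 * Decap sketch for the helpers above (illustrative, assuming the 8-byte
 * TKIP/CCMP-style IV described by f_tkip/f_ccmp): the 802.11 header is
 * slid forward over the IV, the now-dead front bytes are pulled off,
 * and the trailer is trimmed:
 *
 *	[hdr][IV][payload][ICV]  --qdf_mem_move-->  [..][hdr][payload][ICV]
 *	qdf_nbuf_pull_head(msdu, ic_header);	// drop the dead front bytes
 *	qdf_nbuf_trim_tail(msdu, ic_trailer);	// drop the ICV
 */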
/*
 * Verify and strip the Michael MIC from the frame.
 */
int
ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key,
		      qdf_nbuf_t msdu, uint16_t hdrlen)
{
	int status;
	uint32_t pktlen;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	pktlen = ol_rx_defrag_len(msdu) - rx_desc_len;

	/* compute the MIC over the pseudo header and reassembled payload */
	status = ol_rx_defrag_mic(pdev, key, msdu, hdrlen,
				  pktlen - (hdrlen + f_tkip.ic_miclen), mic);
	if (status != OL_RX_DEFRAG_OK)
		return OL_RX_DEFRAG_ERR;

	/* compare against the MIC carried at the tail of the frame */
	ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len,
			      f_tkip.ic_miclen, (caddr_t)mic0);
	if (qdf_mem_cmp(mic, mic0, f_tkip.ic_miclen))
		return OL_RX_DEFRAG_ERR;

	qdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen);
	return OL_RX_DEFRAG_OK;
}

/*
 * Handling CCMP processing for defragmentation
 */
int
ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
		      qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	origHdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	qdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen);
	ol_rx_frag_restructure(
		pdev,
		nbuf,
		rx_desc_old_position,
		ind_old_position,
		&f_ccmp,
		rx_desc_len);
	qdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);

	return OL_RX_DEFRAG_OK;
}

/*
 * Verify and strip MIC from the frame.
 */
int
ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
		      qdf_nbuf_t wbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	origHdr = (uint8_t *)(qdf_nbuf_data(wbuf) + rx_desc_len);

	ivp = origHdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return OL_RX_DEFRAG_ERR;

	qdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);

	return OL_RX_DEFRAG_OK;
}
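/*
 * Extended-IV check used by the decap/demic helpers, by example
 * (illustrative, assuming the standard CCMP header layout): the 8-byte
 * CCMP header is PN0 PN1 rsvd keyid PN2 PN3 PN4 PN5, and byte 3
 * (offset IEEE80211_WEP_IVLEN) must carry the Ext-IV bit (0x20)
 * alongside the key id.  A clear Ext-IV bit means the frame cannot be
 * a valid TKIP/CCMP/GCMP frame, so the helpers return OL_RX_DEFRAG_ERR.
 */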
/*
 * Craft the pseudo header used to calculate the MIC.
 */
void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3);	/* DA */
		DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr4);
		break;
	}
	/*
	 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for data frames, but
	 * it can also be set for deauth, disassoc, action, etc. in a
	 * mgmt type frame. It comes into the picture for MFP.
	 */
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		const struct ieee80211_qosframe *qwh =
			(const struct ieee80211_qosframe *)wh;
		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
	} else {
		hdr[12] = 0;
	}
	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
}
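/*
 * Resulting 16-byte Michael pseudo header, by example (illustrative):
 *
 *	hdr[0..5]   = DA   (e.g. aa:bb:cc:dd:ee:ff)
 *	hdr[6..11]  = SA   (e.g. 11:22:33:44:55:66)
 *	hdr[12]     = TID  (0 for non-QoS frames)
 *	hdr[13..15] = 0    (reserved)
 *
 * This matches the input Michael expects: DA, SA, priority, three zero
 * bytes, followed by the plaintext MSDU data.
 */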
/*
 * Michael MIC computation for defragmentation
 */
int
ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
		 const uint8_t *key,
		 qdf_nbuf_t wbuf,
		 uint16_t off, uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	ol_rx_frag_desc_adjust(pdev,
			       wbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	ol_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) +
							rx_desc_len), hdr);
	l = get_le32(key);
	r = get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, priority, 3 x 0 */
	l ^= get_le32(hdr);
	michael_block(l, r);
	l ^= get_le32(&hdr[4]);
	michael_block(l, r);
	l ^= get_le32(&hdr[8]);
	michael_block(l, r);
	l ^= get_le32(&hdr[12]);
	michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *)qdf_nbuf_data(wbuf) + rx_desc_len + off;
	space = ol_rx_defrag_len(wbuf) - rx_desc_len - off;
	for (;;) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from the current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= get_le32(data);
			michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = qdf_nbuf_next(wbuf);
		if (!wbuf)
			return OL_RX_DEFRAG_ERR;

		rx_desc_len = ol_rx_get_desc_len(htt_pdev, wbuf,
						 &rx_desc_old_position);

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *)qdf_nbuf_data(wbuf) + rx_desc_len;
			if ((ol_rx_defrag_len(wbuf) - rx_desc_len) <
			    sizeof(uint32_t) - space) {
				return OL_RX_DEFRAG_ERR;
			}
			switch (space) {
			case 1:
				l ^= get_le32_split(data[0], data_next[0],
						    data_next[1], data_next[2]);
				data = data_next + 3;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 3;
				break;
			case 2:
				l ^= get_le32_split(data[0], data[1],
						    data_next[0], data_next[1]);
				data = data_next + 2;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 2;
				break;
			case 3:
				l ^= get_le32_split(data[0], data[1], data[2],
						    data_next[0]);
				data = data_next + 1;
				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
					- 1;
				break;
			}
			michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for the next buffer.
			 */
			data = (uint8_t *)qdf_nbuf_data(wbuf) + rx_desc_len;
			space = ol_rx_defrag_len(wbuf) - rx_desc_len;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	michael_block(l, r);
	michael_block(l, r);
	put_le32(mic, l);
	put_le32(mic + 4, r);

	return OL_RX_DEFRAG_OK;
}

/*
 * Calculate the 802.11 header size
 */
uint16_t ol_rx_frag_hdrsize(const void *data)
{
	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
	uint16_t size = sizeof(struct ieee80211_frame);

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
		size += QDF_MAC_ADDR_SIZE;

	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		size += sizeof(uint16_t);
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			size += sizeof(struct ieee80211_htc);
	}
	return size;
}
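/*
 * Worked header-size example (illustrative, assuming the usual sizes:
 * 24-byte base header, 6-byte fourth address, 2-byte QoS control,
 * 4-byte HT control):
 *
 *	plain data frame             : 24
 *	QoS data frame               : 24 + 2     = 26
 *	4-address QoS data frame     : 24 + 6 + 2 = 32
 *	QoS + HT control (order bit) : 24 + 2 + 4 = 30
 */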
/*
 * Recombine and decap fragments
 */
qdf_nbuf_t
ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
			     qdf_nbuf_t frag_list, uint16_t hdrsize)
{
	qdf_nbuf_t tmp;
	qdf_nbuf_t msdu = frag_list;
	qdf_nbuf_t rx_nbuf = frag_list;
	struct ieee80211_frame *wh;

	msdu = qdf_nbuf_next(msdu);
	qdf_nbuf_set_next(rx_nbuf, NULL);
	while (msdu) {
		htt_rx_msdu_desc_free(htt_pdev, msdu);
		tmp = qdf_nbuf_next(msdu);
		qdf_nbuf_set_next(msdu, NULL);
		ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize);
		if (!ol_rx_defrag_concat(rx_nbuf, msdu)) {
			ol_rx_frames_free(htt_pdev, tmp);
			htt_rx_desc_frame_free(htt_pdev, rx_nbuf);
			qdf_nbuf_free(msdu);
			/* msdu rx desc already freed above */
			return NULL;
		}
		msdu = tmp;
	}
	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
							      rx_nbuf);
	wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
	*(uint16_t *)wh->i_seq &= ~IEEE80211_SEQ_FRAG_MASK;

	return rx_nbuf;
}

void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu)
{
	struct ieee80211_frame wh;
	uint32_t hdrsize;
	struct llc_snap_hdr_t llchdr;
	struct ethernet_hdr_t *eth_hdr;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;
	struct ieee80211_frame *wh_ptr;

	ol_rx_frag_desc_adjust(pdev,
			       msdu,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh_ptr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_copy(&wh, wh_ptr, sizeof(wh));
	hdrsize = sizeof(struct ieee80211_frame);
	qdf_mem_copy(&llchdr, ((uint8_t *)(qdf_nbuf_data(msdu) +
					   rx_desc_len)) + hdrsize,
		     sizeof(struct llc_snap_hdr_t));

	/*
	 * Now move the data pointer to the beginning of the MAC header:
	 * new-header = old-hdr + (wifihdrsize + llchdrsize - ethhdrsize)
	 */
	qdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));
	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(msdu));
	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_TODS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr3, QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		break;
	}

	qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
		     sizeof(llchdr.ethertype));

	ol_rx_defrag_push_rx_desc(msdu, rx_desc_old_position,
				  ind_old_position, rx_desc_len);
}
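/*
 * Pull-head arithmetic above, by example (illustrative, assuming a
 * 24-byte 802.11 header, an 8-byte LLC/SNAP header, and a 14-byte
 * Ethernet header): 24 + 8 - 14 = 18 bytes are pulled (plus any HL rx
 * descriptor), leaving exactly enough room in front of the payload for
 * the reconstructed Ethernet header.
 */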
/*
 * Handling QoS for defragmentation
 */
void
ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
		       qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	struct ieee80211_frame *wh;
	uint16_t qoslen;
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	ol_rx_frag_desc_adjust(pdev,
			       nbuf,
			       &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);

	wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
		qoslen = sizeof(struct ieee80211_qoscntl);
		/* A QoS frame with the order bit set indicates an HTC frame */
		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
			qoslen += sizeof(struct ieee80211_htc);

		/* remove the QoS field from the header */
		hdrlen -= qoslen;
		qdf_mem_move((uint8_t *)wh + qoslen, wh, hdrlen);
		wh = (struct ieee80211_frame *)qdf_nbuf_pull_head(nbuf,
								  rx_desc_len +
								  qoslen);
		/* clear the QoS bit */
		/*
		 * KW# 6154 'qdf_nbuf_pull_head' in turn calls
		 * __qdf_nbuf_pull_head,
		 * which returns NULL if there is not sufficient data to pull.
		 * It's guaranteed that qdf_nbuf_pull_head will succeed rather
		 * than returning NULL, since the entire rx frame is already
		 * present in the rx buffer.
		 * However, to make it obvious to static analyzers that this
		 * code is safe, add an explicit check that qdf_nbuf_pull_head
		 * returns a non-NULL value.
		 * Since this part of the code is not performance-critical,
		 * adding this explicit check is okay.
		 */
		if (wh)
			wh->i_fc[0] &= ~QDF_IEEE80211_FC0_SUBTYPE_QOS;

		ol_rx_defrag_push_rx_desc(nbuf, rx_desc_old_position,
					  ind_old_position, rx_desc_len);
	}
}