/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * DOC: wma_data.c
 * This file contains tx/rx and data path related functions.
 */

/* Header files */

#include "wma.h"
#include "wma_api.h"
#include "cds_api.h"
#include "wmi_unified_api.h"
#include "wlan_qct_sys.h"
#include "wni_api.h"
#include "ani_global.h"
#include "wmi_unified.h"
#include "wni_cfg.h"
#include "cfg_api.h"
#include "ol_txrx_ctrl_api.h"
#include <cdp_txrx_tx_throttle.h>
#if defined(CONFIG_HL_SUPPORT)
#include "wlan_tgt_def_config_hl.h"
#else
#include "wlan_tgt_def_config.h"
#endif
#include "ol_txrx.h"
#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "qdf_mem.h"
#include "ol_txrx_peer_find.h"

#include "wma_types.h"
#include "lim_api.h"
#include "lim_session_utils.h"

#include "cds_utils.h"

#if !defined(REMOVE_PKT_LOG)
#include "pktlog_ac.h"
#endif /* REMOVE_PKT_LOG */

#include "dbglog_host.h"
#include "csr_api.h"
#include "ol_fw.h"

#include "dfs.h"
#include "wma_internal.h"
#include "cdp_txrx_flow_ctrl_legacy.h"
#include "cdp_txrx_cmn.h"
#include "cdp_txrx_misc.h"
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_cfg.h>
#include "cdp_txrx_stats.h"

typedef struct {
	int32_t rate;
	uint8_t flag;
} wma_search_rate_t;

#define WMA_MAX_OFDM_CCK_RATE_TBL_SIZE 12
/* In ofdm_cck_rate_tbl->flag, if bit 7 is 1 it's CCK, otherwise it's OFDM.
 * The lower bits carry the OFDM/CCK index used to encode the rate.
 */
static wma_search_rate_t ofdm_cck_rate_tbl[WMA_MAX_OFDM_CCK_RATE_TBL_SIZE] = {
	{540, 4},               /* 4: OFDM 54 Mbps */
	{480, 0},               /* 0: OFDM 48 Mbps */
	{360, 5},               /* 5: OFDM 36 Mbps */
	{240, 1},               /* 1: OFDM 24 Mbps */
	{180, 6},               /* 6: OFDM 18 Mbps */
	{120, 2},               /* 2: OFDM 12 Mbps */
	{110, (1 << 7)},        /* 0: CCK 11 Mbps Long */
	{90, 7},                /* 7: OFDM 9 Mbps */
	{60, 3},                /* 3: OFDM 6 Mbps */
	{55, ((1 << 7) | 1)},   /* 1: CCK 5.5 Mbps Long */
	{20, ((1 << 7) | 2)},   /* 2: CCK 2 Mbps Long */
	{10, ((1 << 7) | 3)}    /* 3: CCK 1 Mbps Long */
};

#define WMA_MAX_VHT20_RATE_TBL_SIZE 9
/* In vht20_400ns_rate_tbl flag carries the mcs index for encoding the rate */
static wma_search_rate_t vht20_400ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{867, 8},               /* MCS8 1SS short GI */
	{722, 7},               /* MCS7 1SS short GI */
	{650, 6},               /* MCS6 1SS short GI */
	{578, 5},               /* MCS5 1SS short GI */
	{433, 4},               /* MCS4 1SS short GI */
	{289, 3},               /* MCS3 1SS short GI */
	{217, 2},               /* MCS2 1SS short GI */
	{144, 1},               /* MCS1 1SS short GI */
	{72, 0}                 /* MCS0 1SS short GI */
};

/* In vht20_800ns_rate_tbl flag carries the mcs index for encoding the rate */
static wma_search_rate_t vht20_800ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{780, 8},               /* MCS8 1SS long GI */
	{650, 7},               /* MCS7 1SS long GI */
	{585, 6},               /* MCS6 1SS long GI */
	{520, 5},               /* MCS5 1SS long GI */
	{390, 4},               /* MCS4 1SS long GI */
	{260, 3},               /* MCS3 1SS long GI */
	{195, 2},               /* MCS2 1SS long GI */
	{130, 1},               /* MCS1 1SS long GI */
	{65, 0}                 /* MCS0 1SS long GI */
};

#define WMA_MAX_VHT40_RATE_TBL_SIZE 10
/* In vht40_400ns_rate_tbl flag carries the mcs index for encoding the rate */
static wma_search_rate_t vht40_400ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{2000, 9},              /* MCS9 1SS short GI */
	{1800, 8},              /* MCS8 1SS short GI */
	{1500, 7},              /* MCS7 1SS short GI */
	{1350, 6},              /* MCS6 1SS short GI */
	{1200, 5},              /* MCS5 1SS short GI */
	{900, 4},               /* MCS4 1SS short GI */
	{600, 3},               /* MCS3 1SS short GI */
	{450, 2},               /* MCS2 1SS short GI */
	{300, 1},               /* MCS1 1SS short GI */
	{150, 0},               /* MCS0 1SS short GI */
};

static wma_search_rate_t vht40_800ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{1800, 9},              /* MCS9 1SS long GI */
	{1620, 8},              /* MCS8 1SS long GI */
	{1350, 7},              /* MCS7 1SS long GI */
	{1215, 6},              /* MCS6 1SS long GI */
	{1080, 5},              /* MCS5 1SS long GI */
	{810, 4},               /* MCS4 1SS long GI */
	{540, 3},               /* MCS3 1SS long GI */
	{405, 2},               /* MCS2 1SS long GI */
	{270, 1},               /* MCS1 1SS long GI */
	{135, 0}                /* MCS0 1SS long GI */
};

#define WMA_MAX_VHT80_RATE_TBL_SIZE 10
static wma_search_rate_t vht80_400ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{4333, 9},              /* MCS9 1SS short GI */
	{3900, 8},              /* MCS8 1SS short GI */
	{3250, 7},              /* MCS7 1SS short GI */
	{2925, 6},              /* MCS6 1SS short GI */
	{2600, 5},              /* MCS5 1SS short GI */
	{1950, 4},              /* MCS4 1SS short GI */
	{1300, 3},              /* MCS3 1SS short GI */
	{975, 2},               /* MCS2 1SS short GI */
	{650, 1},               /* MCS1 1SS short GI */
	{325, 0}                /* MCS0 1SS short GI */
};

static wma_search_rate_t vht80_800ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{3900, 9},              /* MCS9 1SS long GI */
	{3510, 8},              /* MCS8 1SS long GI */
	{2925, 7},              /* MCS7 1SS long GI */
	{2633, 6},              /* MCS6 1SS long GI */
	{2340, 5},              /* MCS5 1SS long GI */
	{1755, 4},              /* MCS4 1SS long GI */
	{1170, 3},              /* MCS3 1SS long GI */
	{878, 2},               /* MCS2 1SS long GI */
	{585, 1},               /* MCS1 1SS long GI */
	{293, 0}                /* MCS0 1SS long GI */
};
#define WMA_MAX_HT20_RATE_TBL_SIZE 8
static wma_search_rate_t ht20_400ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{722, 7},               /* MCS7 1SS short GI */
	{650, 6},               /* MCS6 1SS short GI */
	{578, 5},               /* MCS5 1SS short GI */
	{433, 4},               /* MCS4 1SS short GI */
	{289, 3},               /* MCS3 1SS short GI */
	{217, 2},               /* MCS2 1SS short GI */
	{144, 1},               /* MCS1 1SS short GI */
	{72, 0}                 /* MCS0 1SS short GI */
};

static wma_search_rate_t ht20_800ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{650, 7},               /* MCS7 1SS long GI */
	{585, 6},               /* MCS6 1SS long GI */
	{520, 5},               /* MCS5 1SS long GI */
	{390, 4},               /* MCS4 1SS long GI */
	{260, 3},               /* MCS3 1SS long GI */
	{195, 2},               /* MCS2 1SS long GI */
	{130, 1},               /* MCS1 1SS long GI */
	{65, 0}                 /* MCS0 1SS long GI */
};

#define WMA_MAX_HT40_RATE_TBL_SIZE 8
static wma_search_rate_t ht40_400ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1500, 7},              /* MCS7 1SS short GI */
	{1350, 6},              /* MCS6 1SS short GI */
	{1200, 5},              /* MCS5 1SS short GI */
	{900, 4},               /* MCS4 1SS short GI */
	{600, 3},               /* MCS3 1SS short GI */
	{450, 2},               /* MCS2 1SS short GI */
	{300, 1},               /* MCS1 1SS short GI */
	{150, 0}                /* MCS0 1SS short GI */
};

static wma_search_rate_t ht40_800ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1350, 7},              /* MCS7 1SS long GI */
	{1215, 6},              /* MCS6 1SS long GI */
	{1080, 5},              /* MCS5 1SS long GI */
	{810, 4},               /* MCS4 1SS long GI */
	{540, 3},               /* MCS3 1SS long GI */
	{405, 2},               /* MCS2 1SS long GI */
	{270, 1},               /* MCS1 1SS long GI */
	{135, 0}                /* MCS0 1SS long GI */
};

/**
 * wma_bin_search_rate() - binary search function to find rate
 * @tbl: rate table
 * @tbl_size: table size
 * @mbpsx10_rate: in/out rate in Mbps x 10; on return holds the matched
 *	table rate
 * @ret_flag: returns the flag of the matched table entry
 *
 * Return: none
 */
static void wma_bin_search_rate(wma_search_rate_t *tbl, int32_t tbl_size,
				int32_t *mbpsx10_rate, uint8_t *ret_flag)
{
	int32_t upper, lower, mid;

	/* the table is sorted in descending order: the top index holds the
	 * largest value and the bottom index holds the smallest value
	 */

	upper = 0;              /* index 0 */
	lower = tbl_size - 1;   /* last index */

	if (*mbpsx10_rate >= tbl[upper].rate) {
		/* use the largest rate */
		*mbpsx10_rate = tbl[upper].rate;
		*ret_flag = tbl[upper].flag;
		return;
	} else if (*mbpsx10_rate <= tbl[lower].rate) {
		/* use the smallest rate */
		*mbpsx10_rate = tbl[lower].rate;
		*ret_flag = tbl[lower].flag;
		return;
	}
	/* now do a binary search to get the floor value */
	while (lower - upper > 1) {
		mid = (upper + lower) >> 1;
		if (*mbpsx10_rate == tbl[mid].rate) {
			/* found the exact match */
			*mbpsx10_rate = tbl[mid].rate;
			*ret_flag = tbl[mid].flag;
			return;
		} else {
			/* not found: if the input is larger than mid's rate,
			 * move lower to mid; otherwise move upper to mid
			 */
			if (*mbpsx10_rate > tbl[mid].rate)
				lower = mid;
			else
				upper = mid;
		}
	}
	/* after the binary search, upper indexes the ceiling of the rate */
	*mbpsx10_rate = tbl[upper].rate;
	*ret_flag = tbl[upper].flag;
	return;
}
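/*
 * A minimal usage sketch of wma_bin_search_rate(), kept under #if 0 so it
 * is never built; the result values below were traced by hand against
 * ht20_800ns_rate_tbl and are illustrative only.
 */
#if 0
static void wma_bin_search_rate_example(void)
{
	int32_t mbpsx10 = 300;  /* request 30 Mbps (units are Mbps x 10) */
	uint8_t mcs = 0;

	wma_bin_search_rate(ht20_800ns_rate_tbl, WMA_MAX_HT20_RATE_TBL_SIZE,
			    &mbpsx10, &mcs);
	/* 300 falls between {390, 4} and {260, 3}; the search returns the
	 * ceiling entry, so mbpsx10 is now 390 and mcs is 4
	 */
}
#endif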
/**
 * wma_fill_ofdm_cck_mcast_rate() - fill ofdm cck mcast rate
 * @mbpsx10_rate: mbps rates
 * @nss: nss
 * @rate: rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_ofdm_cck_mcast_rate(int32_t mbpsx10_rate,
					       uint8_t nss, uint8_t *rate)
{
	uint8_t idx = 0;

	wma_bin_search_rate(ofdm_cck_rate_tbl, WMA_MAX_OFDM_CCK_RATE_TBL_SIZE,
			    &mbpsx10_rate, &idx);

	/* if bit 7 is set it uses CCK */
	if (idx & 0x80)
		*rate |= (1 << 6) | (idx & 0xF); /* set bit 6 to 1 for CCK */
	else
		*rate |= (idx & 0xF);
	return QDF_STATUS_SUCCESS;
}
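/*
 * Illustrative encoding (values traced by hand from ofdm_cck_rate_tbl):
 * a request of 240 (24 Mbps) matches the OFDM entry {240, 1} and ORs 0x01
 * into *rate, while 110 (CCK 11 Mbps) matches {110, 1 << 7}; bit 7 of the
 * flag turns into bit 6 of the encoded rate, giving 0x40.
 */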
/**
 * wma_set_ht_vht_mcast_rate() - set ht/vht mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: mbps rates
 * @sgi_idx: shortgi index
 * @sgi_rate: shortgi rate
 * @lgi_idx: longgi index
 * @lgi_rate: longgi rate
 * @premable: preamble
 * @rate: rate
 * @streaming_rate: streaming rate
 *
 * Return: none
 */
static void wma_set_ht_vht_mcast_rate(uint32_t shortgi, int32_t mbpsx10_rate,
				      uint8_t sgi_idx, int32_t sgi_rate,
				      uint8_t lgi_idx, int32_t lgi_rate,
				      uint8_t premable, uint8_t *rate,
				      int32_t *streaming_rate)
{
	if (shortgi == 0) {
		*rate |= (premable << 6) | (lgi_idx & 0xF);
		*streaming_rate = lgi_rate;
	} else {
		*rate |= (premable << 6) | (sgi_idx & 0xF);
		*streaming_rate = sgi_rate;
	}
}

/**
 * wma_fill_ht20_mcast_rate() - fill ht20 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: mbps rates
 * @nss: nss
 * @rate: rate
 * @streaming_rate: streaming rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_ht20_mcast_rate(uint32_t shortgi,
					   int32_t mbpsx10_rate, uint8_t nss,
					   uint8_t *rate,
					   int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(ht20_400ns_rate_tbl,
				    WMA_MAX_HT20_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(ht20_800ns_rate_tbl,
				    WMA_MAX_HT20_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
				  lgi_idx, lgi_rate, 2, rate, streaming_rate);
	if (nss == 1)
		*streaming_rate = *streaming_rate << 1;
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fill_ht40_mcast_rate() - fill ht40 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: mbps rates
 * @nss: nss
 * @rate: rate
 * @streaming_rate: streaming rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_ht40_mcast_rate(uint32_t shortgi,
					   int32_t mbpsx10_rate, uint8_t nss,
					   uint8_t *rate,
					   int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	/* for 2x2 divide the rate by 2 */
	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(ht40_400ns_rate_tbl,
				    WMA_MAX_HT40_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(ht40_800ns_rate_tbl,
				    WMA_MAX_HT40_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
				  lgi_idx, lgi_rate, 2, rate, streaming_rate);

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fill_vht20_mcast_rate() - fill vht20 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: mbps rates
 * @nss: nss
 * @rate: rate
 * @streaming_rate: streaming rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_vht20_mcast_rate(uint32_t shortgi,
					    int32_t mbpsx10_rate, uint8_t nss,
					    uint8_t *rate,
					    int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	/* for 2x2 divide the rate by 2 */
	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(vht20_400ns_rate_tbl,
				    WMA_MAX_VHT20_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(vht20_800ns_rate_tbl,
				    WMA_MAX_VHT20_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
				  lgi_idx, lgi_rate, 3, rate, streaming_rate);
	if (nss == 1)
		*streaming_rate = *streaming_rate << 1;
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fill_vht40_mcast_rate() - fill vht40 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: mbps rates
 * @nss: nss
 * @rate: rate
 * @streaming_rate: streaming rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_vht40_mcast_rate(uint32_t shortgi,
					    int32_t mbpsx10_rate, uint8_t nss,
					    uint8_t *rate,
					    int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	/* for 2x2 divide the rate by 2 */
	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(vht40_400ns_rate_tbl,
				    WMA_MAX_VHT40_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(vht40_800ns_rate_tbl,
				    WMA_MAX_VHT40_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate,
				  sgi_idx, sgi_rate, lgi_idx, lgi_rate,
				  3, rate, streaming_rate);
	if (nss == 1)
		*streaming_rate = *streaming_rate << 1;
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fill_vht80_mcast_rate() - fill vht80 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: mbps rates
 * @nss: nss
 * @rate: rate
 * @streaming_rate: streaming rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_vht80_mcast_rate(uint32_t shortgi,
					    int32_t mbpsx10_rate, uint8_t nss,
					    uint8_t *rate,
					    int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	/* for 2x2 divide the rate by 2 */
	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(vht80_400ns_rate_tbl,
				    WMA_MAX_VHT80_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(vht80_800ns_rate_tbl,
				    WMA_MAX_VHT80_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
				  lgi_idx, lgi_rate, 3, rate, streaming_rate);
	if (nss == 1)
		*streaming_rate = *streaming_rate << 1;
	return QDF_STATUS_SUCCESS;
}
/**
 * wma_fill_ht_mcast_rate() - fill ht mcast rate
 * @shortgi: short guard interval
 * @chwidth: channel width
 * @mbpsx10_rate: mbps rates
 * @nss: nss
 * @chanmode: channel mode
 * @rate: rate
 * @streaming_rate: streaming rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_ht_mcast_rate(uint32_t shortgi,
					 uint32_t chwidth, int32_t mbpsx10_rate,
					 uint8_t nss, WLAN_PHY_MODE chanmode,
					 uint8_t *rate,
					 int32_t *streaming_rate)
{
	int32_t ret = 0;

	*streaming_rate = 0;
	if (chwidth == 0)
		ret = wma_fill_ht20_mcast_rate(shortgi, mbpsx10_rate,
					       nss, rate, streaming_rate);
	else if (chwidth == 1)
		ret = wma_fill_ht40_mcast_rate(shortgi, mbpsx10_rate,
					       nss, rate, streaming_rate);
	else
		WMA_LOGE("%s: Error, Invalid chwidth enum %d", __func__,
			 chwidth);
	return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
}

/**
 * wma_fill_vht_mcast_rate() - fill vht mcast rate
 * @shortgi: short guard interval
 * @chwidth: channel width
 * @mbpsx10_rate: mbps rates
 * @nss: nss
 * @chanmode: channel mode
 * @rate: rate
 * @streaming_rate: streaming rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_vht_mcast_rate(uint32_t shortgi,
					  uint32_t chwidth,
					  int32_t mbpsx10_rate, uint8_t nss,
					  WLAN_PHY_MODE chanmode,
					  uint8_t *rate,
					  int32_t *streaming_rate)
{
	int32_t ret = 0;

	*streaming_rate = 0;
	if (chwidth == 0)
		ret = wma_fill_vht20_mcast_rate(shortgi, mbpsx10_rate, nss,
						rate, streaming_rate);
	else if (chwidth == 1)
		ret = wma_fill_vht40_mcast_rate(shortgi, mbpsx10_rate, nss,
						rate, streaming_rate);
	else if (chwidth == 2)
		ret = wma_fill_vht80_mcast_rate(shortgi, mbpsx10_rate, nss,
						rate, streaming_rate);
	else
		WMA_LOGE("%s: chwidth enum %d not supported",
			 __func__, chwidth);
	return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
}
#define WMA_MCAST_1X1_CUT_OFF_RATE 2000
/**
 * wma_encode_mc_rate() - fill mc rates
 * @shortgi: short guard interval
 * @chwidth: channel width
 * @chanmode: channel mode
 * @mhz: frequency
 * @mbpsx10_rate: mbps rates
 * @nss: nss
 * @rate: rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_encode_mc_rate(uint32_t shortgi, uint32_t chwidth,
				     WLAN_PHY_MODE chanmode, A_UINT32 mhz,
				     int32_t mbpsx10_rate, uint8_t nss,
				     uint8_t *rate)
{
	int32_t ret = 0;

	/* nss input value: 0 - 1x1; 1 - 2x2; 2 - 3x3
	 * the phymode selection is based on the following assumptions:
	 * (1) if the app specifically requested 1x1 or 2x2 we honor it
	 * (2) if mbpsx10_rate <= 540: always use BG
	 * (3) 540 < mbpsx10_rate <= 2000: use 1x1 HT/VHT
	 * (4) 2000 < mbpsx10_rate: use 2x2 HT/VHT
	 */
	WMA_LOGE("%s: Input: nss = %d, chanmode = %d, "
		 "mbpsx10 = 0x%x, chwidth = %d, shortgi = %d",
		 __func__, nss, chanmode, mbpsx10_rate, chwidth, shortgi);
	if ((mbpsx10_rate & 0x40000000) && nss > 0) {
		/* bit 30 indicates user input nss,
		 * bits 28 and 29 encode the nss
		 */
		uint8_t user_nss = (mbpsx10_rate & 0x30000000) >> 28;

		nss = (user_nss < nss) ? user_nss : nss;
		/* zero out bits 28 - 30 to recover the actual rate */
		mbpsx10_rate &= ~0x70000000;
	} else if (mbpsx10_rate <= WMA_MCAST_1X1_CUT_OFF_RATE) {
		/* if the input rate is less than or equal to the
		 * 1x1 cutoff rate we use 1x1 only
		 */
		nss = 0;
	}
	/* encode NSS bits (bit 4, bit 5) */
	*rate = (nss & 0x3) << 4;
	/* if the mcast input rate exceeds the ofdm/cck max rate of 54 Mbps
	 * we try to choose the best ht/vht mcs rate
	 */
	if (540 < mbpsx10_rate) {
		/* cannot use ofdm/cck, choose closest ht/vht mcs rate */
		uint8_t rate_ht = *rate;
		uint8_t rate_vht = *rate;
		int32_t stream_rate_ht = 0;
		int32_t stream_rate_vht = 0;
		int32_t stream_rate = 0;

		ret = wma_fill_ht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					     nss, chanmode, &rate_ht,
					     &stream_rate_ht);
		if (ret != QDF_STATUS_SUCCESS)
			stream_rate_ht = 0;
		if (mhz < WMA_2_4_GHZ_MAX_FREQ) {
			/* in the 2.4 GHz band (not 5 GHz), use the HT rate */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		/* capable of 11AC mcast, so also search the vht tables */
		ret = wma_fill_vht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					      nss, chanmode, &rate_vht,
					      &stream_rate_vht);
		if (ret != QDF_STATUS_SUCCESS) {
			if (stream_rate_ht != 0)
				ret = QDF_STATUS_SUCCESS;
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		if (stream_rate_ht == 0) {
			/* only the vht rate is available */
			*rate = rate_vht;
			stream_rate = stream_rate_vht;
		} else {
			/* set ht as the default first */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			if (stream_rate < mbpsx10_rate) {
				if (mbpsx10_rate <= stream_rate_vht ||
				    stream_rate < stream_rate_vht) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			} else {
				if (stream_rate_vht >= mbpsx10_rate &&
				    stream_rate_vht < stream_rate) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			}
		}
ht_vht_done:
		WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, "
			 "freq = %d, input_rate = %d, chwidth = %d "
			 "rate = 0x%x, streaming_rate = %d",
			 __func__, nss, chanmode, mhz,
			 mbpsx10_rate, chwidth, *rate, stream_rate);
	} else {
		if (mbpsx10_rate > 0)
			ret = wma_fill_ofdm_cck_mcast_rate(mbpsx10_rate,
							   nss, rate);
		else
			*rate = 0xFF;

		WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, "
			 "input_rate = %d, rate = 0x%x",
			 __func__, nss, chanmode, mbpsx10_rate, *rate);
	}
	return ret;
}
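/*
 * Worked example (illustrative only, values traced by hand): a 2.4 GHz
 * request of mbpsx10_rate = 1200 (120 Mbps) with long GI on a 20 MHz
 * channel is at or below the 1x1 cutoff (2000), so nss is forced to 0.
 * Since 1200 > 540 the HT path is taken; 1200 exceeds the top HT20
 * long-GI entry, so {650, 7} is selected and the encoded byte becomes
 * (2 << 6) | 7 = 0x87: HT preamble in bits 6-7, 1SS (00) in bits 4-5,
 * MCS7 in bits 0-3.
 */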
/**
 * wma_set_bss_rate_flags() - set rate flags based on BSS capability
 * @iface: txrx_node ctx
 * @add_bss: add_bss params
 *
 * Return: none
 */
void wma_set_bss_rate_flags(struct wma_txrx_node *iface,
			    tpAddBssParams add_bss)
{
	iface->rate_flags = 0;

	if (add_bss->vhtCapable) {
		if (add_bss->ch_width == CH_WIDTH_80P80MHZ)
			iface->rate_flags |= eHAL_TX_RATE_VHT80;
		if (add_bss->ch_width == CH_WIDTH_160MHZ)
			iface->rate_flags |= eHAL_TX_RATE_VHT80;
		if (add_bss->ch_width == CH_WIDTH_80MHZ)
			iface->rate_flags |= eHAL_TX_RATE_VHT80;
		else if (add_bss->ch_width)
			iface->rate_flags |= eHAL_TX_RATE_VHT40;
		else
			iface->rate_flags |= eHAL_TX_RATE_VHT20;
	}
	/* avoid conflict with the htCapable flag */
	else if (add_bss->htCapable) {
		if (add_bss->ch_width)
			iface->rate_flags |= eHAL_TX_RATE_HT40;
		else
			iface->rate_flags |= eHAL_TX_RATE_HT20;
	}

	if (add_bss->staContext.fShortGI20Mhz ||
	    add_bss->staContext.fShortGI40Mhz)
		iface->rate_flags |= eHAL_TX_RATE_SGI;

	if (!add_bss->htCapable && !add_bss->vhtCapable)
		iface->rate_flags = eHAL_TX_RATE_LEGACY;
}
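/*
 * Illustrative only: an 80 MHz VHT BSS whose STA context advertises
 * fShortGI40Mhz ends up with rate_flags = eHAL_TX_RATE_VHT80 |
 * eHAL_TX_RATE_SGI, while a BSS that is neither htCapable nor vhtCapable
 * is forced to eHAL_TX_RATE_LEGACY.
 */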
/**
 * wmi_unified_send_txbf() - set txbf parameter to fw
 * @wma: wma handle
 * @params: txbf parameters
 *
 * Return: 0 for success or error code
 */
int32_t wmi_unified_send_txbf(tp_wma_handle wma, tpAddStaParams params)
{
	wmi_vdev_txbf_en txbf_en;

	/* These are set when the other partner is a beamformer
	 * and we are a capable beamformee (enabled both in ini and fw)
	 */
	txbf_en.sutxbfee = params->vhtTxBFCapable;
	txbf_en.mutxbfee = params->vhtTxMUBformeeCapable;
	txbf_en.sutxbfer = params->enable_su_tx_bformer;
	txbf_en.mutxbfer = 0;

	/* When MU TxBfee is set, SU TxBfee must be set by default */
	if (txbf_en.mutxbfee)
		txbf_en.sutxbfee = txbf_en.mutxbfee;

	WMA_LOGD("txbf_en.sutxbfee %d txbf_en.mutxbfee %d, sutxbfer %d",
		 txbf_en.sutxbfee, txbf_en.mutxbfee, txbf_en.sutxbfer);

	return wma_vdev_set_param(wma->wmi_handle,
				  params->smesessionId,
				  WMI_VDEV_PARAM_TXBF,
				  *((A_UINT8 *) &txbf_en));
}

/**
 * wma_data_tx_ack_work_handler() - process data tx ack
 * @ack_work: work structure
 *
 * Return: none
 */
static void wma_data_tx_ack_work_handler(void *ack_work)
{
	struct wma_tx_ack_work_ctx *work;
	tp_wma_handle wma_handle;
	pWMAAckFnTxComp ack_cb;

	if (cds_is_load_or_unload_in_progress()) {
		WMA_LOGE("%s: Driver load/unload in progress", __func__);
		return;
	}

	work = (struct wma_tx_ack_work_ctx *)ack_work;

	wma_handle = work->wma_handle;
	ack_cb = wma_handle->umac_data_ota_ack_cb;

	if (work->status)
		WMA_LOGE("Data Tx Ack Cb Status %d", work->status);
	else
		WMA_LOGD("Data Tx Ack Cb Status %d", work->status);

	/* Call the Ack Cb registered by UMAC */
	if (ack_cb)
		ack_cb((tpAniSirGlobal) (wma_handle->mac_context),
		       work->status ? 0 : 1);
	else
		WMA_LOGE("Data Tx Ack Cb is NULL");

	wma_handle->umac_data_ota_ack_cb = NULL;
	wma_handle->last_umac_data_nbuf = NULL;
	qdf_mem_free(work);
	wma_handle->ack_work_ctx = NULL;
}

/**
 * wma_data_tx_ack_comp_hdlr() - handles tx data ack completion
 * @wma_context: context with which the handler is registered
 * @netbuf: tx data nbuf
 * @status: status of tx completion
 *
 * This is the cb registered with TxRx for
 * Ack Complete.
 *
 * Return: none
 */
void
wma_data_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
{
	ol_txrx_pdev_handle pdev;
	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;

	if (NULL == wma_handle) {
		WMA_LOGE("%s: Invalid WMA Handle", __func__);
		return;
	}

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (NULL == pdev) {
		WMA_LOGE("%s: Failed to get pdev", __func__);
		return;
	}

	/*
	 * if netbuf does not match the pending nbuf then just free the
	 * netbuf and do not call the ack cb
	 */
	if (wma_handle->last_umac_data_nbuf != netbuf) {
		if (wma_handle->umac_data_ota_ack_cb) {
			WMA_LOGE("%s: nbuf does not match but umac_data_ota_ack_cb is not null",
				 __func__);
		} else {
			WMA_LOGE("%s: nbuf does not match and umac_data_ota_ack_cb is also null",
				 __func__);
		}
		goto free_nbuf;
	}

	if (wma_handle && wma_handle->umac_data_ota_ack_cb) {
		struct wma_tx_ack_work_ctx *ack_work;

		ack_work = qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));
		wma_handle->ack_work_ctx = ack_work;
		if (ack_work) {
			ack_work->wma_handle = wma_handle;
			ack_work->sub_type = 0;
			ack_work->status = status;

			qdf_create_work(0, &ack_work->ack_cmp_work,
					wma_data_tx_ack_work_handler,
					ack_work);
			qdf_sched_work(0, &ack_work->ack_cmp_work);
		}
	}

free_nbuf:
	/* unmap and free the tx buf as txrx is not taking care of it */
	qdf_nbuf_unmap_single(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(netbuf);
}
/**
 * wma_update_txrx_chainmask() - update txrx chainmask
 * @num_rf_chains: number of rf chains
 * @cmd_value: command value
 *
 * Return: none
 */
void wma_update_txrx_chainmask(int num_rf_chains, int *cmd_value)
{
	if (*cmd_value > WMA_MAX_RF_CHAINS(num_rf_chains)) {
		WMA_LOGE("%s: Chainmask value exceeds the maximum"
			 " supported range, setting it to the"
			 " maximum value. Requested value %d"
			 " Updated value %d", __func__, *cmd_value,
			 WMA_MAX_RF_CHAINS(num_rf_chains));
		*cmd_value = WMA_MAX_RF_CHAINS(num_rf_chains);
	} else if (*cmd_value < WMA_MIN_RF_CHAINS) {
		WMA_LOGE("%s: Chainmask value is less than the minimum"
			 " supported range, setting it to the"
			 " minimum value. Requested value %d"
			 " Updated value %d", __func__, *cmd_value,
			 WMA_MIN_RF_CHAINS);
		*cmd_value = WMA_MIN_RF_CHAINS;
	}
}

/**
 * wma_peer_state_change_event_handler() - peer state change event handler
 * @handle: wma handle
 * @event_buff: event buffer
 * @len: length of buffer
 *
 * This event handler unpauses the vdev if the peer state changes to
 * AUTHORIZED STATE.
 *
 * Return: 0 for success or error code
 */
int wma_peer_state_change_event_handler(void *handle,
					uint8_t *event_buff,
					uint32_t len)
{
	WMI_PEER_STATE_EVENTID_param_tlvs *param_buf;
	wmi_peer_state_event_fixed_param *event;
	ol_txrx_vdev_handle vdev;
	tp_wma_handle wma_handle = (tp_wma_handle) handle;

	if (!event_buff) {
		WMA_LOGE("%s: Received NULL event ptr from FW", __func__);
		return -EINVAL;
	}
	param_buf = (WMI_PEER_STATE_EVENTID_param_tlvs *) event_buff;
	if (!param_buf) {
		WMA_LOGE("%s: Received NULL buf ptr from FW", __func__);
		return -ENOMEM;
	}

	event = param_buf->fixed_param;
	vdev = wma_find_vdev_by_id(wma_handle, event->vdev_id);
	if (NULL == vdev) {
		WMA_LOGP("%s: Couldn't find vdev for vdev_id: %d",
			 __func__, event->vdev_id);
		return -EINVAL;
	}

	if (ol_txrx_get_opmode(vdev) == wlan_op_mode_sta
	    && event->state == WMI_PEER_STATE_AUTHORIZED) {
		/*
		 * set event so that hdd
		 * can proceed and unpause the tx queue
		 */
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
		if (!wma_handle->peer_authorized_cb) {
			WMA_LOGE("%s: peer authorized cb not registered",
				 __func__);
			return -EINVAL;
		}
		wma_handle->peer_authorized_cb(event->vdev_id);
#endif
	}

	return 0;
}

/**
 * wma_set_enable_disable_mcc_adaptive_scheduler() - enable/disable mcc scheduler
 * @mcc_adaptive_scheduler: enable/disable
 *
 * This function enables/disables the mcc adaptive scheduler in fw.
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
QDF_STATUS wma_set_enable_disable_mcc_adaptive_scheduler(uint32_t
							 mcc_adaptive_scheduler)
{
	tp_wma_handle wma = NULL;
	uint32_t pdev_id;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (NULL == wma) {
		WMA_LOGE("%s : Failed to get wma", __func__);
		return QDF_STATUS_E_FAULT;
	}

	/*
	 * Since there could be up to two instances of OCS in FW (one per MAC),
	 * FW provides the option of enabling and disabling MAS on a per MAC
	 * basis. But, Host does not have enable/disable option for individual
	 * MACs. So, FW agreed for the Host to send down a 'pdev id' of 0.
	 * When 'pdev id' of 0 is used, FW treats this as a SOC level command
	 * and applies the same value to both MACs. Irrespective of the value
	 * of 'WMI_SERVICE_DEPRECATED_REPLACE', the pdev id needs to be '0'
	 * (SOC level) for WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID
	 */
	pdev_id = WMI_PDEV_ID_SOC;

	return wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd(
			wma->wmi_handle, mcc_adaptive_scheduler, pdev_id);
}
/**
 * wma_set_mcc_channel_time_latency() - set MCC channel time latency
 * @wma: wma handle
 * @mcc_channel: mcc channel
 * @mcc_channel_time_latency: MCC channel time latency.
 *
 * Currently used to set the time latency for an MCC vdev/adapter using its
 * operating channel number. The info is provided at run time using the
 * iwpriv command: iwpriv <wlan0 | p2p0> setMccLatency <latency in ms>.
 *
 * Return: QDF status
 */
QDF_STATUS wma_set_mcc_channel_time_latency
	(tp_wma_handle wma,
	uint32_t mcc_channel, uint32_t mcc_channel_time_latency)
{
	uint32_t cfg_val = 0;
	struct sAniSirGlobal *pMac = NULL;
	uint32_t channel1 = mcc_channel;
	uint32_t chan1_freq = cds_chan_to_freq(channel1);

	if (!wma) {
		WMA_LOGE("%s:NULL wma ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	pMac = cds_get_context(QDF_MODULE_ID_PE);
	if (!pMac) {
		WMA_LOGE("%s:NULL pMac ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	/* First step is to confirm if MCC is active */
	if (!lim_is_in_mcc(pMac)) {
		WMA_LOGE("%s: MCC is not active. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	/* Confirm MCC adaptive scheduler feature is disabled */
	if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED,
			     &cfg_val) == eSIR_SUCCESS) {
		if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) {
			WMA_LOGD("%s: Can't set channel latency while MCC "
				 "ADAPTIVE SCHED is enabled. Exit", __func__);
			return QDF_STATUS_SUCCESS;
		}
	} else {
		WMA_LOGE("%s: Failed to get value for MCC_ADAPTIVE_SCHED, "
			 "Exit w/o setting latency", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	return wmi_unified_set_mcc_channel_time_latency_cmd(wma->wmi_handle,
						chan1_freq,
						mcc_channel_time_latency);
}
Exiting", __func__); 1132 QDF_ASSERT(0); 1133 return QDF_STATUS_E_FAILURE; 1134 } 1135 1136 /* Confirm MCC adaptive scheduler feature is disabled */ 1137 if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, 1138 &cfg_val) == eSIR_SUCCESS) { 1139 if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) { 1140 WMA_LOGD("%s: Can't set channel quota while " 1141 "MCC_ADAPTIVE_SCHED is enabled. Exit", 1142 __func__); 1143 return QDF_STATUS_SUCCESS; 1144 } 1145 } else { 1146 WMA_LOGE("%s: Failed to retrieve " 1147 "WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED. Exit", __func__); 1148 QDF_ASSERT(0); 1149 return QDF_STATUS_E_FAILURE; 1150 } 1151 1152 return wmi_unified_set_mcc_channel_time_quota_cmd(wma->wmi_handle, 1153 chan1_freq, 1154 adapter_1_quota, 1155 chan2_freq); 1156 } 1157 1158 /** 1159 * wma_set_linkstate() - set wma linkstate 1160 * @wma: wma handle 1161 * @params: link state params 1162 * 1163 * Return: none 1164 */ 1165 void wma_set_linkstate(tp_wma_handle wma, tpLinkStateParams params) 1166 { 1167 ol_txrx_pdev_handle pdev; 1168 ol_txrx_vdev_handle vdev; 1169 ol_txrx_peer_handle peer; 1170 uint8_t vdev_id, peer_id; 1171 bool roam_synch_in_progress = false; 1172 QDF_STATUS status; 1173 1174 params->status = true; 1175 WMA_LOGD("%s: state %d selfmac %pM", __func__, 1176 params->state, params->selfMacAddr); 1177 if ((params->state != eSIR_LINK_PREASSOC_STATE) && 1178 (params->state != eSIR_LINK_DOWN_STATE)) { 1179 WMA_LOGD("%s: unsupported link state %d", 1180 __func__, params->state); 1181 goto out; 1182 } 1183 1184 pdev = cds_get_context(QDF_MODULE_ID_TXRX); 1185 1186 if (NULL == pdev) { 1187 WMA_LOGE("%s: Unable to get TXRX context", __func__); 1188 goto out; 1189 } 1190 1191 vdev = wma_find_vdev_by_addr(wma, params->selfMacAddr, &vdev_id); 1192 if (!vdev) { 1193 WMA_LOGP("%s: vdev not found for addr: %pM", 1194 __func__, params->selfMacAddr); 1195 goto out; 1196 } 1197 1198 if (wma_is_vdev_in_ap_mode(wma, vdev_id)) { 1199 WMA_LOGD("%s: Ignoring set link req in ap mode", __func__); 1200 goto out; 1201 } 1202 1203 if (params->state == eSIR_LINK_PREASSOC_STATE) { 1204 if (wma_is_roam_synch_in_progress(wma, vdev_id)) 1205 roam_synch_in_progress = true; 1206 status = wma_create_peer(wma, pdev, vdev, params->bssid, 1207 WMI_PEER_TYPE_DEFAULT, vdev_id, 1208 roam_synch_in_progress); 1209 if (status != QDF_STATUS_SUCCESS) 1210 WMA_LOGE("%s: Unable to create peer", __func__); 1211 if (roam_synch_in_progress) 1212 return; 1213 } else { 1214 WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP", 1215 __func__, vdev_id); 1216 ol_txrx_vdev_pause(wma->interfaces[vdev_id].handle, 1217 OL_TXQ_PAUSE_REASON_VDEV_STOP); 1218 wma->interfaces[vdev_id].pause_bitmap |= (1 << PAUSE_TYPE_HOST); 1219 if (wmi_unified_vdev_stop_send(wma->wmi_handle, vdev_id)) { 1220 WMA_LOGP("%s: %d Failed to send vdev stop", 1221 __func__, __LINE__); 1222 } 1223 peer = ol_txrx_find_peer_by_addr(pdev, params->bssid, &peer_id); 1224 if (peer) { 1225 WMA_LOGP("%s: Deleting peer %pM vdev id %d", 1226 __func__, params->bssid, vdev_id); 1227 wma_remove_peer(wma, params->bssid, vdev_id, peer, 1228 roam_synch_in_progress); 1229 } 1230 } 1231 out: 1232 wma_send_msg(wma, WMA_SET_LINK_STATE_RSP, (void *)params, 0); 1233 } 1234 1235 /** 1236 * wma_unpause_vdev - unpause all vdev 1237 * @wma: wma handle 1238 * 1239 * unpause all vdev aftter resume/coming out of wow mode 1240 * 1241 * Return: none 1242 */ 1243 void wma_unpause_vdev(tp_wma_handle wma) 1244 { 1245 int8_t vdev_id; 1246 struct wma_txrx_node *iface; 1247 1248 for (vdev_id 
/**
 * wma_unpause_vdev() - unpause all vdevs
 * @wma: wma handle
 *
 * Unpause all vdevs after resume/coming out of wow mode.
 *
 * Return: none
 */
void wma_unpause_vdev(tp_wma_handle wma)
{
	int8_t vdev_id;
	struct wma_txrx_node *iface;

	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
		if (!wma->interfaces[vdev_id].handle)
			continue;

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
		/* When the host resumes, by default, unpause all active vdevs */
		if (wma->interfaces[vdev_id].pause_bitmap) {
			ol_txrx_vdev_unpause(wma->interfaces[vdev_id].handle,
					     0xffffffff);
			wma->interfaces[vdev_id].pause_bitmap = 0;
		}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

		iface = &wma->interfaces[vdev_id];
		iface->conn_state = false;
	}
}
/**
 * wma_process_rate_update_indicate() - rate update indication
 * @wma: wma handle
 * @pRateUpdateParams: Rate update params
 *
 * This function updates the rate and short GI interval in fw based on params
 * sent by SME.
 *
 * Return: QDF status
 */
QDF_STATUS wma_process_rate_update_indicate(tp_wma_handle wma,
					    tSirRateUpdateInd *
					    pRateUpdateParams)
{
	int32_t ret = 0;
	uint8_t vdev_id = 0;
	void *pdev;
	int32_t mbpsx10_rate = -1;
	uint32_t paramId;
	uint8_t rate = 0;
	uint32_t short_gi;
	struct wma_txrx_node *intr = wma->interfaces;
	QDF_STATUS status;

	/* Get the vdev id */
	pdev = wma_find_vdev_by_addr(wma, pRateUpdateParams->bssid.bytes,
				     &vdev_id);
	if (!pdev) {
		WMA_LOGE("vdev handle is invalid for %pM",
			 pRateUpdateParams->bssid.bytes);
		qdf_mem_free(pRateUpdateParams);
		return QDF_STATUS_E_INVAL;
	}
	short_gi = intr[vdev_id].config.shortgi;
	if (short_gi == 0)
		short_gi = (intr[vdev_id].rate_flags & eHAL_TX_RATE_SGI) ?
			   true : false;
	/* First check whether the reliable TX mcast rate is used. If not,
	 * check the bcast rate, then the mcast rate. The mcast rate is
	 * saved in mcastDataRate24GHz.
	 */
	if (pRateUpdateParams->reliableMcastDataRateTxFlag > 0) {
		mbpsx10_rate = pRateUpdateParams->reliableMcastDataRate;
		paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
		if (pRateUpdateParams->
		    reliableMcastDataRateTxFlag & eHAL_TX_RATE_SGI)
			short_gi = 1;   /* upper layer specified short GI */
	} else if (pRateUpdateParams->bcastDataRate > -1) {
		mbpsx10_rate = pRateUpdateParams->bcastDataRate;
		paramId = WMI_VDEV_PARAM_BCAST_DATA_RATE;
	} else {
		mbpsx10_rate = pRateUpdateParams->mcastDataRate24GHz;
		paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
		if (pRateUpdateParams->
		    mcastDataRate24GHzTxFlag & eHAL_TX_RATE_SGI)
			short_gi = 1;   /* upper layer specified short GI */
	}
	WMA_LOGE("%s: dev_id = %d, dev_type = %d, dev_mode = %d, "
		 "mac = %pM, config.shortgi = %d, rate_flags = 0x%x",
		 __func__, vdev_id, intr[vdev_id].type,
		 pRateUpdateParams->dev_mode, pRateUpdateParams->bssid.bytes,
		 intr[vdev_id].config.shortgi, intr[vdev_id].rate_flags);
	ret = wma_encode_mc_rate(short_gi, intr[vdev_id].config.chwidth,
				 intr[vdev_id].chanmode, intr[vdev_id].mhz,
				 mbpsx10_rate, pRateUpdateParams->nss, &rate);
	if (ret != QDF_STATUS_SUCCESS) {
		WMA_LOGE("%s: Error, Invalid input rate value", __func__);
		qdf_mem_free(pRateUpdateParams);
		return ret;
	}
	status = wma_vdev_set_param(wma->wmi_handle, vdev_id,
				    WMI_VDEV_PARAM_SGI, short_gi);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMA_LOGE("%s: Failed to Set WMI_VDEV_PARAM_SGI (%d), status = %d",
			 __func__, short_gi, status);
		qdf_mem_free(pRateUpdateParams);
		return status;
	}
	status = wma_vdev_set_param(wma->wmi_handle,
				    vdev_id, paramId, rate);
	qdf_mem_free(pRateUpdateParams);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMA_LOGE("%s: Failed to Set rate, status = %d", __func__, status);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
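/*
 * Illustrative precedence for the selection above (not a code path
 * change): with reliableMcastDataRateTxFlag = 0 and bcastDataRate = -1,
 * the 2.4 GHz multicast rate mcastDataRate24GHz is selected and pushed
 * down through WMI_VDEV_PARAM_MCAST_DATA_RATE; short GI comes from the
 * vdev config unless the chosen rate's TX flag carries eHAL_TX_RATE_SGI.
 */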
/**
 * wma_mgmt_tx_ack_work_handler() - mgmt tx ack work queue
 * @ack_work: work structure
 *
 * Return: none
 */
static void wma_mgmt_tx_ack_work_handler(void *ack_work)
{
	struct wma_tx_ack_work_ctx *work;
	tp_wma_handle wma_handle;
	pWMAAckFnTxComp ack_cb;

	if (cds_is_load_or_unload_in_progress()) {
		WMA_LOGE("%s: Driver load/unload in progress", __func__);
		return;
	}

	work = (struct wma_tx_ack_work_ctx *)ack_work;

	wma_handle = work->wma_handle;
	ack_cb = wma_handle->umac_ota_ack_cb[work->sub_type];

	WMA_LOGD("Tx Ack Cb SubType %d Status %d",
		 work->sub_type, work->status);

	/* Call the Ack Cb registered by UMAC */
	ack_cb((tpAniSirGlobal) (wma_handle->mac_context),
	       work->status ? 0 : 1);

	qdf_mem_free(work);
	wma_handle->ack_work_ctx = NULL;
}

/**
 * wma_mgmt_tx_comp_conf_ind() - Post mgmt tx complete indication to PE.
 * @wma_handle: Pointer to WMA handle
 * @sub_type: Tx mgmt frame sub type
 * @status: Mgmt frame tx status
 *
 * This function sends the mgmt completion confirmation to PE for deauth
 * and disassoc frames.
 *
 * Return: none
 */
static void
wma_mgmt_tx_comp_conf_ind(tp_wma_handle wma_handle, uint8_t sub_type,
			  int32_t status)
{
	int32_t tx_comp_status;

	tx_comp_status = status ? 0 : 1;
	if (sub_type == SIR_MAC_MGMT_DISASSOC) {
		wma_send_msg(wma_handle, WMA_DISASSOC_TX_COMP, NULL,
			     tx_comp_status);
	} else if (sub_type == SIR_MAC_MGMT_DEAUTH) {
		wma_send_msg(wma_handle, WMA_DEAUTH_TX_COMP, NULL,
			     tx_comp_status);
	}
}

/**
 * wma_mgmt_tx_ack_comp_hdlr() - handles tx ack mgmt completion
 * @wma_context: context with which the handler is registered
 * @netbuf: tx mgmt nbuf
 * @status: status of tx completion
 *
 * This is the callback registered with TxRx for
 * Ack Complete.
 *
 * Return: none
 */
static void
wma_mgmt_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
{
	tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(netbuf));
	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;

	if (wma_handle && wma_handle->umac_ota_ack_cb[pFc->subType]) {
		if ((pFc->subType == SIR_MAC_MGMT_DISASSOC) ||
		    (pFc->subType == SIR_MAC_MGMT_DEAUTH)) {
			wma_mgmt_tx_comp_conf_ind(wma_handle,
						  (uint8_t) pFc->subType,
						  status);
		} else {
			struct wma_tx_ack_work_ctx *ack_work;

			ack_work =
				qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));

			if (ack_work) {
				ack_work->wma_handle = wma_handle;
				ack_work->sub_type = pFc->subType;
				ack_work->status = status;

				qdf_create_work(0, &ack_work->ack_cmp_work,
						wma_mgmt_tx_ack_work_handler,
						ack_work);

				qdf_sched_work(0, &ack_work->ack_cmp_work);
			}
		}
	}
}

/**
 * wma_mgmt_tx_dload_comp_hldr() - handles tx mgmt completion
 * @wma_context: context with which the handler is registered
 * @netbuf: tx mgmt nbuf
 * @status: status of tx completion
 *
 * This function calls the registered download callback while sending a mgmt
 * packet.
 *
 * Return: none
 */
static void
wma_mgmt_tx_dload_comp_hldr(void *wma_context, qdf_nbuf_t netbuf,
			    int32_t status)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
	void *mac_context = wma_handle->mac_context;

	WMA_LOGD("Tx Complete Status %d", status);

	if (!wma_handle->tx_frm_download_comp_cb) {
		WMA_LOGE("Tx Complete Cb not registered by umac");
		return;
	}

	/* Call Tx Mgmt Complete Callback registered by umac */
	wma_handle->tx_frm_download_comp_cb(mac_context, netbuf, 0);

	/* Reset Callback */
	wma_handle->tx_frm_download_comp_cb = NULL;

	/* Set the Tx Mgmt Complete Event */
	qdf_status = qdf_event_set(&wma_handle->tx_frm_download_comp_event);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		WMA_LOGP("%s: Event Set failed - tx_frm_comp_event", __func__);
}
/**
 * wma_tx_attach() - attach tx related callbacks
 * @wma_handle: wma context
 *
 * Attaches the tx function with the underlying layer.
 *
 * Return: QDF status
 */
QDF_STATUS wma_tx_attach(tp_wma_handle wma_handle)
{
	/* Get the Vos Context */
	p_cds_contextType cds_handle =
		(p_cds_contextType) (wma_handle->cds_context);

	/* Get the txRx Pdev handle */
	ol_txrx_pdev_handle txrx_pdev =
		(ol_txrx_pdev_handle) (cds_handle->pdev_txrx_ctx);

	/* Register for Tx Management Frames */
	ol_txrx_mgmt_tx_cb_set(txrx_pdev, GENERIC_NODOWLOAD_ACK_COMP_INDEX,
			       NULL, wma_mgmt_tx_ack_comp_hdlr, wma_handle);

	ol_txrx_mgmt_tx_cb_set(txrx_pdev, GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX,
			       wma_mgmt_tx_dload_comp_hldr, NULL, wma_handle);

	ol_txrx_mgmt_tx_cb_set(txrx_pdev, GENERIC_DOWNLD_COMP_ACK_COMP_INDEX,
			       wma_mgmt_tx_dload_comp_hldr,
			       wma_mgmt_tx_ack_comp_hdlr, wma_handle);

	/* Store the Mac Context */
	wma_handle->mac_context = cds_handle->pMACContext;

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_tx_detach() - detach tx related callbacks
 * @wma_handle: wma context
 *
 * Deregister with TxRx for Tx Mgmt Download and Ack completion.
 *
 * Return: QDF status
 */
QDF_STATUS wma_tx_detach(tp_wma_handle wma_handle)
{
	uint32_t frame_index = 0;

	/* Get the Vos Context */
	p_cds_contextType cds_handle =
		(p_cds_contextType) (wma_handle->cds_context);

	/* Get the txRx Pdev handle */
	ol_txrx_pdev_handle txrx_pdev =
		(ol_txrx_pdev_handle) (cds_handle->pdev_txrx_ctx);

	if (txrx_pdev) {
		/* Deregister with TxRx for Tx Mgmt completion call back */
		for (frame_index = 0; frame_index < FRAME_INDEX_MAX;
		     frame_index++) {
			ol_txrx_mgmt_tx_cb_set(txrx_pdev, frame_index, NULL,
					       NULL, txrx_pdev);
		}
	}
	/* Destroy Tx Frame Complete event */
	qdf_event_destroy(&wma_handle->tx_frm_download_comp_event);

	/* Tx queue empty check event (dummy event) */
	qdf_event_destroy(&wma_handle->tx_queue_empty_event);

	/* Reset Tx Frm Callbacks */
	wma_handle->tx_frm_download_comp_cb = NULL;

	/* Reset Tx Data Frame Ack Cb */
	wma_handle->umac_data_ota_ack_cb = NULL;

	/* Reset last Tx Data Frame nbuf ptr */
	wma_handle->last_umac_data_nbuf = NULL;

	return QDF_STATUS_SUCCESS;
}
#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
	defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)

/**
 * wma_mcc_vdev_tx_pause_evt_handler() - pause event handler
 * @handle: wma handle
 * @event: event buffer
 * @len: data length
 *
 * This function handles the pause event from fw and pauses/unpauses
 * the vdev.
 *
 * Return: 0 for success or error code.
 */
int wma_mcc_vdev_tx_pause_evt_handler(void *handle, uint8_t *event,
				      uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_TX_PAUSE_EVENTID_param_tlvs *param_buf;
	wmi_tx_pause_event_fixed_param *wmi_event;
	uint8_t vdev_id;
	A_UINT32 vdev_map;

	param_buf = (WMI_TX_PAUSE_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		WMA_LOGE("Invalid tx pause event buffer");
		return -EINVAL;
	}

	if (wma_get_wow_bus_suspend(wma)) {
		WMA_LOGD(" Suspend is in progress: Pause/Unpause Tx is NoOp");
		return 0;
	}

	wmi_event = param_buf->fixed_param;
	vdev_map = wmi_event->vdev_map;
	/* FW maps the vdev ID as vdev_map = (1 << vdev_id),
	 * so the host has to unmap it back to the ID
	 */
	for (vdev_id = 0; vdev_map != 0; vdev_id++) {
		if (!(vdev_map & 0x1)) {
			/* No Vdev */
		} else {
			if (!wma->interfaces[vdev_id].handle) {
				WMA_LOGE("%s: invalid vdev ID %d", __func__,
					 vdev_id);
				/* Test Next VDEV */
				vdev_map >>= 1;
				continue;
			}

			/* PAUSE action, add bitmap */
			if (ACTION_PAUSE == wmi_event->action) {
				/*
				 * Now only support per-dev pause so it is not
				 * necessary to pause a paused queue again.
				 */
				if (!wma->interfaces[vdev_id].pause_bitmap)
					ol_txrx_vdev_pause(
						wma->interfaces[vdev_id].
						handle,
						OL_TXQ_PAUSE_REASON_FW);
				wma->interfaces[vdev_id].pause_bitmap |=
					(1 << wmi_event->pause_type);
			}
			/* UNPAUSE action, clean bitmap */
			else if (ACTION_UNPAUSE == wmi_event->action) {
				/* Handle unpause only if already paused */
				if (wma->interfaces[vdev_id].pause_bitmap) {
					wma->interfaces[vdev_id].pause_bitmap &=
						~(1 << wmi_event->pause_type);

					if (!wma->interfaces[vdev_id].
					    pause_bitmap) {
						/* PAUSE BIT MAP is cleared,
						 * UNPAUSE the VDEV
						 */
						ol_txrx_vdev_unpause(
							wma->interfaces[vdev_id]
							.handle,
							OL_TXQ_PAUSE_REASON_FW);
					}
				}
			} else {
				WMA_LOGE("Not Valid Action Type %d",
					 wmi_event->action);
			}

			WMA_LOGD
				("vdev_id %d, pause_map 0x%x, pause type %d, action %d",
				vdev_id, wma->interfaces[vdev_id].pause_bitmap,
				wmi_event->pause_type, wmi_event->action);
		}
		/* Test Next VDEV */
		vdev_map >>= 1;
	}

	return 0;
}

#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
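/*
 * Illustrative walk of the vdev_map decode above: a map of 0x5 (0b101)
 * visits vdev 0 (bit set, pause/unpause applied), vdev 1 (bit clear,
 * skipped) and vdev 2 (bit set, applied), shifting the map right once
 * per iteration until it reaches zero.
 */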
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)

/**
 * wma_set_peer_rate_report_condition() - set peer rate report condition info
 *	to firmware
 * @handle: Handle of WMA
 * @config: Bad peer configuration from SIR module
 *
 * It is a wrapper function to send WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID
 * to the firmware/target. If the command sent to the firmware fails, the
 * allocated buffer is freed.
 *
 * Return: QDF_STATUS based on values sent to firmware
 */
static
QDF_STATUS wma_set_peer_rate_report_condition(WMA_HANDLE handle,
			struct t_bad_peer_txtcl_config *config)
{
	tp_wma_handle wma_handle = (tp_wma_handle)handle;
	struct wmi_peer_rate_report_params rate_report_params = {0};
	u_int32_t i, j;

	rate_report_params.rate_report_enable = config->enable;
	rate_report_params.backoff_time = config->tgt_backoff;
	rate_report_params.timer_period = config->tgt_report_prd;
	for (i = 0; i < WMI_PEER_RATE_REPORT_COND_MAX_NUM; i++) {
		rate_report_params.report_per_phy[i].cond_flags =
			config->threshold[i].cond;
		rate_report_params.report_per_phy[i].delta.delta_min =
			config->threshold[i].delta;
		rate_report_params.report_per_phy[i].delta.percent =
			config->threshold[i].percentage;
		for (j = 0; j < WMI_MAX_NUM_OF_RATE_THRESH; j++) {
			rate_report_params.report_per_phy[i].
				report_rate_threshold[j] =
					config->threshold[i].thresh[j];
		}
	}

	return wmi_unified_peer_rate_report_cmd(wma_handle->wmi_handle,
						&rate_report_params);
}

/**
 * wma_process_init_bad_peer_tx_ctl_info() - initialize peer rate report
 *	config info
 * @wma: Handle of WMA
 * @config: Bad peer configuration from SIR module
 *
 * This function initializes the bad peer tx control data structure in WMA,
 * sends down the initial configuration to the firmware and configures
 * the peer status update setting in the tx_rx module.
 *
 * Return: QDF_STATUS based on procedure status
 */
QDF_STATUS wma_process_init_bad_peer_tx_ctl_info(tp_wma_handle wma,
			struct t_bad_peer_txtcl_config *config)
{
	/* Parameter sanity check */
	ol_txrx_pdev_handle curr_pdev;

	if (NULL == wma || NULL == config) {
		WMA_LOGE("%s Invalid input\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (NULL == curr_pdev) {
		WMA_LOGE("%s: Failed to get pdev\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	WMA_LOGE("%s enable %d period %d txq limit %d\n", __func__,
		 config->enable,
		 config->period,
		 config->txq_limit);

	/* Only need to initialize the setting
	 * when the feature is enabled
	 */
	if (config->enable) {
		int i = 0;

		ol_txrx_bad_peer_txctl_set_setting(curr_pdev,
						   config->enable,
						   config->period,
						   config->txq_limit);

		for (i = 0; i < WLAN_WMA_IEEE80211_MAX_LEVEL; i++) {
			u_int32_t threshold, limit;

			threshold = config->threshold[i].thresh[0];
			limit = config->threshold[i].txlimit;
			ol_txrx_bad_peer_txctl_update_threshold(curr_pdev, i,
								threshold,
								limit);
		}
	}

	return wma_set_peer_rate_report_condition(wma, config);
}
#endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
/**
 * wma_process_init_thermal_info() - initialize thermal info
 * @wma: Pointer to WMA handle
 * @pThermalParams: Pointer to thermal mitigation parameters
 *
 * This function initializes the thermal management table in WMA,
 * sends down the initial temperature thresholds to the firmware
 * and configures the throttle period in the tx rx module.
 *
 * Returns: QDF_STATUS_SUCCESS for success otherwise failure
 */
QDF_STATUS wma_process_init_thermal_info(tp_wma_handle wma,
					 t_thermal_mgmt *pThermalParams)
{
	t_thermal_cmd_params thermal_params;
	ol_txrx_pdev_handle curr_pdev;

	if (NULL == wma || NULL == pThermalParams) {
		WMA_LOGE("TM Invalid input");
		return QDF_STATUS_E_FAILURE;
	}

	curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (NULL == curr_pdev) {
		WMA_LOGE("%s: Failed to get pdev", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	WMA_LOGD("TM enable %d period %d", pThermalParams->thermalMgmtEnabled,
		 pThermalParams->throttlePeriod);

	WMA_LOGD("Throttle Duty Cycle Level in percentage:\n"
		 "0 %d\n"
		 "1 %d\n"
		 "2 %d\n"
		 "3 %d",
		 pThermalParams->throttle_duty_cycle_tbl[0],
		 pThermalParams->throttle_duty_cycle_tbl[1],
		 pThermalParams->throttle_duty_cycle_tbl[2],
		 pThermalParams->throttle_duty_cycle_tbl[3]);

	wma->thermal_mgmt_info.thermalMgmtEnabled =
		pThermalParams->thermalMgmtEnabled;
	wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold =
		pThermalParams->thermalLevels[0].minTempThreshold;
	wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold =
		pThermalParams->thermalLevels[0].maxTempThreshold;
	wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold =
		pThermalParams->thermalLevels[1].minTempThreshold;
	wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold =
		pThermalParams->thermalLevels[1].maxTempThreshold;
	wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold =
		pThermalParams->thermalLevels[2].minTempThreshold;
	wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold =
		pThermalParams->thermalLevels[2].maxTempThreshold;
	wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold =
		pThermalParams->thermalLevels[3].minTempThreshold;
	wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold =
		pThermalParams->thermalLevels[3].maxTempThreshold;
	wma->thermal_mgmt_info.thermalCurrLevel = WLAN_WMA_THERMAL_LEVEL_0;

	WMA_LOGD("TM level min max:\n"
		 "0 %d %d\n"
		 "1 %d %d\n"
		 "2 %d %d\n"
		 "3 %d %d",
		 wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold,
		 wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold,
		 wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold,
		 wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold,
		 wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold,
		 wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold,
		 wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold,
		 wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold);

	if (wma->thermal_mgmt_info.thermalMgmtEnabled) {
		ol_tx_throttle_init_period(curr_pdev,
					   pThermalParams->throttlePeriod,
					   &pThermalParams->throttle_duty_cycle_tbl[0]);

		/* Get the temperature thresholds to set in firmware */
		thermal_params.minTemp =
			wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].minTempThreshold;
		thermal_params.maxTemp =
			wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].maxTempThreshold;
		thermal_params.thermalEnable =
			wma->thermal_mgmt_info.thermalMgmtEnabled;

		WMA_LOGE("TM sending the following to firmware: min %d max %d enable %d",
			 thermal_params.minTemp, thermal_params.maxTemp,
			 thermal_params.thermalEnable);

		if (QDF_STATUS_SUCCESS !=
		    wma_set_thermal_mgmt(wma, thermal_params)) {
			WMA_LOGE("Could not send thermal mgmt command to the firmware!");
		}
	}
	return QDF_STATUS_SUCCESS;
}
/**
 * wma_set_thermal_level_ind() - send SME set thermal level indication message
 * @level: thermal level
 *
 * Send SME SET_THERMAL_LEVEL_IND message
 *
 * Returns: none
 */
static void wma_set_thermal_level_ind(u_int8_t level)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	cds_msg_t sme_msg = {0};

	WMA_LOGI(FL("Thermal level: %d"), level);

	sme_msg.type = eWNI_SME_SET_THERMAL_LEVEL_IND;
	sme_msg.bodyptr = NULL;
	sme_msg.bodyval = level;

	qdf_status = cds_mq_post_message(QDF_MODULE_ID_SME, &sme_msg);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		WMA_LOGE(FL("Failed to post set thermal level ind msg"));
}

/**
 * wma_process_set_thermal_level() - sets thermal level
 * @wma: Pointer to WMA handle
 * @thermal_level: Thermal level
 *
 * This function sets the new thermal throttle level in the
 * txrx module and sends down the corresponding temperature
 * thresholds to the firmware
 *
 * Returns: QDF_STATUS_SUCCESS for success otherwise failure
 */
QDF_STATUS wma_process_set_thermal_level(tp_wma_handle wma,
					 uint8_t thermal_level)
{
	ol_txrx_pdev_handle curr_pdev;

	if (NULL == wma) {
		WMA_LOGE("TM Invalid input");
		return QDF_STATUS_E_FAILURE;
	}

	curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (NULL == curr_pdev) {
		WMA_LOGE("%s: Failed to get pdev", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	WMA_LOGE("TM set level %d", thermal_level);

	/* Check if thermal mitigation is enabled */
	if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
		WMA_LOGE("Thermal mgmt is not enabled, ignoring set level command");
		return QDF_STATUS_E_FAILURE;
	}

	if (thermal_level >= WLAN_WMA_MAX_THERMAL_LEVELS) {
		WMA_LOGE("Invalid thermal level set %d", thermal_level);
		return QDF_STATUS_E_FAILURE;
	}

	if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
		WMA_LOGD("Current level %d is same as the set level, ignoring",
			 wma->thermal_mgmt_info.thermalCurrLevel);
		return QDF_STATUS_SUCCESS;
	}

	wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;

	ol_tx_throttle_set_level(curr_pdev, thermal_level);

	/* Send SME SET_THERMAL_LEVEL_IND message */
	wma_set_thermal_level_ind(thermal_level);

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_set_thermal_mgmt() - set thermal mgmt command to fw
 * @wma_handle: Pointer to WMA handle
 * @thermal_info: Thermal command information
 *
 * This function sends the thermal management command
 * to the firmware
 *
 * Return: QDF_STATUS_SUCCESS for success otherwise failure
 */
QDF_STATUS wma_set_thermal_mgmt(tp_wma_handle wma_handle,
				t_thermal_cmd_params thermal_info)
{
	struct thermal_cmd_params mgmt_thermal_info = {0};

	if (!wma_handle) {
		WMA_LOGE("%s: invalid input", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	mgmt_thermal_info.min_temp = thermal_info.minTemp;
	mgmt_thermal_info.max_temp = thermal_info.maxTemp;
	mgmt_thermal_info.thermal_enable = thermal_info.thermalEnable;

	return wmi_unified_set_thermal_mgmt_cmd(wma_handle->wmi_handle,
						&mgmt_thermal_info);
}

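/*
 * Example (illustrative, not driver code): forcing a throttle level from
 * the host side with the helpers above. The level value is hypothetical.
 *
 *	if (QDF_STATUS_SUCCESS != wma_process_set_thermal_level(wma, 2))
 *		WMA_LOGE("failed to force thermal level");
 *
 * Internally this updates thermalCurrLevel, throttles the data path via
 * ol_tx_throttle_set_level() and notifies SME through
 * wma_set_thermal_level_ind(); the firmware-event path below follows the
 * same sequence and additionally pushes new thresholds to firmware.
 */
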
/**
 * wma_thermal_mgmt_get_level() - returns throttle level
 * @handle: Pointer to WMA handle
 * @temp: temperature
 *
 * This function returns the thermal (throttle) level
 * given the temperature
 *
 * Return: thermal (throttle) level
 */
uint8_t wma_thermal_mgmt_get_level(void *handle, uint32_t temp)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	int i;
	uint8_t level;

	level = i = wma->thermal_mgmt_info.thermalCurrLevel;
	while (temp < wma->thermal_mgmt_info.thermalLevels[i].minTempThreshold
	       && i > 0) {
		i--;
		level = i;
	}

	i = wma->thermal_mgmt_info.thermalCurrLevel;
	while (temp > wma->thermal_mgmt_info.thermalLevels[i].maxTempThreshold
	       && i < (WLAN_WMA_MAX_THERMAL_LEVELS - 1)) {
		i++;
		level = i;
	}

	WMA_LOGW("Change thermal level from %d -> %d",
		 wma->thermal_mgmt_info.thermalCurrLevel, level);

	return level;
}

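/*
 * Worked example for wma_thermal_mgmt_get_level() (illustrative,
 * hypothetical thresholds): with levels L0:[0,90], L1:[70,110],
 * L2:[90,130], L3:[110,150] degrees C and thermalCurrLevel = 0:
 *
 *	temp = 95: 95 > max[0] (90), step up to 1; 95 <= max[1] -> level 1
 *	then temp = 60 at level 1: 60 < min[1] (70), step down -> level 0
 *
 * The overlap between max[i] and min[i+1] provides hysteresis, so a
 * temperature hovering near a boundary does not toggle the level on
 * every report.
 */
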
/**
 * wma_thermal_mgmt_evt_handler() - thermal mgmt event handler
 * @handle: Pointer to WMA handle
 * @event: Thermal event information
 * @len: length of the event buffer
 *
 * This function handles the thermal mgmt event from the firmware.
 *
 * Return: 0 for success otherwise failure
 */
int wma_thermal_mgmt_evt_handler(void *handle, uint8_t *event,
				 uint32_t len)
{
	tp_wma_handle wma;
	wmi_thermal_mgmt_event_fixed_param *tm_event;
	uint8_t thermal_level;
	t_thermal_cmd_params thermal_params;
	WMI_THERMAL_MGMT_EVENTID_param_tlvs *param_buf;
	ol_txrx_pdev_handle curr_pdev;

	if (NULL == event || NULL == handle) {
		WMA_LOGE("Invalid thermal mitigation event buffer");
		return -EINVAL;
	}

	wma = (tp_wma_handle) handle;
	param_buf = (WMI_THERMAL_MGMT_EVENTID_param_tlvs *) event;

	curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (NULL == curr_pdev) {
		WMA_LOGE("%s: Failed to get pdev", __func__);
		return -EINVAL;
	}

	/* Check if thermal mitigation is enabled */
	if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
		WMA_LOGE("Thermal mgmt is not enabled, ignoring event");
		return -EINVAL;
	}

	tm_event = param_buf->fixed_param;
	WMA_LOGD("Thermal mgmt event received with temperature %d",
		 tm_event->temperature_degreeC);

	/* Get the thermal mitigation level for the reported temperature */
	thermal_level = wma_thermal_mgmt_get_level(handle,
					tm_event->temperature_degreeC);
	WMA_LOGD("Thermal mgmt level %d", thermal_level);

	if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
		WMA_LOGD("Current level %d is same as the set level, ignoring",
			 wma->thermal_mgmt_info.thermalCurrLevel);
		return 0;
	}

	wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;

	/* Inform txrx */
	ol_tx_throttle_set_level(curr_pdev, thermal_level);

	/* Send SME SET_THERMAL_LEVEL_IND message */
	wma_set_thermal_level_ind(thermal_level);

	/* Get the temperature thresholds to set in firmware */
	thermal_params.minTemp =
		wma->thermal_mgmt_info.thermalLevels[thermal_level].
		minTempThreshold;
	thermal_params.maxTemp =
		wma->thermal_mgmt_info.thermalLevels[thermal_level].
		maxTempThreshold;
	thermal_params.thermalEnable =
		wma->thermal_mgmt_info.thermalMgmtEnabled;

	if (QDF_STATUS_SUCCESS != wma_set_thermal_mgmt(wma, thermal_params)) {
		WMA_LOGE("Could not send thermal mgmt command to the firmware!");
		return -EINVAL;
	}

	return 0;
}

/**
 * wma_ibss_peer_info_event_handler() - IBSS peer info event handler
 * @handle: wma handle
 * @data: event data
 * @len: length of data
 *
 * This function handles IBSS peer info event from FW.
 *
 * Return: 0 for success or error code
 */
int wma_ibss_peer_info_event_handler(void *handle, uint8_t *data,
				     uint32_t len)
{
	cds_msg_t cds_msg;
	wmi_peer_info *peer_info;
	ol_txrx_pdev_handle pdev;
	tSirIbssPeerInfoParams *pSmeRsp;
	uint32_t count, num_peers, status;
	tSirIbssGetPeerInfoRspParams *pRsp;
	WMI_PEER_INFO_EVENTID_param_tlvs *param_tlvs;
	wmi_peer_info_event_fixed_param *fix_param;
	uint8_t peer_mac[IEEE80211_ADDR_LEN];

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (NULL == pdev) {
		WMA_LOGE("%s: could not get pdev context", __func__);
		return 0;
	}

	param_tlvs = (WMI_PEER_INFO_EVENTID_param_tlvs *) data;
	fix_param = param_tlvs->fixed_param;
	peer_info = param_tlvs->peer_info;
	num_peers = fix_param->num_peers;
	status = 0;

	WMA_LOGE("%s: num_peers %d", __func__, num_peers);

	pRsp = qdf_mem_malloc(sizeof(tSirIbssGetPeerInfoRspParams));
	if (NULL == pRsp) {
		WMA_LOGE("%s: could not allocate memory for ibss peer info rsp len %zu",
			 __func__, sizeof(tSirIbssGetPeerInfoRspParams));
		return 0;
	}

	/* sanity check */
	if ((num_peers > 32) || (NULL == peer_info)) {
		WMA_LOGE("%s: Invalid event data from target num_peers %d peer_info %p",
			 __func__, num_peers, peer_info);
		status = 1;
		goto send_response;
	}

	/*
	 * For displaying only connected IBSS peer info, iterate till
	 * last but one entry only as last entry is used for IBSS creator
	 */
	for (count = 0; count < num_peers - 1; count++) {
		pSmeRsp = &pRsp->ibssPeerInfoRspParams.peerInfoParams[count];

		WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_info->peer_mac_address,
					   peer_mac);
		qdf_mem_copy(pSmeRsp->mac_addr, peer_mac,
			     sizeof(pSmeRsp->mac_addr));
		pSmeRsp->mcsIndex = 0;
		pSmeRsp->rssi = peer_info->rssi + WMA_TGT_NOISE_FLOOR_DBM;
		pSmeRsp->txRate = peer_info->data_rate;
		pSmeRsp->txRateFlags = 0;

		WMA_LOGE("peer " MAC_ADDRESS_STR " rssi %d txRate %d",
			 MAC_ADDR_ARRAY(peer_mac),
			 pSmeRsp->rssi, pSmeRsp->txRate);

		peer_info++;
	}

send_response:
	/* message header */
	pRsp->mesgType = eWNI_SME_IBSS_PEER_INFO_RSP;
	pRsp->mesgLen = sizeof(tSirIbssGetPeerInfoRspParams);
	pRsp->ibssPeerInfoRspParams.status = status;
	pRsp->ibssPeerInfoRspParams.numPeers = num_peers;

	/* cds message wrapper */
	cds_msg.type = eWNI_SME_IBSS_PEER_INFO_RSP;
	cds_msg.bodyptr = (void *)pRsp;
	cds_msg.bodyval = 0;

	if (QDF_STATUS_SUCCESS !=
	    cds_mq_post_message(CDS_MQ_ID_SME, (cds_msg_t *) &cds_msg)) {
		WMA_LOGE("%s: could not post peer info rsp msg to SME",
			 __func__);
		/* free the mem and return */
		qdf_mem_free((void *)pRsp);
	}

	return 0;
}

/**
 * wma_fast_tx_fail_event_handler() - tx failure event handler
 * @handle: wma handle
 * @data: event data
 * @len: data length
 *
 * Handle fast tx failure indication event from FW
 *
 * Return: 0 for success or error code.
 */
int wma_fast_tx_fail_event_handler(void *handle, uint8_t *data,
				   uint32_t len)
{
	uint8_t tx_fail_cnt;
	uint8_t peer_mac[IEEE80211_ADDR_LEN];
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_PEER_TX_FAIL_CNT_THR_EVENTID_param_tlvs *param_tlvs;
	wmi_peer_tx_fail_cnt_thr_event_fixed_param *fix_param;

	param_tlvs = (WMI_PEER_TX_FAIL_CNT_THR_EVENTID_param_tlvs *) data;
	fix_param = param_tlvs->fixed_param;

	WMI_MAC_ADDR_TO_CHAR_ARRAY(&fix_param->peer_mac_address, peer_mac);
	WMA_LOGE("%s: received fast tx failure event for peer " MAC_ADDRESS_STR
		 " seq No %d", __func__,
		 MAC_ADDR_ARRAY(peer_mac), fix_param->seq_no);

	tx_fail_cnt = fix_param->seq_no;

	/* call HDD callback */
	if (NULL != wma->hddTxFailCb)
		wma->hddTxFailCb(peer_mac, tx_fail_cnt);
	else
		WMA_LOGE("%s: HDD tx fail callback is NULL", __func__);

	return 0;
}

/**
 * wma_decap_to_8023() - Decapsulate to 802.3 format
 * @msdu: skb buffer
 * @info: decapsulate info
 *
 * Return: none
 */
static void wma_decap_to_8023(qdf_nbuf_t msdu, struct wma_decap_info_t *info)
{
	struct llc_snap_hdr_t *llc_hdr;
	uint16_t ether_type;
	uint16_t l2_hdr_space;
	struct ieee80211_qosframe_addr4 *wh;
	uint8_t local_buf[ETHERNET_HDR_LEN];
	uint8_t *buf;
	struct ethernet_hdr_t *ethr_hdr;

	buf = (uint8_t *) qdf_nbuf_data(msdu);
	llc_hdr = (struct llc_snap_hdr_t *)buf;
	ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
	/* do llc remove if needed */
	l2_hdr_space = 0;
	if (IS_SNAP(llc_hdr)) {
		if (IS_BTEP(llc_hdr)) {
			/* remove llc */
			l2_hdr_space += sizeof(struct llc_snap_hdr_t);
			llc_hdr = NULL;
		} else if (IS_RFC1042(llc_hdr)) {
			if (!(ether_type == ETHERTYPE_AARP ||
			      ether_type == ETHERTYPE_IPX)) {
				/* remove llc */
				l2_hdr_space += sizeof(struct llc_snap_hdr_t);
				llc_hdr = NULL;
			}
		}
	}
	if (l2_hdr_space > ETHERNET_HDR_LEN)
		buf = qdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
	else if (l2_hdr_space < ETHERNET_HDR_LEN)
		buf = qdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);

	/* The MPDU header is in info; re-create the Ethernet header from it */
	wh = (struct ieee80211_qosframe_addr4 *)info->hdr;
	ethr_hdr = (struct ethernet_hdr_t *)local_buf;
	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
			     ETHERNET_ADDR_LEN);
		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
			     ETHERNET_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_TODS:
		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
			     ETHERNET_ADDR_LEN);
		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
			     ETHERNET_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
			     ETHERNET_ADDR_LEN);
		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr3,
			     ETHERNET_ADDR_LEN);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
			     ETHERNET_ADDR_LEN);
		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr4,
			     ETHERNET_ADDR_LEN);
		break;
	}

	if (llc_hdr == NULL) {
		/* LLC/SNAP was stripped; restore the original ethertype */
		ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
		ethr_hdr->ethertype[1] = (ether_type) & 0xff;
	} else {
		/* LLC kept: carry an 802.3 length field instead of a type */
		ether_type = qdf_nbuf_len(msdu) -
			     sizeof(struct ethernet_hdr_t);
		ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
		ethr_hdr->ethertype[1] = (ether_type) & 0xff;
	}
	qdf_mem_copy(buf, ethr_hdr, ETHERNET_HDR_LEN);
}

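/*
 * Worked example for wma_decap_to_8023() (illustrative): a unicast ToDS
 * QoS data frame carrying an RFC 1042 LLC/SNAP header with ethertype
 * 0x0800 (IPv4). The LLC/SNAP header is stripped (llc_hdr becomes NULL),
 * the Ethernet header is rebuilt with dest = i_addr3 and src = i_addr2,
 * and the type field carries 0x0800. Had the payload been AARP or IPX,
 * the LLC header would be kept and the type field would instead carry
 * the 802.3 payload length.
 */
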
/**
 * wma_ieee80211_hdrsize() - get 802.11 header size
 * @data: 802.11 frame
 *
 * Return: size of header
 */
static int32_t wma_ieee80211_hdrsize(const void *data)
{
	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
	int32_t size = sizeof(struct ieee80211_frame);

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
		size += IEEE80211_ADDR_LEN;
	if (IEEE80211_QOS_HAS_SEQ(wh))
		size += sizeof(uint16_t);
	return size;
}

/**
 * wmi_desc_pool_init() - Initialize the WMI descriptor pool
 * @wma_handle: handle to wma
 * @pool_size: Size of wma pool
 *
 * Return: 0 for success, error code on failure.
 */
int wmi_desc_pool_init(tp_wma_handle wma_handle, uint32_t pool_size)
{
	int i;

	if (!pool_size) {
		WMA_LOGE("%s: invalid desc pool size %d", __func__, pool_size);
		qdf_assert_always(pool_size);
		return -EINVAL;
	}
	WMA_LOGE("%s: initialize desc pool of size %d", __func__, pool_size);
	wma_handle->wmi_desc_pool.pool_size = pool_size;
	wma_handle->wmi_desc_pool.num_free = pool_size;
	wma_handle->wmi_desc_pool.array = qdf_mem_malloc(pool_size *
					sizeof(union wmi_desc_elem_t));
	if (!wma_handle->wmi_desc_pool.array) {
		WMA_LOGE("%s: failed to allocate desc pool", __func__);
		return -ENOMEM;
	}
	wma_handle->wmi_desc_pool.freelist =
		&wma_handle->wmi_desc_pool.array[0];

	/* Chain the elements into a singly linked freelist */
	for (i = 0; i < (pool_size - 1); i++) {
		wma_handle->wmi_desc_pool.array[i].wmi_desc.desc_id = i;
		wma_handle->wmi_desc_pool.array[i].next =
			&wma_handle->wmi_desc_pool.array[i + 1];
	}

	wma_handle->wmi_desc_pool.array[i].next = NULL;
	wma_handle->wmi_desc_pool.array[i].wmi_desc.desc_id = i;

	qdf_spinlock_create(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
	return 0;
}

/**
 * wmi_desc_pool_deinit() - Deinitialize the WMI descriptor pool
 * @wma_handle: handle to wma
 *
 * Return: None
 */
void wmi_desc_pool_deinit(tp_wma_handle wma_handle)
{
	qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
	if (wma_handle->wmi_desc_pool.array) {
		qdf_mem_free(wma_handle->wmi_desc_pool.array);
		wma_handle->wmi_desc_pool.array = NULL;
	} else {
		WMA_LOGE("%s: Empty WMI descriptor pool", __func__);
	}

	wma_handle->wmi_desc_pool.freelist = NULL;
	wma_handle->wmi_desc_pool.pool_size = 0;
	wma_handle->wmi_desc_pool.num_free = 0;
	qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
	qdf_spinlock_destroy(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
}

/**
 * wmi_desc_get() - Get wmi descriptor from wmi free descriptor pool
 * @wma_handle: handle to wma
 *
 * Return: pointer to wmi descriptor, NULL on failure
 */
struct wmi_desc_t *wmi_desc_get(tp_wma_handle wma_handle)
{
	struct wmi_desc_t *wmi_desc = NULL;

	qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
	if (wma_handle->wmi_desc_pool.freelist) {
		wma_handle->wmi_desc_pool.num_free--;
		wmi_desc = &wma_handle->wmi_desc_pool.freelist->wmi_desc;
		wma_handle->wmi_desc_pool.freelist =
			wma_handle->wmi_desc_pool.freelist->next;
	}
	qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);

	return wmi_desc;
}

/**
 * wmi_desc_put() - Put wmi descriptor to wmi free descriptor pool
 * @wma_handle: handle to wma
 * @wmi_desc: wmi descriptor
 *
 * Return: None
 */
void wmi_desc_put(tp_wma_handle wma_handle, struct wmi_desc_t *wmi_desc)
{
	qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
	((union wmi_desc_elem_t *)wmi_desc)->next =
		wma_handle->wmi_desc_pool.freelist;
	wma_handle->wmi_desc_pool.freelist = (union wmi_desc_elem_t *)wmi_desc;
	wma_handle->wmi_desc_pool.num_free++;
	qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
}

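/*
 * Example (illustrative, not driver code): intended lifecycle of the WMI
 * descriptor pool API above. The pool size is hypothetical.
 *
 *	if (wmi_desc_pool_init(wma_handle, 1024))
 *		return -ENOMEM;
 *	...
 *	wmi_desc = wmi_desc_get(wma_handle);	(NULL once the pool is empty)
 *	if (wmi_desc) {
 *		(attach nbuf and completion callbacks, send the WMI cmd;
 *		 on send failure, return it: wmi_desc_put(wma_handle,
 *		 wmi_desc))
 *	}
 *	...
 *	wmi_desc_pool_deinit(wma_handle);
 */
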
NULL"); 2532 return QDF_STATUS_E_FAILURE; 2533 } 2534 /* 2535 * Currently only support to 2536 * send 80211 Mgmt and 80211 Data are added. 2537 */ 2538 if (!((frmType == TXRX_FRM_802_11_MGMT) || 2539 (frmType == TXRX_FRM_802_11_DATA))) { 2540 WMA_LOGE("No Support to send other frames except 802.11 Mgmt/Data"); 2541 return QDF_STATUS_E_FAILURE; 2542 } 2543 mHdr = (tpSirMacMgmtHdr)qdf_nbuf_data(tx_frame); 2544 #ifdef WLAN_FEATURE_11W 2545 if ((iface && iface->rmfEnabled) && 2546 (frmType == TXRX_FRM_802_11_MGMT) && 2547 (pFc->subType == SIR_MAC_MGMT_DISASSOC || 2548 pFc->subType == SIR_MAC_MGMT_DEAUTH || 2549 pFc->subType == SIR_MAC_MGMT_ACTION)) { 2550 struct ieee80211_frame *wh = 2551 (struct ieee80211_frame *)qdf_nbuf_data(tx_frame); 2552 if (!IEEE80211_IS_BROADCAST(wh->i_addr1) && 2553 !IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2554 if (pFc->wep) { 2555 /* Allocate extra bytes for privacy header and trailer */ 2556 newFrmLen = frmLen + IEEE80211_CCMP_HEADERLEN + 2557 IEEE80211_CCMP_MICLEN; 2558 qdf_status = 2559 cds_packet_alloc((uint16_t) newFrmLen, 2560 (void **)&pFrame, 2561 (void **)&pPacket); 2562 2563 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 2564 WMA_LOGP("%s: Failed to allocate %d bytes for RMF status " 2565 "code (%x)", __func__, newFrmLen, 2566 qdf_status); 2567 /* Free the original packet memory */ 2568 cds_packet_free((void *)tx_frame); 2569 goto error; 2570 } 2571 2572 /* 2573 * Initialize the frame with 0's and only fill 2574 * MAC header and data, Keep the CCMP header and 2575 * trailer as 0's, firmware shall fill this 2576 */ 2577 qdf_mem_set(pFrame, newFrmLen, 0); 2578 qdf_mem_copy(pFrame, wh, sizeof(*wh)); 2579 qdf_mem_copy(pFrame + sizeof(*wh) + 2580 IEEE80211_CCMP_HEADERLEN, 2581 pData + sizeof(*wh), 2582 frmLen - sizeof(*wh)); 2583 2584 cds_packet_free((void *)tx_frame); 2585 tx_frame = pPacket; 2586 frmLen = newFrmLen; 2587 } 2588 } else { 2589 /* Allocate extra bytes for MMIE */ 2590 newFrmLen = frmLen + IEEE80211_MMIE_LEN; 2591 qdf_status = cds_packet_alloc((uint16_t) newFrmLen, 2592 (void **)&pFrame, 2593 (void **)&pPacket); 2594 2595 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 2596 WMA_LOGP("%s: Failed to allocate %d bytes for RMF status " 2597 "code (%x)", __func__, newFrmLen, 2598 qdf_status); 2599 /* Free the original packet memory */ 2600 cds_packet_free((void *)tx_frame); 2601 goto error; 2602 } 2603 /* 2604 * Initialize the frame with 0's and only fill 2605 * MAC header and data. MMIE field will be 2606 * filled by cds_attach_mmie API 2607 */ 2608 qdf_mem_set(pFrame, newFrmLen, 0); 2609 qdf_mem_copy(pFrame, wh, sizeof(*wh)); 2610 qdf_mem_copy(pFrame + sizeof(*wh), 2611 pData + sizeof(*wh), frmLen - sizeof(*wh)); 2612 if (!cds_attach_mmie(iface->key.key, 2613 iface->key.key_id[0].ipn, 2614 WMA_IGTK_KEY_INDEX_4, 2615 pFrame, 2616 pFrame + newFrmLen, newFrmLen)) { 2617 WMA_LOGP("%s: Failed to attach MMIE at the end of " 2618 "frame", __func__); 2619 /* Free the original packet memory */ 2620 cds_packet_free((void *)tx_frame); 2621 goto error; 2622 } 2623 cds_packet_free((void *)tx_frame); 2624 tx_frame = pPacket; 2625 frmLen = newFrmLen; 2626 } 2627 } 2628 #endif /* WLAN_FEATURE_11W */ 2629 2630 if ((frmType == TXRX_FRM_802_11_MGMT) && 2631 (pFc->subType == SIR_MAC_MGMT_PROBE_RSP)) { 2632 uint64_t adjusted_tsf_le; 2633 struct ieee80211_frame *wh = 2634 (struct ieee80211_frame *)qdf_nbuf_data(tx_frame); 2635 2636 /* Make the TSF offset negative to match TSF in beacons */ 2637 adjusted_tsf_le = cpu_to_le64(0ULL - 2638 wma_handle->interfaces[vdev_id]. 
	if ((frmType == TXRX_FRM_802_11_MGMT) &&
	    (pFc->subType == SIR_MAC_MGMT_PROBE_RSP)) {
		uint64_t adjusted_tsf_le;
		struct ieee80211_frame *wh =
			(struct ieee80211_frame *)qdf_nbuf_data(tx_frame);

		/* Make the TSF offset negative to match TSF in beacons */
		adjusted_tsf_le = cpu_to_le64(0ULL -
				wma_handle->interfaces[vdev_id].tsfadjust);
		A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
	}
	if (frmType == TXRX_FRM_802_11_DATA) {
		qdf_nbuf_t ret;
		qdf_nbuf_t skb = (qdf_nbuf_t) tx_frame;
		ol_txrx_pdev_handle pdev =
			cds_get_context(QDF_MODULE_ID_TXRX);

		struct wma_decap_info_t decap_info;
		struct ieee80211_frame *wh =
			(struct ieee80211_frame *)qdf_nbuf_data(skb);
		unsigned long curr_timestamp = qdf_mc_timer_get_system_ticks();

		if (pdev == NULL) {
			WMA_LOGE("%s: pdev pointer is not available", __func__);
			return QDF_STATUS_E_FAULT;
		}

		/*
		 * 1) TxRx Module expects data input to be in 802.3 format,
		 *    so decapsulation has to be done.
		 * 2) Only one outstanding data frame pending for Ack is
		 *    allowed.
		 */
		if (tx_frm_ota_comp_cb) {
			if (wma_handle->umac_data_ota_ack_cb) {
				/*
				 * If the last data frame was sent more than
				 * 5 seconds ago and we still did not receive
				 * an ack/nack from fw, allow Tx of this data
				 * frame.
				 */
				if (curr_timestamp >=
				    wma_handle->last_umac_data_ota_timestamp +
				    500) {
					WMA_LOGE("%s: No Tx Ack for last data frame for more than 5 secs, allow Tx of current data frame",
						 __func__);
				} else {
					WMA_LOGE("%s: Already one Data pending for Ack, reject Tx of data frame",
						 __func__);
					return QDF_STATUS_E_FAILURE;
				}
			}
		} else {
			/*
			 * Data frames are sent through the TxRx non-standard
			 * data path, so the Ack complete Cb is mandatory.
			 */
			WMA_LOGE("No Ack Complete Cb. Don't Allow");
			return QDF_STATUS_E_FAILURE;
		}

		/* Take out 802.11 header from skb */
		decap_info.hdr_len = wma_ieee80211_hdrsize(wh);
		qdf_mem_copy(decap_info.hdr, wh, decap_info.hdr_len);
		qdf_nbuf_pull_head(skb, decap_info.hdr_len);

		/* Decapsulate to 802.3 format */
		wma_decap_to_8023(skb, &decap_info);

		/* Zero out skb's context buffer for the driver to use */
		qdf_mem_set(skb->cb, sizeof(skb->cb), 0);

		/* Terminate the (single-element) list of tx frames */
		skb->next = NULL;

		/* Store the Ack Complete Cb */
		wma_handle->umac_data_ota_ack_cb = tx_frm_ota_comp_cb;

		/* Store the timestamp and nbuf for this data Tx */
		wma_handle->last_umac_data_ota_timestamp = curr_timestamp;
		wma_handle->last_umac_data_nbuf = skb;

		/* Send the Data frame to TxRx in Non Standard Path */
		ol_txrx_hl_tdls_flag_reset(txrx_vdev, tdlsFlag);

		ret = ol_tx_non_std(txrx_vdev, OL_TX_SPEC_NO_FREE, skb);

		ol_txrx_hl_tdls_flag_reset(txrx_vdev, false);

Fail to do Tx"); 2720 /* Call Download Cb so that umac can free the buffer */ 2721 if (tx_frm_download_comp_cb) 2722 tx_frm_download_comp_cb(wma_handle->mac_context, 2723 tx_frame, 2724 WMA_TX_FRAME_BUFFER_FREE); 2725 wma_handle->umac_data_ota_ack_cb = NULL; 2726 wma_handle->last_umac_data_nbuf = NULL; 2727 return QDF_STATUS_E_FAILURE; 2728 } 2729 2730 /* Call Download Callback if passed */ 2731 if (tx_frm_download_comp_cb) 2732 tx_frm_download_comp_cb(wma_handle->mac_context, 2733 tx_frame, 2734 WMA_TX_FRAME_BUFFER_NO_FREE); 2735 2736 return QDF_STATUS_SUCCESS; 2737 } 2738 2739 ctrl_pdev = ol_txrx_get_ctrl_pdev_from_vdev(txrx_vdev); 2740 if (ctrl_pdev == NULL) { 2741 WMA_LOGE("ol_pdev_handle is NULL\n"); 2742 return QDF_STATUS_E_FAILURE; 2743 } 2744 is_high_latency = ol_cfg_is_high_latency(ctrl_pdev); 2745 2746 downld_comp_required = tx_frm_download_comp_cb && is_high_latency && 2747 tx_frm_ota_comp_cb; 2748 2749 /* Fill the frame index to send */ 2750 if (pFc->type == SIR_MAC_MGMT_FRAME) { 2751 if (tx_frm_ota_comp_cb) { 2752 if (downld_comp_required) 2753 tx_frm_index = 2754 GENERIC_DOWNLD_COMP_ACK_COMP_INDEX; 2755 else 2756 tx_frm_index = GENERIC_NODOWLOAD_ACK_COMP_INDEX; 2757 2758 /* Store the Ack Cb sent by UMAC */ 2759 if (pFc->subType < SIR_MAC_MGMT_RESERVED15) { 2760 wma_handle->umac_ota_ack_cb[pFc->subType] = 2761 tx_frm_ota_comp_cb; 2762 } 2763 } else { 2764 if (downld_comp_required) 2765 tx_frm_index = 2766 GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX; 2767 else 2768 tx_frm_index = 2769 GENERIC_NODOWNLD_NOACK_COMP_INDEX; 2770 } 2771 } 2772 2773 /* 2774 * If Dowload Complete is required 2775 * Wait for download complete 2776 */ 2777 if (downld_comp_required) { 2778 /* Store Tx Comp Cb */ 2779 wma_handle->tx_frm_download_comp_cb = tx_frm_download_comp_cb; 2780 2781 /* Reset the Tx Frame Complete Event */ 2782 qdf_status = 2783 qdf_event_reset(&wma_handle->tx_frm_download_comp_event); 2784 2785 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 2786 WMA_LOGP("%s: Event Reset failed tx comp event %x", 2787 __func__, qdf_status); 2788 goto error; 2789 } 2790 } 2791 2792 /* If the frame has to be sent at BD Rate2 inform TxRx */ 2793 if (tx_flag & HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME) 2794 use_6mbps = 1; 2795 2796 if (wma_handle->interfaces[vdev_id].scan_info.chan_freq != 0) { 2797 chanfreq = wma_handle->interfaces[vdev_id].scan_info.chan_freq; 2798 WMA_LOGI("%s: Preauth frame on channel %d", __func__, chanfreq); 2799 } else if (pFc->subType == SIR_MAC_MGMT_PROBE_RSP) { 2800 if ((wma_is_vdev_in_ap_mode(wma_handle, vdev_id)) && 2801 (0 != wma_handle->interfaces[vdev_id].mhz)) 2802 chanfreq = wma_handle->interfaces[vdev_id].mhz; 2803 else 2804 chanfreq = channel_freq; 2805 WMA_LOGI("%s: Probe response frame on channel %d vdev:%d", 2806 __func__, chanfreq, vdev_id); 2807 if (wma_is_vdev_in_ap_mode(wma_handle, vdev_id) && !chanfreq) 2808 WMA_LOGE("%s: AP oper chan is zero", __func__); 2809 } else if (pFc->subType == SIR_MAC_MGMT_ACTION) { 2810 chanfreq = channel_freq; 2811 } else { 2812 chanfreq = 0; 2813 } 2814 if (pMac->fEnableDebugLog & 0x1) { 2815 if ((pFc->type == SIR_MAC_MGMT_FRAME) && 2816 (pFc->subType != SIR_MAC_MGMT_PROBE_REQ) && 2817 (pFc->subType != SIR_MAC_MGMT_PROBE_RSP)) { 2818 WMA_LOGE("TX MGMT - Type %hu, SubType %hu seq_num[%d]", 2819 pFc->type, pFc->subType, 2820 ((mHdr->seqControl.seqNumHi << 4) | 2821 mHdr->seqControl.seqNumLo)); 2822 } 2823 } 2824 2825 if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap, 2826 WMI_SERVICE_MGMT_TX_WMI)) { 2827 mgmt_param.tx_frame = tx_frame; 2828 
/**
 * wma_ds_peek_rx_packet_info() - peek rx packet info
 * @pkt: packet
 * @pkt_meta: packet meta
 * @bSwap: byte swap
 *
 * Function fills the rx packet meta info from the cds packet
 *
 * Return: QDF status
 */
QDF_STATUS wma_ds_peek_rx_packet_info(cds_pkt_t *pkt, void **pkt_meta,
				      bool bSwap)
{
	/* Sanity Check */
	if (pkt == NULL) {
		WMA_LOGE("wma: invalid parameter sent to wma_ds_peek_rx_packet_info");
		return QDF_STATUS_E_FAULT;
	}

	*pkt_meta = &(pkt->pkt_meta);

	return QDF_STATUS_SUCCESS;
}

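/*
 * Example (illustrative, not driver code): reading rx meta info with the
 * helper above. pkt_meta points into the cds packet itself, so nothing
 * needs to be freed afterwards.
 *
 *	void *meta = NULL;
 *
 *	if (QDF_STATUS_SUCCESS ==
 *	    wma_ds_peek_rx_packet_info(pkt, &meta, false)) {
 *		(interpret meta as the cds rx packet meta structure)
 *	}
 */
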
/**
 * ol_rx_err() - ol rx err handler
 * @pdev: ol pdev
 * @vdev_id: vdev id
 * @peer_mac_addr: peer mac address
 * @tid: TID
 * @tsf32: TSF
 * @err_type: error type
 * @rx_frame: rx frame
 * @pn: PN number
 * @key_id: key id
 *
 * This function handles rx errors and sends a MIC failure
 * indication to LIM.
 *
 * Return: none
 */
void ol_rx_err(ol_pdev_handle pdev, uint8_t vdev_id,
	       uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
	       enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
	       uint64_t *pn, uint8_t key_id)
{
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
	tpSirSmeMicFailureInd mic_err_ind;
	struct ether_header *eth_hdr;
	cds_msg_t cds_msg;

	if (NULL == wma) {
		WMA_LOGE("%s: Failed to get wma", __func__);
		return;
	}

	if (err_type != OL_RX_ERR_TKIP_MIC)
		return;

	if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
		return;
	eth_hdr = (struct ether_header *)qdf_nbuf_data(rx_frame);
	mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
	if (!mic_err_ind) {
		WMA_LOGE("%s: Failed to allocate memory for MIC indication message",
			 __func__);
		return;
	}
	qdf_mem_set((void *)mic_err_ind, sizeof(*mic_err_ind), 0);

	mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND;
	mic_err_ind->length = sizeof(*mic_err_ind);
	mic_err_ind->sessionId = vdev_id;
	qdf_copy_macaddr(&mic_err_ind->bssId,
		(struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid);
	qdf_mem_copy(mic_err_ind->info.taMacAddr,
		     (struct qdf_mac_addr *) peer_mac_addr,
		     sizeof(tSirMacAddr));
	qdf_mem_copy(mic_err_ind->info.srcMacAddr,
		     (struct qdf_mac_addr *) eth_hdr->ether_shost,
		     sizeof(tSirMacAddr));
	qdf_mem_copy(mic_err_ind->info.dstMacAddr,
		     (struct qdf_mac_addr *) eth_hdr->ether_dhost,
		     sizeof(tSirMacAddr));
	mic_err_ind->info.keyId = key_id;
	mic_err_ind->info.multicast =
		IEEE80211_IS_MULTICAST(eth_hdr->ether_dhost);
	qdf_mem_copy(mic_err_ind->info.TSC, pn, SIR_CIPHER_SEQ_CTR_SIZE);

	qdf_mem_set(&cds_msg, sizeof(cds_msg_t), 0);
	cds_msg.type = eWNI_SME_MIC_FAILURE_IND;
	cds_msg.bodyptr = (void *) mic_err_ind;

	if (QDF_STATUS_SUCCESS !=
	    cds_mq_post_message(CDS_MQ_ID_SME, (cds_msg_t *) &cds_msg)) {
		WMA_LOGE("%s: could not post mic failure indication to SME",
			 __func__);
		qdf_mem_free((void *)mic_err_ind);
	}
}

/**
 * wma_tx_abort() - abort tx
 * @vdev_id: vdev id
 *
 * In case of deauth, the host aborts transmission to the peer.
 *
 * Return: none
 */
void wma_tx_abort(uint8_t vdev_id)
{
#define PEER_ALL_TID_BITMASK 0xffffffff
	tp_wma_handle wma;
	uint32_t peer_tid_bitmap = PEER_ALL_TID_BITMASK;
	struct wma_txrx_node *iface;
	struct peer_flush_params param = {0};

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (NULL == wma) {
		WMA_LOGE("%s: wma is NULL", __func__);
		return;
	}

	iface = &wma->interfaces[vdev_id];
	if (!iface->handle) {
		WMA_LOGE("%s: Failed to get iface handle: %p",
			 __func__, iface->handle);
		return;
	}
	WMA_LOGA("%s: vdevid %d bssid %pM", __func__, vdev_id, iface->bssid);
	iface->pause_bitmap |= (1 << PAUSE_TYPE_HOST);
	ol_txrx_vdev_pause(iface->handle, OL_TXQ_PAUSE_REASON_TX_ABORT);

	/* Flush all TIDs except MGMT TID for this peer in Target */
	peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);
	param.peer_tid_bitmap = peer_tid_bitmap;
	param.vdev_id = vdev_id;
	wmi_unified_peer_flush_tids_send(wma->wmi_handle, iface->bssid,
					 &param);
}

#if defined(FEATURE_LRO)
/**
 * wma_lro_config_cmd() - process the LRO config command
 * @wma_handle: Pointer to WMA handle
 * @wma_lro_cmd: Pointer to LRO configuration parameters
 *
 * This function sends down the LRO configuration parameters to
 * the firmware to enable LRO, sets the TCP flags and sets the
 * seed values for the Toeplitz hash generation
 *
 * Return: QDF_STATUS_SUCCESS for success otherwise failure
 */
QDF_STATUS wma_lro_config_cmd(tp_wma_handle wma_handle,
			      struct wma_lro_config_cmd_t *wma_lro_cmd)
{
	struct wmi_lro_config_cmd_t wmi_lro_cmd = {0};

	if (NULL == wma_handle || NULL == wma_lro_cmd) {
		WMA_LOGE("wma_lro_config_cmd: invalid input!");
		return QDF_STATUS_E_FAILURE;
	}

	wmi_lro_cmd.lro_enable = wma_lro_cmd->lro_enable;
	wmi_lro_cmd.tcp_flag = wma_lro_cmd->tcp_flag;
	wmi_lro_cmd.tcp_flag_mask = wma_lro_cmd->tcp_flag_mask;
	qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv4,
		     wma_lro_cmd->toeplitz_hash_ipv4,
		     LRO_IPV4_SEED_ARR_SZ * sizeof(uint32_t));
	qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv6,
		     wma_lro_cmd->toeplitz_hash_ipv6,
		     LRO_IPV6_SEED_ARR_SZ * sizeof(uint32_t));

	return wmi_unified_lro_config_cmd(wma_handle->wmi_handle,
					  &wmi_lro_cmd);
}
#endif

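/*
 * Example (illustrative, not driver code): a minimal LRO enable request
 * as consumed by wma_lro_config_cmd() above. The TCP flag values are
 * hypothetical; real values and the Toeplitz seeds come from the HDD/ini
 * layer.
 *
 *	struct wma_lro_config_cmd_t lro = {0};
 *
 *	lro.lro_enable = 1;
 *	lro.tcp_flag = 0x10;		(hypothetical: ACK-only segments)
 *	lro.tcp_flag_mask = 0x3f;	(hypothetical mask)
 *	(toeplitz_hash_ipv4/ipv6 seeds filled by the caller)
 *	wma_lro_config_cmd(wma_handle, &lro);
 */
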
/**
 * wma_indicate_err() - indicate an error to the protocol stack
 * @err_type: error type
 * @err_info: information associated with the error
 *
 * This function indicates an error encountered in the data path
 * to the protocol stack
 *
 * Return: none
 */
void
wma_indicate_err(
	enum ol_rx_err_type err_type,
	struct ol_error_info *err_info)
{
	switch (err_type) {
	case OL_RX_ERR_TKIP_MIC:
	{
		tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
		tpSirSmeMicFailureInd mic_err_ind;
		cds_msg_t cds_msg;
		uint8_t vdev_id;

		if (NULL == wma) {
			WMA_LOGE("%s: Failed to get wma context",
				 __func__);
			return;
		}

		mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
		if (!mic_err_ind) {
			WMA_LOGE("%s: MIC indication mem alloc failed",
				 __func__);
			return;
		}

		qdf_mem_set((void *) mic_err_ind,
			    sizeof(*mic_err_ind), 0);
		mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND;
		mic_err_ind->length = sizeof(*mic_err_ind);
		vdev_id = err_info->u.mic_err.vdev_id;
		qdf_copy_macaddr(&mic_err_ind->bssId,
			(struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid);
		WMA_LOGE("MIC error: BSSID:%02x:%02x:%02x:%02x:%02x:%02x",
			 mic_err_ind->bssId.bytes[0],
			 mic_err_ind->bssId.bytes[1],
			 mic_err_ind->bssId.bytes[2],
			 mic_err_ind->bssId.bytes[3],
			 mic_err_ind->bssId.bytes[4],
			 mic_err_ind->bssId.bytes[5]);
		qdf_mem_copy(mic_err_ind->info.taMacAddr,
			     (struct qdf_mac_addr *) err_info->u.mic_err.ta,
			     sizeof(tSirMacAddr));
		qdf_mem_copy(mic_err_ind->info.srcMacAddr,
			     (struct qdf_mac_addr *) err_info->u.mic_err.sa,
			     sizeof(tSirMacAddr));
		qdf_mem_copy(mic_err_ind->info.dstMacAddr,
			     (struct qdf_mac_addr *) err_info->u.mic_err.da,
			     sizeof(tSirMacAddr));
		mic_err_ind->info.keyId = err_info->u.mic_err.key_id;
		mic_err_ind->info.multicast =
			IEEE80211_IS_MULTICAST(err_info->u.mic_err.da);
		qdf_mem_copy(mic_err_ind->info.TSC,
			     (void *)&err_info->u.mic_err.pn,
			     SIR_CIPHER_SEQ_CTR_SIZE);

		qdf_mem_set(&cds_msg, sizeof(cds_msg_t), 0);
		cds_msg.type = eWNI_SME_MIC_FAILURE_IND;
		cds_msg.bodyptr = (void *) mic_err_ind;
		if (QDF_STATUS_SUCCESS !=
		    cds_mq_post_message(CDS_MQ_ID_SME,
					(cds_msg_t *) &cds_msg)) {
			WMA_LOGE("%s: mic failure ind post to SME failed",
				 __func__);
			qdf_mem_free((void *)mic_err_ind);
		}
		break;
	}
	default:
	{
		WMA_LOGE("%s: unhandled ol error type %d", __func__, err_type);
		break;
	}
	}
}
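
/*
 * Example (illustrative, not driver code): how a data-path component might
 * report a TKIP MIC failure through wma_indicate_err(). Field names follow
 * the usage above; the address buffers and the IEEE80211_ADDR_LEN copy
 * size are placeholders.
 *
 *	struct ol_error_info info = {0};
 *
 *	info.u.mic_err.vdev_id = vdev_id;
 *	qdf_mem_copy(info.u.mic_err.ta, ta_addr, IEEE80211_ADDR_LEN);
 *	qdf_mem_copy(info.u.mic_err.sa, sa_addr, IEEE80211_ADDR_LEN);
 *	qdf_mem_copy(info.u.mic_err.da, da_addr, IEEE80211_ADDR_LEN);
 *	info.u.mic_err.key_id = key_id;
 *	wma_indicate_err(OL_RX_ERR_TKIP_MIC, &info);
 */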