1 /* 2 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #ifndef _DP_INTERNAL_H_ 20 #define _DP_INTERNAL_H_ 21 22 #include "dp_types.h" 23 24 #define RX_BUFFER_SIZE_PKTLOG_LITE 1024 25 26 27 #define DP_RSSI_INVAL 0x80 28 #define DP_RSSI_AVG_WEIGHT 2 29 /* 30 * Formula to derive avg_rssi is taken from wifi2.o firmware 31 */ 32 #define DP_GET_AVG_RSSI(avg_rssi, last_rssi) \ 33 (((avg_rssi) - (((uint8_t)(avg_rssi)) >> DP_RSSI_AVG_WEIGHT)) \ 34 + ((((uint8_t)(last_rssi)) >> DP_RSSI_AVG_WEIGHT))) 35 36 /* Macro For NYSM value received in VHT TLV */ 37 #define VHT_SGI_NYSM 3 38 39 /* PPDU STATS CFG */ 40 #define DP_PPDU_STATS_CFG_ALL 0xFFFF 41 42 /* PPDU stats mask sent to FW to enable enhanced stats */ 43 #define DP_PPDU_STATS_CFG_ENH_STATS 0xE67 44 /* PPDU stats mask sent to FW to support debug sniffer feature */ 45 #define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF 46 /* PPDU stats mask sent to FW to support BPR feature*/ 47 #define DP_PPDU_STATS_CFG_BPR 0x2000 48 /* PPDU stats mask sent to FW to support BPR and enhanced stats feature */ 49 #define DP_PPDU_STATS_CFG_BPR_ENH (DP_PPDU_STATS_CFG_BPR | \ 50 DP_PPDU_STATS_CFG_ENH_STATS) 51 /* PPDU stats mask sent to FW to support BPR and pcktlog stats feature */ 52 #define DP_PPDU_STATS_CFG_BPR_PKTLOG (DP_PPDU_STATS_CFG_BPR | \ 53 DP_PPDU_TXLITE_STATS_BITMASK_CFG) 54 55 /** 56 * Bitmap of HTT PPDU TLV types for Default mode 57 */ 58 #define HTT_PPDU_DEFAULT_TLV_BITMAP \ 59 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 60 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 61 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 62 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 63 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 64 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) 65 66 /** 67 * Bitmap of HTT PPDU delayed ba TLV types for Default mode 68 */ 69 #define HTT_PPDU_DELAYED_BA_TLV_BITMAP \ 70 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 71 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 72 (1 << HTT_PPDU_STATS_USR_RATE_TLV) 73 74 /** 75 * Bitmap of HTT PPDU TLV types for Delayed BA 76 */ 77 #define HTT_PPDU_STATUS_TLV_BITMAP \ 78 (1 << HTT_PPDU_STATS_COMMON_TLV) | \ 79 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) 80 81 /** 82 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 64 83 */ 84 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64 \ 85 ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ 86 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 87 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 88 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 89 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 90 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ 91 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \ 92 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV)) 93 94 /** 95 * Bitmap of HTT PPDU TLV types for Sniffer mode bitmap 256 96 */ 97 #define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256 \ 98 ((1 << HTT_PPDU_STATS_COMMON_TLV) | \ 99 (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ 100 (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ 101 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ 102 (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ 103 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ 104 (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) | \ 105 (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV)) 106 107 #ifdef WLAN_TX_PKT_CAPTURE_ENH 108 extern uint8_t 109 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX]; 110 #endif 111 112 #if DP_PRINT_ENABLE 113 #include <stdarg.h> /* va_list */ 114 #include <qdf_types.h> /* qdf_vprint */ 115 #include <cdp_txrx_handle.h> 116 117 enum { 118 /* FATAL_ERR - print only irrecoverable error messages */ 119 DP_PRINT_LEVEL_FATAL_ERR, 120 121 /* ERR - include non-fatal err messages */ 122 DP_PRINT_LEVEL_ERR, 123 124 /* WARN - include warnings */ 125 DP_PRINT_LEVEL_WARN, 126 127 /* INFO1 - include fundamental, infrequent events */ 128 DP_PRINT_LEVEL_INFO1, 129 130 /* INFO2 - include non-fundamental but infrequent events */ 131 DP_PRINT_LEVEL_INFO2, 132 }; 133 134 135 #define dp_print(level, fmt, ...) do { \ 136 if (level <= g_txrx_print_level) \ 137 qdf_print(fmt, ## __VA_ARGS__); \ 138 while (0) 139 #define DP_PRINT(level, fmt, ...) do { \ 140 dp_print(level, "DP: " fmt, ## __VA_ARGS__); \ 141 while (0) 142 #else 143 #define DP_PRINT(level, fmt, ...) 144 #endif /* DP_PRINT_ENABLE */ 145 146 #define DP_TRACE(LVL, fmt, args ...) \ 147 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \ 148 fmt, ## args) 149 150 #ifdef DP_PRINT_NO_CONSOLE 151 /* Stat prints should not go to console or kernel logs.*/ 152 #define DP_PRINT_STATS(fmt, args ...)\ 153 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, \ 154 fmt, ## args) 155 #else 156 #define DP_PRINT_STATS(fmt, args ...)\ 157 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,\ 158 fmt, ## args) 159 #endif 160 #define DP_STATS_INIT(_handle) \ 161 qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats)) 162 163 #define DP_STATS_CLR(_handle) \ 164 qdf_mem_zero(&((_handle)->stats), sizeof((_handle)->stats)) 165 166 #ifndef DISABLE_DP_STATS 167 #define DP_STATS_INC(_handle, _field, _delta) \ 168 { \ 169 if (likely(_handle)) \ 170 _handle->stats._field += _delta; \ 171 } 172 173 #define DP_STATS_INCC(_handle, _field, _delta, _cond) \ 174 { \ 175 if (_cond && likely(_handle)) \ 176 _handle->stats._field += _delta; \ 177 } 178 179 #define DP_STATS_DEC(_handle, _field, _delta) \ 180 { \ 181 if (likely(_handle)) \ 182 _handle->stats._field -= _delta; \ 183 } 184 185 #define DP_STATS_UPD(_handle, _field, _delta) \ 186 { \ 187 if (likely(_handle)) \ 188 _handle->stats._field = _delta; \ 189 } 190 191 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \ 192 { \ 193 DP_STATS_INC(_handle, _field.num, _count); \ 194 DP_STATS_INC(_handle, _field.bytes, _bytes) \ 195 } 196 197 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ 198 { \ 199 DP_STATS_INCC(_handle, _field.num, _count, _cond); \ 200 DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \ 201 } 202 203 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) \ 204 { \ 205 _handle_a->stats._field += _handle_b->stats._field; \ 206 } 207 208 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \ 209 { \ 210 DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \ 211 DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\ 212 } 213 214 #define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \ 215 { \ 216 _handle_a->stats._field = _handle_b->stats._field; \ 217 } 218 219 #else 220 #define DP_STATS_INC(_handle, _field, _delta) 221 #define DP_STATS_INCC(_handle, _field, _delta, _cond) 222 #define DP_STATS_DEC(_handle, _field, _delta) 223 #define DP_STATS_UPD(_handle, _field, _delta) 224 #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) 225 #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) 226 #define DP_STATS_AGGR(_handle_a, _handle_b, _field) 227 #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) 228 #endif 229 230 #ifdef ENABLE_DP_HIST_STATS 231 #define DP_HIST_INIT() \ 232 uint32_t num_of_packets[MAX_PDEV_CNT] = {0}; 233 234 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) \ 235 { \ 236 ++num_of_packets[_pdev_id]; \ 237 } 238 239 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 240 do { \ 241 if (_p_cntrs == 1) { \ 242 DP_STATS_INC(_pdev, \ 243 tx_comp_histogram.pkts_1, 1); \ 244 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 245 DP_STATS_INC(_pdev, \ 246 tx_comp_histogram.pkts_2_20, 1); \ 247 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 248 DP_STATS_INC(_pdev, \ 249 tx_comp_histogram.pkts_21_40, 1); \ 250 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 251 DP_STATS_INC(_pdev, \ 252 tx_comp_histogram.pkts_41_60, 1); \ 253 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 254 DP_STATS_INC(_pdev, \ 255 tx_comp_histogram.pkts_61_80, 1); \ 256 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 257 DP_STATS_INC(_pdev, \ 258 tx_comp_histogram.pkts_81_100, 1); \ 259 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 260 DP_STATS_INC(_pdev, \ 261 tx_comp_histogram.pkts_101_200, 1); \ 262 } else if (_p_cntrs > 200) { \ 263 DP_STATS_INC(_pdev, \ 264 tx_comp_histogram.pkts_201_plus, 1); \ 265 } \ 266 } while (0) 267 268 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ 269 do { \ 270 if (_p_cntrs == 1) { \ 271 DP_STATS_INC(_pdev, \ 272 rx_ind_histogram.pkts_1, 1); \ 273 } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ 274 DP_STATS_INC(_pdev, \ 275 rx_ind_histogram.pkts_2_20, 1); \ 276 } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ 277 DP_STATS_INC(_pdev, \ 278 rx_ind_histogram.pkts_21_40, 1); \ 279 } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ 280 DP_STATS_INC(_pdev, \ 281 rx_ind_histogram.pkts_41_60, 1); \ 282 } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ 283 DP_STATS_INC(_pdev, \ 284 rx_ind_histogram.pkts_61_80, 1); \ 285 } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ 286 DP_STATS_INC(_pdev, \ 287 rx_ind_histogram.pkts_81_100, 1); \ 288 } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ 289 DP_STATS_INC(_pdev, \ 290 rx_ind_histogram.pkts_101_200, 1); \ 291 } else if (_p_cntrs > 200) { \ 292 DP_STATS_INC(_pdev, \ 293 rx_ind_histogram.pkts_201_plus, 1); \ 294 } \ 295 } while (0) 296 297 #define DP_TX_HIST_STATS_PER_PDEV() \ 298 do { \ 299 uint8_t hist_stats = 0; \ 300 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 301 hist_stats++) { \ 302 DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 303 num_of_packets[hist_stats]); \ 304 } \ 305 } while (0) 306 307 308 #define DP_RX_HIST_STATS_PER_PDEV() \ 309 do { \ 310 uint8_t hist_stats = 0; \ 311 for (hist_stats = 0; hist_stats < soc->pdev_count; \ 312 hist_stats++) { \ 313 DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ 314 num_of_packets[hist_stats]); \ 315 } \ 316 } while (0) 317 318 #else 319 #define DP_HIST_INIT() 320 #define DP_HIST_PACKET_COUNT_INC(_pdev_id) 321 #define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 322 #define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) 323 #define DP_RX_HIST_STATS_PER_PDEV() 324 #define DP_TX_HIST_STATS_PER_PDEV() 325 #endif /* DISABLE_DP_STATS */ 326 327 #ifdef FEATURE_TSO_STATS 328 /** 329 * dp_init_tso_stats() - Clear tso stats 330 * @pdev: pdev handle 331 * 332 * Return: None 333 */ 334 static inline 335 void dp_init_tso_stats(struct dp_pdev *pdev) 336 { 337 if (pdev) { 338 qdf_mem_zero(&((pdev)->stats.tso_stats), 339 sizeof((pdev)->stats.tso_stats)); 340 qdf_atomic_init(&pdev->tso_idx); 341 } 342 } 343 344 /** 345 * dp_stats_tso_segment_histogram_update() - TSO Segment Histogram 346 * @pdev: pdev handle 347 * @_p_cntrs: number of tso segments for a tso packet 348 * 349 * Return: None 350 */ 351 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 352 uint8_t _p_cntrs); 353 354 /** 355 * dp_tso_segment_update() - Collect tso segment information 356 * @pdev: pdev handle 357 * @stats_idx: tso packet number 358 * @idx: tso segment number 359 * @seg: tso segment 360 * 361 * Return: None 362 */ 363 void dp_tso_segment_update(struct dp_pdev *pdev, 364 uint32_t stats_idx, 365 uint8_t idx, 366 struct qdf_tso_seg_t seg); 367 368 /** 369 * dp_tso_packet_update() - TSO Packet information 370 * @pdev: pdev handle 371 * @stats_idx: tso packet number 372 * @msdu: nbuf handle 373 * @num_segs: tso segments 374 * 375 * Return: None 376 */ 377 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 378 qdf_nbuf_t msdu, uint16_t num_segs); 379 380 /** 381 * dp_tso_segment_stats_update() - TSO Segment stats 382 * @pdev: pdev handle 383 * @stats_seg: tso segment list 384 * @stats_idx: tso packet number 385 * 386 * Return: None 387 */ 388 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 389 struct qdf_tso_seg_elem_t *stats_seg, 390 uint32_t stats_idx); 391 392 /** 393 * dp_print_tso_stats() - dump tso statistics 394 * @soc:soc handle 395 * @level: verbosity level 396 * 397 * Return: None 398 */ 399 void dp_print_tso_stats(struct dp_soc *soc, 400 enum qdf_stats_verbosity_level level); 401 402 /** 403 * dp_txrx_clear_tso_stats() - clear tso stats 404 * @soc: soc handle 405 * 406 * Return: None 407 */ 408 void dp_txrx_clear_tso_stats(struct dp_soc *soc); 409 #else 410 static inline 411 void dp_init_tso_stats(struct dp_pdev *pdev) 412 { 413 } 414 415 static inline 416 void dp_stats_tso_segment_histogram_update(struct dp_pdev *pdev, 417 uint8_t _p_cntrs) 418 { 419 } 420 421 static inline 422 void dp_tso_segment_update(struct dp_pdev *pdev, 423 uint32_t stats_idx, 424 uint32_t idx, 425 struct qdf_tso_seg_t seg) 426 { 427 } 428 429 static inline 430 void dp_tso_packet_update(struct dp_pdev *pdev, uint32_t stats_idx, 431 qdf_nbuf_t msdu, uint16_t num_segs) 432 { 433 } 434 435 static inline 436 void dp_tso_segment_stats_update(struct dp_pdev *pdev, 437 struct qdf_tso_seg_elem_t *stats_seg, 438 uint32_t stats_idx) 439 { 440 } 441 442 static inline 443 void dp_print_tso_stats(struct dp_soc *soc, 444 enum qdf_stats_verbosity_level level) 445 { 446 } 447 448 static inline 449 void dp_txrx_clear_tso_stats(struct dp_soc *soc) 450 { 451 } 452 #endif /* FEATURE_TSO_STATS */ 453 454 #define DP_HTT_T2H_HP_PIPE 5 455 static inline void dp_update_pdev_stats(struct dp_pdev *tgtobj, 456 struct cdp_vdev_stats *srcobj) 457 { 458 uint8_t i; 459 uint8_t pream_type; 460 461 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { 462 for (i = 0; i < MAX_MCS; i++) { 463 tgtobj->stats.tx.pkt_type[pream_type]. 464 mcs_count[i] += 465 srcobj->tx.pkt_type[pream_type]. 466 mcs_count[i]; 467 tgtobj->stats.rx.pkt_type[pream_type]. 468 mcs_count[i] += 469 srcobj->rx.pkt_type[pream_type]. 470 mcs_count[i]; 471 } 472 } 473 474 for (i = 0; i < MAX_BW; i++) { 475 tgtobj->stats.tx.bw[i] += srcobj->tx.bw[i]; 476 tgtobj->stats.rx.bw[i] += srcobj->rx.bw[i]; 477 } 478 479 for (i = 0; i < SS_COUNT; i++) { 480 tgtobj->stats.tx.nss[i] += srcobj->tx.nss[i]; 481 tgtobj->stats.rx.nss[i] += srcobj->rx.nss[i]; 482 } 483 484 for (i = 0; i < WME_AC_MAX; i++) { 485 tgtobj->stats.tx.wme_ac_type[i] += 486 srcobj->tx.wme_ac_type[i]; 487 tgtobj->stats.rx.wme_ac_type[i] += 488 srcobj->rx.wme_ac_type[i]; 489 tgtobj->stats.tx.excess_retries_per_ac[i] += 490 srcobj->tx.excess_retries_per_ac[i]; 491 } 492 493 for (i = 0; i < MAX_GI; i++) { 494 tgtobj->stats.tx.sgi_count[i] += 495 srcobj->tx.sgi_count[i]; 496 tgtobj->stats.rx.sgi_count[i] += 497 srcobj->rx.sgi_count[i]; 498 } 499 500 for (i = 0; i < MAX_RECEPTION_TYPES; i++) 501 tgtobj->stats.rx.reception_type[i] += 502 srcobj->rx.reception_type[i]; 503 504 tgtobj->stats.tx.comp_pkt.bytes += srcobj->tx.comp_pkt.bytes; 505 tgtobj->stats.tx.comp_pkt.num += srcobj->tx.comp_pkt.num; 506 tgtobj->stats.tx.ucast.num += srcobj->tx.ucast.num; 507 tgtobj->stats.tx.ucast.bytes += srcobj->tx.ucast.bytes; 508 tgtobj->stats.tx.mcast.num += srcobj->tx.mcast.num; 509 tgtobj->stats.tx.mcast.bytes += srcobj->tx.mcast.bytes; 510 tgtobj->stats.tx.bcast.num += srcobj->tx.bcast.num; 511 tgtobj->stats.tx.bcast.bytes += srcobj->tx.bcast.bytes; 512 tgtobj->stats.tx.tx_success.num += srcobj->tx.tx_success.num; 513 tgtobj->stats.tx.tx_success.bytes += 514 srcobj->tx.tx_success.bytes; 515 tgtobj->stats.tx.nawds_mcast.num += 516 srcobj->tx.nawds_mcast.num; 517 tgtobj->stats.tx.nawds_mcast.bytes += 518 srcobj->tx.nawds_mcast.bytes; 519 tgtobj->stats.tx.nawds_mcast_drop += 520 srcobj->tx.nawds_mcast_drop; 521 tgtobj->stats.tx.num_ppdu_cookie_valid += 522 srcobj->tx.num_ppdu_cookie_valid; 523 tgtobj->stats.tx.tx_failed += srcobj->tx.tx_failed; 524 tgtobj->stats.tx.ofdma += srcobj->tx.ofdma; 525 tgtobj->stats.tx.stbc += srcobj->tx.stbc; 526 tgtobj->stats.tx.ldpc += srcobj->tx.ldpc; 527 tgtobj->stats.tx.retries += srcobj->tx.retries; 528 tgtobj->stats.tx.non_amsdu_cnt += srcobj->tx.non_amsdu_cnt; 529 tgtobj->stats.tx.amsdu_cnt += srcobj->tx.amsdu_cnt; 530 tgtobj->stats.tx.non_ampdu_cnt += srcobj->tx.non_ampdu_cnt; 531 tgtobj->stats.tx.ampdu_cnt += srcobj->tx.ampdu_cnt; 532 tgtobj->stats.tx.dropped.fw_rem.num += srcobj->tx.dropped.fw_rem.num; 533 tgtobj->stats.tx.dropped.fw_rem.bytes += 534 srcobj->tx.dropped.fw_rem.bytes; 535 tgtobj->stats.tx.dropped.fw_rem_tx += 536 srcobj->tx.dropped.fw_rem_tx; 537 tgtobj->stats.tx.dropped.fw_rem_notx += 538 srcobj->tx.dropped.fw_rem_notx; 539 tgtobj->stats.tx.dropped.fw_reason1 += 540 srcobj->tx.dropped.fw_reason1; 541 tgtobj->stats.tx.dropped.fw_reason2 += 542 srcobj->tx.dropped.fw_reason2; 543 tgtobj->stats.tx.dropped.fw_reason3 += 544 srcobj->tx.dropped.fw_reason3; 545 tgtobj->stats.tx.dropped.age_out += srcobj->tx.dropped.age_out; 546 tgtobj->stats.rx.err.mic_err += srcobj->rx.err.mic_err; 547 if (srcobj->rx.rssi != 0) 548 tgtobj->stats.rx.rssi = srcobj->rx.rssi; 549 tgtobj->stats.rx.rx_rate = srcobj->rx.rx_rate; 550 tgtobj->stats.rx.err.decrypt_err += srcobj->rx.err.decrypt_err; 551 tgtobj->stats.rx.non_ampdu_cnt += srcobj->rx.non_ampdu_cnt; 552 tgtobj->stats.rx.amsdu_cnt += srcobj->rx.ampdu_cnt; 553 tgtobj->stats.rx.non_amsdu_cnt += srcobj->rx.non_amsdu_cnt; 554 tgtobj->stats.rx.amsdu_cnt += srcobj->rx.amsdu_cnt; 555 tgtobj->stats.rx.nawds_mcast_drop += srcobj->rx.nawds_mcast_drop; 556 tgtobj->stats.rx.to_stack.num += srcobj->rx.to_stack.num; 557 tgtobj->stats.rx.to_stack.bytes += srcobj->rx.to_stack.bytes; 558 559 for (i = 0; i < CDP_MAX_RX_RINGS; i++) { 560 tgtobj->stats.rx.rcvd_reo[i].num += 561 srcobj->rx.rcvd_reo[i].num; 562 tgtobj->stats.rx.rcvd_reo[i].bytes += 563 srcobj->rx.rcvd_reo[i].bytes; 564 } 565 566 srcobj->rx.unicast.num = 567 srcobj->rx.to_stack.num - 568 (srcobj->rx.multicast.num); 569 srcobj->rx.unicast.bytes = 570 srcobj->rx.to_stack.bytes - 571 (srcobj->rx.multicast.bytes); 572 573 tgtobj->stats.rx.unicast.num += srcobj->rx.unicast.num; 574 tgtobj->stats.rx.unicast.bytes += srcobj->rx.unicast.bytes; 575 tgtobj->stats.rx.multicast.num += srcobj->rx.multicast.num; 576 tgtobj->stats.rx.multicast.bytes += srcobj->rx.multicast.bytes; 577 tgtobj->stats.rx.bcast.num += srcobj->rx.bcast.num; 578 tgtobj->stats.rx.bcast.bytes += srcobj->rx.bcast.bytes; 579 tgtobj->stats.rx.raw.num += srcobj->rx.raw.num; 580 tgtobj->stats.rx.raw.bytes += srcobj->rx.raw.bytes; 581 tgtobj->stats.rx.intra_bss.pkts.num += 582 srcobj->rx.intra_bss.pkts.num; 583 tgtobj->stats.rx.intra_bss.pkts.bytes += 584 srcobj->rx.intra_bss.pkts.bytes; 585 tgtobj->stats.rx.intra_bss.fail.num += 586 srcobj->rx.intra_bss.fail.num; 587 tgtobj->stats.rx.intra_bss.fail.bytes += 588 srcobj->rx.intra_bss.fail.bytes; 589 590 tgtobj->stats.tx.last_ack_rssi = 591 srcobj->tx.last_ack_rssi; 592 tgtobj->stats.rx.mec_drop.num += srcobj->rx.mec_drop.num; 593 tgtobj->stats.rx.mec_drop.bytes += srcobj->rx.mec_drop.bytes; 594 tgtobj->stats.rx.multipass_rx_pkt_drop += 595 srcobj->rx.multipass_rx_pkt_drop; 596 } 597 598 static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj, 599 struct dp_vdev *srcobj) 600 { 601 DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.nawds_mcast); 602 603 DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.rcvd); 604 DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.processed); 605 DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.reinject_pkts); 606 DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.inspect_pkts); 607 DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.raw.raw_pkt); 608 DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.dma_map_error); 609 DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_host.num); 610 DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_target); 611 DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sg.sg_pkt); 612 DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.mcast_en.mcast_pkt); 613 DP_STATS_AGGR(tgtobj, srcobj, 614 tx_i.mcast_en.dropped_map_error); 615 DP_STATS_AGGR(tgtobj, srcobj, 616 tx_i.mcast_en.dropped_self_mac); 617 DP_STATS_AGGR(tgtobj, srcobj, 618 tx_i.mcast_en.dropped_send_fail); 619 DP_STATS_AGGR(tgtobj, srcobj, tx_i.mcast_en.ucast); 620 DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error); 621 DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full); 622 DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail); 623 DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num); 624 DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full); 625 DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient); 626 DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified); 627 DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw); 628 DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sniffer_rcvd); 629 DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw); 630 DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw); 631 632 tgtobj->stats.tx_i.dropped.dropped_pkt.num = 633 tgtobj->stats.tx_i.dropped.dma_error + 634 tgtobj->stats.tx_i.dropped.ring_full + 635 tgtobj->stats.tx_i.dropped.enqueue_fail + 636 tgtobj->stats.tx_i.dropped.desc_na.num + 637 tgtobj->stats.tx_i.dropped.res_full; 638 639 } 640 641 static inline void dp_update_vdev_stats(struct cdp_vdev_stats *tgtobj, 642 struct dp_peer *srcobj) 643 { 644 uint8_t i; 645 uint8_t pream_type; 646 647 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { 648 for (i = 0; i < MAX_MCS; i++) { 649 tgtobj->tx.pkt_type[pream_type]. 650 mcs_count[i] += 651 srcobj->stats.tx.pkt_type[pream_type]. 652 mcs_count[i]; 653 tgtobj->rx.pkt_type[pream_type]. 654 mcs_count[i] += 655 srcobj->stats.rx.pkt_type[pream_type]. 656 mcs_count[i]; 657 } 658 } 659 660 for (i = 0; i < MAX_BW; i++) { 661 tgtobj->tx.bw[i] += srcobj->stats.tx.bw[i]; 662 tgtobj->rx.bw[i] += srcobj->stats.rx.bw[i]; 663 } 664 665 for (i = 0; i < SS_COUNT; i++) { 666 tgtobj->tx.nss[i] += srcobj->stats.tx.nss[i]; 667 tgtobj->rx.nss[i] += srcobj->stats.rx.nss[i]; 668 } 669 670 for (i = 0; i < WME_AC_MAX; i++) { 671 tgtobj->tx.wme_ac_type[i] += 672 srcobj->stats.tx.wme_ac_type[i]; 673 tgtobj->rx.wme_ac_type[i] += 674 srcobj->stats.rx.wme_ac_type[i]; 675 tgtobj->tx.excess_retries_per_ac[i] += 676 srcobj->stats.tx.excess_retries_per_ac[i]; 677 } 678 679 for (i = 0; i < MAX_GI; i++) { 680 tgtobj->tx.sgi_count[i] += 681 srcobj->stats.tx.sgi_count[i]; 682 tgtobj->rx.sgi_count[i] += 683 srcobj->stats.rx.sgi_count[i]; 684 } 685 686 for (i = 0; i < MAX_RECEPTION_TYPES; i++) 687 tgtobj->rx.reception_type[i] += 688 srcobj->stats.rx.reception_type[i]; 689 690 tgtobj->tx.comp_pkt.bytes += srcobj->stats.tx.comp_pkt.bytes; 691 tgtobj->tx.comp_pkt.num += srcobj->stats.tx.comp_pkt.num; 692 tgtobj->tx.ucast.num += srcobj->stats.tx.ucast.num; 693 tgtobj->tx.ucast.bytes += srcobj->stats.tx.ucast.bytes; 694 tgtobj->tx.mcast.num += srcobj->stats.tx.mcast.num; 695 tgtobj->tx.mcast.bytes += srcobj->stats.tx.mcast.bytes; 696 tgtobj->tx.bcast.num += srcobj->stats.tx.bcast.num; 697 tgtobj->tx.bcast.bytes += srcobj->stats.tx.bcast.bytes; 698 tgtobj->tx.tx_success.num += srcobj->stats.tx.tx_success.num; 699 tgtobj->tx.tx_success.bytes += 700 srcobj->stats.tx.tx_success.bytes; 701 tgtobj->tx.nawds_mcast.num += 702 srcobj->stats.tx.nawds_mcast.num; 703 tgtobj->tx.nawds_mcast.bytes += 704 srcobj->stats.tx.nawds_mcast.bytes; 705 tgtobj->tx.nawds_mcast_drop += 706 srcobj->stats.tx.nawds_mcast_drop; 707 tgtobj->tx.num_ppdu_cookie_valid += 708 srcobj->stats.tx.num_ppdu_cookie_valid; 709 tgtobj->tx.tx_failed += srcobj->stats.tx.tx_failed; 710 tgtobj->tx.ofdma += srcobj->stats.tx.ofdma; 711 tgtobj->tx.stbc += srcobj->stats.tx.stbc; 712 tgtobj->tx.ldpc += srcobj->stats.tx.ldpc; 713 tgtobj->tx.retries += srcobj->stats.tx.retries; 714 tgtobj->tx.non_amsdu_cnt += srcobj->stats.tx.non_amsdu_cnt; 715 tgtobj->tx.amsdu_cnt += srcobj->stats.tx.amsdu_cnt; 716 tgtobj->tx.non_ampdu_cnt += srcobj->stats.tx.non_ampdu_cnt; 717 tgtobj->tx.ampdu_cnt += srcobj->stats.tx.ampdu_cnt; 718 tgtobj->tx.dropped.fw_rem.num += srcobj->stats.tx.dropped.fw_rem.num; 719 tgtobj->tx.dropped.fw_rem.bytes += 720 srcobj->stats.tx.dropped.fw_rem.bytes; 721 tgtobj->tx.dropped.fw_rem_tx += 722 srcobj->stats.tx.dropped.fw_rem_tx; 723 tgtobj->tx.dropped.fw_rem_notx += 724 srcobj->stats.tx.dropped.fw_rem_notx; 725 tgtobj->tx.dropped.fw_reason1 += 726 srcobj->stats.tx.dropped.fw_reason1; 727 tgtobj->tx.dropped.fw_reason2 += 728 srcobj->stats.tx.dropped.fw_reason2; 729 tgtobj->tx.dropped.fw_reason3 += 730 srcobj->stats.tx.dropped.fw_reason3; 731 tgtobj->tx.dropped.age_out += srcobj->stats.tx.dropped.age_out; 732 tgtobj->rx.err.mic_err += srcobj->stats.rx.err.mic_err; 733 if (srcobj->stats.rx.rssi != 0) 734 tgtobj->rx.rssi = srcobj->stats.rx.rssi; 735 tgtobj->rx.rx_rate = srcobj->stats.rx.rx_rate; 736 tgtobj->rx.err.decrypt_err += srcobj->stats.rx.err.decrypt_err; 737 tgtobj->rx.non_ampdu_cnt += srcobj->stats.rx.non_ampdu_cnt; 738 tgtobj->rx.amsdu_cnt += srcobj->stats.rx.ampdu_cnt; 739 tgtobj->rx.non_amsdu_cnt += srcobj->stats.rx.non_amsdu_cnt; 740 tgtobj->rx.amsdu_cnt += srcobj->stats.rx.amsdu_cnt; 741 tgtobj->rx.nawds_mcast_drop += srcobj->stats.rx.nawds_mcast_drop; 742 tgtobj->rx.to_stack.num += srcobj->stats.rx.to_stack.num; 743 tgtobj->rx.to_stack.bytes += srcobj->stats.rx.to_stack.bytes; 744 745 for (i = 0; i < CDP_MAX_RX_RINGS; i++) { 746 tgtobj->rx.rcvd_reo[i].num += 747 srcobj->stats.rx.rcvd_reo[i].num; 748 tgtobj->rx.rcvd_reo[i].bytes += 749 srcobj->stats.rx.rcvd_reo[i].bytes; 750 } 751 752 srcobj->stats.rx.unicast.num = 753 srcobj->stats.rx.to_stack.num - 754 srcobj->stats.rx.multicast.num; 755 srcobj->stats.rx.unicast.bytes = 756 srcobj->stats.rx.to_stack.bytes - 757 srcobj->stats.rx.multicast.bytes; 758 759 tgtobj->rx.unicast.num += srcobj->stats.rx.unicast.num; 760 tgtobj->rx.unicast.bytes += srcobj->stats.rx.unicast.bytes; 761 tgtobj->rx.multicast.num += srcobj->stats.rx.multicast.num; 762 tgtobj->rx.multicast.bytes += srcobj->stats.rx.multicast.bytes; 763 tgtobj->rx.bcast.num += srcobj->stats.rx.bcast.num; 764 tgtobj->rx.bcast.bytes += srcobj->stats.rx.bcast.bytes; 765 tgtobj->rx.raw.num += srcobj->stats.rx.raw.num; 766 tgtobj->rx.raw.bytes += srcobj->stats.rx.raw.bytes; 767 tgtobj->rx.intra_bss.pkts.num += 768 srcobj->stats.rx.intra_bss.pkts.num; 769 tgtobj->rx.intra_bss.pkts.bytes += 770 srcobj->stats.rx.intra_bss.pkts.bytes; 771 tgtobj->rx.intra_bss.fail.num += 772 srcobj->stats.rx.intra_bss.fail.num; 773 tgtobj->rx.intra_bss.fail.bytes += 774 srcobj->stats.rx.intra_bss.fail.bytes; 775 tgtobj->tx.last_ack_rssi = 776 srcobj->stats.tx.last_ack_rssi; 777 tgtobj->rx.mec_drop.num += srcobj->stats.rx.mec_drop.num; 778 tgtobj->rx.mec_drop.bytes += srcobj->stats.rx.mec_drop.bytes; 779 tgtobj->rx.multipass_rx_pkt_drop += 780 srcobj->stats.rx.multipass_rx_pkt_drop; 781 } 782 783 #define DP_UPDATE_STATS(_tgtobj, _srcobj) \ 784 do { \ 785 uint8_t i; \ 786 uint8_t pream_type; \ 787 for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ 788 for (i = 0; i < MAX_MCS; i++) { \ 789 DP_STATS_AGGR(_tgtobj, _srcobj, \ 790 tx.pkt_type[pream_type].mcs_count[i]); \ 791 DP_STATS_AGGR(_tgtobj, _srcobj, \ 792 rx.pkt_type[pream_type].mcs_count[i]); \ 793 } \ 794 } \ 795 \ 796 for (i = 0; i < MAX_BW; i++) { \ 797 DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \ 798 DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \ 799 } \ 800 \ 801 for (i = 0; i < SS_COUNT; i++) { \ 802 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \ 803 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \ 804 } \ 805 for (i = 0; i < WME_AC_MAX; i++) { \ 806 DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \ 807 DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \ 808 DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \ 809 \ 810 } \ 811 \ 812 for (i = 0; i < MAX_GI; i++) { \ 813 DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \ 814 DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \ 815 } \ 816 \ 817 for (i = 0; i < MAX_RECEPTION_TYPES; i++) \ 818 DP_STATS_AGGR(_tgtobj, _srcobj, rx.reception_type[i]); \ 819 \ 820 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \ 821 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \ 822 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \ 823 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \ 824 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \ 825 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \ 826 DP_STATS_AGGR(_tgtobj, _srcobj, tx.nawds_mcast_drop); \ 827 DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \ 828 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \ 829 DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \ 830 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \ 831 DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \ 832 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \ 833 DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \ 834 DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_ampdu_cnt); \ 835 DP_STATS_AGGR(_tgtobj, _srcobj, tx.ampdu_cnt); \ 836 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.dropped.fw_rem); \ 837 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \ 838 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \ 839 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \ 840 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \ 841 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \ 842 DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \ 843 \ 844 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \ 845 if (_srcobj->stats.rx.rssi != 0) \ 846 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rssi); \ 847 DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \ 848 DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \ 849 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \ 850 DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \ 851 DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \ 852 DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \ 853 DP_STATS_AGGR(_tgtobj, _srcobj, rx.nawds_mcast_drop); \ 854 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \ 855 \ 856 for (i = 0; i < CDP_MAX_RX_RINGS; i++) \ 857 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \ 858 \ 859 _srcobj->stats.rx.unicast.num = \ 860 _srcobj->stats.rx.to_stack.num - \ 861 _srcobj->stats.rx.multicast.num; \ 862 _srcobj->stats.rx.unicast.bytes = \ 863 _srcobj->stats.rx.to_stack.bytes - \ 864 _srcobj->stats.rx.multicast.bytes; \ 865 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \ 866 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \ 867 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \ 868 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \ 869 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \ 870 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \ 871 DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.mec_drop); \ 872 \ 873 _tgtobj->stats.tx.last_ack_rssi = \ 874 _srcobj->stats.tx.last_ack_rssi; \ 875 DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \ 876 } while (0) 877 878 extern int dp_peer_find_attach(struct dp_soc *soc); 879 extern void dp_peer_find_detach(struct dp_soc *soc); 880 extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer); 881 extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer); 882 extern void dp_peer_find_hash_erase(struct dp_soc *soc); 883 884 /* 885 * dp_peer_ppdu_delayed_ba_init() Initialize ppdu in peer 886 * @peer: Datapath peer 887 * 888 * return: void 889 */ 890 void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer); 891 892 /* 893 * dp_peer_ppdu_delayed_ba_cleanup() free ppdu allocated in peer 894 * @peer: Datapath peer 895 * 896 * return: void 897 */ 898 void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer); 899 900 extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); 901 void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer); 902 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, 903 bool reuse); 904 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, 905 bool reuse); 906 void dp_peer_unref_delete(struct dp_peer *peer); 907 extern void *dp_find_peer_by_addr(struct cdp_pdev *dev, 908 uint8_t *peer_mac_addr); 909 extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc, 910 uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id); 911 912 #ifdef DP_PEER_EXTENDED_API 913 /** 914 * dp_register_peer() - Register peer into physical device 915 * @soc_hdl - data path soc handle 916 * @pdev_id - device instance id 917 * @sta_desc - peer description 918 * 919 * Register peer into physical device 920 * 921 * Return: QDF_STATUS_SUCCESS registration success 922 * QDF_STATUS_E_FAULT peer not found 923 */ 924 QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 925 struct ol_txrx_desc_type *sta_desc); 926 927 /** 928 * dp_clear_peer() - remove peer from physical device 929 * @soc_hdl - data path soc handle 930 * @pdev_id - device instance id 931 * @peer_addr - peer mac address 932 * 933 * remove peer from physical device 934 * 935 * Return: QDF_STATUS_SUCCESS registration success 936 * QDF_STATUS_E_FAULT peer not found 937 */ 938 QDF_STATUS dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 939 struct qdf_mac_addr peer_addr); 940 941 /* 942 * dp_find_peer_exist - find peer if already exists 943 * @soc: datapath soc handle 944 * @pdev_id: physical device instance id 945 * @peer_mac_addr: peer mac address 946 * 947 * Return: true or false 948 */ 949 bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, 950 uint8_t *peer_addr); 951 952 /* 953 * dp_find_peer_exist_on_vdev - find if peer exists on the given vdev 954 * @soc: datapath soc handle 955 * @vdev_id: vdev instance id 956 * @peer_mac_addr: peer mac address 957 * 958 * Return: true or false 959 */ 960 bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, 961 uint8_t *peer_addr); 962 963 /* 964 * dp_find_peer_exist_on_other_vdev - find if peer exists 965 * on other than the given vdev 966 * @soc: datapath soc handle 967 * @vdev_id: vdev instance id 968 * @peer_mac_addr: peer mac address 969 * @max_bssid: max number of bssids 970 * 971 * Return: true or false 972 */ 973 bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl, 974 uint8_t vdev_id, uint8_t *peer_addr, 975 uint16_t max_bssid); 976 977 void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle, 978 struct cdp_vdev *vdev, 979 uint8_t *peer_addr); 980 981 /** 982 * dp_peer_state_update() - update peer local state 983 * @pdev - data path device instance 984 * @peer_addr - peer mac address 985 * @state - new peer local state 986 * 987 * update peer local state 988 * 989 * Return: QDF_STATUS_SUCCESS registration success 990 */ 991 QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc, uint8_t *peer_mac, 992 enum ol_txrx_peer_state state); 993 994 /** 995 * dp_get_vdevid() - Get virtual interface id which peer registered 996 * @soc - datapath soc handle 997 * @peer_mac - peer mac address 998 * @vdev_id - virtual interface id which peer registered 999 * 1000 * Get virtual interface id which peer registered 1001 * 1002 * Return: QDF_STATUS_SUCCESS registration success 1003 */ 1004 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 1005 uint8_t *vdev_id); 1006 struct cdp_vdev *dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle, 1007 struct qdf_mac_addr peer_addr); 1008 struct cdp_vdev *dp_get_vdev_for_peer(void *peer); 1009 uint8_t *dp_peer_get_peer_mac_addr(void *peer); 1010 1011 /** 1012 * dp_get_peer_state() - Get local peer state 1013 * @soc - datapath soc handle 1014 * @vdev_id - vdev id 1015 * @peer_mac - peer mac addr 1016 * 1017 * Get local peer state 1018 * 1019 * Return: peer status 1020 */ 1021 int dp_get_peer_state(struct cdp_soc_t *soc, uint8_t vdev_id, 1022 uint8_t *peer_mac); 1023 void dp_local_peer_id_pool_init(struct dp_pdev *pdev); 1024 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer); 1025 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); 1026 #else 1027 /** 1028 * dp_get_vdevid() - Get virtual interface id which peer registered 1029 * @soc - datapath soc handle 1030 * @peer_mac - peer mac address 1031 * @vdev_id - virtual interface id which peer registered 1032 * 1033 * Get virtual interface id which peer registered 1034 * 1035 * Return: QDF_STATUS_SUCCESS registration success 1036 */ 1037 static inline 1038 QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac, 1039 uint8_t *vdev_id) 1040 { 1041 return QDF_STATUS_E_NOSUPPORT; 1042 } 1043 1044 static inline void dp_local_peer_id_pool_init(struct dp_pdev *pdev) 1045 { 1046 } 1047 1048 static inline 1049 void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) 1050 { 1051 } 1052 1053 static inline 1054 void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) 1055 { 1056 } 1057 #endif 1058 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, 1059 uint8_t *peer_mac, uint16_t vdev_id, 1060 uint8_t tid, 1061 int status); 1062 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc, 1063 uint8_t *peer_mac, uint16_t vdev_id, 1064 uint8_t dialogtoken, uint16_t tid, 1065 uint16_t batimeout, 1066 uint16_t buffersize, 1067 uint16_t startseqnum); 1068 QDF_STATUS dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, 1069 uint8_t *peer_mac, uint16_t vdev_id, 1070 uint8_t tid, uint8_t *dialogtoken, 1071 uint16_t *statuscode, 1072 uint16_t *buffersize, 1073 uint16_t *batimeout); 1074 QDF_STATUS dp_set_addba_response(struct cdp_soc_t *cdp_soc, 1075 uint8_t *peer_mac, 1076 uint16_t vdev_id, uint8_t tid, 1077 uint16_t statuscode); 1078 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 1079 uint16_t vdev_id, int tid, 1080 uint16_t reasoncode); 1081 /* 1082 * dp_delba_tx_completion_wifi3() - Handle delba tx completion 1083 * 1084 * @cdp_soc: soc handle 1085 * @vdev_id: id of the vdev handle 1086 * @peer_mac: peer mac address 1087 * @tid: Tid number 1088 * @status: Tx completion status 1089 * Indicate status of delba Tx to DP for stats update and retry 1090 * delba if tx failed. 1091 * 1092 */ 1093 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac, 1094 uint16_t vdev_id, uint8_t tid, 1095 int status); 1096 extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, 1097 uint32_t ba_window_size, 1098 uint32_t start_seq); 1099 1100 extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, 1101 enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params, 1102 void (*callback_fn), void *data); 1103 1104 extern void dp_reo_cmdlist_destroy(struct dp_soc *soc); 1105 1106 /** 1107 * dp_reo_status_ring_handler - Handler for REO Status ring 1108 * @int_ctx: pointer to DP interrupt context 1109 * @soc: DP Soc handle 1110 * 1111 * Returns: Number of descriptors reaped 1112 */ 1113 uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, 1114 struct dp_soc *soc); 1115 void dp_aggregate_vdev_stats(struct dp_vdev *vdev, 1116 struct cdp_vdev_stats *vdev_stats); 1117 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, 1118 union hal_reo_status *reo_status); 1119 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, 1120 union hal_reo_status *reo_status); 1121 uint16_t dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id, 1122 qdf_nbuf_t nbuf, 1123 uint8_t newmac[][QDF_MAC_ADDR_SIZE], 1124 uint8_t new_mac_cnt); 1125 void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 1126 1127 void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id); 1128 QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, 1129 uint32_t stats_type_upload_mask, uint32_t config_param_0, 1130 uint32_t config_param_1, uint32_t config_param_2, 1131 uint32_t config_param_3, int cookie, int cookie_msb, 1132 uint8_t mac_id); 1133 void dp_htt_stats_print_tag(struct dp_pdev *pdev, 1134 uint8_t tag_type, uint32_t *tag_buf); 1135 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf); 1136 void dp_peer_rxtid_stats(struct dp_peer *peer, void (*callback_fn), 1137 void *cb_ctxt); 1138 QDF_STATUS 1139 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id, 1140 uint8_t *peer_mac, enum cdp_sec_type sec_type, 1141 uint32_t *rx_pn); 1142 1143 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id); 1144 1145 QDF_STATUS 1146 dp_set_michael_key(struct cdp_soc_t *soc, uint8_t vdev_id, 1147 uint8_t *peer_mac, 1148 bool is_unicast, uint32_t *key); 1149 1150 /** 1151 * dp_check_pdev_exists() - Validate pdev before use 1152 * @soc - dp soc handle 1153 * @data - pdev handle 1154 * 1155 * Return: 0 - success/invalid - failure 1156 */ 1157 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data); 1158 1159 /** 1160 * dp_update_delay_stats() - Update delay statistics in structure 1161 * and fill min, max and avg delay 1162 * @pdev: pdev handle 1163 * @delay: delay in ms 1164 * @tid: tid value 1165 * @mode: type of tx delay mode 1166 * @ring id: ring number 1167 * 1168 * Return: none 1169 */ 1170 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay, 1171 uint8_t tid, uint8_t mode, uint8_t ring_id); 1172 1173 /** 1174 * dp_print_ring_stats(): Print tail and head pointer 1175 * @pdev: DP_PDEV handle 1176 * 1177 * Return:void 1178 */ 1179 void dp_print_ring_stats(struct dp_pdev *pdev); 1180 1181 /** 1182 * dp_print_pdev_cfg_params() - Print the pdev cfg parameters 1183 * @pdev_handle: DP pdev handle 1184 * 1185 * Return - void 1186 */ 1187 void dp_print_pdev_cfg_params(struct dp_pdev *pdev); 1188 1189 /** 1190 * dp_print_soc_cfg_params()- Dump soc wlan config parameters 1191 * @soc_handle: Soc handle 1192 * 1193 * Return: void 1194 */ 1195 void dp_print_soc_cfg_params(struct dp_soc *soc); 1196 1197 /** 1198 * dp_srng_get_str_from_ring_type() - Return string name for a ring 1199 * @ring_type: Ring 1200 * 1201 * Return: char const pointer 1202 */ 1203 const 1204 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type); 1205 1206 /* 1207 * dp_txrx_path_stats() - Function to display dump stats 1208 * @soc - soc handle 1209 * 1210 * return: none 1211 */ 1212 void dp_txrx_path_stats(struct dp_soc *soc); 1213 1214 /* 1215 * dp_print_per_ring_stats(): Packet count per ring 1216 * @soc - soc handle 1217 * 1218 * Return - None 1219 */ 1220 void dp_print_per_ring_stats(struct dp_soc *soc); 1221 1222 /** 1223 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level 1224 * @pdev: DP PDEV handle 1225 * 1226 * return: void 1227 */ 1228 void dp_aggregate_pdev_stats(struct dp_pdev *pdev); 1229 1230 /** 1231 * dp_print_rx_rates(): Print Rx rate stats 1232 * @vdev: DP_VDEV handle 1233 * 1234 * Return:void 1235 */ 1236 void dp_print_rx_rates(struct dp_vdev *vdev); 1237 1238 /** 1239 * dp_print_tx_rates(): Print tx rates 1240 * @vdev: DP_VDEV handle 1241 * 1242 * Return:void 1243 */ 1244 void dp_print_tx_rates(struct dp_vdev *vdev); 1245 1246 /** 1247 * dp_print_peer_stats():print peer stats 1248 * @peer: DP_PEER handle 1249 * 1250 * return void 1251 */ 1252 void dp_print_peer_stats(struct dp_peer *peer); 1253 1254 /** 1255 * dp_print_pdev_tx_stats(): Print Pdev level TX stats 1256 * @pdev: DP_PDEV Handle 1257 * 1258 * Return:void 1259 */ 1260 void 1261 dp_print_pdev_tx_stats(struct dp_pdev *pdev); 1262 1263 /** 1264 * dp_print_pdev_rx_stats(): Print Pdev level RX stats 1265 * @pdev: DP_PDEV Handle 1266 * 1267 * Return: void 1268 */ 1269 void 1270 dp_print_pdev_rx_stats(struct dp_pdev *pdev); 1271 1272 /** 1273 * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats 1274 * @pdev: DP_PDEV Handle 1275 * 1276 * Return: void 1277 */ 1278 void 1279 dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev); 1280 1281 /** 1282 * dp_print_soc_tx_stats(): Print SOC level stats 1283 * @soc DP_SOC Handle 1284 * 1285 * Return: void 1286 */ 1287 void dp_print_soc_tx_stats(struct dp_soc *soc); 1288 1289 /** 1290 * dp_print_soc_interrupt_stats() - Print interrupt stats for the soc 1291 * @soc: dp_soc handle 1292 * 1293 * Return: None 1294 */ 1295 void dp_print_soc_interrupt_stats(struct dp_soc *soc); 1296 1297 /** 1298 * dp_print_soc_rx_stats: Print SOC level Rx stats 1299 * @soc: DP_SOC Handle 1300 * 1301 * Return:void 1302 */ 1303 void dp_print_soc_rx_stats(struct dp_soc *soc); 1304 1305 /** 1306 * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac 1307 * 1308 * @mac_id: MAC id 1309 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 1310 * 1311 * Single pdev using both MACs will operate on both MAC rings, 1312 * which is the case for MCL. 1313 * For WIN each PDEV will operate one ring, so index is zero. 1314 * 1315 */ 1316 static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id) 1317 { 1318 if (mac_id && pdev_id) { 1319 qdf_print("Both mac_id and pdev_id cannot be non zero"); 1320 QDF_BUG(0); 1321 return 0; 1322 } 1323 return (mac_id + pdev_id); 1324 } 1325 1326 /** 1327 * dp_get_lmac_id_for_pdev_id() - Return lmac id corresponding to host pdev id 1328 * @soc: soc pointer 1329 * @mac_id: MAC id 1330 * @pdev_id: pdev_id corresponding to pdev, 0 for MCL 1331 * 1332 * For MCL, Single pdev using both MACs will operate on both MAC rings. 1333 * 1334 * For WIN, each PDEV will operate one ring. 1335 * 1336 */ 1337 static inline int 1338 dp_get_lmac_id_for_pdev_id 1339 (struct dp_soc *soc, uint32_t mac_id, uint32_t pdev_id) 1340 { 1341 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 1342 if (mac_id && pdev_id) { 1343 qdf_print("Both mac_id and pdev_id cannot be non zero"); 1344 QDF_BUG(0); 1345 return 0; 1346 } 1347 return (mac_id + pdev_id); 1348 } 1349 1350 return soc->pdev_list[pdev_id]->lmac_id; 1351 } 1352 1353 /** 1354 * dp_get_pdev_for_lmac_id() - Return pdev pointer corresponding to lmac id 1355 * @soc: soc pointer 1356 * @lmac_id: LMAC id 1357 * 1358 * For MCL, Single pdev exists 1359 * 1360 * For WIN, each PDEV will operate one ring. 1361 * 1362 */ 1363 static inline struct dp_pdev * 1364 dp_get_pdev_for_lmac_id(struct dp_soc *soc, uint32_t lmac_id) 1365 { 1366 int i = 0; 1367 1368 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { 1369 i = wlan_cfg_get_pdev_idx(soc->wlan_cfg_ctx, lmac_id); 1370 qdf_assert_always(i < MAX_PDEV_CNT); 1371 1372 return soc->pdev_list[i]; 1373 } 1374 1375 /* Typically for MCL as there only 1 PDEV*/ 1376 return soc->pdev_list[0]; 1377 } 1378 1379 /** 1380 * dp_get_target_pdev_id_for_host_pdev_id() - Return target pdev corresponding 1381 * to host pdev id 1382 * @soc: soc pointer 1383 * @mac_for_pdev: pdev_id corresponding to host pdev for WIN, mac id for MCL 1384 * 1385 * returns target pdev_id for host pdev id. For WIN, this is derived through 1386 * a two step process: 1387 * 1. Get lmac_id corresponding to host pdev_id (lmac_id can change 1388 * during mode switch) 1389 * 2. Get target pdev_id (set up during WMI ready) from lmac_id 1390 * 1391 * For MCL, return the offset-1 translated mac_id 1392 */ 1393 static inline int 1394 dp_get_target_pdev_id_for_host_pdev_id 1395 (struct dp_soc *soc, uint32_t mac_for_pdev) 1396 { 1397 struct dp_pdev *pdev; 1398 1399 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 1400 return DP_SW2HW_MACID(mac_for_pdev); 1401 1402 pdev = soc->pdev_list[mac_for_pdev]; 1403 1404 /*non-MCL case, get original target_pdev mapping*/ 1405 return wlan_cfg_get_target_pdev_id(soc->wlan_cfg_ctx, pdev->lmac_id); 1406 } 1407 1408 /** 1409 * dp_get_host_pdev_id_for_target_pdev_id() - Return host pdev corresponding 1410 * to target pdev id 1411 * @soc: soc pointer 1412 * @pdev_id: pdev_id corresponding to target pdev 1413 * 1414 * returns host pdev_id for target pdev id. For WIN, this is derived through 1415 * a two step process: 1416 * 1. Get lmac_id corresponding to target pdev_id 1417 * 2. Get host pdev_id (set up during WMI ready) from lmac_id 1418 * 1419 * For MCL, return the 0-offset pdev_id 1420 */ 1421 static inline int 1422 dp_get_host_pdev_id_for_target_pdev_id 1423 (struct dp_soc *soc, uint32_t pdev_id) 1424 { 1425 struct dp_pdev *pdev; 1426 int lmac_id; 1427 1428 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 1429 return DP_HW2SW_MACID(pdev_id); 1430 1431 /*non-MCL case, get original target_lmac mapping from target pdev*/ 1432 lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, 1433 DP_HW2SW_MACID(pdev_id)); 1434 1435 /*Get host pdev from lmac*/ 1436 pdev = dp_get_pdev_for_lmac_id(soc, lmac_id); 1437 1438 return pdev->pdev_id; 1439 } 1440 1441 /* 1442 * dp_get_mac_id_for_mac() - Return mac corresponding WIN and MCL mac_ids 1443 * 1444 * @soc: handle to DP soc 1445 * @mac_id: MAC id 1446 * 1447 * Single pdev using both MACs will operate on both MAC rings, 1448 * which is the case for MCL. 1449 * For WIN each PDEV will operate one ring, so index is zero. 1450 * 1451 */ 1452 static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id) 1453 { 1454 /* 1455 * Single pdev using both MACs will operate on both MAC rings, 1456 * which is the case for MCL. 1457 */ 1458 if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) 1459 return mac_id; 1460 1461 /* For WIN each PDEV will operate one ring, so index is zero. */ 1462 return 0; 1463 } 1464 1465 bool dp_is_soc_reinit(struct dp_soc *soc); 1466 1467 /* 1468 * dp_is_subtype_data() - check if the frame subtype is data 1469 * 1470 * @frame_ctrl: Frame control field 1471 * 1472 * check the frame control field and verify if the packet 1473 * is a data packet. 1474 * 1475 * Return: true or false 1476 */ 1477 static inline bool dp_is_subtype_data(uint16_t frame_ctrl) 1478 { 1479 if (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_TYPE_MASK) == 1480 QDF_IEEE80211_FC0_TYPE_DATA) && 1481 (((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 1482 QDF_IEEE80211_FC0_SUBTYPE_DATA) || 1483 ((qdf_cpu_to_le16(frame_ctrl) & QDF_IEEE80211_FC0_SUBTYPE_MASK) == 1484 QDF_IEEE80211_FC0_SUBTYPE_QOS))) { 1485 return true; 1486 } 1487 1488 return false; 1489 } 1490 1491 #ifdef WDI_EVENT_ENABLE 1492 QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 1493 uint32_t stats_type_upload_mask, 1494 uint8_t mac_id); 1495 1496 int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 1497 wdi_event_subscribe *event_cb_sub_handle, 1498 uint32_t event); 1499 1500 int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 1501 wdi_event_subscribe *event_cb_sub_handle, 1502 uint32_t event); 1503 1504 void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc, 1505 void *data, u_int16_t peer_id, 1506 int status, u_int8_t pdev_id); 1507 1508 int dp_wdi_event_attach(struct dp_pdev *txrx_pdev); 1509 int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); 1510 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, 1511 bool enable); 1512 1513 /** 1514 * dp_get_pldev() - function to get pktlog device handle 1515 * @soc_hdl: datapath soc handle 1516 * @pdev_id: physical device id 1517 * 1518 * Return: pktlog device handle or NULL 1519 */ 1520 void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); 1521 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn); 1522 1523 static inline void 1524 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, 1525 void *cb_context, 1526 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 1527 uint8_t pipe_id) 1528 { 1529 struct hif_msg_callbacks hif_pipe_callbacks; 1530 1531 /* TODO: Temporary change to bypass HTC connection for this new 1532 * HIF pipe, which will be used for packet log and other high- 1533 * priority HTT messages. Proper HTC connection to be added 1534 * later once required FW changes are available 1535 */ 1536 hif_pipe_callbacks.rxCompletionHandler = callback; 1537 hif_pipe_callbacks.Context = cb_context; 1538 hif_update_pipe_callback(dp_soc->hif_handle, 1539 DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); 1540 } 1541 1542 QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, struct dp_peer *peer); 1543 1544 #else 1545 static inline int dp_wdi_event_unsub(struct cdp_soc_t *soc, uint8_t pdev_id, 1546 wdi_event_subscribe *event_cb_sub_handle, 1547 uint32_t event) 1548 { 1549 return 0; 1550 } 1551 1552 static inline int dp_wdi_event_sub(struct cdp_soc_t *soc, uint8_t pdev_id, 1553 wdi_event_subscribe *event_cb_sub_handle, 1554 uint32_t event) 1555 { 1556 return 0; 1557 } 1558 1559 static inline 1560 void dp_wdi_event_handler(enum WDI_EVENT event, 1561 struct dp_soc *soc, 1562 void *data, u_int16_t peer_id, 1563 int status, u_int8_t pdev_id) 1564 { 1565 } 1566 1567 static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev) 1568 { 1569 return 0; 1570 } 1571 1572 static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) 1573 { 1574 return 0; 1575 } 1576 1577 static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, 1578 bool enable) 1579 { 1580 return 0; 1581 } 1582 static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, 1583 uint32_t stats_type_upload_mask, uint8_t mac_id) 1584 { 1585 return 0; 1586 } 1587 1588 static inline void 1589 dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, 1590 QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), 1591 uint8_t pipe_id) 1592 { 1593 } 1594 1595 static inline QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, 1596 struct dp_peer *peer) 1597 { 1598 return QDF_STATUS_SUCCESS; 1599 } 1600 1601 #endif /* CONFIG_WIN */ 1602 1603 #ifdef VDEV_PEER_PROTOCOL_COUNT 1604 /** 1605 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 1606 * @vdev: VDEV DP object 1607 * @nbuf: data packet 1608 * @peer: Peer DP object 1609 * @is_egress: whether egress or ingress 1610 * @is_rx: whether rx or tx 1611 * 1612 * This function updates the per-peer protocol counters 1613 * Return: void 1614 */ 1615 void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, 1616 qdf_nbuf_t nbuf, 1617 struct dp_peer *peer, 1618 bool is_egress, 1619 bool is_rx); 1620 1621 /** 1622 * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters 1623 * @soc: SOC DP object 1624 * @vdev_id: vdev_id 1625 * @nbuf: data packet 1626 * @is_egress: whether egress or ingress 1627 * @is_rx: whether rx or tx 1628 * 1629 * This function updates the per-peer protocol counters 1630 * Return: void 1631 */ 1632 1633 void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc, 1634 int8_t vdev_id, 1635 qdf_nbuf_t nbuf, 1636 bool is_egress, 1637 bool is_rx); 1638 1639 #else 1640 #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, peer, \ 1641 is_egress, is_rx) 1642 #endif 1643 1644 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 1645 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl); 1646 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, 1647 bool force); 1648 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ 1649 1650 #ifdef PEER_PROTECTED_ACCESS 1651 /** 1652 * dp_peer_unref_del_find_by_id() - dec ref and del peer if ref count is 1653 * taken by dp_peer_find_by_id 1654 * @peer: peer context 1655 * 1656 * Return: none 1657 */ 1658 static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer) 1659 { 1660 dp_peer_unref_delete(peer); 1661 } 1662 #else 1663 static inline void dp_peer_unref_del_find_by_id(struct dp_peer *peer) 1664 { 1665 } 1666 #endif 1667 1668 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY 1669 /** 1670 * dp_srng_access_start() - Wrapper function to log access start of a hal ring 1671 * @int_ctx: pointer to DP interrupt context 1672 * @soc: DP Soc handle 1673 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 1674 * 1675 * Return: 0 on success; error on failure 1676 */ 1677 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 1678 hal_ring_handle_t hal_ring_hdl); 1679 1680 /** 1681 * dp_srng_access_end() - Wrapper function to log access end of a hal ring 1682 * @int_ctx: pointer to DP interrupt context 1683 * @soc: DP Soc handle 1684 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced 1685 * 1686 * Return: void 1687 */ 1688 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc, 1689 hal_ring_handle_t hal_ring_hdl); 1690 1691 #else 1692 1693 static inline int dp_srng_access_start(struct dp_intr *int_ctx, 1694 struct dp_soc *dp_soc, 1695 hal_ring_handle_t hal_ring_hdl) 1696 { 1697 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 1698 1699 return hal_srng_access_start(hal_soc, hal_ring_hdl); 1700 } 1701 1702 static inline void dp_srng_access_end(struct dp_intr *int_ctx, 1703 struct dp_soc *dp_soc, 1704 hal_ring_handle_t hal_ring_hdl) 1705 { 1706 hal_soc_handle_t hal_soc = dp_soc->hal_soc; 1707 1708 return hal_srng_access_end(hal_soc, hal_ring_hdl); 1709 } 1710 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */ 1711 1712 #ifdef QCA_ENH_V3_STATS_SUPPORT 1713 /** 1714 * dp_pdev_print_delay_stats(): Print pdev level delay stats 1715 * @pdev: DP_PDEV handle 1716 * 1717 * Return:void 1718 */ 1719 void dp_pdev_print_delay_stats(struct dp_pdev *pdev); 1720 1721 /** 1722 * dp_pdev_print_tid_stats(): Print pdev level tid stats 1723 * @pdev: DP_PDEV handle 1724 * 1725 * Return:void 1726 */ 1727 void dp_pdev_print_tid_stats(struct dp_pdev *pdev); 1728 #endif /* CONFIG_WIN */ 1729 1730 void dp_soc_set_txrx_ring_map(struct dp_soc *soc); 1731 1732 #ifndef WLAN_TX_PKT_CAPTURE_ENH 1733 /** 1734 * dp_tx_ppdu_stats_attach - Initialize Tx PPDU stats and enhanced capture 1735 * @pdev: DP PDEV 1736 * 1737 * Return: none 1738 */ 1739 static inline void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev) 1740 { 1741 } 1742 1743 /** 1744 * dp_tx_ppdu_stats_detach - Cleanup Tx PPDU stats and enhanced capture 1745 * @pdev: DP PDEV 1746 * 1747 * Return: none 1748 */ 1749 static inline void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev) 1750 { 1751 } 1752 1753 /** 1754 * dp_tx_ppdu_stats_process - Deferred PPDU stats handler 1755 * @context: Opaque work context (PDEV) 1756 * 1757 * Return: none 1758 */ 1759 static inline void dp_tx_ppdu_stats_process(void *context) 1760 { 1761 } 1762 1763 /** 1764 * dp_tx_add_to_comp_queue() - add completion msdu to queue 1765 * @soc: DP Soc handle 1766 * @tx_desc: software Tx descriptor 1767 * @ts : Tx completion status from HAL/HTT descriptor 1768 * @peer: DP peer 1769 * 1770 * Return: none 1771 */ 1772 static inline 1773 QDF_STATUS dp_tx_add_to_comp_queue(struct dp_soc *soc, 1774 struct dp_tx_desc_s *desc, 1775 struct hal_tx_completion_status *ts, 1776 struct dp_peer *peer) 1777 { 1778 return QDF_STATUS_E_FAILURE; 1779 } 1780 1781 /* 1782 * dp_tx_capture_htt_frame_counter: increment counter for htt_frame_type 1783 * pdev: DP pdev handle 1784 * htt_frame_type: htt frame type received from fw 1785 * 1786 * return: void 1787 */ 1788 static inline 1789 void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev, 1790 uint32_t htt_frame_type) 1791 { 1792 } 1793 1794 /* 1795 * dp_tx_cature_stats: print tx capture stats 1796 * @pdev: DP PDEV handle 1797 * 1798 * return: void 1799 */ 1800 static inline 1801 void dp_print_pdev_tx_capture_stats(struct dp_pdev *pdev) 1802 { 1803 } 1804 1805 #endif 1806 1807 #ifdef FEATURE_PERPKT_INFO 1808 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf); 1809 #else 1810 static inline 1811 void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf) 1812 { 1813 } 1814 #endif 1815 1816 /** 1817 * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev 1818 * @vdev: DP vdev handle 1819 * 1820 * Return: struct cdp_vdev pointer 1821 */ 1822 static inline 1823 struct cdp_vdev *dp_vdev_to_cdp_vdev(struct dp_vdev *vdev) 1824 { 1825 return (struct cdp_vdev *)vdev; 1826 } 1827 1828 /** 1829 * dp_pdev_to_cdp_pdev() - typecast dp pdev to cdp pdev 1830 * @pdev: DP pdev handle 1831 * 1832 * Return: struct cdp_pdev pointer 1833 */ 1834 static inline 1835 struct cdp_pdev *dp_pdev_to_cdp_pdev(struct dp_pdev *pdev) 1836 { 1837 return (struct cdp_pdev *)pdev; 1838 } 1839 1840 /** 1841 * dp_soc_to_cdp_soc() - typecast dp psoc to cdp psoc 1842 * @psoc: DP psoc handle 1843 * 1844 * Return: struct cdp_soc pointer 1845 */ 1846 static inline 1847 struct cdp_soc *dp_soc_to_cdp_soc(struct dp_soc *psoc) 1848 { 1849 return (struct cdp_soc *)psoc; 1850 } 1851 1852 /** 1853 * dp_soc_to_cdp_soc_t() - typecast dp psoc to 1854 * ol txrx soc handle 1855 * @psoc: DP psoc handle 1856 * 1857 * Return: struct cdp_soc_t pointer 1858 */ 1859 static inline 1860 struct cdp_soc_t *dp_soc_to_cdp_soc_t(struct dp_soc *psoc) 1861 { 1862 return (struct cdp_soc_t *)psoc; 1863 } 1864 1865 /** 1866 * cdp_soc_t_to_dp_soc() - typecast cdp_soc_t to 1867 * dp soc handle 1868 * @psoc: CDP psoc handle 1869 * 1870 * Return: struct dp_soc pointer 1871 */ 1872 static inline 1873 struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc) 1874 { 1875 return (struct dp_soc *)psoc; 1876 } 1877 1878 #ifdef WLAN_SUPPORT_RX_FLOW_TAG 1879 /** 1880 * dp_rx_flow_update_fse_stats() - Update a flow's statistics 1881 * @pdev: pdev handle 1882 * @flow_id: flow index (truncated hash) in the Rx FST 1883 * 1884 * Return: Success when flow statistcs is updated, error on failure 1885 */ 1886 QDF_STATUS dp_rx_flow_get_fse_stats(struct dp_pdev *pdev, 1887 struct cdp_rx_flow_info *rx_flow_info, 1888 struct cdp_flow_stats *stats); 1889 1890 /** 1891 * dp_rx_flow_delete_entry() - Delete a flow entry from flow search table 1892 * @pdev: pdev handle 1893 * @rx_flow_info: DP flow parameters 1894 * 1895 * Return: Success when flow is deleted, error on failure 1896 */ 1897 QDF_STATUS dp_rx_flow_delete_entry(struct dp_pdev *pdev, 1898 struct cdp_rx_flow_info *rx_flow_info); 1899 1900 /** 1901 * dp_rx_flow_add_entry() - Add a flow entry to flow search table 1902 * @pdev: DP pdev instance 1903 * @rx_flow_info: DP flow paramaters 1904 * 1905 * Return: Success when flow is added, no-memory or already exists on error 1906 */ 1907 QDF_STATUS dp_rx_flow_add_entry(struct dp_pdev *pdev, 1908 struct cdp_rx_flow_info *rx_flow_info); 1909 1910 /** 1911 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters 1912 * @soc: SoC handle 1913 * @pdev: Pdev handle 1914 * 1915 * Return: Handle to flow search table entry 1916 */ 1917 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev); 1918 1919 /** 1920 * dp_rx_fst_detach() - De-initialize Rx FST 1921 * @soc: SoC handle 1922 * @pdev: Pdev handle 1923 * 1924 * Return: None 1925 */ 1926 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev); 1927 1928 /** 1929 * dp_rx_flow_send_fst_fw_setup() - Program FST parameters in FW/HW post-attach 1930 * @soc: SoC handle 1931 * @pdev: Pdev handle 1932 * 1933 * Return: Success when fst parameters are programmed in FW, error otherwise 1934 */ 1935 QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc, 1936 struct dp_pdev *pdev); 1937 #else 1938 /** 1939 * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters 1940 * @soc: SoC handle 1941 * @pdev: Pdev handle 1942 * 1943 * Return: Handle to flow search table entry 1944 */ 1945 static inline 1946 QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev) 1947 { 1948 return QDF_STATUS_SUCCESS; 1949 } 1950 1951 /** 1952 * dp_rx_fst_detach() - De-initialize Rx FST 1953 * @soc: SoC handle 1954 * @pdev: Pdev handle 1955 * 1956 * Return: None 1957 */ 1958 static inline 1959 void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev) 1960 { 1961 } 1962 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */ 1963 1964 /** 1965 * dp_get_vdev_from_soc_vdev_id_wifi3() - Returns vdev object given the vdev id 1966 * @soc: core DP soc context 1967 * @vdev_id: vdev id from vdev object can be retrieved 1968 * 1969 * Return: struct dp_vdev*: Pointer to DP vdev object 1970 */ 1971 static inline struct dp_vdev * 1972 dp_get_vdev_from_soc_vdev_id_wifi3(struct dp_soc *soc, 1973 uint8_t vdev_id) 1974 { 1975 if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT)) 1976 return NULL; 1977 1978 return soc->vdev_id_map[vdev_id]; 1979 } 1980 1981 /** 1982 * dp_get_pdev_from_soc_pdev_id_wifi3() - Returns pdev object given the pdev id 1983 * @soc: core DP soc context 1984 * @pdev_id: pdev id from pdev object can be retrieved 1985 * 1986 * Return: struct dp_pdev*: Pointer to DP pdev object 1987 */ 1988 static inline struct dp_pdev * 1989 dp_get_pdev_from_soc_pdev_id_wifi3(struct dp_soc *soc, 1990 uint8_t pdev_id) 1991 { 1992 if (qdf_unlikely(pdev_id >= MAX_PDEV_CNT)) 1993 return NULL; 1994 1995 return soc->pdev_list[pdev_id]; 1996 } 1997 1998 /* 1999 * dp_rx_tid_update_wifi3() – Update receive TID state 2000 * @peer: Datapath peer handle 2001 * @tid: TID 2002 * @ba_window_size: BlockAck window size 2003 * @start_seq: Starting sequence number 2004 * 2005 * Return: QDF_STATUS code 2006 */ 2007 QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t 2008 ba_window_size, uint32_t start_seq); 2009 2010 /** 2011 * dp_get_peer_mac_list(): function to get peer mac list of vdev 2012 * @soc: Datapath soc handle 2013 * @vdev_id: vdev id 2014 * @newmac: Table of the clients mac 2015 * @mac_cnt: No. of MACs required 2016 * 2017 * return: no of clients 2018 */ 2019 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, 2020 u_int8_t newmac[][QDF_MAC_ADDR_SIZE], 2021 u_int16_t mac_cnt); 2022 /* 2023 * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported 2024 * @soc: DP SoC context 2025 * @max_mac_rings: No of MAC rings 2026 * 2027 * Return: None 2028 */ 2029 void dp_is_hw_dbs_enable(struct dp_soc *soc, 2030 int *max_mac_rings); 2031 2032 2033 #endif /* #ifndef _DP_INTERNAL_H_ */ 2034