1 /* 2 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #ifndef _DP_TYPES_H_ 20 #define _DP_TYPES_H_ 21 22 #include <qdf_types.h> 23 #include <qdf_nbuf.h> 24 #include <qdf_lock.h> 25 #include <qdf_atomic.h> 26 #include <qdf_util.h> 27 #include <qdf_list.h> 28 #include <qdf_lro.h> 29 #include <queue.h> 30 #include <htt_common.h> 31 32 #include <cdp_txrx_cmn.h> 33 #ifdef DP_MOB_DEFS 34 #include <cds_ieee80211_common.h> 35 #endif 36 #include <wdi_event_api.h> /* WDI subscriber event list */ 37 38 #include "hal_hw_headers.h" 39 #include <hal_tx.h> 40 #include <hal_reo.h> 41 #include "wlan_cfg.h" 42 #include "hal_rx.h" 43 #include <hal_api.h> 44 #include <hal_api_mon.h> 45 #include "hal_rx.h" 46 //#include "hal_rx_flow.h" 47 48 #define MAX_BW 7 49 #define MAX_RETRIES 4 50 #define MAX_RECEPTION_TYPES 4 51 52 #ifndef REMOVE_PKT_LOG 53 #include <pktlog.h> 54 #endif 55 56 #ifdef WLAN_TX_PKT_CAPTURE_ENH 57 #include "dp_tx_capture.h" 58 #endif 59 60 #define REPT_MU_MIMO 1 61 #define REPT_MU_OFDMA_MIMO 3 62 #define DP_VO_TID 6 63 /** MAX TID MAPS AVAILABLE PER PDEV */ 64 #define DP_MAX_TID_MAPS 16 65 /** pad DSCP_TID_MAP_MAX with 6 to fix oob issue */ 66 #define DSCP_TID_MAP_MAX (64 + 6) 67 #define 
DP_IP_DSCP_SHIFT 2 68 #define DP_IP_DSCP_MASK 0x3f 69 #define DP_FC0_SUBTYPE_QOS 0x80 70 #define DP_QOS_TID 0x0f 71 #define DP_IPV6_PRIORITY_SHIFT 20 72 #define MAX_MON_LINK_DESC_BANKS 2 73 #define DP_VDEV_ALL 0xff 74 75 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1) 76 #define MAX_PDEV_CNT 1 77 #else 78 #define MAX_PDEV_CNT 3 79 #endif 80 81 /* Max no. of VDEV per PSOC */ 82 #ifdef WLAN_PSOC_MAX_VDEVS 83 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS 84 #else 85 #define MAX_VDEV_CNT 51 86 #endif 87 88 #define MAX_LINK_DESC_BANKS 8 89 #define MAX_TXDESC_POOLS 4 90 #define MAX_RXDESC_POOLS 4 91 #define MAX_REO_DEST_RINGS 4 92 #define EXCEPTION_DEST_RING_ID 0 93 #define MAX_TCL_DATA_RINGS 4 94 #define MAX_IDLE_SCATTER_BUFS 16 95 #define DP_MAX_IRQ_PER_CONTEXT 12 96 #define DEFAULT_HW_PEER_ID 0xffff 97 98 #define WBM_INT_ERROR_ALL 0 99 #define WBM_INT_ERROR_REO_NULL_BUFFER 1 100 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2 101 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3 102 #define WBM_INT_ERROR_REO_BUFF_REAPED 4 103 #define MAX_WBM_INT_ERROR_REASONS 5 104 105 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS 106 /* Maximum retries for Delba per tid per peer */ 107 #define DP_MAX_DELBA_RETRY 3 108 109 #define PCP_TID_MAP_MAX 8 110 #define MAX_MU_USERS 37 111 112 #ifndef REMOVE_PKT_LOG 113 enum rx_pktlog_mode { 114 DP_RX_PKTLOG_DISABLED = 0, 115 DP_RX_PKTLOG_FULL, 116 DP_RX_PKTLOG_LITE, 117 }; 118 #endif 119 120 struct msdu_list { 121 qdf_nbuf_t head; 122 qdf_nbuf_t tail; 123 uint32 sum_len; 124 }; 125 126 struct dp_soc_cmn; 127 struct dp_pdev; 128 struct dp_vdev; 129 struct dp_tx_desc_s; 130 struct dp_soc; 131 union dp_rx_desc_list_elem_t; 132 struct cdp_peer_rate_stats_ctx; 133 struct cdp_soc_rate_stats_ctx; 134 struct dp_rx_fst; 135 136 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \ 137 TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem) 138 139 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \ 140 TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem) 141 
142 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \ 143 TAILQ_FOREACH_SAFE((_ase), &peer->ast_entry_list, ase_list_elem, (_temp_ase)) 144 145 #define DP_MUTEX_TYPE qdf_spinlock_t 146 147 #define DP_FRAME_IS_MULTICAST(_a) (*(_a) & 0x01) 148 #define DP_FRAME_IS_IPV4_MULTICAST(_a) (*(_a) == 0x01) 149 150 #define DP_FRAME_IS_IPV6_MULTICAST(_a) \ 151 ((_a)[0] == 0x33 && \ 152 (_a)[1] == 0x33) 153 154 #define DP_FRAME_IS_BROADCAST(_a) \ 155 ((_a)[0] == 0xff && \ 156 (_a)[1] == 0xff && \ 157 (_a)[2] == 0xff && \ 158 (_a)[3] == 0xff && \ 159 (_a)[4] == 0xff && \ 160 (_a)[5] == 0xff) 161 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \ 162 (_llc)->llc_ssap == 0xaa && \ 163 (_llc)->llc_un.type_snap.control == 0x3) 164 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600) 165 #define DP_FRAME_FC0_TYPE_MASK 0x0c 166 #define DP_FRAME_FC0_TYPE_DATA 0x08 167 #define DP_FRAME_IS_DATA(_frame) \ 168 (((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA) 169 170 /** 171 * macros to convert hw mac id to sw mac id: 172 * mac ids used by hardware start from a value of 1 while 173 * those in host software start from a value of 0. Use the 174 * macros below to convert between mac ids used by software and 175 * hardware 176 */ 177 #define DP_SW2HW_MACID(id) ((id) + 1) 178 #define DP_HW2SW_MACID(id) ((id) > 0 ? 
((id) - 1) : 0) 179 180 /** 181 * Number of Tx Queues 182 * enum and macro to define how many threshold levels is used 183 * for the AC based flow control 184 */ 185 #ifdef QCA_AC_BASED_FLOW_CONTROL 186 enum dp_fl_ctrl_threshold { 187 DP_TH_BE_BK = 0, 188 DP_TH_VI, 189 DP_TH_VO, 190 DP_TH_HI, 191 }; 192 193 #define FL_TH_MAX (4) 194 #define FL_TH_VI_PERCENTAGE (80) 195 #define FL_TH_VO_PERCENTAGE (60) 196 #define FL_TH_HI_PERCENTAGE (40) 197 #endif 198 199 /** 200 * enum dp_intr_mode 201 * @DP_INTR_LEGACY: Legacy/Line interrupts, for WIN 202 * @DP_INTR_MSI: MSI interrupts, for MCL 203 * @DP_INTR_POLL: Polling 204 */ 205 enum dp_intr_mode { 206 DP_INTR_LEGACY = 0, 207 DP_INTR_MSI, 208 DP_INTR_POLL, 209 }; 210 211 /** 212 * enum dp_tx_frm_type 213 * @dp_tx_frm_std: Regular frame, no added header fragments 214 * @dp_tx_frm_tso: TSO segment, with a modified IP header added 215 * @dp_tx_frm_sg: SG segment 216 * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added 217 * @dp_tx_frm_me: Multicast to Unicast Converted frame 218 * @dp_tx_frm_raw: Raw Frame 219 */ 220 enum dp_tx_frm_type { 221 dp_tx_frm_std = 0, 222 dp_tx_frm_tso, 223 dp_tx_frm_sg, 224 dp_tx_frm_audio, 225 dp_tx_frm_me, 226 dp_tx_frm_raw, 227 }; 228 229 /** 230 * enum dp_ast_type 231 * @dp_ast_type_wds: WDS peer AST type 232 * @dp_ast_type_static: static ast entry type 233 * @dp_ast_type_mec: Multicast echo ast entry type 234 */ 235 enum dp_ast_type { 236 dp_ast_type_wds = 0, 237 dp_ast_type_static, 238 dp_ast_type_mec, 239 }; 240 241 /** 242 * enum dp_nss_cfg 243 * @dp_nss_cfg_default: No radios are offloaded 244 * @dp_nss_cfg_first_radio: First radio offloaded 245 * @dp_nss_cfg_second_radio: Second radio offloaded 246 * @dp_nss_cfg_dbdc: Dual radios offloaded 247 * @dp_nss_cfg_dbtc: Three radios offloaded 248 */ 249 enum dp_nss_cfg { 250 dp_nss_cfg_default = 0x0, 251 dp_nss_cfg_first_radio = 0x1, 252 dp_nss_cfg_second_radio = 0x2, 253 dp_nss_cfg_dbdc = 0x3, 254 dp_nss_cfg_dbtc = 0x7, 255 
dp_nss_cfg_max 256 }; 257 258 #ifdef WLAN_TX_PKT_CAPTURE_ENH 259 #define DP_CPU_RING_MAP_1 1 260 #endif 261 262 /** 263 * dp_cpu_ring_map_type - dp tx cpu ring map 264 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded 265 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded 266 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded 267 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded 268 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded 269 * @DP_SINGLE_TX_RING_MAP: to avoid out of order all cpu mapped to single ring 270 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val 271 */ 272 enum dp_cpu_ring_map_types { 273 DP_NSS_DEFAULT_MAP, 274 DP_NSS_FIRST_RADIO_OFFLOADED_MAP, 275 DP_NSS_SECOND_RADIO_OFFLOADED_MAP, 276 DP_NSS_DBDC_OFFLOADED_MAP, 277 DP_NSS_DBTC_OFFLOADED_MAP, 278 #ifdef WLAN_TX_PKT_CAPTURE_ENH 279 DP_SINGLE_TX_RING_MAP, 280 #endif 281 DP_NSS_CPU_RING_MAP_MAX 282 }; 283 284 /** 285 * struct rx_desc_pool 286 * @pool_size: number of RX descriptor in the pool 287 * @elem_size: Element size 288 * @desc_pages: Multi page descriptors 289 * @array: pointer to array of RX descriptor 290 * @freelist: pointer to free RX descriptor link list 291 * @lock: Protection for the RX descriptor pool 292 * @owner: owner for nbuf 293 */ 294 struct rx_desc_pool { 295 uint32_t pool_size; 296 #ifdef RX_DESC_MULTI_PAGE_ALLOC 297 uint16_t elem_size; 298 struct qdf_mem_multi_page_t desc_pages; 299 #else 300 union dp_rx_desc_list_elem_t *array; 301 #endif 302 union dp_rx_desc_list_elem_t *freelist; 303 qdf_spinlock_t lock; 304 uint8_t owner; 305 }; 306 307 /** 308 * struct dp_tx_ext_desc_elem_s 309 * @next: next extension descriptor pointer 310 * @vaddr: hlos virtual address pointer 311 * @paddr: physical address pointer for descriptor 312 */ 313 struct dp_tx_ext_desc_elem_s { 314 struct dp_tx_ext_desc_elem_s *next; 315 void *vaddr; 316 qdf_dma_addr_t paddr; 317 }; 318 319 /** 320 * struct dp_tx_ext_desc_s - Tx Extension Descriptor 
 * Pool
 * @elem_count: Number of descriptors in the pool
 * @elem_size: Size of each descriptor
 * @num_free: Number of free descriptors
 * @msdu_ext_desc: MSDU extension descriptor
 * @desc_pages: multiple page allocation information for actual descriptors
 * @link_elem_size: size of the link descriptor in cacheable memory used for
 *                  chaining the extension descriptors
 * @desc_link_pages: multiple page allocation information for link descriptors
 * @freelist: chain of free extension descriptor elements
 * @lock: protects freelist/num_free manipulation
 * @memctx: DMA memory context for the pool
 */
struct dp_tx_ext_desc_pool_s {
	uint16_t elem_count;
	int elem_size;
	uint16_t num_free;
	struct qdf_mem_multi_page_t desc_pages;
	int link_elem_size;
	struct qdf_mem_multi_page_t desc_link_pages;
	struct dp_tx_ext_desc_elem_s *freelist;
	qdf_spinlock_t lock;
	qdf_dma_mem_context(memctx);
};

/**
 * struct dp_tx_desc_s - Tx Descriptor
 * @next: Next in the chain of descriptors in freelist or in the completion list
 * @nbuf: Buffer Address
 * @msdu_ext_desc: MSDU extension descriptor
 * @id: Descriptor ID
 * @vdev: vdev over which the packet was transmitted
 * @pdev: Handle to pdev
 * @pool_id: Pool ID - used when releasing the descriptor
 * @flags: Flags to track the state of descriptor and special frame handling
 * @comp: HAL Tx completion descriptor contents (hal_tx_desc_comp_s)
 * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
 *                 This is maintained in descriptor to allow more efficient
 *                 processing in completion event processing code.
 *                 This field is filled in with the htt_pkt_type enum.
 * @frm_type: Frame Type - ToDo check if this is redundant
 * @pkt_offset: Offset from which the actual packet data starts
 * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
 *             Tx completion of ME packet
 * @pool: handle to flow_pool this descriptor belongs to.
 * @tso_desc: opaque handle to the TSO segment descriptor for this frame
 * @tso_num_desc: opaque handle to the TSO num-segments descriptor
 * @timestamp: timestamp recorded for this descriptor
 *             (assumed enqueue time for latency accounting — TODO confirm)
 */
struct dp_tx_desc_s {
	struct dp_tx_desc_s *next;
	qdf_nbuf_t nbuf;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	uint32_t id;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	uint8_t pool_id;
	uint16_t flags;
	struct hal_tx_desc_comp_s comp;
	uint16_t tx_encap_type;
	uint8_t frm_type;
	uint8_t pkt_offset;
	void *me_buffer;
	void *tso_desc;
	void *tso_num_desc;
	uint64_t timestamp;
};

/**
 * enum flow_pool_status - flow pool status
 * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
 *				and network queues are unpaused
 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
 *			     and network queues are paused
 * @FLOW_POOL_BE_BK_PAUSED: BE/BK network queues are paused
 * @FLOW_POOL_VI_PAUSED: VI network queues are paused
 * @FLOW_POOL_VO_PAUSED: VO network queues are paused
 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
 */
enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED = 0,
	FLOW_POOL_ACTIVE_PAUSED = 1,
	FLOW_POOL_BE_BK_PAUSED = 2,
	FLOW_POOL_VI_PAUSED = 3,
	FLOW_POOL_VO_PAUSED = 4,
	FLOW_POOL_INVALID = 5,
	FLOW_POOL_INACTIVE = 6,
};

/**
 * struct dp_tx_tso_seg_pool_s
 * @pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @desc_pages: multiple page allocation information for actual descriptors
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_seg_pool_s {
	uint16_t pool_size;
	uint16_t num_free;
	struct qdf_tso_seg_elem_t *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	qdf_spinlock_t lock;
};

/**
 * struct dp_tx_tso_num_seg_pool_s - TSO num-segments element pool
 * @num_seg_pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @desc_pages: multiple page allocation information for actual descriptors
 * @lock: lock for accessing the pool
 */

struct dp_tx_tso_num_seg_pool_s {
	uint16_t num_seg_pool_size;
	uint16_t num_free;
	struct qdf_tso_num_seg_elem_t *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	/* tso mutex */
	qdf_spinlock_t lock;
};

/**
 * struct dp_tx_desc_pool_s - Tx Descriptor pool information
 * @elem_size: Size of each descriptor in the pool
 * @pool_size: Total number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @num_invalid_bin: Deleted pool with pending Tx completions.
 * @flow_pool_array_lock: Lock when operating on flow_pool_array.
 * @flow_pool_array: List of allocated flow pools
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	uint16_t pool_size;
	uint8_t flow_pool_id;
	uint8_t num_invalid_bin;
	uint16_t avail_desc;
	enum flow_pool_status status;
	enum htt_flow_type flow_type;
#ifdef QCA_AC_BASED_FLOW_CONTROL
	uint16_t stop_th[FL_TH_MAX];
	uint16_t start_th[FL_TH_MAX];
	qdf_time_t max_pause_time[FL_TH_MAX];
	qdf_time_t latest_pause_time[FL_TH_MAX];
#else
	uint16_t stop_th;
	uint16_t start_th;
#endif
	uint16_t pkt_drop_no_desc;
	qdf_spinlock_t flow_pool_lock;
	uint8_t pool_create_cnt;
	void *pool_owner_ctx;
#else
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
#endif
};

/**
 * struct dp_txrx_pool_stats - flow pool related statistics
 * @pool_map_count: flow pool map received
 * @pool_unmap_count: flow pool unmap received
 * @pkt_drop_no_pool: packets dropped due to unavailablity of pool
 */
struct dp_txrx_pool_stats {
	uint16_t pool_map_count;
	uint16_t pool_unmap_count;
	uint16_t pkt_drop_no_pool;
};

/* SRNG (HW ring) bookkeeping: HAL handle plus the unaligned base
 * allocation it was carved from
 */
struct dp_srng {
	hal_ring_handle_t hal_srng;
	void *base_vaddr_unaligned;
	qdf_dma_addr_t base_paddr_unaligned;
	uint32_t alloc_size;
	uint8_t cached;
	int irq;
	uint32_t num_entries;
};

/* head/tail of an nbuf chain held in a reorder slot */
struct dp_rx_reorder_array_elem {
	qdf_nbuf_t head;
	qdf_nbuf_t tail;
};

#define DP_RX_BA_INACTIVE 0
#define DP_RX_BA_ACTIVE 1
#define DP_RX_BA_IN_PROGRESS 2
/* Pending REO command with its completion handler and opaque argument */
struct dp_reo_cmd_info {
	uint16_t cmd;
	enum hal_reo_cmd_type cmd_type;
	void *data;
	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
};

/* Rx TID */
struct dp_rx_tid {
	/* TID */
	int tid;

	/* Num of addba requests */
	uint32_t num_of_addba_req;

	/* Num of addba responses */
	uint32_t num_of_addba_resp;

	/* Num of delba requests */
	uint32_t num_of_delba_req;

	/* Num of addba responses successful */
	uint32_t num_addba_rsp_success;

	/* Num of addba responses failed */
	uint32_t num_addba_rsp_failed;

	/* pn size */
	uint8_t pn_size;
	/* REO TID queue descriptors */
	void *hw_qdesc_vaddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
	qdf_dma_addr_t hw_qdesc_paddr;
	uint32_t hw_qdesc_alloc_size;

	/* RX ADDBA session state (DP_RX_BA_* above) */
	int ba_status;

	/* RX BA window size */
	uint16_t ba_win_size;

	/* Starting sequence number in Addba request */
	uint16_t startseqnum;

	/* TODO: Check the following while adding defragmentation support */
	struct dp_rx_reorder_array_elem *array;
	/* base - single rx reorder element used for non-aggr cases */
	struct dp_rx_reorder_array_elem base;

	/* only used for defrag right now */
	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;

	/* Store dst desc for reinjection */
	hal_ring_desc_t dst_ring_desc;
	struct dp_rx_desc *head_frag_desc;

	/* rx_tid lock */
	qdf_spinlock_t tid_lock;

	/* Sequence and fragments that are being processed currently */
	uint32_t curr_seq_num;
	uint32_t curr_frag_num;

	uint32_t defrag_timeout_ms;
	uint16_t dialogtoken;
	uint16_t statuscode;
	/* user defined ADDBA response status code */
	uint16_t userstatuscode;

	/* Store ppdu_id when 2k exception is received */
	uint32_t ppdu_id_2k;

	/* Delba Tx completion status */
	uint8_t delba_tx_status;

	/* Delba Tx retry count */
	uint8_t delba_tx_retry;

	/* Delba stats */
	uint32_t delba_tx_success_cnt;
	uint32_t delba_tx_fail_cnt;

	/* Delba reason code for retries */
	uint8_t delba_rcode;

};

/**
 * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
 * @num_tx_ring_masks: interrupts with tx_ring_mask set
 * @num_rx_ring_masks: interrupts with rx_ring_mask set
 * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
 * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
 * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
 * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
 * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
 * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
 * @num_masks: total number of times the interrupt was received
 *
 * Counter for individual masks are incremented only if there are any packets
 * on that ring.
 */
struct dp_intr_stats {
	uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
	uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
	uint32_t num_rx_mon_ring_masks;
	uint32_t num_rx_err_ring_masks;
	uint32_t num_rx_wbm_rel_ring_masks;
	uint32_t num_reo_status_ring_masks;
	uint32_t num_rxdma2host_ring_masks;
	uint32_t num_host2rxdma_ring_masks;
	uint32_t num_masks;
};

/* per interrupt context */
struct dp_intr {
	uint8_t tx_ring_mask;   /* WBM Tx completion rings (0-2)
				   associated with this napi context */
	uint8_t rx_ring_mask;   /* Rx REO rings (0-3) associated
				   with this interrupt context */
	uint8_t rx_mon_ring_mask;  /* Rx monitor ring mask (0-2) */
	uint8_t rx_err_ring_mask; /* REO Exception Ring */
	uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
	uint8_t reo_status_ring_mask; /* REO command response ring */
	uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
	uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
	/* Host to RXDMA monitor buffer ring */
	uint8_t host2rxdma_mon_ring_mask;
	struct dp_soc *soc;    /* Reference to SoC structure,
				  to get DMA ring handles */
	qdf_lro_ctx_t lro_ctx;
	uint8_t dp_intr_id;

	/* Interrupt Stats for individual masks */
	struct dp_intr_stats intr_stats;
};

#define REO_DESC_FREELIST_SIZE 64
#define REO_DESC_FREE_DEFER_MS 1000
/* Deferred-free entry for a REO TID queue descriptor; free_ts is the
 * time the entry was queued for deferred release
 */
struct reo_desc_list_node {
	qdf_list_node_t node;
	unsigned long free_ts;
	struct dp_rx_tid rx_tid;
};

/* SoC level data path statistics */
struct dp_soc_stats {
	struct {
		uint32_t added;
		uint32_t deleted;
		uint32_t aged_out;
		uint32_t map_err;
	} ast;

	/* SOC level TX stats */
	struct {
		/* packets dropped on tx because of no peer */
		struct cdp_pkt_info tx_invalid_peer;
		/* descriptors in each tcl ring */
		uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
		/* Descriptors in use at soc */
		uint32_t desc_in_use;
		/* tqm_release_reason == FW removed */
		uint32_t dropped_fw_removed;
		/* tx completion release_src != TQM or FW */
		uint32_t invalid_release_source;
		/* tx completion wbm_internal_error */
		uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
		/* TX Comp loop packet limit hit */
		uint32_t tx_comp_loop_pkt_limit_hit;
		/* Head pointer Out of sync at the end of dp_tx_comp_handler */
		uint32_t hp_oos2;
	} tx;

	/* SOC level RX stats */
	struct {
		/* Rx errors */
		/* Total Packets in Rx Error ring */
		uint32_t err_ring_pkts;
		/* No of Fragments */
		uint32_t rx_frags;
		/* No of incomplete fragments in waitlist */
		uint32_t rx_frag_wait;
		/* Fragments dropped due to errors */
		uint32_t rx_frag_err;
		/* No of reinjected packets */
		uint32_t reo_reinject;
		/* Reap loop packet limit hit */
		uint32_t reap_loop_pkt_limit_hit;
		/* Head pointer Out of sync at the end of dp_rx_process */
		uint32_t hp_oos2;
		/* Rx ring near full */
		uint32_t near_full;
		struct {
			/* Invalid RBM error count */
			uint32_t invalid_rbm;
			/* Invalid VDEV Error count */
			uint32_t invalid_vdev;
			/* Invalid PDEV error count */
			uint32_t invalid_pdev;

			/* Packets delivered to stack that no related peer */
			uint32_t pkt_delivered_no_peer;
			/* Defrag peer uninit error count */
			uint32_t defrag_peer_uninit;
			/* Invalid sa_idx or da_idx */
			uint32_t invalid_sa_da_idx;
			/* MSDU DONE failures */
			uint32_t msdu_done_fail;
			/* Invalid PEER Error count */
			struct cdp_pkt_info rx_invalid_peer;
			/* Invalid PEER ID count */
			struct cdp_pkt_info rx_invalid_peer_id;
			/* Invalid packet length */
			struct cdp_pkt_info rx_invalid_pkt_len;
			/* HAL ring access Fail error count */
			uint32_t hal_ring_access_fail;
			/* RX DMA error count */
			uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
			/* RX REO DEST Desc Invalid Magic count */
			uint32_t rx_desc_invalid_magic;
			/* REO Error count */
			uint32_t reo_error[HAL_REO_ERR_MAX];
			/* HAL REO ERR Count */
			uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
			/* HAL REO DEST Duplicate count */
			uint32_t hal_reo_dest_dup;
			/* HAL WBM RELEASE Duplicate count */
			uint32_t hal_wbm_rel_dup;
			/* HAL RXDMA error Duplicate count */
			uint32_t hal_rxdma_err_dup;
		} err;

		/* packet count per core - per ring */
		uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
	} rx;
};

/* MAC address viewable as bytes, or as 2- or 4-byte aligned chunks */
union dp_align_mac_addr {
	uint8_t raw[QDF_MAC_ADDR_SIZE];
	struct {
		uint16_t bytes_ab;
		uint16_t bytes_cd;
		uint16_t bytes_ef;
	} align2;
	struct {
		uint32_t bytes_abcd;
		uint16_t bytes_ef;
	} align4;
	struct __attribute__((__packed__)) {
		uint16_t bytes_ab;
		uint32_t bytes_cdef;
	} align4_2;
};

/**
 * struct dp_ast_free_cb_params - HMWDS free callback cookie
 * @mac_addr: ast mac address
 * @peer_mac_addr: mac address of peer
 * @type: ast entry type
 * @vdev_id: vdev_id
 * @flags: ast flags
 */
struct dp_ast_free_cb_params {
	union dp_align_mac_addr mac_addr;
	union dp_align_mac_addr peer_mac_addr;
	enum cdp_txrx_ast_entry_type type;
	uint8_t vdev_id;
	uint32_t flags;
};

/*
 * dp_ast_entry
 *
 * @ast_idx: Hardware AST Index
 * @mac_addr: MAC Address for this AST entry
 * @peer: Next Hop peer (for non-WDS nodes, this will be point to
 *        associated peer with this MAC address)
 * @next_hop: Set to 1 if this is for a WDS node
 * @is_active: flag to indicate active data traffic on this node
 *             (used for aging out/expiry)
 * @ase_list_elem: node in peer AST list
 * @is_bss: flag to indicate if entry corresponds to bss peer
 *          (NOTE(review): no is_bss field exists in the struct below — stale doc)
 * @is_mapped: flag to indicate that we have mapped the AST entry
 *             in ast_table
 * @pdev_id: pdev ID
 * @vdev_id: vdev ID
 * @ast_hash_value: hash value in HW
 * @ref_cnt:
reference count 801 * @type: flag to indicate type of the entry(static/WDS/MEC) 802 * @delete_in_progress: Flag to indicate that delete commands send to FW 803 * and host is waiting for response from FW 804 * @callback: ast free/unmap callback 805 * @cookie: argument to callback 806 * @hash_list_elem: node in soc AST hash list (mac address used as hash) 807 */ 808 struct dp_ast_entry { 809 uint16_t ast_idx; 810 union dp_align_mac_addr mac_addr; 811 struct dp_peer *peer; 812 bool next_hop; 813 bool is_active; 814 bool is_mapped; 815 uint8_t pdev_id; 816 uint8_t vdev_id; 817 uint16_t ast_hash_value; 818 qdf_atomic_t ref_cnt; 819 enum cdp_txrx_ast_entry_type type; 820 bool delete_in_progress; 821 txrx_ast_free_cb callback; 822 void *cookie; 823 TAILQ_ENTRY(dp_ast_entry) ase_list_elem; 824 TAILQ_ENTRY(dp_ast_entry) hash_list_elem; 825 }; 826 827 /* SOC level htt stats */ 828 struct htt_t2h_stats { 829 /* lock to protect htt_stats_msg update */ 830 qdf_spinlock_t lock; 831 832 /* work queue to process htt stats */ 833 qdf_work_t work; 834 835 /* T2H Ext stats message queue */ 836 qdf_nbuf_queue_t msg; 837 838 /* number of completed stats in htt_stats_msg */ 839 uint32_t num_stats; 840 }; 841 842 /* SOC level structure for data path */ 843 struct dp_soc { 844 /** 845 * re-use memory section starts 846 */ 847 848 /* Common base structure - Should be the first member */ 849 struct cdp_soc_t cdp_soc; 850 851 /* SoC Obj */ 852 struct cdp_ctrl_objmgr_psoc *ctrl_psoc; 853 854 /* OS device abstraction */ 855 qdf_device_t osdev; 856 857 /* WLAN config context */ 858 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx; 859 860 /* HTT handle for host-fw interaction */ 861 struct htt_soc *htt_handle; 862 863 /* Commint init done */ 864 qdf_atomic_t cmn_init_done; 865 866 /* Opaque hif handle */ 867 struct hif_opaque_softc *hif_handle; 868 869 /* PDEVs on this SOC */ 870 struct dp_pdev *pdev_list[MAX_PDEV_CNT]; 871 872 /* Number of PDEVs */ 873 uint8_t pdev_count; 874 875 /*cce disable*/ 876 
bool cce_disable; 877 878 /*ast override support in HW*/ 879 bool ast_override_support; 880 881 /*number of hw dscp tid map*/ 882 uint8_t num_hw_dscp_tid_map; 883 884 /* HAL SOC handle */ 885 hal_soc_handle_t hal_soc; 886 887 /* Device ID coming from Bus sub-system */ 888 uint32_t device_id; 889 890 /* Link descriptor memory banks */ 891 struct { 892 void *base_vaddr_unaligned; 893 void *base_vaddr; 894 qdf_dma_addr_t base_paddr_unaligned; 895 qdf_dma_addr_t base_paddr; 896 uint32_t size; 897 } link_desc_banks[MAX_LINK_DESC_BANKS]; 898 899 /* Link descriptor Idle list for HW internal use (SRNG mode) */ 900 struct dp_srng wbm_idle_link_ring; 901 902 /* Link descriptor Idle list for HW internal use (scatter buffer mode) 903 */ 904 qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS]; 905 void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS]; 906 907 /* Tx SW descriptor pool */ 908 struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS]; 909 910 /* Tx MSDU Extension descriptor pool */ 911 struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS]; 912 913 /* Tx TSO descriptor pool */ 914 struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS]; 915 916 /* Tx TSO Num of segments pool */ 917 struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS]; 918 919 /* REO destination rings */ 920 struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS]; 921 922 /* REO exception ring - See if should combine this with reo_dest_ring */ 923 struct dp_srng reo_exception_ring; 924 925 /* REO reinjection ring */ 926 struct dp_srng reo_reinject_ring; 927 928 /* REO command ring */ 929 struct dp_srng reo_cmd_ring; 930 931 /* REO command status ring */ 932 struct dp_srng reo_status_ring; 933 934 /* WBM Rx release ring */ 935 struct dp_srng rx_rel_ring; 936 937 /* TCL data ring */ 938 struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS]; 939 940 /* Number of TCL data rings */ 941 uint8_t num_tcl_data_rings; 942 943 /* TCL command ring */ 944 struct dp_srng tcl_cmd_ring; 945 
946 /* TCL command status ring */ 947 struct dp_srng tcl_status_ring; 948 949 /* WBM Tx completion rings */ 950 struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS]; 951 952 /* Common WBM link descriptor release ring (SW to WBM) */ 953 struct dp_srng wbm_desc_rel_ring; 954 955 /* DP Interrupts */ 956 struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS]; 957 958 /* Rx SW descriptor pool for RXDMA monitor buffer */ 959 struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS]; 960 961 /* Rx SW descriptor pool for RXDMA status buffer */ 962 struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS]; 963 964 /* Rx SW descriptor pool for RXDMA buffer */ 965 struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS]; 966 967 /* Number of REO destination rings */ 968 uint8_t num_reo_dest_rings; 969 970 #ifdef QCA_LL_TX_FLOW_CONTROL_V2 971 /* lock to control access to soc TX descriptors */ 972 qdf_spinlock_t flow_pool_array_lock; 973 974 /* pause callback to pause TX queues as per flow control */ 975 tx_pause_callback pause_cb; 976 977 /* flow pool related statistics */ 978 struct dp_txrx_pool_stats pool_stats; 979 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ 980 981 /* 982 * Re-use memory section ends. reuse memory indicator. 983 * Everything above this variable "dp_soc_reinit" is retained across 984 * WiFi up/down for AP use-cases. 985 * Everything below this variable "dp_soc_reinit" is reset during 986 * dp_soc_deinit. 
987 */ 988 bool dp_soc_reinit; 989 990 uint32_t wbm_idle_scatter_buf_size; 991 992 /* VDEVs on this SOC */ 993 struct dp_vdev *vdev_id_map[MAX_VDEV_CNT]; 994 995 /* Tx H/W queues lock */ 996 qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES]; 997 998 /* Tx ring map for interrupt processing */ 999 uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS]; 1000 1001 /* Rx ring map for interrupt processing */ 1002 uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS]; 1003 1004 /* peer ID to peer object map (array of pointers to peer objects) */ 1005 struct dp_peer **peer_id_to_obj_map; 1006 1007 struct { 1008 unsigned mask; 1009 unsigned idx_bits; 1010 TAILQ_HEAD(, dp_peer) * bins; 1011 } peer_hash; 1012 1013 /* rx defrag state – TBD: do we need this per radio? */ 1014 struct { 1015 struct { 1016 TAILQ_HEAD(, dp_rx_tid) waitlist; 1017 uint32_t timeout_ms; 1018 uint32_t next_flush_ms; 1019 qdf_spinlock_t defrag_lock; 1020 } defrag; 1021 struct { 1022 int defrag_timeout_check; 1023 int dup_check; 1024 } flags; 1025 TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list; 1026 qdf_spinlock_t reo_cmd_lock; 1027 } rx; 1028 1029 /* optional rx processing function */ 1030 void (*rx_opt_proc)( 1031 struct dp_vdev *vdev, 1032 struct dp_peer *peer, 1033 unsigned tid, 1034 qdf_nbuf_t msdu_list); 1035 1036 /* pool addr for mcast enhance buff */ 1037 struct { 1038 int size; 1039 uint32_t paddr; 1040 uint32_t *vaddr; 1041 struct dp_tx_me_buf_t *freelist; 1042 int buf_in_use; 1043 qdf_dma_mem_context(memctx); 1044 } me_buf; 1045 1046 /** 1047 * peer ref mutex: 1048 * 1. Protect peer object lookups until the returned peer object's 1049 * reference count is incremented. 1050 * 2. Provide mutex when accessing peer object lookup structures. 
1051 */ 1052 DP_MUTEX_TYPE peer_ref_mutex; 1053 1054 /* maximum value for peer_id */ 1055 uint32_t max_peers; 1056 1057 /* SoC level data path statistics */ 1058 struct dp_soc_stats stats; 1059 1060 /* Enable processing of Tx completion status words */ 1061 bool process_tx_status; 1062 bool process_rx_status; 1063 struct dp_ast_entry **ast_table; 1064 struct { 1065 unsigned mask; 1066 unsigned idx_bits; 1067 TAILQ_HEAD(, dp_ast_entry) * bins; 1068 } ast_hash; 1069 1070 qdf_spinlock_t ast_lock; 1071 /*Timer for AST entry ageout maintainance */ 1072 qdf_timer_t ast_aging_timer; 1073 1074 /*Timer counter for WDS AST entry ageout*/ 1075 uint8_t wds_ast_aging_timer_cnt; 1076 1077 /*interrupt timer*/ 1078 qdf_timer_t mon_reap_timer; 1079 uint8_t reap_timer_init; 1080 qdf_timer_t int_timer; 1081 uint8_t intr_mode; 1082 1083 qdf_list_t reo_desc_freelist; 1084 qdf_spinlock_t reo_desc_freelist_lock; 1085 1086 /* htt stats */ 1087 struct htt_t2h_stats htt_stats; 1088 1089 void *external_txrx_handle; /* External data path handle */ 1090 #ifdef IPA_OFFLOAD 1091 /* IPA uC datapath offload Wlan Tx resources */ 1092 struct { 1093 /* Resource info to be passed to IPA */ 1094 qdf_dma_addr_t ipa_tcl_ring_base_paddr; 1095 void *ipa_tcl_ring_base_vaddr; 1096 uint32_t ipa_tcl_ring_size; 1097 qdf_dma_addr_t ipa_tcl_hp_paddr; 1098 uint32_t alloc_tx_buf_cnt; 1099 1100 qdf_dma_addr_t ipa_wbm_ring_base_paddr; 1101 void *ipa_wbm_ring_base_vaddr; 1102 uint32_t ipa_wbm_ring_size; 1103 qdf_dma_addr_t ipa_wbm_tp_paddr; 1104 1105 /* TX buffers populated into the WBM ring */ 1106 void **tx_buf_pool_vaddr_unaligned; 1107 qdf_dma_addr_t *tx_buf_pool_paddr_unaligned; 1108 } ipa_uc_tx_rsc; 1109 1110 /* IPA uC datapath offload Wlan Rx resources */ 1111 struct { 1112 /* Resource info to be passed to IPA */ 1113 qdf_dma_addr_t ipa_reo_ring_base_paddr; 1114 void *ipa_reo_ring_base_vaddr; 1115 uint32_t ipa_reo_ring_size; 1116 qdf_dma_addr_t ipa_reo_tp_paddr; 1117 1118 /* Resource info to be passed to 
		   firmware and IPA */
		qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
		void *ipa_rx_refill_buf_ring_base_vaddr;
		uint32_t ipa_rx_refill_buf_ring_size;
		qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
	} ipa_uc_rx_rsc;

	/* non-zero while the IPA pipes are enabled (name-based; confirm) */
	qdf_atomic_t ipa_pipes_enabled;
#endif

	/* Smart monitor capability for HKv2 */
	uint8_t hw_nac_monitor_support;
	/* Flag to indicate if HTT v2 is enabled */
	bool is_peer_map_unmap_v2;
	/* Per peer per Tid ba window size support */
	uint8_t per_tid_basize_max_tid;
	/* Soc level flag to enable da_war */
	uint8_t da_war_enabled;
	/* number of active ast entries */
	uint32_t num_ast_entries;
	/* rdk rate statistics context at soc level */
	struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
	/* rdk rate statistics control flag */
	bool wlanstats_enabled;

	/* 802.1p PCP-TID map values */
	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
	/* TID map priority value */
	uint8_t tidmap_prty;
	/* Pointer to global per ring type specific configuration table */
	struct wlan_srng_cfg *wlan_srng_cfg;
	/* Num Tx outstanding on device */
	qdf_atomic_t num_tx_outstanding;
	/* Num Tx allowed */
	uint32_t num_tx_allowed;

	/**
	 * Flag to indicate whether WAR to address single cache entry
	 * invalidation bug is enabled or not
	 */
	bool is_rx_fse_full_cache_invalidate_war_enabled;
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	/**
	 * Pointer to DP RX Flow FST at SOC level if
	 * is_rx_flow_search_table_per_pdev is false
	 */
	struct dp_rx_fst *rx_fst;
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
};

#ifdef IPA_OFFLOAD
/**
 * struct dp_ipa_resources - Resources needed for IPA
 * @tx_ring: shared memory for the Tx transfer ring
 * @tx_num_alloc_buffer: number of Tx buffers allocated for IPA
 * @tx_comp_ring: shared memory for the Tx completion ring
 * @rx_rdy_ring: shared memory for the Rx ready ring
 * @rx_refill_ring: shared memory for the Rx refill ring
 * @tx_comp_doorbell_paddr: physical address of the uC Tx completion doorbell
 * @tx_comp_doorbell_vaddr: virtual address of the uC Tx completion doorbell
 * @rx_ready_doorbell_paddr: physical address of the uC Rx ready doorbell
 */
struct dp_ipa_resources {
	qdf_shared_mem_t tx_ring;
	uint32_t tx_num_alloc_buffer;

	qdf_shared_mem_t tx_comp_ring;
	qdf_shared_mem_t rx_rdy_ring;
	qdf_shared_mem_t rx_refill_ring;

	/* IPA UC doorbell registers paddr */
	qdf_dma_addr_t tx_comp_doorbell_paddr;
	uint32_t *tx_comp_doorbell_vaddr;
	qdf_dma_addr_t rx_ready_doorbell_paddr;
};
#endif

/* Max number of Rx MAC buffer rings per pdev */
#define MAX_RX_MAC_RINGS 2
/* Same as NAC_MAX_CLENT */
#define DP_NAC_MAX_CLIENT 24

/*
 * Macros to setup link descriptor cookies - for link descriptors, we just
 * need first 3 bits to store bank ID. The remaining bytes will be used to
 * set a unique ID, which will be useful in debugging
 */
#define LINK_DESC_BANK_ID_MASK 0x7
#define LINK_DESC_ID_SHIFT 3
#define LINK_DESC_ID_START 0x8000

#define LINK_DESC_COOKIE(_desc_id, _bank_id) \
	((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_bank_id))

#define LINK_DESC_COOKIE_BANK_ID(_cookie) \
	((_cookie) & LINK_DESC_BANK_ID_MASK)

/* same as ieee80211_nac_param */
enum dp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	DP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	DP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	DP_NAC_PARAM_LIST,
};

/**
 * struct dp_neighbour_peer - neighbour peer list type for smart mesh
 * @neighbour_peers_macaddr: neighbour peer's mac address
 * @vdev: vdev on which this neighbour peer entry was added
 * @ast_entry: ast_entry for neighbour peer
 * @rssi: rssi value
 * @neighbour_peer_list_elem: neighbour peer list TAILQ element
 */
struct dp_neighbour_peer {
	/* MAC address of neighbour's peer */
	union dp_align_mac_addr neighbour_peers_macaddr;
	struct dp_vdev *vdev;
	struct dp_ast_entry *ast_entry;
	uint8_t rssi;
	/* node in the list of neighbour's peer */
	TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
};

#ifdef WLAN_TX_PKT_CAPTURE_ENH
/*
 * NOTE(review): WLAN_TX_PKT_CAPTURE_ENH is re-defined to 1 inside its own
 * #ifdef; if the build system defines it to a different value this will
 * produce a macro redefinition warning - confirm this is intentional.
 */
#define WLAN_TX_PKT_CAPTURE_ENH 1
#define DP_TX_PPDU_PROC_THRESHOLD 8
#define DP_TX_PPDU_PROC_TIMEOUT 10
#endif

/**
 * struct ppdu_info - PPDU Status info descriptor
 *
 * @ppdu_id - Unique ppduid assigned by firmware for every tx packet
 * @sched_cmdid - schedule command id, which will be same in a burst
 * @max_ppdu_id - wrap around for ppdu id
 * @tlv_bitmap - bitmap of TLV types received for this PPDU
 * @last_tlv_cnt - Keep track for missing ppdu tlvs
 * @last_user - last ppdu processed for user
 * @is_ampdu - set if Ampdu aggregate
 * @nbuf - ppdu descriptor payload
 * @ppdu_desc - ppdu descriptor
 * @ppdu_info_list_elem - linked list of ppdu tlvs
 * @ppdu_info_queue_elem - Singly linked list (queue) of ppdu tlvs
 * @mpdu_compltn_common_tlv - Successful MPDU counter from COMPLTN COMMON tlv
 * @mpdu_ack_ba_tlv - Successful MPDU from ACK BA tlv
 */
struct ppdu_info {
	uint32_t ppdu_id;
	uint32_t sched_cmdid;
	uint32_t max_ppdu_id;
	uint16_t tlv_bitmap;
	uint16_t last_tlv_cnt;
	uint16_t last_user:8,
		 is_ampdu:1;
	qdf_nbuf_t nbuf;
	struct cdp_tx_completion_ppdu *ppdu_desc;
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	/* list element is a union: only one list flavour is used at a time */
	union {
		TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
		STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
	} ulist;
#define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
#define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
#else
	TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
#endif
	uint16_t mpdu_compltn_common_tlv;
	uint16_t mpdu_ack_ba_tlv;
};

/**
 * struct msdu_completion_info - wbm msdu completion info
 * @ppdu_id - Unique ppduid assigned by firmware for every tx packet
 * @peer_id - peer_id
 * @tid - tid which used during transmit
 * @first_msdu - first msdu indication
 * @last_msdu - last msdu indication
 * @msdu_part_of_amsdu - msdu part of amsdu
 * @transmit_cnt - retried count
 * @tsf - timestamp which it transmitted
 */
struct msdu_completion_info {
	uint32_t ppdu_id;
	uint16_t peer_id;
	uint8_t tid;
	uint8_t first_msdu:1,
		last_msdu:1,
		msdu_part_of_amsdu:1;
	uint8_t transmit_cnt;
	uint32_t tsf;
};

#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
struct rx_protocol_tag_map {
	/* This is the user configured tag for the said protocol type */
	uint16_t tag;
};

#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
/* per-protocol tag hit counter */
struct rx_protocol_tag_stats {
	uint32_t tag_ctr;
};
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */

#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_TX_PKT_CAPTURE_ENH
/* Empty stubs so dp_pdev/dp_peer keep the same member names
 * when tx packet capture is compiled out.
 */
struct dp_pdev_tx_capture {
};

struct dp_peer_tx_capture {
};
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
/* Template data to be set for Enhanced RX Monitor packets */
#define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a

/**
 * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
 * at end of each MSDU in monitor-lite mode
 * @reserved1: reserved for future use
 * @reserved2: reserved for future use
 * @flow_tag: flow tag value read from skb->cb
 * @protocol_tag: protocol tag value read from skb->cb
 */
struct dp_rx_mon_enh_trailer_data {
	uint16_t reserved1;
	uint16_t reserved2;
	uint16_t flow_tag;
	uint16_t protocol_tag;
};
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

/* PDEV level structure for data path */
struct dp_pdev {
	/**
	 * Re-use Memory Section Starts
	 */
	/* PDEV handle from OSIF layer TBD: see if we really need osif_pdev */
	struct cdp_ctrl_objmgr_pdev *ctrl_pdev;

	/* PDEV Id */
	int pdev_id;

	/* LMAC Id */
	int lmac_id;

	/* TXRX SOC handle */
	struct dp_soc *soc;

	/* Ring used to replenish rx buffers (maybe to the firmware of MAC) */
	struct dp_srng rx_refill_buf_ring;

	/* RXDMA error destination ring */
	struct dp_srng rxdma_err_dst_ring[NUM_RXDMA_RINGS_PER_PDEV];

	/* Link descriptor memory banks */
	struct {
		void
		*base_vaddr_unaligned;
		void *base_vaddr;
		qdf_dma_addr_t base_paddr_unaligned;
		qdf_dma_addr_t base_paddr;
		uint32_t size;
	} link_desc_banks[NUM_RXDMA_RINGS_PER_PDEV][MAX_MON_LINK_DESC_BANKS];

	/* RXDMA monitor buffer replenish ring */
	struct dp_srng rxdma_mon_buf_ring[NUM_RXDMA_RINGS_PER_PDEV];

	/* RXDMA monitor destination ring */
	struct dp_srng rxdma_mon_dst_ring[NUM_RXDMA_RINGS_PER_PDEV];

	/* RXDMA monitor status ring. TBD: Check format of this ring */
	struct dp_srng rxdma_mon_status_ring[NUM_RXDMA_RINGS_PER_PDEV];

	struct dp_srng rxdma_mon_desc_ring[NUM_RXDMA_RINGS_PER_PDEV];

	/* Stuck count on monitor destination ring MPDU process */
	uint32_t mon_dest_ring_stuck_cnt;

	/*
	 * re-use memory section ends
	 * reuse memory/deinit indicator
	 *
	 * DO NOT CHANGE NAME OR MOVE THIS VARIABLE
	 */
	bool pdev_deinit;

	/* pdev status down or up required to handle dynamic hw
	 * mode switch between DBS and DBS_SBS.
	 * 1 = down
	 * 0 = up
	 */
	bool is_pdev_down;

	/* Second ring used to replenish rx buffers */
	struct dp_srng rx_refill_buf_ring2;

	/* Empty ring used by firmware to post rx buffers to the MAC */
	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];

	/* wlan_cfg pdev ctxt */
	struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;

	/**
	 * TODO: See if we need a ring map here for LMAC rings.
	 * 1. Monitor rings are currently planning to be processed on receiving
	 * PPDU end interrupts and hence wont need ring based interrupts.
	 * 2. Rx buffer rings will be replenished during REO destination
	 * processing and doesn't require regular interrupt handling - we will
	 * only handle low water mark interrupts which is not expected
	 * frequently
	 */

	/* VDEV list */
	TAILQ_HEAD(, dp_vdev) vdev_list;

	/* vdev list lock */
	qdf_spinlock_t vdev_list_lock;

	/* Number of vdevs this device have */
	uint16_t vdev_count;

	/* PDEV transmit lock */
	qdf_spinlock_t tx_lock;

#ifndef REMOVE_PKT_LOG
	bool pkt_log_init;
	/* Pktlog pdev */
	struct pktlog_dev_t *pl_dev;
#endif /* #ifndef REMOVE_PKT_LOG */

	/* Monitor mode interface and status storage */
	struct dp_vdev *monitor_vdev;

	/* Monitor mode operation channel */
	int mon_chan_num;

	/* monitor mode lock */
	qdf_spinlock_t mon_lock;

	/* tx_mutex for me */
	DP_MUTEX_TYPE tx_mutex;

	/* monitor */
	bool monitor_configured;

	/* Smart Mesh */
	bool filter_neighbour_peers;

	/* flag to indicate neighbour_peers_list not empty */
	bool neighbour_peers_added;
	/* smart mesh mutex */
	qdf_spinlock_t neighbour_peer_mutex;
	/* Neighbour peer list */
	TAILQ_HEAD(, dp_neighbour_peer) neighbour_peers_list;
	/* msdu chain head & tail */
	qdf_nbuf_t invalid_peer_head_msdu;
	qdf_nbuf_t invalid_peer_tail_msdu;

	/* Band steering */
	/* TBD */

	/* PDEV level data path statistics */
	struct cdp_pdev_stats stats;

	/* Global RX decap mode for the device */
	enum htt_pkt_type rx_decap_mode;

	/* Enhanced Stats is enabled */
	bool enhanced_stats_en;

	/* advance monitor filter mode and type */
	uint8_t mon_filter_mode;
	uint16_t fp_mgmt_filter;
	uint16_t fp_ctrl_filter;
	uint16_t fp_data_filter;
	uint16_t mo_mgmt_filter;
	uint16_t mo_ctrl_filter;
	uint16_t mo_data_filter;
	uint16_t md_data_filter;

	qdf_atomic_t num_tx_outstanding;

	qdf_atomic_t num_tx_exception;

	/* MCL specific local peer handle */
	struct {
		uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
		uint8_t freelist;
		qdf_spinlock_t lock;
		struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
	} local_peer_ids;

	/* dscp_tid_map */
	uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];

	struct hal_rx_ppdu_info ppdu_info;

	/* operating channel */
	uint8_t operating_channel;

	qdf_nbuf_queue_t rx_status_q;
	uint32_t mon_ppdu_status;
	struct cdp_mon_status rx_mon_recv_status;
	/* monitor mode status/destination ring PPDU and MPDU count */
	struct cdp_pdev_mon_stats rx_mon_stats;
	/* to track duplicate link descriptor indications by HW for a WAR */
	uint64_t mon_last_linkdesc_paddr;
	/* to track duplicate buffer indications by HW for a WAR */
	uint32_t mon_last_buf_cookie;
	/* 128 bytes mpdu header queue per user for ppdu */
	qdf_nbuf_queue_t mpdu_q[MAX_MU_USERS];
	/* is this a mpdu header TLV and not msdu header TLV */
	bool is_mpdu_hdr[MAX_MU_USERS];
	/* per user 128 bytes msdu header list for MPDU */
	struct msdu_list msdu_list[MAX_MU_USERS];
	/* RX enhanced capture mode */
	uint8_t rx_enh_capture_mode;
	/* Rx per peer enhanced capture mode */
	bool rx_enh_capture_peer;
	struct dp_vdev *rx_enh_monitor_vdev;
	/* RX enhanced capture trailer enable/disable flag */
	bool is_rx_enh_capture_trailer_enabled;
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	/* RX per MPDU/PPDU information */
	struct cdp_rx_indication_mpdu mpdu_ind;
#endif
	/* pool addr for mcast enhance buff */
	struct {
		int size;
		uint32_t paddr;
		char *vaddr;
		struct dp_tx_me_buf_t *freelist;
		int buf_in_use;
		qdf_dma_mem_context(memctx);
	} me_buf;

	bool hmmc_tid_override_en;
	uint8_t hmmc_tid;

	/* Number of VAPs
	   with mcast enhancement enabled */
	qdf_atomic_t mc_num_vap_attached;

	qdf_atomic_t stats_cmd_complete;

#ifdef IPA_OFFLOAD
	ipa_uc_op_cb_type ipa_uc_op_cb;
	void *usr_ctxt;
	struct dp_ipa_resources ipa_resource;
#endif

	/* TBD */

	/* map this pdev to a particular Reo Destination ring */
	enum cdp_host_reo_dest_ring reo_dest;

#ifndef REMOVE_PKT_LOG
	/* Packet log mode */
	uint8_t rx_pktlog_mode;
#endif

	/* WDI event handlers */
	struct wdi_event_subscribe_t **wdi_event_list;

	/* ppdu_id of last received HTT TX stats */
	uint32_t last_ppdu_id;
	struct {
		uint8_t last_user;
		qdf_nbuf_t buf;
	} tx_ppdu_info;

	bool tx_sniffer_enable;
	/* mirror copy mode */
	bool mcopy_mode;
	bool bpr_enable;

	/* enable time latency check for tx completion */
	bool latency_capture_enable;

	/* enable calculation of delay stats */
	bool delay_stats_flag;
	struct {
		uint16_t tx_ppdu_id;
		uint16_t tx_peer_id;
		uint16_t rx_ppdu_id;
	} m_copy_id;

	/* To check if PPDU Tx stats are enabled for Pktlog */
	bool pktlog_ppdu_stats;

	void *dp_txrx_handle; /* Advanced data path handle */

#ifdef ATH_SUPPORT_NAC_RSSI
	bool nac_rssi_filtering;
#endif
	/* list of ppdu tlvs */
	TAILQ_HEAD(, ppdu_info) ppdu_info_list;
	uint32_t tlv_count;
	uint32_t list_depth;
	uint32_t ppdu_id;
	bool first_nbuf;
	struct {
		qdf_nbuf_t last_nbuf; /* Ptr to mgmt last buf */
		uint8_t *mgmt_buf; /* Ptr to mgmt. payload in HTT ppdu stats */
		uint32_t mgmt_buf_len; /* Len of mgmt. payload in ppdu stats */
		uint32_t ppdu_id;
	} mgmtctrl_frm_info;

	/* Current noise-floor reading for the pdev channel */
	int16_t chan_noise_floor;

	/*
	 * For multiradio device, this flag indicates if
	 * this radio is primary or secondary.
	 *
	 * For HK 1.0, this is used for WAR for the AST issue.
	 * HK 1.x mandates creation of only 1 AST entry with same MAC address
	 * across 2 radios. is_primary indicates the radio on which DP should
	 * install HW AST entry if there is a request to add 2 AST entries
	 * with same MAC address across 2 radios
	 */
	uint8_t is_primary;
	/* Context of cal client timer */
	struct cdp_cal_client *cal_client_ctx;
	struct cdp_tx_sojourn_stats sojourn_stats;
	qdf_nbuf_t sojourn_buf;

	/* peer pointer for collecting invalid peer stats */
	struct dp_peer *invalid_peer;

	union dp_rx_desc_list_elem_t *free_list_head;
	union dp_rx_desc_list_elem_t *free_list_tail;
	/* Pdev level flag to check peer based pktlog enabled or
	 * disabled
	 */
	uint8_t dp_peer_based_pktlog;

	/* Cached peer_id from htt_peer_details_tlv */
	uint16_t fw_stats_peer_id;

	/* qdf_event for fw_peer_stats */
	qdf_event_t fw_peer_stats_event;

	/* User configured max number of tx buffers */
	uint32_t num_tx_allowed;

	/* unique cookie required for peer session */
	uint32_t next_peer_cookie;

	/*
	 * Run time enabled when the first protocol tag is added,
	 * run time disabled when the last protocol tag is deleted
	 */
	bool is_rx_protocol_tagging_enabled;

#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	/*
	 * The protocol type is used as array index to save
	 * user provided tag info
	 */
	struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];

#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	/*
	 * Track msdus received from each reo ring separately to avoid
	 * simultaneous writes from different core
	 */
	struct rx_protocol_tag_stats
		reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
	/* Track msdus received from exception ring separately */
	struct rx_protocol_tag_stats
		rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

	/* tx packet capture enhancement */
	enum cdp_tx_enh_capture_mode tx_capture_enabled;
	struct dp_pdev_tx_capture tx_capture;
	/* stats counter for tx ppdu processed */
	uint64_t tx_ppdu_proc;

	uint32_t *ppdu_tlv_buf; /* Buffer to hold HTT ppdu stats TLVs */

	/* nbuf queue to maintain rx ppdu status buffer
	 * belonging to one ppdu
	 */
	qdf_nbuf_queue_t rx_ppdu_buf_q;
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	/**
	 * Pointer to DP Flow FST at SOC level if
	 * is_rx_flow_search_table_per_pdev is true
	 */
	struct dp_rx_fst *rx_fst;
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#ifdef FEATURE_TSO_STATS
	/* TSO Id to index into TSO packet information */
	qdf_atomic_t tso_idx;
#endif /* FEATURE_TSO_STATS */
};

struct dp_peer;

/* VDEV structure for data path state */
struct dp_vdev {
	/* OS device abstraction */
	qdf_device_t osdev;
	/* physical device that is the parent of this virtual device */
	struct dp_pdev *pdev;

	/* Handle to the OS shim SW's virtual device */
	ol_osif_vdev_handle osif_vdev;

	/* Handle to the UMAC handle */
	struct cdp_ctrl_objmgr_vdev *ctrl_vdev;
	/* vdev_id - ID used to specify a particular vdev to the target */
	uint8_t vdev_id;

	/* MAC address */
	union dp_align_mac_addr mac_addr;

	/* node in the pdev's list of vdevs */
	TAILQ_ENTRY(dp_vdev) vdev_list_elem;

	/* dp_peer list */
	TAILQ_HEAD(, dp_peer) peer_list;

	/* RX call back function to flush GRO packets */
	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
	/* default RX call back function called by dp */
	ol_txrx_rx_fp osif_rx;
	/* callback to deliver rx frames to the OS */
	ol_txrx_rx_fp osif_rx_stack;
	/* call back
	   function to flush out queued rx packets */
	ol_txrx_rx_flush_fp osif_rx_flush;
	ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
	ol_txrx_get_key_fp osif_get_key;
	ol_txrx_tx_free_ext_fp osif_tx_free_ext;

#ifdef notyet
	/* callback to check if the msdu is an WAI (WAPI) frame */
	ol_rx_check_wai_fp osif_check_wai;
#endif

	/* proxy arp function */
	ol_txrx_proxy_arp_fp osif_proxy_arp;

	/* callback to hand rx monitor 802.11 MPDU to the OS shim */
	ol_txrx_rx_mon_fp osif_rx_mon;

	ol_txrx_mcast_me_fp me_convert;

	/* completion function used by this vdev */
	ol_txrx_completion_fp tx_comp;

	/* deferred vdev deletion state */
	struct {
		/* VDEV delete pending */
		int pending;
		/*
		 * callback and a context argument to provide a
		 * notification for when the vdev is deleted.
		 */
		ol_txrx_vdev_delete_cb callback;
		void *context;
	} delete;

	/* tx data delivery notification callback function */
	struct {
		ol_txrx_data_tx_cb func;
		void *ctxt;
	} tx_non_std_data_callback;


	/* safe mode control to bypass the encrypt and decipher process */
	uint32_t safemode;

	/* rx filter related */
	uint32_t drop_unenc;
#ifdef notyet
	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
	uint32_t filters_num;
#endif
	/* TDLS Link status */
	bool tdls_link_connected;
	bool is_tdls_frame;


	/* VDEV operating mode */
	enum wlan_op_mode opmode;

	/* VDEV subtype */
	enum wlan_op_subtype subtype;

	/* Tx encapsulation type for this VAP */
	enum htt_cmn_pkt_type tx_encap_type;
	/* Rx Decapsulation type for this VAP */
	enum htt_cmn_pkt_type rx_decap_type;

	/* BSS peer */
	struct dp_peer *vap_bss_peer;

	/* WDS enabled */
	bool wds_enabled;

	/* MEC enabled */
	bool mec_enabled;

	/* WDS Aging timer period */
	uint32_t wds_aging_timer_val;

	/* NAWDS enabled */
	bool nawds_enabled;

	/* Default HTT meta data for this VDEV */
	/* TBD: check alignment constraints */
	uint16_t htt_tcl_metadata;

	/* Mesh mode vdev */
	uint32_t mesh_vdev;

	/* Mesh mode rx filter setting */
	uint32_t mesh_rx_filter;

	/* DSCP-TID mapping table ID */
	uint8_t dscp_tid_map_id;

	/* Multicast enhancement enabled */
	uint8_t mcast_enhancement_en;

	/* per vdev rx nbuf queue */
	qdf_nbuf_queue_t rxq;

	uint8_t tx_ring_id;
	struct dp_tx_desc_pool_s *tx_desc;
	struct dp_tx_ext_desc_pool_s *tx_ext_desc;

	/* VDEV Stats */
	struct cdp_vdev_stats stats;

	/* Is this a proxySTA VAP */
	bool proxysta_vdev;
	/* Is isolation mode enabled */
	bool isolation_vdev;

	/* Address search flags to be configured in HAL descriptor */
	uint8_t hal_desc_addr_search_flags;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	struct dp_tx_desc_pool_s *pool;
#endif
	/* AP BRIDGE enabled */
	uint32_t ap_bridge_enabled;

	enum cdp_sec_type sec_type;

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	bool raw_mode_war;

	/* Address search type to be set in TX descriptor */
	uint8_t search_type;

	/* AST hash value for BSS peer in HW valid for STA VAP */
	uint16_t bss_ast_hash;

	/* AST hash index for BSS peer in HW valid for STA VAP */
	uint16_t bss_ast_idx;

	/* Capture timestamp of previous tx packet enqueued */
	uint64_t prev_tx_enq_tstamp;

	/* Capture timestamp of previous rx packet delivered */
	uint64_t prev_rx_deliver_tstamp;

	/* 8021p PCP-TID mapping table ID */
	uint8_t tidmap_tbl_id;

	/* 8021p PCP-TID map values */
	uint8_t pcp_tid_map[PCP_TID_MAP_MAX];

	/* TIDmap priority */
	uint8_t tidmap_prty;
	/* Self Peer in STA mode */
	struct dp_peer *vap_self_peer;

	bool multipass_en;
#ifdef QCA_MULTIPASS_SUPPORT
	uint16_t *iv_vlan_map;

	/* dp_peer special list */
	TAILQ_HEAD(, dp_peer) mpass_peer_list;
	DP_MUTEX_TYPE mpass_peer_mutex;
#endif
};


/* index into dp_peer security[]: 0 -> multicast, 1 -> unicast */
enum {
	dp_sec_mcast = 0,
	dp_sec_ucast
};

#ifdef WDS_VENDOR_EXTENSION
typedef struct {
	uint8_t wds_tx_mcast_4addr:1,
		wds_tx_ucast_4addr:1,
		wds_rx_filter:1,      /* enforce rx filter */
		wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames */
		wds_rx_mcast_4addr:1; /* when set, accept 4addr multicast frames */

} dp_ecm_policy;
#endif

/*
 * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
 * @cached_bufq: nbuff list to enqueue rx packets
 * @bufq_lock: spinlock for nbuff list access
 * @thresh: maximum threshold for number of rx buff to enqueue
 * @entries: number of entries
 * @dropped: number of packets dropped
 */
struct dp_peer_cached_bufq {
	qdf_list_t cached_bufq;
	qdf_spinlock_t bufq_lock;
	uint32_t thresh;
	uint32_t entries;
	uint32_t dropped;
};

/* Peer structure for data path state */
struct dp_peer {
	/* VDEV to which this peer is associated */
	struct dp_vdev *vdev;

	struct cdp_ctrl_objmgr_peer *ctrl_peer;

	struct dp_ast_entry *self_ast_entry;

	qdf_atomic_t ref_cnt;

	/* TODO: See if multiple peer IDs are required in wifi3.0 */
	/* peer ID(s) for this peer */
	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];

	union dp_align_mac_addr mac_addr;

	/* node in the vdev's list of peers */
	TAILQ_ENTRY(dp_peer) peer_list_elem;
	/* node in the hash table bin's list of peers */
	TAILQ_ENTRY(dp_peer) hash_list_elem;

	/* TID structures */
	struct dp_rx_tid rx_tid[DP_MAX_TIDS];
	struct dp_peer_tx_capture
			       tx_capture;


	/* TBD: No transmit TID state required? */

	struct {
		enum cdp_sec_type sec_type;
		u_int32_t michael_key[2]; /* relevant for TKIP */
	} security[2]; /* 0 -> multicast, 1 -> unicast */

	/* NAWDS Flag and Bss Peer bit */
	uint8_t nawds_enabled:1, /* NAWDS flag */
		bss_peer:1, /* set for bss peer */
		wds_enabled:1, /* WDS peer */
		authorize:1, /* Set when authorized */
		nac:1, /* NAC Peer */
		tx_cap_enabled:1, /* Peer's tx-capture is enabled */
		rx_cap_enabled:1, /* Peer's rx-capture is enabled */
		valid:1; /* valid bit */

	/* MCL specific peer local id */
	uint16_t local_id;
	enum ol_txrx_peer_state state;
	qdf_spinlock_t peer_info_lock;

	/* Peer Stats */
	struct cdp_peer_stats stats;

	TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
	/* TBD */

#ifdef WDS_VENDOR_EXTENSION
	dp_ecm_policy wds_ecm;
#endif
	bool delete_in_progress;

	/* Active Block ack sessions */
	uint16_t active_ba_session_cnt;

	/* Current HW buffersize setting */
	uint16_t hw_buffer_size;

	/*
	 * Flag to check if sessions with 256 buffersize
	 * should be terminated.
	 */
	uint8_t kill_256_sessions;
	qdf_atomic_t is_default_route_set;
	/* Peer level flag to check peer based pktlog enabled or
	 * disabled
	 */
	uint8_t peer_based_pktlog_filter;

	/* rdk statistics context */
	struct cdp_peer_rate_stats_ctx *wlanstats_ctx;
	/* average sojourn time */
	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];

#ifdef QCA_MULTIPASS_SUPPORT
	/* node in the special peer list element */
	TAILQ_ENTRY(dp_peer) mpass_peer_list_elem;
	/* vlan id for key */
	uint16_t vlan_id;
#endif

#ifdef PEER_CACHE_RX_PKTS
	qdf_atomic_t flush_in_progress;
	struct dp_peer_cached_bufq bufq_info;
#endif
#ifdef FEATURE_PERPKT_INFO
	/* delayed ba ppdu stats handling */
	struct cdp_delayed_tx_completion_ppdu_user delayed_ba_ppdu_stats;
	/* delayed ba flag */
	bool last_delayed_ba;
	/* delayed ba ppdu id */
	uint32_t last_delayed_ba_ppduid;
#endif
};

/*
 * dp_invalid_peer_msg
 * @nbuf: data buffer
 * @wh: 802.11 header
 * @vdev_id: id of vdev
 */
struct dp_invalid_peer_msg {
	qdf_nbuf_t nbuf;
	struct ieee80211_frame *wh;
	uint8_t vdev_id;
};

/*
 * dp_tx_me_buf_t: ME buffer
 * next: pointer to next buffer
 * data: Destination Mac address
 */
struct dp_tx_me_buf_t {
	/* Note: ME buf pool initialization logic expects next pointer to
	 * be the first element. Dont add anything before next
	 */
	struct dp_tx_me_buf_t *next;
	uint8_t data[QDF_MAC_ADDR_SIZE];
};

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
struct hal_rx_fst;

struct dp_rx_fse {
	/* HAL Rx Flow Search Entry which matches HW definition */
	void *hal_rx_fse;
	/* Toeplitz hash value */
	uint32_t flow_hash;
	/* Flow index, equivalent to hash value truncated to FST size */
	uint32_t flow_id;
	/* Stats tracking for this flow */
	struct cdp_flow_stats stats;
	/* Flag indicating whether flow is IPv4 address tuple */
	bool is_ipv4_addr_entry;
	/* Flag indicating whether flow is valid */
	bool is_valid;
};

struct dp_rx_fst {
	/* Software (DP) FST */
	uint8_t *base;
	/* Pointer to HAL FST */
	struct hal_rx_fst *hal_rx_fst;
	/* Base physical address of HAL RX HW FST */
	uint64_t hal_rx_fst_base_paddr;
	/* Maximum number of flows FSE supports */
	uint16_t max_entries;
	/* Num entries in flow table */
	uint16_t num_entries;
	/* SKID Length */
	uint16_t max_skid_length;
	/* Hash mask to obtain legitimate hash entry */
	uint32_t hash_mask;
	/* Timer for bundling of flows */
	qdf_timer_t cache_invalidate_timer;
	/**
	 * Flag which tracks whether cache update
	 * is needed on timer expiry
	 */
	qdf_atomic_t is_cache_update_pending;
	/* Flag to indicate completion of FSE setup in HW/FW */
	bool fse_setup_done;
};
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#endif /* _DP_TYPES_H_ */