/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_BE_RX_H_
#define _DP_BE_RX_H_

#include <dp_types.h>
#include "dp_be.h"
#include "dp_peer.h"
#include <dp_rx.h>
#include "hal_be_rx.h"

/**
 * struct dp_be_intrabss_params - parameters for BE intra-BSS forwarding
 * @dest_soc: dest soc to forward the packet to
 * @tx_vdev_id: vdev id retrieved from dest peer
 */
struct dp_be_intrabss_params {
	struct dp_soc *dest_soc;
	uint8_t tx_vdev_id;
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */

bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
			   struct dp_txrx_peer *ta_txrx_peer,
			   uint8_t *rx_tlv_hdr,
			   qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata);
#endif

/**
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats);

/**
 * dp_rx_process_be() - BE-specific Rx processing for one REO destination ring
 * @int_ctx: interrupt context that invoked this handler
 * @hal_ring_hdl: HAL handle of the REO destination ring to reap
 * @reo_ring_num: REO destination ring number
 * @quota: maximum amount of work allowed in this invocation
 *
 * Return: amount of work done (number of entries processed)
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);

/**
 * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);

/**
 * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);

/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get corresponding Rx Desc
 *					   address from WBM ring Desc
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure
 *		pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc);

/**
 * dp_rx_desc_cookie_2_va_be() - Convert RX Desc cookie ID to VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Rx descriptor virtual address
 */
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie);

#if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
	defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
/**
 * dp_rx_desc_sw_cc_check() - check if RX desc VA is got correctly,
 *			      if not, do SW cookie conversion.
 * @soc: Handle to DP Soc structure
 * @rx_buf_cookie: RX desc cookie ID
 * @r_rx_desc: double pointer for RX desc
 *
 * Return: None
 */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
	/* VA not resolved yet (HW conversion unavailable or failed):
	 * fall back to the host cookie-conversion table lookup.
	 */
	if (qdf_unlikely(!(*r_rx_desc))) {
		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc,
						rx_buf_cookie);
	}
}
#else
/* HW cookie conversion is always used on this target: no SW fallback */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION && DP_HW_COOKIE_CONVERT_EXCEPTION */

/* Offload indication is hardwired to 0 for BE peer metadata */
#define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)		(0)

#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
/**
 * dp_rx_peer_metadata_peer_id_get_be() - get peer id from peer metadata
 * @soc: Handle to DP Soc structure
 * @peer_metadata: peer metadata word (htt_rx_peer_metadata_v1 layout)
 *
 * Reconstructs the peer id from the reduced-width peer_id field plus the
 * ml_peer_valid bit placed at soc->peer_id_shift.
 *
 * Return: peer id
 */
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;
	uint16_t peer_id;

	peer_id = metadata->peer_id |
		  (metadata->ml_peer_valid << soc->peer_id_shift);

	return peer_id;
}
#else
/* Combine ml_peer_valid and peer_id field */
#define DP_BE_PEER_METADATA_PEER_ID_MASK	0x00003fff
#define DP_BE_PEER_METADATA_PEER_ID_SHIFT	0

/**
 * dp_rx_peer_metadata_peer_id_get_be() - get peer id from peer metadata
 * @soc: Handle to DP Soc structure (unused in this variant)
 * @peer_metadata: peer metadata word
 *
 * Return: peer id (ml_peer_valid and peer_id read as one combined field)
 */
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}
#endif

/**
 * dp_rx_peer_metadata_vdev_id_get_be() - get vdev id from peer metadata
 * @soc: Handle to DP Soc structure (unused)
 * @peer_metadata: peer metadata word (htt_rx_peer_metadata_v1 layout)
 *
 * Return: vdev id
 */
static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;

	return metadata->vdev_id;
}

/**
 * dp_rx_peer_metadata_lmac_id_get_be() - get lmac id from peer metadata
 * @peer_metadata: peer metadata word
 *
 * Return: lmac id
 */
static inline uint8_t
dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_rx_nf_process() - Near Full state handler for RX rings.
 * @int_ctx: interrupt context
 * @hal_ring_hdl: Rx ring handle
 * @reo_ring_num: RX ring number
 * @quota: Quota of work to be done
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota);
#else
/* Near-full IRQ feature disabled: stub reports no work done */
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id);
#else
/* Single-chip build: buffers are always replenished on the local soc */
static inline struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_igmp_handler() - Rx handler for Mcast packets
 * @soc: Handle to DP Soc structure
 * @vdev: DP vdev handle
 * @peer: DP peer handle
 * @nbuf: nbuf
to be enqueued 243 * 244 * Return: true when packet sent to stack, false failure 245 */ 246 bool dp_rx_mlo_igmp_handler(struct dp_soc *soc, 247 struct dp_vdev *vdev, 248 struct dp_txrx_peer *peer, 249 qdf_nbuf_t nbuf); 250 251 /** 252 * dp_peer_rx_reorder_queue_setup() - Send reo queue setup wmi cmd to FW 253 per peer type 254 * @soc: DP Soc handle 255 * @peer: dp peer to operate on 256 * @tid: TID 257 * @ba_window_size: BlockAck window size 258 * 259 * Return: 0 - success, others - failure 260 */ 261 static inline 262 QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc, 263 struct dp_peer *peer, 264 int tid, 265 uint32_t ba_window_size) 266 { 267 uint8_t i; 268 struct dp_mld_link_peers link_peers_info; 269 struct dp_peer *link_peer; 270 struct dp_rx_tid *rx_tid; 271 struct dp_soc *link_peer_soc; 272 273 rx_tid = &peer->rx_tid[tid]; 274 if (!rx_tid->hw_qdesc_paddr) 275 return QDF_STATUS_E_INVAL; 276 277 if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) { 278 if (IS_MLO_DP_MLD_PEER(peer)) { 279 /* get link peers with reference */ 280 dp_get_link_peers_ref_from_mld_peer(soc, peer, 281 &link_peers_info, 282 DP_MOD_ID_CDP); 283 /* send WMI cmd to each link peers */ 284 for (i = 0; i < link_peers_info.num_links; i++) { 285 link_peer = link_peers_info.link_peers[i]; 286 link_peer_soc = link_peer->vdev->pdev->soc; 287 if (link_peer_soc->cdp_soc.ol_ops-> 288 peer_rx_reorder_queue_setup) { 289 if (link_peer_soc->cdp_soc.ol_ops-> 290 peer_rx_reorder_queue_setup( 291 link_peer_soc->ctrl_psoc, 292 link_peer->vdev->pdev->pdev_id, 293 link_peer->vdev->vdev_id, 294 link_peer->mac_addr.raw, 295 rx_tid->hw_qdesc_paddr, 296 tid, tid, 297 1, ba_window_size)) { 298 dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n", 299 link_peer_soc, tid); 300 return QDF_STATUS_E_FAILURE; 301 } 302 } 303 } 304 /* release link peers reference */ 305 dp_release_link_peers_ref(&link_peers_info, 306 DP_MOD_ID_CDP); 307 } else if (peer->peer_type == CDP_LINK_PEER_TYPE) { 308 
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) { 309 if (soc->cdp_soc.ol_ops-> 310 peer_rx_reorder_queue_setup( 311 soc->ctrl_psoc, 312 peer->vdev->pdev->pdev_id, 313 peer->vdev->vdev_id, 314 peer->mac_addr.raw, 315 rx_tid->hw_qdesc_paddr, 316 tid, tid, 317 1, ba_window_size)) { 318 dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n", 319 soc, tid); 320 return QDF_STATUS_E_FAILURE; 321 } 322 } 323 } else { 324 dp_peer_err("invalid peer type %d", peer->peer_type); 325 return QDF_STATUS_E_FAILURE; 326 } 327 } else { 328 /* Some BE targets dont require WMI and use shared 329 * table managed by host for storing Reo queue ref structs 330 */ 331 if (IS_MLO_DP_LINK_PEER(peer) || 332 peer->peer_id == HTT_INVALID_PEER) { 333 /* Return if this is for MLD link peer and table 334 * is not used in MLD link peer case as MLD peer's 335 * qref is written to LUT in peer setup or peer map. 336 * At this point peer setup for link peer is called 337 * before peer map, hence peer id is not assigned. 338 * This could happen if peer_setup is called before 339 * host receives HTT peer map. In this case return 340 * success with no op and let peer map handle 341 * writing the reo_qref to LUT. 
342 */ 343 dp_peer_debug("Invalid peer id for dp_peer:%pK", peer); 344 return QDF_STATUS_SUCCESS; 345 } 346 347 hal_reo_shared_qaddr_write(soc->hal_soc, 348 peer->peer_id, 349 tid, peer->rx_tid[tid].hw_qdesc_paddr); 350 } 351 return QDF_STATUS_SUCCESS; 352 } 353 #else 354 static inline 355 QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc, 356 struct dp_peer *peer, 357 int tid, 358 uint32_t ba_window_size) 359 { 360 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 361 362 if (!rx_tid->hw_qdesc_paddr) 363 return QDF_STATUS_E_INVAL; 364 365 if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) { 366 if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup( 367 soc->ctrl_psoc, 368 peer->vdev->pdev->pdev_id, 369 peer->vdev->vdev_id, 370 peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid, 371 1, ba_window_size)) { 372 dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n", 373 soc, tid); 374 return QDF_STATUS_E_FAILURE; 375 } 376 } 377 378 return QDF_STATUS_SUCCESS; 379 } 380 #endif /* WLAN_FEATURE_11BE_MLO */ 381 382 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH 383 static inline 384 void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next) 385 { 386 if (next) { 387 /* prefetch skb->next and first few bytes of skb->cb */ 388 qdf_prefetch(next); 389 /* skb->cb spread across 2 cache lines hence below prefetch */ 390 qdf_prefetch(&next->_skb_refdst); 391 qdf_prefetch(&next->len); 392 qdf_prefetch(&next->protocol); 393 qdf_prefetch(next->data); 394 qdf_prefetch(next->data + 64); 395 qdf_prefetch(next->data + 128); 396 } 397 } 398 #else 399 static inline 400 void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next) 401 { 402 } 403 #endif 404 405 #ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH 406 /** 407 * dp_rx_cookie_2_va_rxdma_buf_prefetch() - function to prefetch the SW desc 408 * @soc: Handle to DP Soc structure 409 * @cookie: cookie used to lookup virtual address 410 * 411 * Return: prefetched Rx descriptor virtual address 412 */ 413 
static inline
void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
{
	void *prefetch_desc;

	/* read the SW desc VA cached in the REO ring descriptor and warm it */
	prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
	qdf_prefetch(prefetch_desc);
	return prefetch_desc;
}

/**
 * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch HW and SW
 *					      desc
 * @soc: Handle to DP Soc structure
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input & output param of last prefetch SW desc
 *
 * Return: None
 */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		/* warm the first two cache lines of the last SW desc's nbuf */
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		*last_prefetched_sw_desc =
			dp_rx_va_prefetch(*last_prefetched_hw_desc);

		/* Advance the HW desc pointer: when the current 32-byte desc
		 * is not 64-byte aligned use the "next cached desc" path,
		 * otherwise take the next 32-byte desc.
		 * NOTE(review): exact cache-line stepping semantics come from
		 * the hal_srng helpers — confirm against hal implementation.
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(hal_soc,
								       hal_ring_hdl,
								       (uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
								   hal_ring_hdl,
								   (uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
/* HW/SW desc prefetch feature disabled: no-op */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
}
#endif
#endif /* _DP_BE_RX_H_ */