/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_li_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_li_api.h>
#include <hal_li_tx.h>

/* cdp_sec_type -> HW encrypt-type encoding; defined in another TU */
extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

/**
 * dp_tx_comp_get_params_from_hal_desc_li() - Get the SW Tx descriptor
 *	referenced by a HAL Tx completion descriptor
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL (WBM) Tx completion descriptor
 * @r_tx_desc: out param; filled with the looked-up SW Tx descriptor
 *
 * Decodes the SW descriptor id carried in the completion into its
 * pool/page/offset fields and resolves it through dp_tx_desc_find().
 * Asserts (qdf_assert_always) if the descriptor found does not belong
 * to the pool encoded in the id, since that indicates a corrupted
 * completion or descriptor id.
 *
 * Return: none (result delivered via @r_tx_desc)
 */
void dp_tx_comp_get_params_from_hal_desc_li(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint8_t pool_id;
	uint32_t tx_desc_id;

	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
		DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor from its id (pool | page | offset) */
	*r_tx_desc = dp_tx_desc_find(soc, pool_id,
				     (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				     DP_TX_DESC_ID_PAGE_OS,
				     (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				     DP_TX_DESC_ID_OFFSET_OS);
	/* Pool id recorded in the descriptor must match the id field;
	 * a mismatch means the completion references a bogus descriptor.
	 */
	if ((*r_tx_desc)->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
				 pool_id, (*r_tx_desc)->pool_id);

		qdf_assert_always(0);
	}
}

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
/**
 * dp_tx_get_rbm_id_li() - Get the RBM ID for Tx completion ring selection
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Ring 0 maps to the SW2 buffer manager id; rings 1..N map linearly
 * onto wbm_sw0_bm_id + (ring_id - 1).
 *
 * Return: return-buffer-manager (RBM) id to program in the Tx descriptor
 */
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
}

#else
/* Non-multiq variant: plain linear ring_id -> RBM id mapping */
static inline uint8_t dp_tx_get_rbm_id_li(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id + soc->wbm_sw0_bm_id);
}
#endif

#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
/**
 * dp_tx_clear_consumed_hw_descs - Reset all the consumed Tx ring descs to 0
 *
 * @soc: DP soc handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Walks every already-consumed entry in the SW2TCL source ring and
 * zeroes it via hal_tx_desc_clear().
 *
 * Return: void
 */
static inline
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
				   hal_ring_handle_t hal_ring_hdl)
{
	void *desc = hal_srng_src_get_next_consumed(soc->hal_soc,
						    hal_ring_hdl);

	while (desc) {
		hal_tx_desc_clear(desc);
		desc = hal_srng_src_get_next_consumed(soc->hal_soc,
						      hal_ring_hdl);
	}
}

#else
/* Stub when CLEAR_SW2TCL_CONSUMED_DESC is not enabled */
static inline
void dp_tx_clear_consumed_hw_descs(struct dp_soc *soc,
				   hal_ring_handle_t hal_ring_hdl)
{
}
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */

/**
 * dp_tx_hw_enqueue_li() - Enqueue a frame to the HW SW2TCL Tx ring
 * @soc: DP soc handle
 * @vdev: DP vdev handle of the transmitting interface
 * @tx_desc: SW Tx descriptor describing the frame (nbuf, dma_addr, flags)
 * @fw_metadata: metadata to carry to FW in the HW descriptor
 * @tx_exc_metadata: exception-path metadata; when non-NULL and carrying a
 *	valid sec_type it overrides the vdev security type
 * @msdu_info: per-MSDU info providing the Tx queue/ring id and TID
 *
 * Builds a HAL Tx descriptor in a stack-local cache, then claims a slot
 * on the TCL source ring and syncs the cached descriptor into it.
 *
 * Return: QDF_STATUS_SUCCESS on enqueue, QDF_STATUS_E_RESOURCES when the
 *	descriptor id is invalid, ring access fails, or the ring is full.
 */
QDF_STATUS
dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	uint8_t ring_id = tx_q->ring_id & DP_TX_QUEUE_MASK;
	uint8_t tid = msdu_info->tid;

	/*
	 * Static { 0 } initialization is used here instead of a
	 * qdf_mem_set call to avoid the extra function-call jump.
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };

	/* Exception metadata, when valid, overrides the vdev sec type */
	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = dp_tx_get_rbm_id_li(soc, ring_id);

	hal_ring_handle_t hal_ring_hdl = NULL;

	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	/* Program the cached descriptor: buffer, routing and per-vdev
	 * classification fields.
	 */
	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, bm_id, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id_li(soc->hal_soc, hal_tx_desc_cached,
				   vdev->lmac_id);
	hal_tx_desc_set_search_type_li(soc->hal_soc, hal_tx_desc_cached,
				       vdev->search_type);
	hal_tx_desc_set_search_index_li(soc->hal_soc, hal_tx_desc_cached,
					vdev->bss_ast_idx);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);

	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if (vdev->csum_enabled &&
	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
	      QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	     qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	/* Timestamp the descriptor only when delay/ext stats need it */
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return status;
	}

	dp_tx_clear_consumed_hw_descs(soc, hal_ring_hdl);

	/* Sync cached descriptor with HW */

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	dp_tx_update_stats(soc, tx_desc->nbuf);
	status = QDF_STATUS_SUCCESS;

	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
				 hal_ring_hdl, soc);

ring_access_fail:
	/* Ends ring access; coalesce controls whether the HW head-pointer
	 * update may be deferred/coalesced by the wrapper.
	 */
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);

	return status;
}

/**
 * dp_tx_desc_pool_init_li() - Assign ids to every descriptor in a Tx pool
 * @soc: DP soc handle
 * @num_elem: number of descriptors in the pool (unused here; the walk is
 *	bounded by the freelist itself)
 * @pool_id: id of the pool being initialized
 *
 * Walks the pool freelist and stamps each descriptor with a composite id
 * (pool | page | offset) derived from its position, plus its pool_id.
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_init_li(struct dp_soc *soc,
				   uint16_t num_elem,
				   uint8_t pool_id)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;

	tx_desc_pool = &soc->tx_desc[pool_id];
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc) {
		/* Position within the multi-page pool -> (page, offset) */
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_desc_pool_deinit_li() - De-initialize a Tx descriptor pool
 * @soc: DP soc handle
 * @tx_desc_pool: pool to de-initialize
 * @pool_id: id of the pool
 *
 * No per-descriptor teardown is needed for Lithium targets, so this is
 * intentionally a no-op; it exists to satisfy the arch-ops interface.
 *
 * Return: void
 */
void dp_tx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
}