xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.c (revision 5611ef508114526caa3c58ffe2e188650c7b53d1)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_be_tx.h"
#include "dp_tx_desc.h"
#include "hal_tx.h"
#include <hal_be_api.h>

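/**
 * dp_tx_comp_get_params_from_hal_desc_be() - Get TX desc from HAL comp desc
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL TX completion descriptor
 * @r_tx_desc: SW TX descriptor retrieved from the completion descriptor
 *
 * Depending on the cookie conversion feature flags, the descriptor VA is
 * either read back directly from the HW-converted completion descriptor
 * or looked up in SW from the completion cookie (descriptor ID).
 *
 * Return: None
 */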
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t tx_desc_id;

	if (qdf_likely(
		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc))) {
		/* HW cookie conversion done */
		*r_tx_desc = (struct dp_tx_desc_s *)
				hal_tx_comp_get_desc_va(tx_comp_hal_desc);
	} else {
		/* SW does cookie conversion to VA */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		*r_tx_desc =
		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
	}
}
#else
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	*r_tx_desc = (struct dp_tx_desc_s *)
			hal_tx_comp_get_desc_va(tx_comp_hal_desc);
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else

void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t tx_desc_id;

	/* SW does cookie conversion to VA */
	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	*r_tx_desc =
	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
/**
 * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion
 * @soc: DP soc structure pointer
 * @ring_id: transmit queue/ring_id to be used when XPS is enabled
 *
 * Return: RBM ID corresponding to the TCL ring_id
 */
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
}

#else
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
					  uint8_t ring_id)
{
	uint8_t wbm_ring_id, rbm;

	wbm_ring_id = wlan_cfg_get_wbm_ring_num_for_index(ring_id);
	rbm = wbm_ring_id + soc->wbm_sw0_bm_id;
	dp_debug("ring_id %u wbm ring num %u rbm %u",
		 ring_id, wbm_ring_id, rbm);
	return rbm;
}
#endif

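/**
 * dp_tx_hw_enqueue_be() - Enqueue a TX descriptor to the HW TCL ring
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @tx_desc: SW TX descriptor to be enqueued
 * @fw_metadata: metadata to send to the firmware in the HW descriptor
 * @tx_exc_metadata: handle to CDP exception-path metadata
 * @msdu_info: MSDU info carrying the TX queue/ring selection and TID
 *
 * Fills a cached HAL TX descriptor from the SW descriptor and vdev/bank
 * parameters, then syncs it into the next free entry of the TCL ring.
 *
 * Return: QDF_STATUS_SUCCESS on enqueue, QDF_STATUS_E_RESOURCES on failure
 */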
QDF_STATUS
dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	uint8_t ring_id = tx_q->ring_id;
	uint8_t tid = msdu_info->tid;
	struct dp_vdev_be *be_vdev;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
	hal_ring_handle_t hal_ring_hdl = NULL;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, bm_id, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);

	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);
	/*
	 * Bank_ID is used as the DSCP_TABLE number in Beryllium,
	 * so there is no explicit field for DSCP_TID_TABLE_NUM.
	 */

	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if (vdev->csum_enabled &&
	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
					QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	      qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	hal_tx_desc_set_bank_id(hal_tx_desc_cached, be_vdev->bank_id);

	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());

	dp_verbose_debug("length:%d, type:%d, dma_addr:%llx, offset:%d, desc id:%u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return status;
	}

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);

	/* Sync cached descriptor with HW */
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);

	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);

	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	dp_tx_update_stats(soc, tx_desc->nbuf);
	status = QDF_STATUS_SUCCESS;

	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
				 hal_ring_hdl, soc);

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);

	return status;
}

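/**
 * dp_tx_init_bank_profiles() - Allocate and initialize TX bank profiles
 * @be_soc: Beryllium-specific DP soc handle
 *
 * Allocates one SW bank-profile entry per HW TCL bank and creates the
 * lock protecting the profile table.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM otherwise
 */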
QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
{
	int i, num_tcl_banks;

	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
	be_soc->num_bank_profiles = num_tcl_banks;
	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
					       sizeof(*be_soc->bank_profiles));
	if (!be_soc->bank_profiles) {
		dp_err("unable to allocate memory for DP TX Profiles!");
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mutex_create(&be_soc->tx_bank_lock);

	for (i = 0; i < num_tcl_banks; i++) {
		be_soc->bank_profiles[i].is_configured = false;
		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
	}
	return QDF_STATUS_SUCCESS;
}

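/**
 * dp_tx_deinit_bank_profiles() - Free TX bank profiles and their lock
 * @be_soc: Beryllium-specific DP soc handle
 *
 * Return: None
 */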
void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
{
	qdf_mem_free(be_soc->bank_profiles);
	qdf_mutex_destroy(&be_soc->tx_bank_lock);
}

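/**
 * dp_tx_get_vdev_bank_config() - Derive a TCL bank config from vdev params
 * @be_vdev: Beryllium-specific DP vdev handle
 * @bank_config: HAL TX bank configuration to be filled
 *
 * Return: None
 */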
static
void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
				union hal_tx_bank_config *bank_config)
{
	struct dp_vdev *vdev = &be_vdev->vdev;
	struct dp_soc *soc = vdev->pdev->soc;

	bank_config->epd = 0;

	bank_config->encap_type = vdev->tx_encap_type;

	/* Only valid for raw frames. Needs work for RAW mode */
	bank_config->encrypt_type = 0;

	bank_config->src_buffer_swap = 0;
	bank_config->link_meta_swap = 0;

	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;

	bank_config->index_lookup_enable = 0;

	bank_config->addrx_en =
		(vdev->hal_desc_addr_search_flags & HAL_TX_DESC_ADDRX_EN) ?
		1 : 0;
	bank_config->addry_en =
		(vdev->hal_desc_addr_search_flags & HAL_TX_DESC_ADDRY_EN) ?
		1 : 0;

	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;

	/* vdev_id check is disabled for now; needs revisiting */
	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;

	bank_config->pmac_id = vdev->lmac_id;

	bank_config->mcast_pkt_ctrl = 0;
}

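/**
 * dp_tx_get_bank_profile() - Find or configure a TCL bank for a vdev
 * @be_soc: Beryllium-specific DP soc handle
 * @be_vdev: Beryllium-specific DP vdev handle
 *
 * Searches the bank-profile table for an already-configured bank whose
 * configuration matches the vdev; failing that, claims an unconfigured
 * bank or reuses one whose ref_count has dropped to zero, and programs
 * the corresponding HW bank register.
 *
 * Return: bank_id on success, DP_BE_INVALID_BANK_ID if no bank is available
 */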
int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev)
{
	char *temp_str = "";
	bool found_match = false;
	int bank_id = DP_BE_INVALID_BANK_ID;
	int i;
	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
	union hal_tx_bank_config vdev_config = {0};

	/* convert vdev params into hal_tx_bank_config */
	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);

	qdf_mutex_acquire(&be_soc->tx_bank_lock);
	/* go over all banks and find a matching/unconfigured/unused bank */
	for (i = 0; i < be_soc->num_bank_profiles; i++) {
		if (be_soc->bank_profiles[i].is_configured &&
		    (be_soc->bank_profiles[i].bank_config.val ^
						vdev_config.val) == 0) {
			found_match = true;
			break;
		}

		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
		    !be_soc->bank_profiles[i].is_configured)
			unconfigured_slot = i;
		else if (zero_ref_count_slot == DP_BE_INVALID_BANK_ID &&
		    !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
			zero_ref_count_slot = i;
	}

	if (found_match) {
		temp_str = "matching";
		bank_id = i;
		goto inc_ref_and_return;
	}
	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
		temp_str = "unconfigured";
		bank_id = unconfigured_slot;
		goto configure_and_return;
	}
	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
		temp_str = "zero_ref_count";
		bank_id = zero_ref_count_slot;
	}
	if (bank_id == DP_BE_INVALID_BANK_ID) {
		dp_alert("unable to find TX bank!");
		QDF_BUG(0);
		/* don't leak the lock on the failure path */
		qdf_mutex_release(&be_soc->tx_bank_lock);
		return bank_id;
	}

configure_and_return:
	be_soc->bank_profiles[bank_id].is_configured = true;
	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
				      &be_soc->bank_profiles[bank_id].bank_config,
				      bank_id);
inc_ref_and_return:
	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
	qdf_mutex_release(&be_soc->tx_bank_lock);

	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
		temp_str, bank_id, vdev_config.val,
		be_soc->bank_profiles[bank_id].bank_config.val,
		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));

	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
		be_soc->bank_profiles[bank_id].bank_config.epd,
		be_soc->bank_profiles[bank_id].bank_config.encap_type,
		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
		be_soc->bank_profiles[bank_id].bank_config.addry_en,
		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);

	return bank_id;
}

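/**
 * dp_tx_put_bank_profile() - Drop a vdev's reference on its TX bank
 * @be_soc: Beryllium-specific DP soc handle
 * @be_vdev: Beryllium-specific DP vdev handle
 *
 * Return: None
 */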
void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
			    struct dp_vdev_be *be_vdev)
{
	qdf_mutex_acquire(&be_soc->tx_bank_lock);
	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
	qdf_mutex_release(&be_soc->tx_bank_lock);
}

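/**
 * dp_tx_update_bank_profile() - Re-evaluate the TX bank for a vdev
 * @be_soc: Beryllium-specific DP soc handle
 * @be_vdev: Beryllium-specific DP vdev handle
 *
 * Releases the vdev's current bank and acquires one matching its
 * updated configuration.
 *
 * Return: None
 */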
void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
			       struct dp_vdev_be *be_vdev)
{
	dp_tx_put_bank_profile(be_soc, be_vdev);
	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
}

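/**
 * dp_tx_desc_pool_init_be() - Initialize a TX desc pool for cookie conversion
 * @soc: DP soc handle
 * @num_elem: number of TX descriptors in the pool
 * @pool_id: TX descriptor pool ID
 *
 * Allocates secondary page table (SPT) pages and records each descriptor's
 * VA in them, so that the HW completion cookie can later be converted back
 * to the descriptor VA.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */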
QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
				   uint16_t num_elem,
				   uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	struct dp_spt_page_desc_list *page_desc_list;
	struct dp_tx_desc_s *tx_desc;

	if (!num_elem) {
		dp_err("desc_num is 0!!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	tx_desc_pool = &soc->tx_desc[pool_id];
	page_desc_list = &be_soc->tx_spt_page_desc[pool_id];

	/* allocate SPT pages from the page desc pool */
	page_desc_list->num_spt_pages =
		dp_cc_spt_page_desc_alloc(be_soc,
					  &page_desc_list->spt_page_list_head,
					  &page_desc_list->spt_page_list_tail,
					  num_elem);

	if (!page_desc_list->num_spt_pages) {
		dp_err("fail to allocate cookie conversion spt pages");
		return QDF_STATUS_E_FAILURE;
	}

	/* put each TX desc VA into the SPT pages and get the corresponding ID */
	page_desc = page_desc_list->spt_page_list_head;
	tx_desc = tx_desc_pool->freelist;
	while (tx_desc) {
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 page_desc->avail_entry_index,
					 tx_desc);
		tx_desc->id =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       page_desc->avail_entry_index);
		tx_desc->pool_id = pool_id;
		tx_desc = tx_desc->next;

		page_desc->avail_entry_index++;
		if (page_desc->avail_entry_index >=
				DP_CC_SPT_PAGE_MAX_ENTRIES)
			page_desc = page_desc->next;
	}

	return QDF_STATUS_SUCCESS;
}

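/**
 * dp_tx_desc_pool_deinit_be() - Release a pool's cookie-conversion pages
 * @soc: DP soc handle
 * @tx_desc_pool: TX descriptor pool being deinitialized
 * @pool_id: TX descriptor pool ID
 *
 * Return: None
 */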
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	struct dp_spt_page_desc_list *page_desc_list;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	page_desc_list = &be_soc->tx_spt_page_desc[pool_id];

	if (!page_desc_list->num_spt_pages) {
		dp_warn("page_desc_list is empty for pool_id %d", pool_id);
		return;
	}

	/* cleanup for each page */
	page_desc = page_desc_list->spt_page_list_head;
	while (page_desc) {
		page_desc->avail_entry_index = 0;
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
		page_desc = page_desc->next;
	}

	/* free page descs back to the pool */
	dp_cc_spt_page_desc_free(be_soc,
				 &page_desc_list->spt_page_list_head,
				 &page_desc_list->spt_page_list_tail,
				 page_desc_list->num_spt_pages);
	page_desc_list->num_spt_pages = 0;
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
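/**
 * dp_tx_comp_nf_handler() - Near-full handler for the TX completion ring
 * @int_ctx: DP interrupt context
 * @soc: DP soc handle
 * @hal_ring_hdl: TX completion ring handle
 * @ring_id: TX completion ring number
 * @quota: remaining work budget
 *
 * Marks the ring near-full once its fill level crosses the threshold,
 * so that subsequent processing can react to the condition.
 *
 * Return: number of work units done (0 if the ring is not near full)
 */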
uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			       uint32_t quota)
{
	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&tx_comp_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif
