/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_be_tx.h"
#include "dp_tx_desc.h"
#include "hal_tx.h"
#include <hal_be_api.h>
#include <hal_be_tx.h>

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

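/*
 * dp_tx_comp_get_params_from_hal_desc_be() is built in one of three
 * variants, selected by the cookie conversion feature flags below:
 * with both DP_FEATURE_HW_COOKIE_CONVERSION and
 * DP_HW_COOKIE_CONVERT_EXCEPTION, the HW-converted descriptor VA is used
 * when conversion has completed and SW cookie lookup is the fallback;
 * with only DP_FEATURE_HW_COOKIE_CONVERSION, the HW-converted VA is used
 * unconditionally; otherwise SW converts the cookie ID to a descriptor
 * VA via dp_cc_desc_find().
 */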
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t tx_desc_id;

	if (qdf_likely(
		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc))) {
		/* HW cookie conversion done */
		*r_tx_desc = (struct dp_tx_desc_s *)
				hal_tx_comp_get_desc_va(tx_comp_hal_desc);
	} else {
		/* SW cookie conversion to VA */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		*r_tx_desc =
		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
	}
}
#else
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	*r_tx_desc = (struct dp_tx_desc_s *)
			hal_tx_comp_get_desc_va(tx_comp_hal_desc);
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else

void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t tx_desc_id;

	/* SW cookie conversion to VA */
	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	*r_tx_desc =
	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
#ifdef DP_TX_IMPLICIT_RBM_MAPPING
/**
 * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion
 * @soc: DP soc structure pointer
 * @ring_id: transmit queue/ring id to be used when XPS is enabled
 *
 * Return: RBM ID corresponding to TCL ring_id
 */
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
					  uint8_t ring_id)
{
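	/*
	 * With implicit RBM mapping the RBM is programmed once at ring
	 * init rather than per descriptor (an inference from the feature
	 * name), so a fixed value of 0 is returned here.
	 */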
	return 0;
}
#else
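/*
 * Explicit RBM mapping: ring 0 maps to HAL_WBM_SW2_BM_ID() and rings
 * 1..N map linearly to wbm_sw0_bm_id + (ring_id - 1).
 */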
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
}
#endif /* DP_TX_IMPLICIT_RBM_MAPPING */
#else
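/*
 * Without TX multiqueue support the RBM for a given TCL index is taken
 * from the per-index map in the wlan_cfg context.
 */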
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
					  uint8_t tcl_index)
{
	uint8_t rbm;

	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
	return rbm;
}
#endif

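/**
 * dp_tx_hw_enqueue_be() - Enqueue a TX descriptor to the HW TCL ring
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @tx_desc: SW TX descriptor to be programmed into the HW descriptor
 * @fw_metadata: metadata to be sent to FW in the TX descriptor
 * @tx_exc_metadata: exception-path metadata from the handler, may be NULL
 * @msdu_info: MSDU info carrying the TX queue and TID
 *
 * Fills a cached HW descriptor, claims an entry on the TCL ring for
 * @msdu_info->tx_queue.ring_id, syncs the descriptor to HW and updates
 * the enqueue statistics.
 *
 * Return: QDF_STATUS_SUCCESS on enqueue, QDF_STATUS_E_RESOURCES if the
 *	   descriptor id is invalid or the ring is full/inaccessible
 */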
QDF_STATUS
dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	uint8_t ring_id = tx_q->ring_id;
	uint8_t tid = msdu_info->tid;
	struct dp_vdev_be *be_vdev;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
	hal_ring_handle_t hal_ring_hdl = NULL;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	if (qdf_unlikely(tx_exc_metadata)) {
		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
				   CDP_INVALID_TX_ENCAP_TYPE) ||
				   (tx_exc_metadata->tx_encap_type ==
				    vdev->tx_encap_type));

		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
			qdf_assert_always((tx_exc_metadata->sec_type ==
					   CDP_INVALID_SEC_TYPE) ||
					   tx_exc_metadata->sec_type ==
					   vdev->sec_type);
	}

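	/*
	 * Program all descriptor fields into a cached copy on the stack
	 * first; it is synced to the claimed HW ring entry in one shot
	 * further below.
	 */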
	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
				    tx_desc->dma_addr, bm_id, tx_desc->id,
				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
				   vdev->lmac_id);

	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
					vdev->bss_ast_idx);
	/*
	 * The bank ID is used as the DSCP table number on Beryllium,
	 * so there is no explicit DSCP_TID_TABLE_NUM field.
	 */

	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if (vdev->csum_enabled &&
	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
					QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	      qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	hal_tx_desc_set_bank_id(hal_tx_desc_cached, be_vdev->bank_id);

	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());

	dp_verbose_debug("length:%d, frag:%d, dma_addr:%llx, offset:%d, desc_id:%u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return status;
	}

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);

	/* Sync cached descriptor with HW */
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);

	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);

	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	dp_tx_update_stats(soc, tx_desc->nbuf);
	status = QDF_STATUS_SUCCESS;

	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
				 hal_ring_hdl, soc);

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);

	return status;
}

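/**
 * dp_tx_init_bank_profiles() - Allocate and initialize TX bank profiles
 * @be_soc: BE DP soc handle
 *
 * Allocates one profile per TCL bank reported by HAL and initializes the
 * lock and per-bank state used by dp_tx_get_bank_profile().
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on allocation
 *	   failure
 */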
QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
{
	int i, num_tcl_banks;

	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);

	qdf_assert_always(num_tcl_banks);
	be_soc->num_bank_profiles = num_tcl_banks;

	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
					       sizeof(*be_soc->bank_profiles));
	if (!be_soc->bank_profiles) {
		dp_err("unable to allocate memory for DP TX Profiles!");
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mutex_create(&be_soc->tx_bank_lock);

	for (i = 0; i < num_tcl_banks; i++) {
		be_soc->bank_profiles[i].is_configured = false;
		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
	}
	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);
	return QDF_STATUS_SUCCESS;
}

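/**
 * dp_tx_deinit_bank_profiles() - Free the TX bank profiles and their lock
 * @be_soc: BE DP soc handle
 */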
void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
{
	qdf_mem_free(be_soc->bank_profiles);
	qdf_mutex_destroy(&be_soc->tx_bank_lock);
}

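/**
 * dp_tx_get_vdev_bank_config() - Derive the TX bank config for a vdev
 * @be_vdev: BE vdev handle
 * @bank_config: bank config to be filled from the vdev parameters
 */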
static
void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
				union hal_tx_bank_config *bank_config)
{
	struct dp_vdev *vdev = &be_vdev->vdev;
	struct dp_soc *soc = vdev->pdev->soc;

	bank_config->epd = 0;

	bank_config->encap_type = vdev->tx_encap_type;

	/* Encrypt type is valid only for raw frames; needs work for RAW mode */
	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
	} else {
		bank_config->encrypt_type = 0;
	}

	bank_config->src_buffer_swap = 0;
	bank_config->link_meta_swap = 0;

	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta) {
		bank_config->index_lookup_enable = 1;
		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
		bank_config->addrx_en = 0;
		bank_config->addry_en = 0;
	} else {
		bank_config->index_lookup_enable = 0;
		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
		bank_config->addrx_en =
			(vdev->hal_desc_addr_search_flags &
			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
		bank_config->addry_en =
			(vdev->hal_desc_addr_search_flags &
			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
	}

	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;

	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;

	/* Disabling vdev id check for now. Needs revisit. */
	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;

	bank_config->pmac_id = vdev->lmac_id;
}

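/**
 * dp_tx_get_bank_profile() - Find or configure a TX bank for a vdev
 * @be_soc: BE DP soc handle
 * @be_vdev: BE vdev handle
 *
 * Searches the bank profiles for, in order of preference, an already
 * configured bank whose config matches the vdev, an unconfigured bank,
 * or a configured bank with zero references; configures it if needed and
 * takes a reference on it.
 *
 * Return: bank id on success, DP_BE_INVALID_BANK_ID if no bank is free
 */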
int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev)
{
	char *temp_str = "";
	bool found_match = false;
	int bank_id = DP_BE_INVALID_BANK_ID;
	int i;
	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
	union hal_tx_bank_config vdev_config = {0};

	/* convert vdev params into hal_tx_bank_config */
	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);

	qdf_mutex_acquire(&be_soc->tx_bank_lock);
	/* go over all banks and find a matching, unconfigured or unused bank */
	for (i = 0; i < be_soc->num_bank_profiles; i++) {
		if (be_soc->bank_profiles[i].is_configured &&
		    (be_soc->bank_profiles[i].bank_config.val ^
						vdev_config.val) == 0) {
			found_match = true;
			break;
		}

		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
		    !be_soc->bank_profiles[i].is_configured)
			unconfigured_slot = i;
		else if (zero_ref_count_slot == DP_BE_INVALID_BANK_ID &&
			 !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
			zero_ref_count_slot = i;
	}

	if (found_match) {
		temp_str = "matching";
		bank_id = i;
		goto inc_ref_and_return;
	}
	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
		temp_str = "unconfigured";
		bank_id = unconfigured_slot;
		goto configure_and_return;
	}
	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
		temp_str = "zero_ref_count";
		bank_id = zero_ref_count_slot;
	}
	if (bank_id == DP_BE_INVALID_BANK_ID) {
		dp_alert("unable to find TX bank!");
		QDF_BUG(0);
		/* drop the lock before returning the invalid bank id */
		qdf_mutex_release(&be_soc->tx_bank_lock);
		return bank_id;
	}

configure_and_return:
	be_soc->bank_profiles[bank_id].is_configured = true;
	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
				      &be_soc->bank_profiles[bank_id].bank_config,
				      bank_id);
inc_ref_and_return:
	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
	qdf_mutex_release(&be_soc->tx_bank_lock);

	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
		temp_str, bank_id, vdev_config.val,
		be_soc->bank_profiles[bank_id].bank_config.val,
		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));

	dp_info("epd:%x encap:%x encrypt:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
		be_soc->bank_profiles[bank_id].bank_config.epd,
		be_soc->bank_profiles[bank_id].bank_config.encap_type,
		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
		be_soc->bank_profiles[bank_id].bank_config.addry_en,
		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);

	return bank_id;
}

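/**
 * dp_tx_put_bank_profile() - Drop a vdev's reference on its TX bank
 * @be_soc: BE DP soc handle
 * @be_vdev: BE vdev handle
 */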
void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
			    struct dp_vdev_be *be_vdev)
{
	qdf_mutex_acquire(&be_soc->tx_bank_lock);
	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
	qdf_mutex_release(&be_soc->tx_bank_lock);
}

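/**
 * dp_tx_update_bank_profile() - Re-evaluate the TX bank for a vdev
 * @be_soc: BE DP soc handle
 * @be_vdev: BE vdev handle
 *
 * Releases the current bank and acquires one matching the vdev's
 * updated configuration.
 */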
void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
			       struct dp_vdev_be *be_vdev)
{
	dp_tx_put_bank_profile(be_soc, be_vdev);
	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
}

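/**
 * dp_tx_desc_pool_init_be() - Init a TX desc pool for cookie conversion
 * @soc: DP soc handle
 * @num_elem: number of TX descriptors in the pool
 * @pool_id: id of the descriptor pool
 *
 * Allocates secondary page table (SPT) pages, records each descriptor's
 * VA in them and assigns every descriptor a cookie conversion id.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */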
QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
				   uint16_t num_elem,
				   uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	struct dp_spt_page_desc_list *page_desc_list;
	struct dp_tx_desc_s *tx_desc;

	if (!num_elem) {
		dp_err("num_elem is 0!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	tx_desc_pool = &soc->tx_desc[pool_id];
	page_desc_list = &be_soc->tx_spt_page_desc[pool_id];

	/* allocate SPT pages from the page desc pool */
	page_desc_list->num_spt_pages =
		dp_cc_spt_page_desc_alloc(be_soc,
					  &page_desc_list->spt_page_list_head,
					  &page_desc_list->spt_page_list_tail,
					  num_elem);

	if (!page_desc_list->num_spt_pages) {
		dp_err("failed to allocate cookie conversion SPT pages");
		return QDF_STATUS_E_FAILURE;
	}

	/* record each TX desc VA in the SPT pages and generate its cookie id */
	page_desc = page_desc_list->spt_page_list_head;
	tx_desc = tx_desc_pool->freelist;
	while (tx_desc) {
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 page_desc->avail_entry_index,
					 tx_desc);
		tx_desc->id =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       page_desc->avail_entry_index);
		tx_desc->pool_id = pool_id;
		tx_desc = tx_desc->next;

		page_desc->avail_entry_index++;
		if (page_desc->avail_entry_index >=
				DP_CC_SPT_PAGE_MAX_ENTRIES)
			page_desc = page_desc->next;
	}

	return QDF_STATUS_SUCCESS;
}

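/**
 * dp_tx_desc_pool_deinit_be() - Deinit a TX desc pool's cookie conversion
 * @soc: DP soc handle
 * @tx_desc_pool: TX descriptor pool to be deinitialized
 * @pool_id: id of the descriptor pool
 *
 * Clears the SPT pages backing the pool and returns them to the page
 * descriptor pool.
 */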
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	struct dp_spt_page_desc_list *page_desc_list;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	page_desc_list = &be_soc->tx_spt_page_desc[pool_id];

	if (!page_desc_list->num_spt_pages) {
		dp_warn("page_desc_list is empty for pool_id %d", pool_id);
		return;
	}

	/* clean up each page */
	page_desc = page_desc_list->spt_page_list_head;
	while (page_desc) {
		page_desc->avail_entry_index = 0;
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
		page_desc = page_desc->next;
	}

	/* free page descriptors back to the pool */
	dp_cc_spt_page_desc_free(be_soc,
				 &page_desc_list->spt_page_list_head,
				 &page_desc_list->spt_page_list_tail,
				 page_desc_list->num_spt_pages);
	page_desc_list->num_spt_pages = 0;
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
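/**
 * dp_tx_comp_nf_handler() - TX completion ring near-full handler
 * @int_ctx: interrupt context
 * @soc: DP soc handle
 * @hal_ring_hdl: TX completion ring handle
 * @ring_id: TX completion ring id
 * @quota: upper bound on the amount of work to be done
 *
 * Marks the ring near-full once its fill level crosses
 * DP_SRNG_THRESH_NEAR_FULL.
 *
 * Return: amount of work done, 0 if the near-full threshold is not hit
 */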
uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			       uint32_t quota)
{
	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&tx_comp_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif
515