xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <cds_api.h>
21 
22 /* OS abstraction libraries */
23 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
24 #include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
25 #include <qdf_util.h>           /* qdf_unlikely */
26 #include "dp_types.h"
27 #include "dp_tx_desc.h"
28 
29 #include <cdp_txrx_handle.h>
30 #include "dp_internal.h"
31 #define INVALID_FLOW_ID 0xFF
32 #define MAX_INVALID_BIN 3
33 #define GLOBAL_FLOW_POOL_STATS_LEN 25
34 #define FLOW_POOL_LOG_LEN 50
35 
36 #ifdef QCA_AC_BASED_FLOW_CONTROL
37 /**
38  * dp_tx_initialize_threshold() - Initialize flow pool thresholds
39  * @pool: flow_pool
40  * @start_threshold: start threshold for the given AC
41  * @stop_threshold: stop threshold for the given AC
42  * @flow_pool_size: flow pool size
43  *
44  * Return: none
45  */
46 static inline void
47 dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
48 			   uint32_t start_threshold,
49 			   uint32_t stop_threshold,
50 			   uint16_t flow_pool_size)
51 {
52 	/* BE_BK threshold is the same as the legacy (pre-AC) threshold */
53 	pool->start_th[DP_TH_BE_BK] = (start_threshold
54 					* flow_pool_size) / 100;
55 	pool->stop_th[DP_TH_BE_BK] = (stop_threshold
56 					* flow_pool_size) / 100;
57 
58 	/* Update VI threshold based on BE_BK threshold */
59 	pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
60 					* FL_TH_VI_PERCENTAGE) / 100;
61 	pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
62 					* FL_TH_VI_PERCENTAGE) / 100;
63 
64 	/* Update VO threshold based on BE_BK threshold */
65 	pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
66 					* FL_TH_VO_PERCENTAGE) / 100;
67 	pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
68 					* FL_TH_VO_PERCENTAGE) / 100;
69 
70 	/* Update High Priority threshold based on BE_BK threshold */
71 	pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
72 					* FL_TH_HI_PERCENTAGE) / 100;
73 	pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
74 					* FL_TH_HI_PERCENTAGE) / 100;
75 
76 	dp_debug("tx flow control thresholds are set, pool size is %d",
77 		 flow_pool_size);
78 }
79 
80 /**
81  * dp_tx_flow_pool_reattach() - Reattach flow_pool
82  * @pool: flow_pool
83  *
84  * Return: none
85  */
86 static inline void
87 dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
88 {
89 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
90 		  "%s: flow pool already allocated, attached %d times",
91 		  __func__, pool->pool_create_cnt);
92 
93 	pool->status = FLOW_POOL_ACTIVE_UNPAUSED_REATTACH;
94 	pool->pool_create_cnt++;
95 }
96 
97 /**
98  * dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool
99  * @pool: flow_pool
100  *
101  * Return: none
102  */
103 static inline void
104 dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
105 {
106 	int i;
107 
108 	for (i = 0; i < FL_TH_MAX; i++) {
109 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
110 			  "Level %d :: Start threshold %d :: Stop threshold %d",
111 			  i, pool->start_th[i], pool->stop_th[i]);
112 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
113 			  "Level %d :: Maximum pause time %lu ms",
114 			  i, pool->max_pause_time[i]);
115 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
116 			  "Level %d :: Latest pause timestamp %lu",
117 			  i, pool->latest_pause_time[i]);
118 	}
119 }
120 
121 /**
122  * dp_tx_flow_ctrl_reset_subqueues() - Reset subqueues to original state
123  * @soc: dp soc
124  * @pool: flow pool
125  * @pool_status: flow pool status
126  *
127  * Return: none
128  */
129 static inline void
130 dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
131 				struct dp_tx_desc_pool_s *pool,
132 				enum flow_pool_status pool_status)
133 {
134 	switch (pool_status) {
135 	case FLOW_POOL_ACTIVE_PAUSED:
136 		soc->pause_cb(pool->flow_pool_id,
137 			      WLAN_NETIF_PRIORITY_QUEUE_ON,
138 			      WLAN_DATA_FLOW_CTRL_PRI);
139 		fallthrough;
140 
141 	case FLOW_POOL_VO_PAUSED:
142 		soc->pause_cb(pool->flow_pool_id,
143 			      WLAN_NETIF_VO_QUEUE_ON,
144 			      WLAN_DATA_FLOW_CTRL_VO);
145 		fallthrough;
146 
147 	case FLOW_POOL_VI_PAUSED:
148 		soc->pause_cb(pool->flow_pool_id,
149 			      WLAN_NETIF_VI_QUEUE_ON,
150 			      WLAN_DATA_FLOW_CTRL_VI);
151 		fallthrough;
152 
153 	case FLOW_POOL_BE_BK_PAUSED:
154 		soc->pause_cb(pool->flow_pool_id,
155 			      WLAN_NETIF_BE_BK_QUEUE_ON,
156 			      WLAN_DATA_FLOW_CTRL_BE_BK);
157 		fallthrough;
158 	default:
159 		break;
160 	}
161 }
162 
163 #else
164 static inline void
165 dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
166 			   uint32_t start_threshold,
167 			   uint32_t stop_threshold,
168 			   uint16_t flow_pool_size)
169 
170 {
171 	/* INI values are percentages, so divide by 100 */
172 	pool->start_th = (start_threshold * flow_pool_size) / 100;
173 	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
174 }
175 
176 static inline void
177 dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
178 {
179 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
180 		  "%s: flow pool already allocated, attached %d times",
181 		  __func__, pool->pool_create_cnt);
182 	if (pool->avail_desc > pool->start_th)
183 		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
184 	else
185 		pool->status = FLOW_POOL_ACTIVE_PAUSED;
186 
187 	pool->pool_create_cnt++;
188 }
189 
190 static inline void
191 dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
192 {
193 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
194 		  "Start threshold %d :: Stop threshold %d",
195 		  pool->start_th, pool->stop_th);
196 }
197 
198 static inline void
199 dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
200 				struct dp_tx_desc_pool_s *pool,
201 				enum flow_pool_status pool_status)
202 {
203 }
204 
205 #endif
206 
207 /**
208  * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
209  *
210  * @soc_hdl: Handle to struct cdp_soc_t.
211  *
212  * Return: none
213  */
214 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
215 {
216 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
217 	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
218 	struct dp_tx_desc_pool_s *pool = NULL;
219 	struct dp_tx_desc_pool_s tmp_pool;
220 	int i;
221 
222 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
223 		"No of pool map received %d", pool_stats->pool_map_count);
224 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
225 		"No of pool unmap received %d",	pool_stats->pool_unmap_count);
226 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
227 		"Pkt dropped due to unavailability of pool %d",
228 		pool_stats->pkt_drop_no_pool);
229 
230 	/*
231 	 * Nested spin lock.
232 	 * Always take in below order.
233 	 * flow_pool_array_lock -> flow_pool_lock
234 	 */
235 	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
236 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
237 		pool = &soc->tx_desc[i];
238 		if (pool->status > FLOW_POOL_INVALID)
239 			continue;
240 		qdf_spin_lock_bh(&pool->flow_pool_lock);
241 		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
242 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
243 		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
244 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
245 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
246 			"Flow_pool_id %d :: status %d",
247 			tmp_pool.flow_pool_id, tmp_pool.status);
248 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
249 			"Total %d :: Available %d",
250 			tmp_pool.pool_size, tmp_pool.avail_desc);
251 		dp_tx_flow_pool_dump_threshold(&tmp_pool);
252 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
253 			"Member flow_id  %d :: flow_type %d",
254 			tmp_pool.flow_pool_id, tmp_pool.flow_type);
255 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
256 			"Pkt dropped due to unavailability of descriptors %d",
257 			tmp_pool.pkt_drop_no_desc);
258 		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
259 	}
260 	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
261 }
262 
263 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
264 {
265 	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
266 	struct dp_tx_desc_pool_s *pool = NULL;
267 	char *comb_log_str;
268 	uint32_t comb_log_str_size;
269 	int bytes_written = 0;
270 	int i;
271 
272 	comb_log_str_size = GLOBAL_FLOW_POOL_STATS_LEN +
273 				(FLOW_POOL_LOG_LEN * MAX_TXDESC_POOLS) + 1;
274 	comb_log_str = qdf_mem_malloc(comb_log_str_size);
275 	if (!comb_log_str)
276 		return;
277 
278 	bytes_written = qdf_snprintf(&comb_log_str[bytes_written],
279 				     comb_log_str_size, "G:(%d,%d,%d) ",
280 				     pool_stats->pool_map_count,
281 				     pool_stats->pool_unmap_count,
282 				     pool_stats->pkt_drop_no_pool);
283 
284 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
285 		pool = &soc->tx_desc[i];
286 		if (pool->status > FLOW_POOL_INVALID)
287 			continue;
288 		bytes_written += qdf_snprintf(&comb_log_str[bytes_written],
289 				      (bytes_written >= comb_log_str_size) ? 0 :
290 				      comb_log_str_size - bytes_written,
291 				      "| %d %d: (%d,%d,%d)",
292 				      pool->flow_pool_id, pool->status,
293 				      pool->pool_size, pool->avail_desc,
294 				      pool->pkt_drop_no_desc);
295 	}
296 
297 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
298 		  "FLOW_POOL_STATS %s", comb_log_str);
299 
300 	qdf_mem_free(comb_log_str);
301 }
302 
303 /**
304  * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
305  *
306  * @soc: Handle to struct dp_soc.
307  *
308  * Return: None
309  */
310 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
311 {
312 
313 	if (!soc) {
314 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
315 			"%s: soc is null", __func__);
316 		return;
317 	}
318 	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
319 }
320 
321 /**
322  * dp_tx_create_flow_pool() - create flow pool
323  * @soc: Handle to struct dp_soc
324  * @flow_pool_id: flow pool id
325  * @flow_pool_size: flow pool size
326  *
327  * Return: flow_pool pointer / NULL for error
328  */
329 struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
330 	uint8_t flow_pool_id, uint32_t flow_pool_size)
331 {
332 	struct dp_tx_desc_pool_s *pool;
333 	uint32_t stop_threshold;
334 	uint32_t start_threshold;
335 
336 	if (flow_pool_id >= MAX_TXDESC_POOLS) {
337 		dp_err("invalid flow_pool_id %d", flow_pool_id);
338 		return NULL;
339 	}
340 	pool = &soc->tx_desc[flow_pool_id];
341 	qdf_spin_lock_bh(&pool->flow_pool_lock);
342 	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
343 		dp_tx_flow_pool_reattach(pool);
344 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
345 		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
346 		       pool->status, pool->pool_create_cnt);
347 		return pool;
348 	}
349 
350 	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
351 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
352 		dp_err("dp_tx_desc_pool_alloc failed flow_pool_id: %d",
353 			flow_pool_id);
354 		return NULL;
355 	}
356 
357 	if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) {
358 		dp_tx_desc_pool_free(soc, flow_pool_id);
359 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
360 		dp_err("dp_tx_desc_pool_init failed flow_pool_id: %d",
361 			flow_pool_id);
362 		return NULL;
363 	}
364 
365 	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
366 	start_threshold = stop_threshold +
367 		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);
368 
369 	pool->flow_pool_id = flow_pool_id;
370 	pool->pool_size = flow_pool_size;
371 	pool->avail_desc = flow_pool_size;
372 	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
373 	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
374 				   flow_pool_size);
375 	pool->pool_create_cnt++;
376 
377 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
378 
379 	return pool;
380 }
381 
382 /**
383  * dp_is_tx_flow_pool_delete_allowed() - Can flow pool be deleted
384  * @soc: Handle to struct dp_soc
385  * @vdev_id: vdev_id corresponding to flow pool
386  *
387  * Check if it is OK to go ahead and delete the flow pool. One such case is
388  * MLO, where it is not OK to delete the flow pool when a link switch happens.
389  *
390  * Return: true if the flow pool may be deleted, false otherwise
391  */
392 static bool dp_is_tx_flow_pool_delete_allowed(struct dp_soc *soc,
393 					      uint8_t vdev_id)
394 {
395 	struct dp_vdev *vdev = NULL;
396 	bool is_allow = true;
397 
398 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MISC);
399 
400 	/* only check for sta mode */
401 	if (!vdev || vdev->opmode != wlan_op_mode_sta)
402 		goto comp_ret;
403 
404 	/*
405 	 * Deleting the current pool is not allowed only when the current vdev
406 	 * belongs to an MLO connection that is still connected; for a legacy
407 	 * connection, deletion is always allowed.
408 	 */
409 	is_allow = policy_mgr_is_mlo_sta_disconnected(
410 			(struct wlan_objmgr_psoc *)soc->ctrl_psoc,
411 			vdev_id);
412 comp_ret:
413 	if (vdev)
414 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);
415 
416 	return is_allow;
417 }
418 
419 /**
420  * dp_tx_delete_flow_pool() - delete flow pool
421  * @soc: Handle to struct dp_soc
422  * @pool: flow pool pointer
423  * @force: free pool forcefully
424  *
425  * Delete flow_pool if all tx descriptors are available.
426  * Otherwise put it in FLOW_POOL_INVALID state.
427  * If force is set then pull all available descriptors to
428  * global pool.
429  *
430  * Return: 0 on success, negative error code on failure
431  */
432 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
433 	bool force)
434 {
435 	struct dp_vdev *vdev;
436 	enum flow_pool_status pool_status;
437 
438 	if (!soc || !pool) {
439 		dp_err("pool or soc is NULL");
440 		QDF_ASSERT(0);
441 		return -ENOMEM;
442 	}
443 
444 	dp_info("pool_id %d create_cnt=%d, avail_desc=%d, size=%d, status=%d",
445 		pool->flow_pool_id, pool->pool_create_cnt, pool->avail_desc,
446 		pool->pool_size, pool->status);
447 
448 	if (!dp_is_tx_flow_pool_delete_allowed(soc, pool->flow_pool_id)) {
449 		dp_info("skip pool id %d delete as it's not allowed",
450 			pool->flow_pool_id);
451 		return -EAGAIN;
452 	}
453 
454 	qdf_spin_lock_bh(&pool->flow_pool_lock);
455 	if (!pool->pool_create_cnt) {
456 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
457 		dp_err("flow pool either not created or already deleted");
458 		return -ENOENT;
459 	}
460 	pool->pool_create_cnt--;
461 	if (pool->pool_create_cnt) {
462 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
463 		dp_err("pool is still attached, pending detach %d",
464 		       pool->pool_create_cnt);
465 		return -EAGAIN;
466 	}
467 
468 	if (pool->avail_desc < pool->pool_size) {
469 		pool_status = pool->status;
470 		pool->status = FLOW_POOL_INVALID;
471 		dp_tx_flow_ctrl_reset_subqueues(soc, pool, pool_status);
472 
473 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
474 		/* Reset TX descriptors still associated with this vdev */
475 		vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
476 					     DP_MOD_ID_MISC);
477 		if (vdev) {
478 			dp_tx_desc_flush(vdev->pdev, vdev, false);
479 			dp_vdev_unref_delete(soc, vdev,
480 					     DP_MOD_ID_MISC);
481 		}
482 		dp_err("avail desc less than pool size");
483 		return -EAGAIN;
484 	}
485 
486 	/* We have all the descriptors for the pool, we can delete the pool */
487 	dp_tx_desc_pool_deinit(soc, pool->flow_pool_id);
488 	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
489 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
490 	return 0;
491 }
492 
493 /**
494  * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
495  * @pdev: Handle to struct dp_pdev
496  * @pool: flow_pool
497  * @vdev_id: flow_id /vdev_id
498  *
499  * Return: none
500  */
501 static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
502 	struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
503 {
504 	struct dp_vdev *vdev;
505 	struct dp_soc *soc = pdev->soc;
506 
507 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
508 	if (!vdev) {
509 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
510 		   "%s: invalid vdev_id %d",
511 		   __func__, vdev_id);
512 		return;
513 	}
514 
515 	vdev->pool = pool;
516 	qdf_spin_lock_bh(&pool->flow_pool_lock);
517 	pool->pool_owner_ctx = soc;
518 	pool->flow_pool_id = vdev_id;
519 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
520 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
521 }
522 
523 /**
524  * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
525  * @pdev: Handle to struct dp_pdev
526  * @pool: flow_pool
527  * @vdev_id: flow_id /vdev_id
528  *
529  * Return: none
530  */
531 static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
532 		struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
533 {
534 	struct dp_vdev *vdev;
535 	struct dp_soc *soc = pdev->soc;
536 
537 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
538 	if (!vdev) {
539 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
540 		   "%s: invalid vdev_id %d",
541 		   __func__, vdev_id);
542 		return;
543 	}
544 
545 	vdev->pool = NULL;
546 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
547 }
548 
549 /**
550  * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
551  * @pdev: Handle to struct dp_pdev
552  * @flow_id: flow id
553  * @flow_type: flow type
554  * @flow_pool_id: pool id
555  * @flow_pool_size: pool size
556  *
557  * Process below target to host message
558  * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
559  *
560  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
561  */
562 QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
563 	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size)
564 {
565 	struct dp_soc *soc = pdev->soc;
566 	struct dp_tx_desc_pool_s *pool;
567 	enum htt_flow_type type = flow_type;
568 
569 
570 	dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
571 		flow_id, flow_type, flow_pool_id, flow_pool_size);
572 
573 	if (qdf_unlikely(!soc)) {
574 		dp_err("soc is NULL");
575 		return QDF_STATUS_E_FAULT;
576 	}
577 	soc->pool_stats.pool_map_count++;
578 
579 	pool = dp_tx_create_flow_pool(soc, flow_pool_id,
580 			flow_pool_size);
581 	if (!pool) {
582 		dp_err("creation of flow_pool %d size %d failed",
583 		       flow_pool_id, flow_pool_size);
584 		return QDF_STATUS_E_RESOURCES;
585 	}
586 
587 	switch (type) {
588 
589 	case FLOW_TYPE_VDEV:
590 		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
591 		break;
592 	default:
593 		dp_err("flow type %d not supported", type);
594 		break;
595 	}
596 
597 	return QDF_STATUS_SUCCESS;
598 }
599 
600 /**
601  * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
602  * @pdev: Handle to struct dp_pdev
603  * @flow_id: flow id
604  * @flow_type: flow type
605  * @flow_pool_id: pool id
606  *
607  * Process below target to host message
608  * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
609  *
610  * Return: none
611  */
612 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
613 	uint8_t flow_type, uint8_t flow_pool_id)
614 {
615 	struct dp_soc *soc;
616 	struct dp_tx_desc_pool_s *pool;
617 	enum htt_flow_type type = flow_type;
618 
619 	dp_info("flow_id %d flow_type %d flow_pool_id %d", flow_id, flow_type,
620 		flow_pool_id);
621 
622 	if (qdf_unlikely(!pdev)) {
623 		dp_err("pdev is NULL");
624 		return;
625 	}
626 	soc = pdev->soc;
626 	soc->pool_stats.pool_unmap_count++;
627 
628 	pool = &soc->tx_desc[flow_pool_id];
629 	dp_info("pool status: %d", pool->status);
630 
631 	if (pool->status == FLOW_POOL_INACTIVE) {
632 		dp_err("flow pool id: %d is inactive, ignore unmap",
633 			flow_pool_id);
634 		return;
635 	}
636 
637 	switch (type) {
638 
639 	case FLOW_TYPE_VDEV:
640 		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
641 		break;
642 	default:
643 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
644 		   "%s: flow type %d not supported !!!",
645 		   __func__, type);
646 		return;
647 	}
648 
649 	/* only delete if all descriptors are available */
650 	dp_tx_delete_flow_pool(soc, pool, false);
651 }
652 
653 /**
654  * dp_tx_flow_control_init() - Initialize tx flow control
655  * @soc: Handle to struct dp_soc
656  *
657  * Return: none
658  */
659 void dp_tx_flow_control_init(struct dp_soc *soc)
660 {
661 	qdf_spinlock_create(&soc->flow_pool_array_lock);
662 }
663 
664 /**
665  * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
666  * @soc: Handle to struct dp_soc
667  *
668  * Return: none
669  */
670 static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
671 {
672 	struct dp_tx_desc_pool_s *tx_desc_pool;
673 	int i;
674 
675 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
676 		tx_desc_pool = &((soc)->tx_desc[i]);
677 		if (!tx_desc_pool->desc_pages.num_pages)
678 			continue;
679 
680 		dp_tx_desc_pool_deinit(soc, i);
681 		dp_tx_desc_pool_free(soc, i);
682 	}
683 }
684 
685 /**
686  * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
687  * @soc: Handle to struct dp_soc
688  *
689  * Return: none
690  */
691 void dp_tx_flow_control_deinit(struct dp_soc *soc)
692 {
693 	dp_tx_desc_pool_dealloc(soc);
694 
695 	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
696 }
697 
698 /**
699  * dp_txrx_register_pause_cb() - Register pause callback
700  * @handle: Handle to struct cdp_soc_t
701  * @pause_cb: Tx pause callback
702  *
703  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on invalid input
704  */
705 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
706 	tx_pause_callback pause_cb)
707 {
708 	struct dp_soc *soc = (struct dp_soc *)handle;
709 
710 	if (!soc || !pause_cb) {
711 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
712 			FL("soc or pause_cb is NULL"));
713 		return QDF_STATUS_E_INVAL;
714 	}
715 	soc->pause_cb = pause_cb;
716 
717 	return QDF_STATUS_SUCCESS;
718 }
719 
720 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
721 			       uint8_t vdev_id)
722 {
723 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
724 	struct dp_pdev *pdev =
725 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
726 	int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
727 
728 	if (!pdev) {
729 		dp_err("pdev is NULL");
730 		return QDF_STATUS_E_INVAL;
731 	}
732 
733 	return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
734 					   vdev_id, tx_ring_size);
735 }
736 
737 void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
738 			   uint8_t vdev_id)
739 {
740 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
741 	struct dp_pdev *pdev =
742 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
743 
744 	if (!pdev) {
745 		dp_err("pdev is NULL");
746 		return;
747 	}
748 
749 	return dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
750 					     FLOW_TYPE_VDEV, vdev_id);
751 }
752