xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <cds_api.h>
20 
21 /* OS abstraction libraries */
22 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
23 #include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
24 #include <qdf_util.h>           /* qdf_unlikely */
25 #include "dp_types.h"
26 #include "dp_tx_desc.h"
27 
28 #include <cdp_txrx_handle.h>
29 #include "dp_internal.h"
30 #define INVALID_FLOW_ID 0xFF
31 #define MAX_INVALID_BIN 3
32 
33 #ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * dp_tx_initialize_threshold() - Initialize flow-pool pause/resume thresholds
 * @pool: flow_pool
 * @start_threshold: start (resume) threshold of BE_BK AC, percent of pool size
 * @stop_threshold: stop (pause) threshold of BE_BK AC, percent of pool size
 * @flow_pool_size: flow pool size
 *
 * The BE_BK thresholds are taken directly from the given percentages;
 * the VI, VO and High-Priority levels are then derived from the BE_BK
 * values using the FL_TH_*_PERCENTAGE scale factors, so each higher
 * priority level pauses later and resumes earlier than the one below it.
 *
 * Return: none
 */
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
			   uint32_t start_threshold,
			   uint32_t stop_threshold,
			   uint16_t flow_pool_size)
{
	/* BE_BK threshold is same as the configured (legacy) threshold */
	pool->start_th[DP_TH_BE_BK] = (start_threshold
					* flow_pool_size) / 100;
	pool->stop_th[DP_TH_BE_BK] = (stop_threshold
					* flow_pool_size) / 100;

	/* Update VI threshold based on BE_BK threshold */
	pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_VI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_VI_PERCENTAGE) / 100;

	/* Update VO threshold based on BE_BK threshold */
	pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_VO_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_VO_PERCENTAGE) / 100;

	/* Update High Priority threshold based on BE_BK threshold */
	pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_HI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_HI_PERCENTAGE) / 100;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: tx flow control threshold is set, pool size is %d",
		  __func__, flow_pool_size);
}
77 
78 /**
79  * dp_tx_flow_pool_reattach() - Reattach flow_pool
80  * @pool: flow_pool
81  *
82  * Return: none
83  */
84 static inline void
85 dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
86 {
87 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
88 		  "%s: flow pool already allocated, attached %d times",
89 		  __func__, pool->pool_create_cnt);
90 
91 	if (pool->avail_desc > pool->start_th[DP_TH_BE_BK])
92 		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
93 	else if (pool->avail_desc <= pool->start_th[DP_TH_BE_BK] &&
94 		 pool->avail_desc > pool->start_th[DP_TH_VI])
95 		pool->status = FLOW_POOL_BE_BK_PAUSED;
96 	else if (pool->avail_desc <= pool->start_th[DP_TH_VI] &&
97 		 pool->avail_desc > pool->start_th[DP_TH_VO])
98 		pool->status = FLOW_POOL_VI_PAUSED;
99 	else if (pool->avail_desc <= pool->start_th[DP_TH_VO] &&
100 		 pool->avail_desc > pool->start_th[DP_TH_HI])
101 		pool->status = FLOW_POOL_VO_PAUSED;
102 	else
103 		pool->status = FLOW_POOL_ACTIVE_PAUSED;
104 
105 	pool->pool_create_cnt++;
106 }
107 
108 /**
109  * dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool
110  * @pool: flow_pool
111  *
112  * Return: none
113  */
114 static inline void
115 dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
116 {
117 	int i;
118 
119 	for (i = 0; i < FL_TH_MAX; i++) {
120 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
121 			  "Level %d :: Start threshold %d :: Stop threshold %d",
122 			  i, pool->start_th[i], pool->stop_th[i]);
123 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
124 			  "Level %d :: Maximun pause time %lu ms",
125 			  i, pool->max_pause_time[i]);
126 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
127 			  "Level %d :: Latest pause timestamp %lu",
128 			  i, pool->latest_pause_time[i]);
129 	}
130 }
131 
132 #else
133 static inline void
134 dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
135 			   uint32_t start_threshold,
136 			   uint32_t stop_threshold,
137 			   uint16_t flow_pool_size)
138 
139 {
140 	/* INI is in percentage so divide by 100 */
141 	pool->start_th = (start_threshold * flow_pool_size) / 100;
142 	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
143 }
144 
145 static inline void
146 dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
147 {
148 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
149 		  "%s: flow pool already allocated, attached %d times",
150 		  __func__, pool->pool_create_cnt);
151 	if (pool->avail_desc > pool->start_th)
152 		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
153 	else
154 		pool->status = FLOW_POOL_ACTIVE_PAUSED;
155 
156 	pool->pool_create_cnt++;
157 }
158 
159 static inline void
160 dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
161 {
162 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
163 		  "Start threshold %d :: Stop threshold %d",
164 	pool->start_th, pool->stop_th);
165 }
166 
167 #endif
168 
/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 *
 * @soc_hdl: cdp soc handle (converted to struct dp_soc internally)
 *
 * Traces global pool-map/unmap counters, then walks every tx descriptor
 * pool and traces a per-pool snapshot.
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	/* local snapshot of one pool, traced after all locks are dropped */
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"No of pool unmap received %d",	pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Pkt dropped due to unavailablity of pool %d",
		pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin lock.
	 * Always take in below order.
	 * flow_pool_array_lock -> flow_pool_lock
	 */
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		/* NOTE(review): assumes every in-use pool status enum value
		 * is <= FLOW_POOL_INVALID; pools beyond that are skipped —
		 * confirm against the enum ordering in dp_types.h
		 */
		if (pool->status > FLOW_POOL_INVALID)
			continue;
		/* copy the pool under its own lock, then drop BOTH locks so
		 * the trace calls below run lock-free on the snapshot
		 */
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Flow_pool_id %d :: status %d",
			tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Total %d :: Available %d",
			tmp_pool.pool_size, tmp_pool.avail_desc);
		dp_tx_flow_pool_dump_threshold(&tmp_pool);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Member flow_id  %d :: flow_type %d",
			tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Pkt dropped due to unavailablity of descriptors %d",
			tmp_pool.pkt_drop_no_desc);
		/* reacquire the array lock before moving to the next pool */
		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}
224 
225 /**
226  * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
227  *
228  * @soc: Handle to struct dp_soc.
229  *
230  * Return: None
231  */
232 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
233 {
234 
235 	if (!soc) {
236 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
237 			"%s: soc is null", __func__);
238 		return;
239 	}
240 	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
241 }
242 
/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Allocates and initializes the tx descriptor pool for @flow_pool_id.
 * If the pool already exists (non-inactive status or non-zero attach
 * count), it is re-attached instead of re-allocated and the existing
 * pool is returned.
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return NULL;
	}
	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
		/* pool already exists: bump attach count and refresh its
		 * pause state rather than allocating again
		 */
		dp_tx_flow_pool_reattach(pool);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* NOTE(review): status/create_cnt are read here after the
		 * lock is dropped — logging only, but values may be stale
		 */
		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
		       pool->status, pool->pool_create_cnt);
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) {
		/* undo the allocation done just above */
		dp_tx_desc_pool_free(soc, flow_pool_id);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	/* thresholds come from INI as percentages of the pool size */
	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
				   flow_pool_size);
	pool->pool_create_cnt++;

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}
299 
300 /**
301  * dp_tx_delete_flow_pool() - delete flow pool
302  * @soc: Handle to struct dp_soc
303  * @pool: flow pool pointer
304  * @force: free pool forcefully
305  *
306  * Delete flow_pool if all tx descriptors are available.
307  * Otherwise put it in FLOW_POOL_INVALID state.
308  * If force is set then pull all available descriptors to
309  * global pool.
310  *
311  * Return: 0 for success or error
312  */
313 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
314 	bool force)
315 {
316 	struct dp_vdev *vdev;
317 
318 	if (!soc || !pool) {
319 		dp_err("pool or soc is NULL");
320 		QDF_ASSERT(0);
321 		return ENOMEM;
322 	}
323 
324 	dp_info("pool create_cnt=%d, avail_desc=%d, size=%d, status=%d",
325 		pool->pool_create_cnt, pool->avail_desc,
326 		pool->pool_size, pool->status);
327 	qdf_spin_lock_bh(&pool->flow_pool_lock);
328 	if (!pool->pool_create_cnt) {
329 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
330 		dp_err("flow pool either not created or alread deleted");
331 		return -ENOENT;
332 	}
333 	pool->pool_create_cnt--;
334 	if (pool->pool_create_cnt) {
335 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
336 		dp_err("pool is still attached, pending detach %d",
337 		       pool->pool_create_cnt);
338 		return -EAGAIN;
339 	}
340 
341 	if (pool->avail_desc < pool->pool_size) {
342 		pool->status = FLOW_POOL_INVALID;
343 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
344 		/* Reset TX desc associated to this Vdev as NULL */
345 		vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
346 					     DP_MOD_ID_MISC);
347 		if (vdev) {
348 			dp_tx_desc_flush(vdev->pdev, vdev, false);
349 			dp_vdev_unref_delete(soc, vdev,
350 					     DP_MOD_ID_MISC);
351 		}
352 		dp_err("avail desc less than pool size");
353 		return -EAGAIN;
354 	}
355 
356 	/* We have all the descriptors for the pool, we can delete the pool */
357 	dp_tx_desc_pool_deinit(soc, pool->flow_pool_id);
358 	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
359 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
360 	return 0;
361 }
362 
363 /**
364  * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
365  * @pdev: Handle to struct dp_pdev
366  * @pool: flow_pool
367  * @vdev_id: flow_id /vdev_id
368  *
369  * Return: none
370  */
371 static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
372 	struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
373 {
374 	struct dp_vdev *vdev;
375 	struct dp_soc *soc = pdev->soc;
376 
377 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
378 	if (!vdev) {
379 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
380 		   "%s: invalid vdev_id %d",
381 		   __func__, vdev_id);
382 		return;
383 	}
384 
385 	vdev->pool = pool;
386 	qdf_spin_lock_bh(&pool->flow_pool_lock);
387 	pool->pool_owner_ctx = soc;
388 	pool->flow_pool_id = vdev_id;
389 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
390 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
391 }
392 
393 /**
394  * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
395  * @pdev: Handle to struct dp_pdev
396  * @pool: flow_pool
397  * @vdev_id: flow_id /vdev_id
398  *
399  * Return: none
400  */
401 static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
402 		struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
403 {
404 	struct dp_vdev *vdev;
405 	struct dp_soc *soc = pdev->soc;
406 
407 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
408 	if (!vdev) {
409 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
410 		   "%s: invalid vdev_id %d",
411 		   __func__, vdev_id);
412 		return;
413 	}
414 
415 	vdev->pool = NULL;
416 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
417 }
418 
/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_* on failure
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	/* NOTE(review): pdev is dereferenced here without a NULL check;
	 * presumably callers validate it — confirm
	 */
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;


	dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
		flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!soc)) {
		dp_err("soc is NULL");
		return QDF_STATUS_E_FAULT;
	}
	soc->pool_stats.pool_map_count++;

	/* creates a new pool, or re-attaches to an existing one */
	pool = dp_tx_create_flow_pool(soc, flow_pool_id,
			flow_pool_size);
	if (!pool) {
		dp_err("creation of flow_pool %d size %d failed",
		       flow_pool_id, flow_pool_size);
		return QDF_STATUS_E_RESOURCES;
	}

	switch (type) {

	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
		break;
	default:
		dp_err("flow type %d not supported", type);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
469 
470 /**
471  * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
472  * @pdev: Handle to struct dp_pdev
473  * @flow_id: flow id
474  * @flow_type: flow type
475  * @flow_pool_id: pool id
476  *
477  * Process below target to host message
478  * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
479  *
480  * Return: none
481  */
482 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
483 	uint8_t flow_type, uint8_t flow_pool_id)
484 {
485 	struct dp_soc *soc = pdev->soc;
486 	struct dp_tx_desc_pool_s *pool;
487 	enum htt_flow_type type = flow_type;
488 
489 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
490 		"%s: flow_id %d flow_type %d flow_pool_id %d",
491 		__func__, flow_id, flow_type, flow_pool_id);
492 
493 	if (qdf_unlikely(!pdev)) {
494 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
495 			"%s: pdev is NULL", __func__);
496 		return;
497 	}
498 	soc->pool_stats.pool_unmap_count++;
499 
500 	pool = &soc->tx_desc[flow_pool_id];
501 	if (!pool) {
502 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
503 		   "%s: flow_pool not available flow_pool_id %d",
504 		   __func__, type);
505 		return;
506 	}
507 
508 	switch (type) {
509 
510 	case FLOW_TYPE_VDEV:
511 		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
512 		break;
513 	default:
514 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
515 		   "%s: flow type %d not supported !!!",
516 		   __func__, type);
517 		return;
518 	}
519 
520 	/* only delete if all descriptors are available */
521 	dp_tx_delete_flow_pool(soc, pool, false);
522 }
523 
/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Creates the spinlock that protects the flow pool array.
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}
534 
535 /**
536  * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
537  * @tx_desc_pool: Handle to flow_pool
538  *
539  * Return: none
540  */
541 static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
542 {
543 	struct dp_tx_desc_pool_s *tx_desc_pool;
544 	int i;
545 
546 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
547 		tx_desc_pool = &((soc)->tx_desc[i]);
548 		if (!tx_desc_pool->desc_pages.num_pages)
549 			continue;
550 
551 		dp_tx_desc_pool_deinit(soc, i);
552 		dp_tx_desc_pool_free(soc, i);
553 	}
554 }
555 
/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Frees all tx descriptor pools and destroys the flow pool array lock.
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	dp_tx_desc_pool_dealloc(soc);

	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}
568 
569 /**
570  * dp_txrx_register_pause_cb() - Register pause callback
571  * @ctx: Handle to struct dp_soc
572  * @pause_cb: Tx pause_cb
573  *
574  * Return: none
575  */
576 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
577 	tx_pause_callback pause_cb)
578 {
579 	struct dp_soc *soc = (struct dp_soc *)handle;
580 
581 	if (!soc || !pause_cb) {
582 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
583 			FL("soc or pause_cb is NULL"));
584 		return QDF_STATUS_E_INVAL;
585 	}
586 	soc->pause_cb = pause_cb;
587 
588 	return QDF_STATUS_SUCCESS;
589 }
590 
591 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
592 			       uint8_t vdev_id)
593 {
594 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
595 	struct dp_pdev *pdev =
596 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
597 	int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
598 
599 	if (!pdev) {
600 		dp_err("pdev is NULL");
601 		return QDF_STATUS_E_INVAL;
602 	}
603 
604 	return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
605 					   vdev_id, tx_ring_size);
606 }
607 
608 void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
609 			   uint8_t vdev_id)
610 {
611 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
612 	struct dp_pdev *pdev =
613 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
614 
615 	if (!pdev) {
616 		dp_err("pdev is NULL");
617 		return;
618 	}
619 
620 	return dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
621 					     FLOW_TYPE_VDEV, vdev_id);
622 }
623