/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */
#include "dp_types.h"
#include "dp_tx_desc.h"

#include <cdp_txrx_handle.h>
#include "dp_internal.h"

#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3
#define GLOBAL_FLOW_POOL_STATS_LEN 25
#define FLOW_POOL_LOG_LEN 50

#ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * dp_tx_initialize_threshold() - Initialize the per-AC thresholds of a
 *	flow pool
 * @pool: flow_pool
 * @start_threshold: start threshold of a given AC, as a percentage
 * @stop_threshold: stop threshold of a given AC, as a percentage
 * @flow_pool_size: flow pool size
 *
 * Return: none
 */
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
			   uint32_t start_threshold,
			   uint32_t stop_threshold,
			   uint16_t flow_pool_size)
{
	/* BE_BK threshold is the same as the previous threshold */
	pool->start_th[DP_TH_BE_BK] = (start_threshold
					* flow_pool_size) / 100;
	pool->stop_th[DP_TH_BE_BK] = (stop_threshold
					* flow_pool_size) / 100;

	/* Update VI threshold based on the BE_BK threshold */
	pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_VI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_VI_PERCENTAGE) / 100;

	/* Update VO threshold based on the BE_BK threshold */
	pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_VO_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_VO_PERCENTAGE) / 100;

	/* Update High Priority threshold based on the BE_BK threshold */
	pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_HI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_HI_PERCENTAGE) / 100;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: tx flow control threshold is set, pool size is %d",
		  __func__, flow_pool_size);
}
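
/*
 * Worked example of the threshold cascade above (illustrative only; the
 * FL_TH_*_PERCENTAGE values are defined elsewhere and the numbers below
 * are assumed for the sake of the arithmetic):
 *
 *	flow_pool_size = 1024, start_threshold = 15, stop_threshold = 10,
 *	and, say, FL_TH_VI_PERCENTAGE = 80, FL_TH_VO_PERCENTAGE = 60,
 *	FL_TH_HI_PERCENTAGE = 40.
 *
 *	start_th[DP_TH_BE_BK] = (15 * 1024) / 100 = 153
 *	stop_th[DP_TH_BE_BK]  = (10 * 1024) / 100 = 102
 *	start_th[DP_TH_VI]    = (153 * 80) / 100  = 122
 *	start_th[DP_TH_VO]    = (153 * 60) / 100  = 91
 *	start_th[DP_TH_HI]    = (153 * 40) / 100  = 61
 *
 * i.e. lower-priority ACs pause first (while more descriptors are still
 * available) and high-priority traffic keeps flowing the longest.
 */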

/**
 * dp_tx_flow_pool_reattach() - Reattach flow_pool
 * @pool: flow_pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: flow pool already allocated, attached %d times",
		  __func__, pool->pool_create_cnt);

	if (pool->avail_desc > pool->start_th[DP_TH_BE_BK])
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	else if (pool->avail_desc <= pool->start_th[DP_TH_BE_BK] &&
		 pool->avail_desc > pool->start_th[DP_TH_VI])
		pool->status = FLOW_POOL_BE_BK_PAUSED;
	else if (pool->avail_desc <= pool->start_th[DP_TH_VI] &&
		 pool->avail_desc > pool->start_th[DP_TH_VO])
		pool->status = FLOW_POOL_VI_PAUSED;
	else if (pool->avail_desc <= pool->start_th[DP_TH_VO] &&
		 pool->avail_desc > pool->start_th[DP_TH_HI])
		pool->status = FLOW_POOL_VO_PAUSED;
	else
		pool->status = FLOW_POOL_ACTIVE_PAUSED;

	pool->pool_create_cnt++;
}

/**
 * dp_tx_flow_pool_dump_threshold() - Dump thresholds of the flow_pool
 * @pool: flow_pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	int i;

	for (i = 0; i < FL_TH_MAX; i++) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Level %d :: Start threshold %d :: Stop threshold %d",
			  i, pool->start_th[i], pool->stop_th[i]);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Level %d :: Maximum pause time %lu ms",
			  i, pool->max_pause_time[i]);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Level %d :: Latest pause timestamp %lu",
			  i, pool->latest_pause_time[i]);
	}
}

#else
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
			   uint32_t start_threshold,
			   uint32_t stop_threshold,
			   uint16_t flow_pool_size)
{
	/* INI is in percentage so divide by 100 */
	pool->start_th = (start_threshold * flow_pool_size) / 100;
	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
}

static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: flow pool already allocated, attached %d times",
		  __func__, pool->pool_create_cnt);
	if (pool->avail_desc > pool->start_th)
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	else
		pool->status = FLOW_POOL_ACTIVE_PAUSED;

	pool->pool_create_cnt++;
}

static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Start threshold %d :: Stop threshold %d",
		  pool->start_th, pool->stop_th);
}

#endif

/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 * @soc_hdl: Handle to struct cdp_soc_t
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"No of pool unmap received %d", pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Pkt dropped due to unavailability of pool %d",
		pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin lock.
	 * Always take the locks in the order below:
	 * flow_pool_array_lock -> flow_pool_lock
	 */
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Flow_pool_id %d :: status %d",
			tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Total %d :: Available %d",
			tmp_pool.pool_size, tmp_pool.avail_desc);
		dp_tx_flow_pool_dump_threshold(&tmp_pool);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Member flow_id  %d :: flow_type %d",
			tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Pkt dropped due to unavailability of descriptors %d",
			tmp_pool.pkt_drop_no_desc);
		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}

void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
{
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	char *comb_log_str;
	uint32_t comb_log_str_size;
	int bytes_written = 0;
	int i;

	comb_log_str_size = GLOBAL_FLOW_POOL_STATS_LEN +
				(FLOW_POOL_LOG_LEN * MAX_TXDESC_POOLS) + 1;
	comb_log_str = qdf_mem_malloc(comb_log_str_size);
	if (!comb_log_str)
		return;

	bytes_written = qdf_snprintf(&comb_log_str[bytes_written],
				     comb_log_str_size, "G:(%d,%d,%d) ",
				     pool_stats->pool_map_count,
				     pool_stats->pool_unmap_count,
				     pool_stats->pkt_drop_no_pool);

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;
		bytes_written += qdf_snprintf(&comb_log_str[bytes_written],
				      (bytes_written >= comb_log_str_size) ? 0 :
				      comb_log_str_size - bytes_written,
				      "| %d %d: (%d,%d,%d)",
				      pool->flow_pool_id, pool->status,
				      pool->pool_size, pool->avail_desc,
				      pool->pkt_drop_no_desc);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "FLOW_POOL_STATS %s", comb_log_str);

	qdf_mem_free(comb_log_str);
}
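
/*
 * For reference, a line produced by the compact dump above looks like
 * this (values illustrative):
 *
 *	FLOW_POOL_STATS G:(3,1,0) | 0 1: (1024,512,7)
 *
 * i.e. global (pool_map_count, pool_unmap_count, pkt_drop_no_pool),
 * followed by one "flow_pool_id status: (pool_size, avail_desc,
 * pkt_drop_no_desc)" entry per valid pool.
 */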

/**
 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
 * @soc: Handle to struct dp_soc.
 *
 * Return: None
 */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
{
	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: soc is null", __func__);
		return;
	}
	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
}

/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return NULL;
	}

	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
		dp_tx_flow_pool_reattach(pool);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
		       pool->status, pool->pool_create_cnt);
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) {
		dp_tx_desc_pool_free(soc, flow_pool_id);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
				   flow_pool_size);
	pool->pool_create_cnt++;

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}

/**
 * dp_tx_delete_flow_pool() - delete flow pool
 * @soc: Handle to struct dp_soc
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 for success or error
 */
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
	bool force)
{
	struct dp_vdev *vdev;

	if (!soc || !pool) {
		dp_err("pool or soc is NULL");
		QDF_ASSERT(0);
		return -ENOMEM;
	}

	dp_info("pool create_cnt=%d, avail_desc=%d, size=%d, status=%d",
		pool->pool_create_cnt, pool->avail_desc,
		pool->pool_size, pool->status);
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (!pool->pool_create_cnt) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("flow pool either not created or already deleted");
		return -ENOENT;
	}
	pool->pool_create_cnt--;
	if (pool->pool_create_cnt) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("pool is still attached, pending detach %d",
		       pool->pool_create_cnt);
		return -EAGAIN;
	}

	if (pool->avail_desc < pool->pool_size) {
		pool->status = FLOW_POOL_INVALID;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* Reset the TX descriptors associated with this vdev */
		vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
					     DP_MOD_ID_MISC);
		if (vdev) {
			dp_tx_desc_flush(vdev->pdev, vdev, false);
			dp_vdev_unref_delete(soc, vdev,
					     DP_MOD_ID_MISC);
		}
		dp_err("avail desc less than pool size");
		return -EAGAIN;
	}

	/* We have all the descriptors for the pool, we can delete the pool */
	dp_tx_desc_pool_deinit(soc, pool->flow_pool_id);
	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	return 0;
}
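
/*
 * Summary of dp_tx_delete_flow_pool() outcomes, as implemented above:
 *
 *	0        pool fully deleted (all descriptors were available)
 *	-ENOMEM  soc or pool argument was NULL
 *	-ENOENT  pool was never created, or was already deleted
 *	-EAGAIN  pool is still attached elsewhere, or descriptors are
 *	         still outstanding (pool left in FLOW_POOL_INVALID until
 *	         they return)
 */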

/**
 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id/vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
	struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: invalid vdev_id %d",
		   __func__, vdev_id);
		return;
	}

	vdev->pool = pool;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->pool_owner_ctx = soc;
	pool->flow_pool_id = vdev_id;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id/vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
		struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: invalid vdev_id %d",
		   __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process the target-to-host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code otherwise
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
		flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!soc)) {
		dp_err("soc is NULL");
		return QDF_STATUS_E_FAULT;
	}
	soc->pool_stats.pool_map_count++;

	pool = dp_tx_create_flow_pool(soc, flow_pool_id,
			flow_pool_size);
	if (!pool) {
		dp_err("creation of flow_pool %d size %d failed",
		       flow_pool_id, flow_pool_size);
		return QDF_STATUS_E_RESOURCES;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
		break;
	default:
		dp_err("flow type %d not supported", type);
		break;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process the target-to-host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id)
{
	struct dp_soc *soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%s: flow_id %d flow_type %d flow_pool_id %d",
		__func__, flow_id, flow_type, flow_pool_id);

	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: pdev is NULL", __func__);
		return;
	}

	/* Dereference pdev for the soc handle only after the NULL check */
	soc = pdev->soc;
	soc->pool_stats.pool_unmap_count++;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: flow_pool not available flow_pool_id %d",
		   __func__, flow_pool_id);
		return;
	}
	pool = &soc->tx_desc[flow_pool_id];

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: flow type %d not supported !!!",
		   __func__, type);
		return;
	}

	/* only delete if all descriptors are available */
	dp_tx_delete_flow_pool(soc, pool, false);
}
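
/*
 * Pool lifecycle, as driven by the firmware messages handled above:
 *
 *	HTT_T2H_MSG_TYPE_FLOW_POOL_MAP   -> dp_tx_flow_pool_map_handler()
 *	    -> dp_tx_create_flow_pool() + dp_tx_flow_pool_vdev_map()
 *	HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP -> dp_tx_flow_pool_unmap_handler()
 *	    -> dp_tx_flow_pool_vdev_unmap() + dp_tx_delete_flow_pool()
 *
 * Deletion is deferred (the pool is parked in FLOW_POOL_INVALID) until
 * every descriptor has returned to the pool.
 */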

/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	int i;

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &((soc)->tx_desc[i]);
		if (!tx_desc_pool->desc_pages.num_pages)
			continue;

		dp_tx_desc_pool_deinit(soc, i);
		dp_tx_desc_pool_free(soc, i);
	}
}

/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	dp_tx_desc_pool_dealloc(soc);

	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}

/**
 * dp_txrx_register_pause_cb() - Register pause callback
 * @handle: Handle to struct cdp_soc_t
 * @pause_cb: Tx pause callback
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code otherwise
 */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
	tx_pause_callback pause_cb)
{
	struct dp_soc *soc = (struct dp_soc *)handle;

	if (!soc || !pause_cb) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("soc or pause_cb is NULL"));
		return QDF_STATUS_E_INVAL;
	}
	soc->pause_cb = pause_cb;

	return QDF_STATUS_SUCCESS;
}
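
/*
 * A minimal sketch of a pause callback a host layer might register via
 * dp_txrx_register_pause_cb(). The signature shown assumes the CDP
 * tx_pause_callback typedef takes (vdev_id, action, reason); check the
 * cdp headers for the exact prototype. my_host_pause_handler and
 * my_netif_queue_ctl() are hypothetical names used only for
 * illustration:
 *
 *	static void my_host_pause_handler(uint8_t vdev_id,
 *					  enum netif_action_type action,
 *					  enum netif_reason_type reason)
 *	{
 *		// Pause or resume the OS netif queues of this vdev
 *		my_netif_queue_ctl(vdev_id, action, reason);
 *	}
 *
 *	...
 *	dp_txrx_register_pause_cb(soc_hdl, my_host_pause_handler);
 */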

QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
			       uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (!pdev) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
					   vdev_id, tx_ring_size);
}

void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
}
665