xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c (revision de1e7e7e129e3f35eaee7ba04135d2734e70c50a)
1 /*
2  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <cds_api.h>
21 
22 /* OS abstraction libraries */
23 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
24 #include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
25 #include <qdf_util.h>           /* qdf_unlikely */
26 #include "dp_types.h"
27 #include "dp_tx_desc.h"
28 #include "dp_peer.h"
29 
30 #include <cdp_txrx_handle.h>
31 #include "dp_internal.h"
32 #define INVALID_FLOW_ID 0xFF
33 #define MAX_INVALID_BIN 3
34 #define GLOBAL_FLOW_POOL_STATS_LEN 25
35 #define FLOW_POOL_LOG_LEN 50
36 
37 #ifdef QCA_AC_BASED_FLOW_CONTROL
38 /**
39  * dp_tx_initialize_threshold() - Initialize flow pool thresholds
40  * @pool: flow_pool
41  * @start_threshold: start threshold of the given AC, in percent of the pool
42  * @stop_threshold: stop threshold of the given AC, in percent of the pool
43  * @flow_pool_size: flow pool size
44  *
45  * Return: none
46  */
47 static inline void
48 dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
49 			   uint32_t start_threshold,
50 			   uint32_t stop_threshold,
51 			   uint16_t flow_pool_size)
52 {
53 	/* BE_BK threshold is the same as the legacy (non-AC) threshold */
54 	pool->start_th[DP_TH_BE_BK] = (start_threshold
55 					* flow_pool_size) / 100;
56 	pool->stop_th[DP_TH_BE_BK] = (stop_threshold
57 					* flow_pool_size) / 100;
58 
59 	/* Update VI threshold based on BE_BK threshold */
60 	pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
61 					* FL_TH_VI_PERCENTAGE) / 100;
62 	pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
63 					* FL_TH_VI_PERCENTAGE) / 100;
64 
65 	/* Update VO threshold based on BE_BK threshold */
66 	pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
67 					* FL_TH_VO_PERCENTAGE) / 100;
68 	pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
69 					* FL_TH_VO_PERCENTAGE) / 100;
70 
71 	/* Update High Priority threshold based on BE_BK threshold */
72 	pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
73 					* FL_TH_HI_PERCENTAGE) / 100;
74 	pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
75 					* FL_TH_HI_PERCENTAGE) / 100;
76 
77 	dp_debug("tx flow control threshold is set, pool size is %d",
78 		 flow_pool_size);
79 }
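
/*
 * Editor's sketch (illustrative only, not part of the driver): how the
 * per-AC thresholds above scale for a hypothetical 1024-descriptor pool with
 * start_threshold = 15 and stop_threshold = 10 (INI percentages). The vi/vo/hi
 * scaling factors below are placeholders standing in for FL_TH_VI_PERCENTAGE,
 * FL_TH_VO_PERCENTAGE and FL_TH_HI_PERCENTAGE, which are defined elsewhere.
 */
static inline void dp_tx_example_ac_thresholds(void)
{
	uint16_t flow_pool_size = 1024;
	uint32_t start_threshold = 15, stop_threshold = 10;
	uint32_t vi_pct = 80, vo_pct = 60, hi_pct = 40;	/* placeholder values */

	uint32_t be_bk_start = (start_threshold * flow_pool_size) / 100;	/* 153 */
	uint32_t be_bk_stop  = (stop_threshold * flow_pool_size) / 100;	/* 102 */
	uint32_t vi_start    = (be_bk_start * vi_pct) / 100;			/* 122 */
	uint32_t vi_stop     = (be_bk_stop * vi_pct) / 100;			/*  81 */
	uint32_t vo_start    = (be_bk_start * vo_pct) / 100;			/*  91 */
	uint32_t vo_stop     = (be_bk_stop * vo_pct) / 100;			/*  61 */
	uint32_t hi_start    = (be_bk_start * hi_pct) / 100;			/*  61 */
	uint32_t hi_stop     = (be_bk_stop * hi_pct) / 100;			/*  40 */

	/* Higher-priority ACs get lower thresholds, so BE/BK traffic is
	 * paused first as descriptors run out and resumed last. */
	dp_debug("BE_BK %u/%u VI %u/%u VO %u/%u HI %u/%u",
		 be_bk_start, be_bk_stop, vi_start, vi_stop,
		 vo_start, vo_stop, hi_start, hi_stop);
}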
80 
81 /**
82  * dp_tx_flow_pool_reattach() - Reattach flow_pool
83  * @pool: flow_pool
84  *
85  * Return: none
86  */
87 static inline void
88 dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
89 {
90 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
91 		  "%s: flow pool already allocated, attached %d times",
92 		  __func__, pool->pool_create_cnt);
93 
94 	pool->status = FLOW_POOL_ACTIVE_UNPAUSED_REATTACH;
95 	pool->pool_create_cnt++;
96 }
97 
98 /**
99  * dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool
100  * @pool: flow_pool
101  *
102  * Return: none
103  */
104 static inline void
105 dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
106 {
107 	int i;
108 
109 	for (i = 0; i < FL_TH_MAX; i++) {
110 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
111 			  "Level %d :: Start threshold %d :: Stop threshold %d",
112 			  i, pool->start_th[i], pool->stop_th[i]);
113 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
114 			  "Level %d :: Maximum pause time %lu ms",
115 			  i, pool->max_pause_time[i]);
116 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
117 			  "Level %d :: Latest pause timestamp %lu",
118 			  i, pool->latest_pause_time[i]);
119 	}
120 }
121 
122 /**
123  * dp_tx_flow_ctrl_reset_subqueues() - Reset subqueues to original state
124  * @soc: dp soc
125  * @pool: flow pool
126  * @pool_status: flow pool status
127  *
128  * Return: none
129  */
130 static inline void
131 dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
132 				struct dp_tx_desc_pool_s *pool,
133 				enum flow_pool_status pool_status)
134 {
135 	switch (pool_status) {
136 	case FLOW_POOL_ACTIVE_PAUSED:
137 		soc->pause_cb(pool->flow_pool_id,
138 			      WLAN_NETIF_PRIORITY_QUEUE_ON,
139 			      WLAN_DATA_FLOW_CTRL_PRI);
140 		fallthrough;
141 
142 	case FLOW_POOL_VO_PAUSED:
143 		soc->pause_cb(pool->flow_pool_id,
144 			      WLAN_NETIF_VO_QUEUE_ON,
145 			      WLAN_DATA_FLOW_CTRL_VO);
146 		fallthrough;
147 
148 	case FLOW_POOL_VI_PAUSED:
149 		soc->pause_cb(pool->flow_pool_id,
150 			      WLAN_NETIF_VI_QUEUE_ON,
151 			      WLAN_DATA_FLOW_CTRL_VI);
152 		fallthrough;
153 
154 	case FLOW_POOL_BE_BK_PAUSED:
155 		soc->pause_cb(pool->flow_pool_id,
156 			      WLAN_NETIF_BE_BK_QUEUE_ON,
157 			      WLAN_DATA_FLOW_CTRL_BE_BK);
158 		fallthrough;
159 	default:
160 		break;
161 	}
162 }
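
/*
 * Editor's note on the fall-through above: pausing deepens one AC at a time
 * (BE_BK first, then VI, then VO, and finally the priority queue), and the
 * pool status records how deep the pause went. The reset therefore starts at
 * the recorded status and falls through every level below it: a pool left in
 * FLOW_POOL_VO_PAUSED re-enables the VO, VI and BE_BK netif queues, while a
 * pool left in FLOW_POOL_ACTIVE_PAUSED re-enables all of them, including the
 * priority queue.
 */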
163 
164 #else
165 static inline void
166 dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
167 			   uint32_t start_threshold,
168 			   uint32_t stop_threshold,
169 			   uint16_t flow_pool_size)
170 
171 {
172 	/* INI is in percentage so divide by 100 */
173 	pool->start_th = (start_threshold * flow_pool_size) / 100;
174 	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
175 }
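
/*
 * Illustrative example (hypothetical values): with flow_pool_size = 1024,
 * start_threshold = 25 and stop_threshold = 15, this yields
 * start_th = (25 * 1024) / 100 = 256 and stop_th = (15 * 1024) / 100 = 153.
 */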
176 
177 static inline void
178 dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
179 {
180 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
181 		  "%s: flow pool already allocated, attached %d times",
182 		  __func__, pool->pool_create_cnt);
183 	if (pool->avail_desc > pool->start_th)
184 		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
185 	else
186 		pool->status = FLOW_POOL_ACTIVE_PAUSED;
187 
188 	pool->pool_create_cnt++;
189 }
190 
191 static inline void
192 dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
193 {
194 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
195 		  "Start threshold %d :: Stop threshold %d",
196 		  pool->start_th, pool->stop_th);
197 }
198 
199 static inline void
200 dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
201 				struct dp_tx_desc_pool_s *pool,
202 				enum flow_pool_status pool_status)
203 {
204 }
205 
206 #endif
207 
208 /**
209  * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
210  *
211  * @soc_hdl: Handle to struct cdp_soc_t
212  *
213  * Return: none
214  */
215 void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
216 {
217 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
218 	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
219 	struct dp_tx_desc_pool_s *pool = NULL;
220 	struct dp_tx_desc_pool_s tmp_pool;
221 	int i;
222 
223 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
224 		"No of pool map received %d", pool_stats->pool_map_count);
225 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
226 		"No of pool unmap received %d",	pool_stats->pool_unmap_count);
227 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
228 		"Pkt dropped due to unavailablity of pool %d",
229 		pool_stats->pkt_drop_no_pool);
230 
231 	/*
232 	 * Nested spin locks.
233 	 * Always acquire them in the order below:
234 	 * flow_pool_array_lock -> flow_pool_lock
235 	 */
236 	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
237 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
238 		pool = &soc->tx_desc[i];
239 		if (pool->status > FLOW_POOL_INVALID)
240 			continue;
241 		qdf_spin_lock_bh(&pool->flow_pool_lock);
242 		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
243 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
244 		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
245 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
246 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
247 			"Flow_pool_id %d :: status %d",
248 			tmp_pool.flow_pool_id, tmp_pool.status);
249 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
250 			"Total %d :: Available %d",
251 			tmp_pool.pool_size, tmp_pool.avail_desc);
252 		dp_tx_flow_pool_dump_threshold(&tmp_pool);
253 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
254 			"Member flow_id  %d :: flow_type %d",
255 			tmp_pool.flow_pool_id, tmp_pool.flow_type);
256 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
257 			"Pkt dropped due to unavailablity of descriptors %d",
258 			tmp_pool.pkt_drop_no_desc);
259 		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
260 	}
261 	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
262 }
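
/*
 * Editor's sketch (hypothetical helper, for illustration only): the loop
 * above copies each pool into a local snapshot while holding flow_pool_lock
 * and drops both locks before tracing, so the slow log path never runs with
 * a spinlock held and the documented order (flow_pool_array_lock ->
 * flow_pool_lock) is preserved. The same pattern in isolation:
 */
static inline void dp_tx_example_pool_snapshot_log(struct dp_soc *soc, int i)
{
	struct dp_tx_desc_pool_s snap;

	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	qdf_spin_lock_bh(&soc->tx_desc[i].flow_pool_lock);
	qdf_mem_copy(&snap, &soc->tx_desc[i], sizeof(snap));
	qdf_spin_unlock_bh(&soc->tx_desc[i].flow_pool_lock);
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);

	/* Log from the snapshot only; no locks are held here. */
	dp_info("pool %d: %d of %d descriptors available",
		snap.flow_pool_id, snap.avail_desc, snap.pool_size);
}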
263 
264 void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
265 {
266 	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
267 	struct dp_tx_desc_pool_s *pool = NULL;
268 	char *comb_log_str;
269 	uint32_t comb_log_str_size;
270 	int bytes_written = 0;
271 	int i;
272 
273 	comb_log_str_size = GLOBAL_FLOW_POOL_STATS_LEN +
274 				(FLOW_POOL_LOG_LEN * MAX_TXDESC_POOLS) + 1;
275 	comb_log_str = qdf_mem_malloc(comb_log_str_size);
276 	if (!comb_log_str)
277 		return;
278 
279 	bytes_written = qdf_snprintf(&comb_log_str[bytes_written],
280 				     comb_log_str_size, "G:(%d,%d,%d) ",
281 				     pool_stats->pool_map_count,
282 				     pool_stats->pool_unmap_count,
283 				     pool_stats->pkt_drop_no_pool);
284 
285 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
286 		pool = &soc->tx_desc[i];
287 		if (pool->status > FLOW_POOL_INVALID)
288 			continue;
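		/*
		 * Once bytes_written reaches the buffer size, pass a size of 0
		 * so qdf_snprintf becomes a no-op instead of being handed a
		 * remaining length that has wrapped around.
		 */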
289 		bytes_written += qdf_snprintf(&comb_log_str[bytes_written],
290 				      (bytes_written >= comb_log_str_size) ? 0 :
291 				      comb_log_str_size - bytes_written,
292 				      "| %d %d: (%d,%d,%d)",
293 				      pool->flow_pool_id, pool->status,
294 				      pool->pool_size, pool->avail_desc,
295 				      pool->pkt_drop_no_desc);
296 	}
297 
298 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
299 		  "FLOW_POOL_STATS %s", comb_log_str);
300 
301 	qdf_mem_free(comb_log_str);
302 }
303 
304 /**
305  * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
306  *
307  * @soc: Handle to struct dp_soc.
308  *
309  * Return: None
310  */
311 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
312 {
313 
314 	if (!soc) {
315 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
316 			"%s: soc is null", __func__);
317 		return;
318 	}
319 	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
320 }
321 
322 /**
323  * dp_tx_create_flow_pool() - create flow pool
324  * @soc: Handle to struct dp_soc
325  * @flow_pool_id: flow pool id
326  * @flow_pool_size: flow pool size
327  *
328  * Return: flow_pool pointer / NULL for error
329  */
330 struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
331 	uint8_t flow_pool_id, uint32_t flow_pool_size)
332 {
333 	struct dp_tx_desc_pool_s *pool;
334 	uint32_t stop_threshold;
335 	uint32_t start_threshold;
336 
337 	if (flow_pool_id >= MAX_TXDESC_POOLS) {
338 		dp_err("invalid flow_pool_id %d", flow_pool_id);
339 		return NULL;
340 	}
341 	pool = &soc->tx_desc[flow_pool_id];
342 	qdf_spin_lock_bh(&pool->flow_pool_lock);
343 	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
344 		dp_tx_flow_pool_reattach(pool);
345 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
346 		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
347 		       pool->status, pool->pool_create_cnt);
348 		return pool;
349 	}
350 
351 	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
352 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
353 		dp_err("dp_tx_desc_pool_alloc failed flow_pool_id: %d",
354 			flow_pool_id);
355 		return NULL;
356 	}
357 
358 	if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) {
359 		dp_tx_desc_pool_free(soc, flow_pool_id);
360 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
361 		dp_err("dp_tx_desc_pool_init failed flow_pool_id: %d",
362 			flow_pool_id);
363 		return NULL;
364 	}
365 
366 	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
367 	start_threshold = stop_threshold +
368 		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);
369 
370 	pool->flow_pool_id = flow_pool_id;
371 	pool->pool_size = flow_pool_size;
372 	pool->avail_desc = flow_pool_size;
373 	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
374 	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
375 				   flow_pool_size);
376 	pool->pool_create_cnt++;
377 
378 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
379 
380 	return pool;
381 }
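
/*
 * Editor's note: the stop threshold comes from the INI as a percentage of the
 * pool, and the start threshold sits a configured offset above it, giving the
 * pool hysteresis so it does not flap between paused and unpaused. Queues are
 * paused once fewer than stop_th descriptors remain and (as the reattach path
 * above suggests) resumed only after completions bring the available count
 * back above start_th.
 */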
382 
383 /**
384  * dp_is_tx_flow_pool_delete_allowed() - Can flow pool be deleted
385  * @soc: Handle to struct dp_soc
386  * @vdev_id: vdev_id corresponding to flow pool
387  *
388  * Check whether it is OK to go ahead and delete the flow pool. One such case
389  * is MLO, where the flow pool must not be deleted while a link switch happens.
390  *
391  * Return: true if the flow pool may be deleted, false otherwise
392  */
393 static bool dp_is_tx_flow_pool_delete_allowed(struct dp_soc *soc,
394 					      uint8_t vdev_id)
395 {
396 	struct dp_peer *peer;
397 	struct dp_peer *tmp_peer;
398 	struct dp_vdev *vdev = NULL;
399 	bool is_allow = true;
400 
401 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MISC);
402 
403 	/* only check for sta mode */
404 	if (!vdev || vdev->opmode != wlan_op_mode_sta)
405 		goto comp_ret;
406 
407 	/*
408 	 * Deleting the current pool is disallowed only when the vdev belongs
409 	 * to an MLO connection and is connected; for a legacy connection it
410 	 * is always allowed.
411 	 */
412 	qdf_spin_lock_bh(&vdev->peer_list_lock);
413 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
414 			   peer_list_elem,
415 			   tmp_peer) {
416 		if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
417 					QDF_STATUS_SUCCESS) {
418 			if (peer->valid && !peer->sta_self_peer)
419 				is_allow = false;
420 			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
421 		}
422 	}
423 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
424 
425 comp_ret:
426 	if (vdev)
427 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);
428 
429 	return is_allow;
430 }
431 
432 /**
433  * dp_tx_delete_flow_pool() - delete flow pool
434  * @soc: Handle to struct dp_soc
435  * @pool: flow pool pointer
436  * @force: free pool forcefully
437  *
438  * Delete flow_pool if all tx descriptors are available.
439  * Otherwise put it in FLOW_POOL_INVALID state.
440  * If force is set then pull all available descriptors to
441  * global pool.
442  *
443  * Return: 0 for success or error
444  */
445 int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
446 	bool force)
447 {
448 	struct dp_vdev *vdev;
449 	enum flow_pool_status pool_status;
450 
451 	if (!soc || !pool) {
452 		dp_err("pool or soc is NULL");
453 		QDF_ASSERT(0);
454 		return ENOMEM;
455 	}
456 
457 	dp_info("pool_id %d create_cnt=%d, avail_desc=%d, size=%d, status=%d",
458 		pool->flow_pool_id, pool->pool_create_cnt, pool->avail_desc,
459 		pool->pool_size, pool->status);
460 
461 	if (!dp_is_tx_flow_pool_delete_allowed(soc, pool->flow_pool_id)) {
462 		dp_info("skip pool id %d delete as it's not allowed",
463 			pool->flow_pool_id);
464 		return -EAGAIN;
465 	}
466 
467 	qdf_spin_lock_bh(&pool->flow_pool_lock);
468 	if (!pool->pool_create_cnt) {
469 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
470 		dp_err("flow pool either not created or already deleted");
471 		return -ENOENT;
472 	}
473 	pool->pool_create_cnt--;
474 	if (pool->pool_create_cnt) {
475 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
476 		dp_err("pool is still attached, pending detach %d",
477 		       pool->pool_create_cnt);
478 		return -EAGAIN;
479 	}
480 
481 	if (pool->avail_desc < pool->pool_size) {
482 		pool_status = pool->status;
483 		pool->status = FLOW_POOL_INVALID;
484 		dp_tx_flow_ctrl_reset_subqueues(soc, pool, pool_status);
485 
486 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
487 		/* Clear the vdev association of TX descriptors belonging to this vdev */
488 		vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
489 					     DP_MOD_ID_MISC);
490 		if (vdev) {
491 			dp_tx_desc_flush(vdev->pdev, vdev, false);
492 			dp_vdev_unref_delete(soc, vdev,
493 					     DP_MOD_ID_MISC);
494 		}
495 		dp_err("avail desc less than pool size");
496 		return -EAGAIN;
497 	}
498 
499 	/* We have all the descriptors for the pool, we can delete the pool */
500 	dp_tx_desc_pool_deinit(soc, pool->flow_pool_id);
501 	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
502 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
503 	return 0;
504 }
505 
506 /**
507  * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
508  * @pdev: Handle to struct dp_pdev
509  * @pool: flow_pool
510  * @vdev_id: flow_id/vdev_id
511  *
512  * Return: none
513  */
514 static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
515 	struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
516 {
517 	struct dp_vdev *vdev;
518 	struct dp_soc *soc = pdev->soc;
519 
520 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
521 	if (!vdev) {
522 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
523 		   "%s: invalid vdev_id %d",
524 		   __func__, vdev_id);
525 		return;
526 	}
527 
528 	vdev->pool = pool;
529 	qdf_spin_lock_bh(&pool->flow_pool_lock);
530 	pool->pool_owner_ctx = soc;
531 	pool->flow_pool_id = vdev_id;
532 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
533 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
534 }
535 
536 /**
537  * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
538  * @pdev: Handle to struct dp_pdev
539  * @pool: flow_pool
540  * @vdev_id: flow_id/vdev_id
541  *
542  * Return: none
543  */
544 static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
545 		struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
546 {
547 	struct dp_vdev *vdev;
548 	struct dp_soc *soc = pdev->soc;
549 
550 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
551 	if (!vdev) {
552 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
553 		   "%s: invalid vdev_id %d",
554 		   __func__, vdev_id);
555 		return;
556 	}
557 
558 	vdev->pool = NULL;
559 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
560 }
561 
562 /**
563  * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
564  * @pdev: Handle to struct dp_pdev
565  * @flow_id: flow id
566  * @flow_type: flow type
567  * @flow_pool_id: pool id
568  * @flow_pool_size: pool size
569  *
570  * Process below target to host message
571  * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
572  *
573  * Return: none
574  */
575 QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
576 	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size)
577 {
578 	struct dp_soc *soc = pdev->soc;
579 	struct dp_tx_desc_pool_s *pool;
580 	enum htt_flow_type type = flow_type;
581 
582 
583 	dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
584 		flow_id, flow_type, flow_pool_id, flow_pool_size);
585 
586 	if (qdf_unlikely(!soc)) {
587 		dp_err("soc is NULL");
588 		return QDF_STATUS_E_FAULT;
589 	}
590 	soc->pool_stats.pool_map_count++;
591 
592 	pool = dp_tx_create_flow_pool(soc, flow_pool_id,
593 			flow_pool_size);
594 	if (!pool) {
595 		dp_err("creation of flow_pool %d size %d failed",
596 		       flow_pool_id, flow_pool_size);
597 		return QDF_STATUS_E_RESOURCES;
598 	}
599 
600 	switch (type) {
601 
602 	case FLOW_TYPE_VDEV:
603 		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
604 		break;
605 	default:
606 		dp_err("flow type %d not supported", type);
607 		break;
608 	}
609 
610 	return QDF_STATUS_SUCCESS;
611 }
612 
613 /**
614  * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
615  * @pdev: Handle to struct dp_pdev
616  * @flow_id: flow id
617  * @flow_type: flow type
618  * @flow_pool_id: pool id
619  *
620  * Process the below target-to-host message:
621  * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
622  *
623  * Return: none
624  */
625 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
626 	uint8_t flow_type, uint8_t flow_pool_id)
627 {
628 	struct dp_soc *soc = pdev->soc;
629 	struct dp_tx_desc_pool_s *pool;
630 	enum htt_flow_type type = flow_type;
631 
632 	dp_info("flow_id %d flow_type %d flow_pool_id %d", flow_id, flow_type,
633 		flow_pool_id);
634 
635 	if (qdf_unlikely(!pdev)) {
636 		dp_err("pdev is NULL");
637 		return;
638 	}
639 	soc->pool_stats.pool_unmap_count++;
640 
641 	pool = &soc->tx_desc[flow_pool_id];
642 	dp_info("pool status: %d", pool->status);
643 
644 	if (pool->status == FLOW_POOL_INACTIVE) {
645 		dp_err("flow pool id: %d is inactive, ignore unmap",
646 			flow_pool_id);
647 		return;
648 	}
649 
650 	switch (type) {
651 
652 	case FLOW_TYPE_VDEV:
653 		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
654 		break;
655 	default:
656 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
657 		   "%s: flow type %d not supported !!!",
658 		   __func__, type);
659 		return;
660 	}
661 
662 	/* only delete if all descriptors are available */
663 	dp_tx_delete_flow_pool(soc, pool, false);
664 }
665 
666 /**
667  * dp_tx_flow_control_init() - Initialize tx flow control
668  * @soc: Handle to struct dp_soc
669  *
670  * Return: none
671  */
672 void dp_tx_flow_control_init(struct dp_soc *soc)
673 {
674 	qdf_spinlock_create(&soc->flow_pool_array_lock);
675 }
676 
677 /**
678  * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
679  * @soc: Handle to struct dp_soc
680  *
681  * Return: none
682  */
683 static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
684 {
685 	struct dp_tx_desc_pool_s *tx_desc_pool;
686 	int i;
687 
688 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
689 		tx_desc_pool = &((soc)->tx_desc[i]);
690 		if (!tx_desc_pool->desc_pages.num_pages)
691 			continue;
692 
693 		dp_tx_desc_pool_deinit(soc, i);
694 		dp_tx_desc_pool_free(soc, i);
695 	}
696 }
697 
698 /**
699  * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
700  * @soc: Handle to struct dp_soc
701  *
702  * Return: none
703  */
704 void dp_tx_flow_control_deinit(struct dp_soc *soc)
705 {
706 	dp_tx_desc_pool_dealloc(soc);
707 
708 	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
709 }
710 
711 /**
712  * dp_txrx_register_pause_cb() - Register pause callback
713  * @handle: Handle to struct cdp_soc_t
714  * @pause_cb: Tx pause_cb
715  *
716  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on invalid input
717  */
718 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
719 	tx_pause_callback pause_cb)
720 {
721 	struct dp_soc *soc = (struct dp_soc *)handle;
722 
723 	if (!soc || !pause_cb) {
724 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
725 			FL("soc or pause_cb is NULL"));
726 		return QDF_STATUS_E_INVAL;
727 	}
728 	soc->pause_cb = pause_cb;
729 
730 	return QDF_STATUS_SUCCESS;
731 }
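
/*
 * Editor's sketch (hypothetical, for illustration only): a minimal pause
 * callback and its registration. The parameter list below is assumed from how
 * soc->pause_cb() is invoked earlier in this file (pool/vdev id, netif action,
 * reason); the authoritative prototype is the tx_pause_callback typedef.
 */
static void dp_example_tx_pause_cb(uint8_t vdev_id,
				   enum netif_action_type action,
				   enum netif_reason_type reason)
{
	/* Map the action (e.g. WLAN_NETIF_BE_BK_QUEUE_ON) onto the OS netdev
	 * queues for this vdev; the details are OS specific. */
	dp_info("vdev %u: netif action %d, reason %d", vdev_id, action, reason);
}

/* Typical registration during driver start-up (hypothetical call site):
 *	dp_txrx_register_pause_cb(soc_hdl, dp_example_tx_pause_cb);
 */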
732 
733 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
734 			       uint8_t vdev_id)
735 {
736 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
737 	struct dp_pdev *pdev =
738 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
739 	int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
740 
741 	if (!pdev) {
742 		dp_err("pdev is NULL");
743 		return QDF_STATUS_E_INVAL;
744 	}
745 
746 	return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
747 					   vdev_id, tx_ring_size);
748 }
749 
750 void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
751 			   uint8_t vdev_id)
752 {
753 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
754 	struct dp_pdev *pdev =
755 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
756 
757 	if (!pdev) {
758 		dp_err("pdev is NULL");
759 		return;
760 	}
761 
762 	return dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
763 					     FLOW_TYPE_VDEV, vdev_id);
764 }
765