/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */
#include "dp_types.h"
#include "dp_tx_desc.h"

#include <cdp_txrx_handle.h>
#include "dp_internal.h"
#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3

/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 *
 * @ctx: Handle to struct dp_soc.
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(void *ctx)
{
	struct dp_soc *soc = ctx;
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"No of pool unmap received %d", pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Pkt dropped due to unavailability of pool %d",
		pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin locks: always acquire in the order
	 * flow_pool_array_lock -> flow_pool_lock.
	 */
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;
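		/*
		 * Snapshot the pool under its lock, then drop both locks
		 * while tracing so no spinlock is held across the trace
		 * output.
		 */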
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Flow_pool_id %d :: status %d",
			tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Total %d :: Available %d",
			tmp_pool.pool_size, tmp_pool.avail_desc);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Start threshold %d :: Stop threshold %d",
			tmp_pool.start_th, tmp_pool.stop_th);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Member flow_id %d :: flow_type %d",
			tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Pkt dropped due to unavailability of descriptors %d",
			tmp_pool.pkt_drop_no_desc);
		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
 *
 * @soc: Handle to struct dp_soc.
 *
 * Return: None
 */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
{
	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: soc is null\n", __func__);
		return;
	}
	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
}

/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: invalid flow_pool_id %d", __func__, flow_pool_id);
		return NULL;
	}
	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
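	/*
	 * The target can send FLOW_POOL_MAP for a pool that is already
	 * live; treat that as a re-attach and just refresh its state.
	 */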
	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: flow pool already allocated, attached %d times\n",
			__func__, pool->pool_create_cnt);
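		/*
		 * Refresh the pause state from the current fill level and
		 * balance the attach count against the detach performed in
		 * dp_tx_delete_flow_pool().
		 */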
		if (pool->avail_desc > pool->start_th)
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		else
			pool->status = FLOW_POOL_ACTIVE_PAUSED;
		pool->pool_create_cnt++;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

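	/*
	 * stop_th is the fill level at which the netif queues are paused;
	 * start_th is the level at which they are resumed.
	 */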
	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	/* INI is in percentage so divide by 100 */
	pool->start_th = (start_threshold * flow_pool_size)/100;
	pool->stop_th = (stop_threshold * flow_pool_size)/100;
	pool->pool_create_cnt++;

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}

/**
 * dp_tx_delete_flow_pool() - delete flow pool
 * @soc: Handle to struct dp_soc
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete the flow_pool if all tx descriptors are available.
 * Otherwise put it in the FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * the global pool.
 *
 * Return: 0 for success or error
 */
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
	bool force)
{
	if (!soc || !pool) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: pool or soc is NULL\n", __func__);
		QDF_ASSERT(0);
		return -ENOMEM;
	}

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (!pool->pool_create_cnt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "flow pool either not created or already deleted");
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return -ENOENT;
	}
	pool->pool_create_cnt--;
	if (pool->pool_create_cnt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pool is still attached, pending detach %d\n",
			  __func__, pool->pool_create_cnt);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return -EAGAIN;
	}

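	/*
	 * Some descriptors are still in flight: mark the pool
	 * FLOW_POOL_INVALID so the free path can release it once the
	 * last descriptor is returned, instead of deleting it out from
	 * under in-flight frames.
	 */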
	if (pool->avail_desc < pool->pool_size) {
		pool->status = FLOW_POOL_INVALID;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return -EAGAIN;
	}

	/* We have all the descriptors for the pool, we can delete the pool */
	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	return 0;
}

/**
 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id/vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
	struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
					(struct cdp_pdev *)pdev, vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: invalid vdev_id %d\n",
		   __func__, vdev_id);
		return;
	}

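	/*
	 * Publish the pool on the vdev and stamp the owning soc on the
	 * pool so later descriptor operations can reach back to it.
	 */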
	vdev->pool = pool;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->pool_owner_ctx = soc;
	pool->flow_pool_id = vdev_id;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id/vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
		struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
					(struct cdp_pdev *)pdev, vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: invalid vdev_id %d\n",
		   __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
}

/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
		__func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: soc is NULL", __func__);
		return QDF_STATUS_E_FAULT;
	}
	soc->pool_stats.pool_map_count++;

	pool = dp_tx_create_flow_pool(soc, flow_pool_id,
			flow_pool_size);
	if (pool == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			   "%s: creation of flow_pool %d size %d failed\n",
			   __func__, flow_pool_id, flow_pool_size);
		return QDF_STATUS_E_RESOURCES;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: flow type %d not supported !!!\n",
		   __func__, type);
		break;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id)
{
	struct dp_soc *soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%s: flow_id %d flow_type %d flow_pool_id %d\n",
		__func__, flow_id, flow_type, flow_pool_id);

	/* validate pdev before dereferencing it for soc */
	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: pdev is NULL", __func__);
		return;
	}
	soc = pdev->soc;
	soc->pool_stats.pool_unmap_count++;

	/* &soc->tx_desc[i] is never NULL; validate the index instead */
	if (qdf_unlikely(flow_pool_id >= MAX_TXDESC_POOLS)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: flow_pool not available flow_pool_id %d\n",
		   __func__, flow_pool_id);
		return;
	}
	pool = &soc->tx_desc[flow_pool_id];

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: flow type %d not supported !!!\n",
		   __func__, type);
		return;
	}

	/* only delete if all descriptors are available */
	dp_tx_delete_flow_pool(soc, pool, false);
}

/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}

/**
 * dp_txrx_register_pause_cb() - Register pause callback
 * @handle: Handle to struct dp_soc
 * @pause_cb: Tx pause callback
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
	tx_pause_callback pause_cb)
{
	struct dp_soc *soc = (struct dp_soc *)handle;

	if (!soc || !pause_cb) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("soc or pause_cb is NULL"));
		return QDF_STATUS_E_INVAL;
	}
	soc->pause_cb = pause_cb;

	return QDF_STATUS_SUCCESS;
}

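/**
 * dp_tx_flow_pool_map() - Create and map a tx-ring-sized flow pool
 * @handle: Handle to struct dp_soc (struct cdp_soc_t)
 * @pdev: Handle to struct dp_pdev (struct cdp_pdev)
 * @vdev_id: vdev id; used as both flow id and flow pool id
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
 */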
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, struct cdp_pdev *pdev,
				uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)handle;
	int tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);

	return dp_tx_flow_pool_map_handler((struct dp_pdev *)pdev, vdev_id,
					   FLOW_TYPE_VDEV, vdev_id,
					   tx_ring_size);
}

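/**
 * dp_tx_flow_pool_unmap() - Unmap and delete the flow pool of a vdev
 * @soc: Handle to struct dp_soc (struct cdp_soc_t)
 * @pdev: Handle to struct dp_pdev (struct cdp_pdev)
 * @vdev_id: vdev id; used as both flow id and flow pool id
 *
 * Return: none
 */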
void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
			   uint8_t vdev_id)
{
	dp_tx_flow_pool_unmap_handler((struct dp_pdev *)pdev, vdev_id,
				      FLOW_TYPE_VDEV, vdev_id);
}