xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */
#include "dp_types.h"
#include "dp_tx_desc.h"

#include <cdp_txrx_handle.h>
#include "dp_internal.h"

#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3
/**
 * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
 *
 * @ctx: Handle to struct dp_soc.
 *
 * Return: none
 */
void dp_tx_dump_flow_pool_info(void *ctx)
{
	struct dp_soc *soc = ctx;
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"No of pool unmap received %d", pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Pkt dropped due to unavailability of pool %d",
		pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin lock.
	 * Always take locks in the order below:
	 * flow_pool_array_lock -> flow_pool_lock
	 */
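	/*
	 * Each pool is snapshotted into tmp_pool under its own lock, and
	 * both locks are dropped while the snapshot is traced so that no
	 * spinlock is held across the QDF_TRACE calls; the array lock is
	 * re-acquired before moving on to the next pool.
	 */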
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Flow_pool_id %d :: status %d",
			tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Total %d :: Available %d",
			tmp_pool.pool_size, tmp_pool.avail_desc);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Start threshold %d :: Stop threshold %d",
			tmp_pool.start_th, tmp_pool.stop_th);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Member flow_id %d :: flow_type %d",
			tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Pkt dropped due to unavailability of descriptors %d",
			tmp_pool.pkt_drop_no_desc);
		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
 *
 * @soc: Handle to struct dp_soc.
 *
 * Return: None
 */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
{
	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: soc is null\n", __func__);
		return;
	}
	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
}

/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: soc is NULL\n", __func__);
		return NULL;
	}
	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
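	/*
	 * A pool in FLOW_POOL_INVALID state was marked for deletion while
	 * descriptors were still outstanding (see dp_tx_delete_flow_pool());
	 * a new map request for the same id revives it rather than
	 * allocating a fresh pool.
	 */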
	if (pool->status == FLOW_POOL_INVALID) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: flow pool already allocated\n", __func__);
		if (pool->avail_desc > pool->start_th)
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		else
			pool->status = FLOW_POOL_ACTIVE_PAUSED;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return NULL;
	}

	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	/* INI is in percentage so divide by 100 */
	pool->start_th = (start_threshold * flow_pool_size) / 100;
	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
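	/*
	 * Worked example with illustrative INI values (not defaults from
	 * any particular configuration): stop_queue_th = 15 (%),
	 * start_queue_offset = 10 (%) and flow_pool_size = 1024 give
	 * stop_th = (15 * 1024) / 100 = 153 and
	 * start_th = ((15 + 10) * 1024) / 100 = 256, i.e. the flow is
	 * paused when 153 descriptors remain and resumed once 256 are free.
	 */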

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}

/**
 * dp_tx_delete_flow_pool() - delete flow pool
 * @soc: Handle to struct dp_soc
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 on success; ENOMEM on bad input, EAGAIN if descriptors
 * are still outstanding
 */
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
	bool force)
{
	if (!soc || !pool) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: pool or soc is NULL\n", __func__);
		QDF_ASSERT(0);
		return ENOMEM;
	}

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (pool->avail_desc < pool->pool_size) {
		pool->status = FLOW_POOL_INVALID;
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		return EAGAIN;
	}

	/* We have all the descriptors for the pool, we can delete the pool */
	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	return 0;
}

/**
 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow id / vdev id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
	struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
					(struct cdp_pdev *)pdev, vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: invalid vdev_id %d\n",
		   __func__, vdev_id);
		return;
	}

	vdev->pool = pool;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->pool_owner_ctx = soc;
	pool->flow_pool_id = vdev_id;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow id / vdev id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
		struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
					(struct cdp_pdev *)pdev, vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: invalid vdev_id %d\n",
		   __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->flow_pool_id = INVALID_FLOW_ID;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
		__func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: soc is NULL", __func__);
		return QDF_STATUS_E_FAULT;
	}
	soc->pool_stats.pool_map_count++;

	pool = dp_tx_create_flow_pool(soc, flow_pool_id,
			flow_pool_size);
	if (pool == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			   "%s: creation of flow_pool %d size %d failed\n",
			   __func__, flow_pool_id, flow_pool_size);
		return QDF_STATUS_E_RESOURCES;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: flow type %d not supported !!!\n",
		   __func__, type);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
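
/*
 * Illustrative call flow (the exact HTT plumbing lives outside this
 * file): on receiving HTT_T2H_MSG_TYPE_FLOW_POOL_MAP for a vdev, the
 * message handler is expected to invoke
 *
 *	dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
 *				    flow_pool_id, flow_pool_size);
 *
 * which creates (or revives) the descriptor pool and attaches it to
 * the vdev via dp_tx_flow_pool_vdev_map().
 */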

/**
 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id)
{
	struct dp_soc *soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%s: flow_id %d flow_type %d flow_pool_id %d\n",
		__func__, flow_id, flow_type, flow_pool_id);

	/* Check pdev before dereferencing it for the soc handle */
	if (qdf_unlikely(!pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: pdev is NULL", __func__);
		return;
	}
	soc = pdev->soc;
	soc->pool_stats.pool_unmap_count++;

	/* tx_desc[] has MAX_TXDESC_POOLS entries; reject out-of-range ids */
	if (qdf_unlikely(flow_pool_id >= MAX_TXDESC_POOLS)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: flow_pool not available flow_pool_id %d\n",
		   __func__, flow_pool_id);
		return;
	}
	pool = &soc->tx_desc[flow_pool_id];

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		   "%s: flow type %d not supported !!!\n",
		   __func__, type);
		return;
	}

	/* only delete if all descriptors are available */
	dp_tx_delete_flow_pool(soc, pool, false);
}

/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}

/**
 * dp_txrx_register_pause_cb() - Register pause callback
 * @handle: Handle to struct dp_soc
 * @pause_cb: Tx pause_cb
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
	tx_pause_callback pause_cb)
{
	struct dp_soc *soc = (struct dp_soc *)handle;

	if (!soc || !pause_cb) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("soc or pause_cb is NULL"));
		return QDF_STATUS_E_INVAL;
	}
	soc->pause_cb = pause_cb;

	return QDF_STATUS_SUCCESS;
}

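/*
 * cdp entry points: these thin wrappers map/unmap a vdev's flow pool,
 * using vdev_id as both the flow id and the pool id and sizing the
 * pool to the configured TX ring size.
 */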
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, struct cdp_pdev *pdev,
				uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)handle;
	int tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);

	return dp_tx_flow_pool_map_handler((struct dp_pdev *)pdev, vdev_id,
				FLOW_TYPE_VDEV, vdev_id, tx_ring_size);
}

void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev,
			   uint8_t vdev_id)
{
	dp_tx_flow_pool_unmap_handler((struct dp_pdev *)pdev, vdev_id,
				FLOW_TYPE_VDEV, vdev_id);
}
437