/*
 * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* OS abstraction libraries */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <qdf_util.h>           /* qdf_unlikely */

/* APIs for other modules */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */

/* internal header files relevant for all systems */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1 */
#include <ol_tx_desc.h>         /* ol_tx_desc */
#include <ol_tx_send.h>         /* ol_tx_send */
#include <ol_txrx.h>            /* ol_txrx_get_vdev_from_vdev_id */

/* internal header files relevant only for HL systems */
#include <ol_tx_queue.h>        /* ol_tx_enqueue */

/* internal header files relevant only for specific systems (Pronto) */
#include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
#include <ol_tx.h>
#include <ol_cfg.h>
#include <cdp_txrx_handle.h>

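/**
 * ol_txrx_vdev_pause() - pause a vdev's ll_pause tx queue
 * @soc_hdl: datapath soc handle (unused here)
 * @vdev_id: id of the vdev whose queue is being paused
 * @reason: OL_TXQ_PAUSE_REASON_* bit recording why the pause was requested
 * @pause_type: pause type (unused here)
 *
 * Records @reason in the vdev's pause-reason bitmap and marks the queue as
 * paused; frames submitted while any reason bit is set are held in the
 * vdev's pause queue until ol_txrx_vdev_unpause() clears every bit.
 */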
void ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			uint32_t reason, uint32_t pause_type)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);

	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev is NULL");
		return;
	}

	/* TO DO: log the queue pause */
	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter");

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	vdev->ll_pause.paused_reason |= reason;
	vdev->ll_pause.q_pause_cnt++;
	vdev->ll_pause.is_q_paused = true;
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	TX_SCHED_DEBUG_PRINT("Leave");
}

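/**
 * ol_txrx_vdev_unpause() - clear one pause reason for a vdev's tx queue
 * @soc_hdl: datapath soc handle (unused here)
 * @vdev_id: id of the vdev whose queue is being unpaused
 * @reason: OL_TXQ_PAUSE_REASON_* bit to clear
 * @pause_type: pause type (unused here)
 *
 * Clears @reason from the pause-reason bitmap; only when no reason bits
 * remain set is the queue marked unpaused and the backlog drained via
 * ol_tx_vdev_ll_pause_queue_send().
 */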
void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  uint32_t reason, uint32_t pause_type)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);

	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev is NULL");
		return;
	}

	/* TO DO: log the queue unpause */
	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter");

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason & reason) {
		vdev->ll_pause.paused_reason &= ~reason;
		if (!vdev->ll_pause.paused_reason) {
			vdev->ll_pause.is_q_paused = false;
			vdev->ll_pause.q_unpause_cnt++;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
			ol_tx_vdev_ll_pause_queue_send((void *)vdev);
		} else {
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} else {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	}
	TX_SCHED_DEBUG_PRINT("Leave");
}

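/**
 * ol_txrx_vdev_flush() - discard the vdev's entire pause-queue backlog
 * @soc_hdl: datapath soc handle (unused here)
 * @vdev_id: id of the vdev to flush
 *
 * Stops the pause-queue timer, unmaps any DMA-mapped frames that are not
 * IPA-owned, and frees every frame held in the vdev's ll_pause tx queue.
 */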
void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);

	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev is NULL");
		return;
	}

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	qdf_timer_stop(&vdev->ll_pause.timer);
	vdev->ll_pause.is_q_timer_on = false;
	while (vdev->ll_pause.txq.head) {
		qdf_nbuf_t next =
			qdf_nbuf_next(vdev->ll_pause.txq.head);
		qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
		if (QDF_NBUF_CB_PADDR(vdev->ll_pause.txq.head)) {
			if (!qdf_nbuf_ipa_owned_get(vdev->ll_pause.txq.head))
				qdf_nbuf_unmap(vdev->pdev->osdev,
					       vdev->ll_pause.txq.head,
					       QDF_DMA_TO_DEVICE);
		}
		qdf_nbuf_tx_free(vdev->ll_pause.txq.head,
				 QDF_NBUF_PKT_ERROR);
		vdev->ll_pause.txq.head = next;
	}
	vdev->ll_pause.txq.tail = NULL;
	vdev->ll_pause.txq.depth = 0;
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5

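/*
 * Download caps/thresholds used by ol_tx_get_max_to_send(): the number of
 * tx descriptors consumed during the previous throttle timer period selects
 * how many frames may be downloaded in the current one (LEVEL1 corresponds
 * to high consumption, i.e. a high PHY rate; LEVEL9 to very low consumption).
 */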
#define OL_TX_THROTTLE_MAX_SEND_LEVEL1 80
#define OL_TX_THROTTLE_MAX_SEND_LEVEL2 65
#define OL_TX_THROTTLE_MAX_SEND_LEVEL3 55
#define OL_TX_THROTTLE_MAX_SEND_LEVEL4 45
#define OL_TX_THROTTLE_MAX_SEND_LEVEL5 35
#define OL_TX_THROTTLE_MAX_SEND_LEVEL6 20
#define OL_TX_THROTTLE_MAX_SEND_LEVEL7 10
#define OL_TX_THROTTLE_MAX_SEND_LEVEL8 5
#define OL_TX_THROTTLE_MAX_SEND_LEVEL9 1

/**
 * ol_tx_get_max_to_send() - get the maximum number of packets that may be
 * downloaded in the current throttle period, based on the PHY rate inferred
 * from the previous period's descriptor consumption
 * @pdev: datapath pdev handle
 *
 * Return: maximum number of packets to send
 */
static int ol_tx_get_max_to_send(struct ol_txrx_pdev_t *pdev)
{
	uint16_t consume_num_last_timer;
	int max_to_send;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	if (!pdev->tx_throttle.prev_outstanding_num) {
		max_to_send = OL_TX_THROTTLE_MAX_SEND_LEVEL5;
	} else {
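		/*
		 * Descriptors consumed since the last timer run: the
		 * previously outstanding count minus what is still
		 * outstanding now (pool_size - num_free).  A large value
		 * implies the target drained frames quickly, i.e. a high
		 * PHY rate is in use.
		 */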
		consume_num_last_timer =
			(pdev->tx_throttle.prev_outstanding_num -
			 pdev->tx_desc.pool_size +
			 pdev->tx_desc.num_free);
		if (consume_num_last_timer >=
			OL_TX_THROTTLE_MAX_SEND_LEVEL1) {
			max_to_send = pdev->tx_throttle.tx_threshold;
		} else if (consume_num_last_timer >=
				OL_TX_THROTTLE_MAX_SEND_LEVEL2) {
			max_to_send =
				OL_TX_THROTTLE_MAX_SEND_LEVEL1;
		} else if (consume_num_last_timer >=
				OL_TX_THROTTLE_MAX_SEND_LEVEL3) {
			max_to_send =
				OL_TX_THROTTLE_MAX_SEND_LEVEL2;
		} else if (consume_num_last_timer >=
				OL_TX_THROTTLE_MAX_SEND_LEVEL4) {
			max_to_send =
				OL_TX_THROTTLE_MAX_SEND_LEVEL3;
		} else if (consume_num_last_timer >=
				OL_TX_THROTTLE_MAX_SEND_LEVEL5) {
			max_to_send =
				OL_TX_THROTTLE_MAX_SEND_LEVEL4;
		} else if (pdev->tx_throttle.prev_outstanding_num >
				consume_num_last_timer) {
			/*
			 * When the TX packet count is smaller than 35, a low
			 * PHY rate is most likely in use.  As long as
			 * pdev->tx_throttle.prev_outstanding_num is greater
			 * than consume_num_last_timer, the small TX packet
			 * count is not limited by the number of packets
			 * injected from the host.
			 */
			if (consume_num_last_timer >=
				OL_TX_THROTTLE_MAX_SEND_LEVEL6)
				max_to_send =
					OL_TX_THROTTLE_MAX_SEND_LEVEL6;
			else if (consume_num_last_timer >=
					OL_TX_THROTTLE_MAX_SEND_LEVEL7)
				max_to_send =
					OL_TX_THROTTLE_MAX_SEND_LEVEL7;
			else if (consume_num_last_timer >=
					OL_TX_THROTTLE_MAX_SEND_LEVEL8)
				max_to_send =
					OL_TX_THROTTLE_MAX_SEND_LEVEL8;
			else
				max_to_send =
					OL_TX_THROTTLE_MAX_SEND_LEVEL9;
		} else {
			/*
			 * Reaching here means the current PHY rate is hard
			 * to evaluate, so for safety max_to_send is set to
			 * OL_TX_THROTTLE_MAX_SEND_LEVEL5.
			 */
			max_to_send = OL_TX_THROTTLE_MAX_SEND_LEVEL5;
		}
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	return max_to_send;
}

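/**
 * ol_tx_vdev_ll_pause_queue_send_base() - drain a vdev's pause-queue backlog
 * @vdev: vdev whose pause queue should be drained
 *
 * Sends queued frames while the free tx descriptor count stays above the
 * OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN headroom, and re-arms the pause-queue
 * timer if a backlog remains.  Does nothing if the vdev is currently paused
 * for any reason.
 */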
static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
{
	int max_to_accept;

	if (!vdev)
		return;

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	if (vdev->ll_pause.paused_reason) {
		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		return;
	}

	/*
	 * Send as much of the backlog as possible, but leave some margin
	 * of unallocated tx descriptors that can be used for new frames
	 * being transmitted by other vdevs.
	 * Ideally there would be a scheduler, which would not only leave
	 * some margin for new frames for other vdevs, but also would
	 * fairly apportion the tx descriptors between multiple vdevs that
	 * have backlogs in their pause queues.
	 * However, the fairness benefit of having a scheduler for frames
	 * from multiple vdevs' pause queues is not sufficient to outweigh
	 * the extra complexity.
	 */
	max_to_accept = vdev->pdev->tx_desc.num_free -
		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
		qdf_nbuf_t tx_msdu;

		max_to_accept--;
		vdev->ll_pause.txq.depth--;
		tx_msdu = vdev->ll_pause.txq.head;
		if (tx_msdu) {
			vdev->ll_pause.txq.head = qdf_nbuf_next(tx_msdu);
			if (!vdev->ll_pause.txq.head)
				vdev->ll_pause.txq.tail = NULL;
			qdf_nbuf_set_next(tx_msdu, NULL);
			QDF_NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
						QDF_NBUF_TX_PKT_TXRX_DEQUEUE);
			tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
			/*
			 * It is unexpected that ol_tx_ll would reject the frame
			 * since we checked that there's room for it, though
			 * there's an infinitesimal possibility that between the
			 * time we checked the room available and now, a
			 * concurrent batch of tx frames used up all the room.
			 * For simplicity, just drop the frame.
			 */
			if (tx_msdu) {
				qdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
					       QDF_DMA_TO_DEVICE);
				qdf_nbuf_tx_free(tx_msdu, QDF_NBUF_PKT_ERROR);
			}
		}
	}
	if (vdev->ll_pause.txq.depth) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		if (!qdf_atomic_read(&vdev->delete.detaching)) {
			qdf_timer_start(&vdev->ll_pause.timer,
					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			vdev->ll_pause.is_q_timer_on = true;
		}
		if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
			vdev->ll_pause.q_overflow_cnt++;
	}

	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}

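/**
 * ol_tx_vdev_pause_queue_append() - append frames to a vdev's pause queue
 * @vdev: vdev whose pause queue receives the frames
 * @msdu_list: linked list of frames to enqueue
 * @start_timer: if set, (re)start the pause-queue flush timer
 *
 * Frames are enqueued only while the pause queue is below max_q_depth.
 *
 * Return: NULL if every frame was queued, otherwise the sub-list of frames
 * that did not fit, returned to the caller
 */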
static qdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      qdf_nbuf_t msdu_list, uint8_t start_timer)
{
	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);

		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(qdf_dp_trace(msdu_list,
			QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			qdf_nbuf_data_addr(msdu_list),
			sizeof(qdf_nbuf_data(msdu_list)), QDF_TX));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			qdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	if (vdev->ll_pause.txq.tail)
		qdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		qdf_timer_stop(&vdev->ll_pause.timer);
		if (!qdf_atomic_read(&vdev->delete.detaching)) {
			qdf_timer_start(&vdev->ll_pause.timer,
					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
			vdev->ll_pause.is_q_timer_on = true;
		}
	}
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}

/*
 * Store the tx frames in the vdev's pause queue if the vdev is paused, or if
 * a backlog/throttle phase is in effect.  Frames that do not fit because the
 * queue is already at max_q_depth are returned to the caller.
 */
qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (!msdu_list)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		if (qdf_unlikely((paused_reason &
				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
				 paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->
				    ethertype[0] << 8) |
				   (((struct ethernet_hdr_t *)
				     qdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/*
			 * Not paused, but there is a backlog of frames
			 * from a prior pause or throttle-off phase.
			 */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/*
			 * If throttling is disabled or the phase is "on",
			 * send the frames.
			 */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0) {
				/*
				 * Send as many frames as possible
				 * from the vdev's backlog.
				 */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/*
			 * Not paused, no throttling and no backlog -
			 * send the new frames.
			 */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}

/*
 * Run through the transmit queues of all the vdevs and
 * send the pending frames.
 */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	int max_to_send;        /* remaining budget of frames to send */
	qdf_nbuf_t tx_msdu;
	struct ol_txrx_vdev_t *vdev = NULL;
	uint8_t more;

	if (!pdev)
		return;

	if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
		return;

	/*
	 * Host-implemented thermal mitigation has a limitation at low PHY
	 * rates such as 11A 6M or 11B 11M.  Even after the host has entered
	 * the throttle-off state, if a large number of packets is queued to
	 * the ring buffer at a low PHY rate, the firmware has to stay in the
	 * active state for the whole throttle cycle.  The max_to_send value
	 * therefore has to be chosen carefully, so that the chip temperature
	 * does not rise sharply during high-temperature testing.
	 * prev_outstanding_num stores the outstanding count from the previous
	 * run; when pdev->tx_throttle.tx_timer fires again, the gap tells us
	 * whether a high or a low PHY rate is in use, so the right
	 * max_to_send can be chosen.
	 * The first time this function runs there is no prev_outstanding_num
	 * information, so the maximum number that is safe for all rates,
	 * OL_TX_THROTTLE_MAX_SEND_LEVEL5 (35), is used.
	 */
	max_to_send = ol_tx_get_max_to_send(pdev);

	/* round robin through the vdev queues for the given pdev */

	/*
	 * Potential improvement: download several frames from the same vdev
	 * at a time, since it is more likely that those frames could be
	 * aggregated together; remember which vdev was serviced last,
	 * so the next call to this function can resume the round-robin
	 * traversal where the current invocation left off.
	 */
	do {
		more = 0;
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
			if (vdev->ll_pause.txq.depth) {
				if (vdev->ll_pause.paused_reason) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				tx_msdu = vdev->ll_pause.txq.head;
				if (!tx_msdu) {
					qdf_spin_unlock_bh(&vdev->ll_pause.
							   mutex);
					continue;
				}

				max_to_send--;
				vdev->ll_pause.txq.depth--;

				vdev->ll_pause.txq.head =
					qdf_nbuf_next(tx_msdu);

				if (!vdev->ll_pause.txq.head)
					vdev->ll_pause.txq.tail = NULL;

				qdf_nbuf_set_next(tx_msdu, NULL);
				tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
				/*
				 * It is unexpected that ol_tx_ll would reject
				 * the frame, since we checked that there's
				 * room for it, though there's an infinitesimal
				 * possibility that between the time we checked
				 * the room available and now, a concurrent
				 * batch of tx frames used up all the room.
				 * For simplicity, just drop the frame.
				 */
				if (tx_msdu) {
					qdf_nbuf_unmap(pdev->osdev, tx_msdu,
						       QDF_DMA_TO_DEVICE);
					qdf_nbuf_tx_free(tx_msdu,
							 QDF_NBUF_PKT_ERROR);
				}
			}
			/* check if there are more msdus to transmit */
			if (vdev->ll_pause.txq.depth)
				more = 1;
			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
		}
	} while (more && max_to_send);

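	/*
	 * Record how many tx descriptors are outstanding now; the next timer
	 * run compares against this to estimate the PHY rate (see
	 * ol_tx_get_max_to_send()).
	 */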
	qdf_spin_lock_bh(&pdev->tx_mutex);
	pdev->tx_throttle.prev_outstanding_num =
		(pdev->tx_desc.pool_size - pdev->tx_desc.num_free);
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	/*
	 * Currently, as long as pdev->tx_throttle.current_throttle_level
	 * isn't THROTTLE_LEVEL_0, all TX data is scheduled by the TX
	 * throttle.  pdev->tx_throttle.tx_timer must therefore always be
	 * restarted at the end of each TX throttle pass, so that TX can
	 * still be scheduled during the remaining throttle-on time.
	 */
	qdf_timer_stop(&pdev->tx_throttle.tx_timer);
	qdf_timer_start(&pdev->tx_throttle.tx_timer,
			OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
}

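/**
 * ol_tx_vdev_ll_pause_queue_send() - drain a vdev's pause queue
 * @context: opaque pointer to the vdev (struct ol_txrx_vdev_t *)
 *
 * Thin wrapper around ol_tx_vdev_ll_pause_queue_send_base() that skips the
 * drain while thermal throttling is active; in that case the backlog is
 * handled by ol_tx_pdev_ll_pause_queue_send_all() instead.
 */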
void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	if (pdev &&
	    pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0)
		return;
	ol_tx_vdev_ll_pause_queue_send_base(vdev);
}

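/**
 * ol_txrx_register_tx_flow_control() - register OSIF flow-control callbacks
 * @soc_hdl: datapath soc handle (unused here)
 * @vdev_id: id of the vdev to register the callbacks on
 * @flowControl: OSIF callback invoked (via ol_txrx_flow_control_cb()) with a
 *	tx_resume indication
 * @osif_fc_ctx: OSIF context passed back to the callbacks
 * @flow_control_is_pause: callback that reports whether OSIF is paused
 *
 * Return: 0 on success, -EINVAL if the vdev cannot be found
 */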
int ol_txrx_register_tx_flow_control(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id,
				     ol_txrx_tx_flow_control_fp flowControl,
				     void *osif_fc_ctx,
				     ol_txrx_tx_flow_control_is_pause_fp
				     flow_control_is_pause)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid vdev_id %d", __func__, vdev_id);
		return -EINVAL;
	}

	qdf_spin_lock_bh(&vdev->flow_control_lock);
	vdev->osif_flow_control_cb = flowControl;
	vdev->osif_flow_control_is_pause = flow_control_is_pause;
	vdev->osif_fc_ctx = osif_fc_ctx;
	qdf_spin_unlock_bh(&vdev->flow_control_lock);
	return 0;
}

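/**
 * ol_txrx_deregister_tx_flow_control_cb() - deregister OSIF flow-control
 * callbacks
 * @soc_hdl: datapath soc handle (unused here)
 * @vdev_id: id of the vdev to deregister the callbacks from
 *
 * Return: 0 on success, -EINVAL if the vdev cannot be found
 */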
int ol_txrx_deregister_tx_flow_control_cb(struct cdp_soc_t *soc_hdl,
					  uint8_t vdev_id)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid vdev_id", __func__);
		return -EINVAL;
	}

	qdf_spin_lock_bh(&vdev->flow_control_lock);
	vdev->osif_flow_control_cb = NULL;
	vdev->osif_flow_control_is_pause = NULL;
	vdev->osif_fc_ctx = NULL;
	qdf_spin_unlock_bh(&vdev->flow_control_lock);
	return 0;
}

/**
 * ol_txrx_get_tx_resource() - check whether the tx resource count is above
 * the low watermark
 * @soc_hdl: soc handle
 * @pdev_id: datapath pdev identifier
 * @peer_addr: peer mac address
 * @low_watermark: low watermark
 * @high_watermark_offset: high watermark offset value
 *
 * Return: true if enough tx descriptors are free (or the pdev/peer cannot be
 * resolved), false if the free count is below @low_watermark and the
 * OS tx queue has been paused
 */
bool
ol_txrx_get_tx_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			struct qdf_mac_addr peer_addr,
			unsigned int low_watermark,
			unsigned int high_watermark_offset)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev =
				ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	ol_txrx_vdev_handle vdev;

	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("pdev is NULL");
		return true;
	}

	vdev = ol_txrx_get_vdev_by_peer_addr(ol_txrx_pdev_t_to_cdp_pdev(pdev),
					     peer_addr);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: Invalid peer address: " QDF_MAC_ADDR_FMT,
			  __func__, QDF_MAC_ADDR_REF(peer_addr.bytes));
		/* Return true so the caller does not conclude that the
		 * resource count is below low_watermark.
		 * sta_id validation is done in ol_tx_send_data_frame;
		 * if the sta_id is not registered, the host will drop
		 * the packet there.
		 */
		return true;
	}

	qdf_spin_lock_bh(&vdev->pdev->tx_mutex);

	if (vdev->pdev->tx_desc.num_free < (uint16_t)low_watermark) {
		vdev->tx_fl_lwm = (uint16_t)low_watermark;
		vdev->tx_fl_hwm =
			(uint16_t)(low_watermark + high_watermark_offset);
		/* Not enough free resources, stop the TX OS Q */
		qdf_atomic_set(&vdev->os_q_paused, 1);
		qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
		return false;
	}
	qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
	return true;
}

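/**
 * ol_txrx_ll_set_tx_pause_q_depth() - set a vdev's maximum pause-queue depth
 * @soc_hdl: datapath soc handle (unused here)
 * @vdev_id: id of the vdev to configure
 * @pause_q_depth: maximum number of frames the pause queue may hold
 *
 * Return: 0 on success, -EINVAL if the vdev cannot be found
 */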
int ol_txrx_ll_set_tx_pause_q_depth(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				    int pause_q_depth)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid vdev_id %d", __func__, vdev_id);
		return -EINVAL;
	}

	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
	vdev->ll_pause.max_q_depth = pause_q_depth;
	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return 0;
}

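/**
 * ol_txrx_flow_control_cb() - invoke the registered OSIF flow-control callback
 * @soc_hdl: datapath soc handle (unused here)
 * @vdev_id: id of the vdev whose callback should be invoked
 * @tx_resume: flow-control indication passed through to OSIF
 */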
void ol_txrx_flow_control_cb(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			     bool tx_resume)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);

	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev is NULL");
		return;
	}

	qdf_spin_lock_bh(&vdev->flow_control_lock);
	if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
		vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
	qdf_spin_unlock_bh(&vdev->flow_control_lock);
}

/**
 * ol_txrx_flow_control_is_pause() - is osif paused by flow control
 * @vdev: vdev handle
 *
 * Return: true if osif is paused by flow control
 */
static bool ol_txrx_flow_control_is_pause(ol_txrx_vdev_handle vdev)
{
	bool is_pause = false;

	if ((vdev->osif_flow_control_is_pause) && (vdev->osif_fc_ctx))
		is_pause = vdev->osif_flow_control_is_pause(vdev->osif_fc_ctx);

	return is_pause;
}

/**
 * ol_tx_flow_ct_unpause_os_q() - unpause the OS tx queues of eligible vdevs
 * @pdev: physical device object
 *
 * Return: None
 */
void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
	struct ol_txrx_vdev_t *vdev;
	struct cdp_soc_t *soc_hdl = ol_txrx_soc_t_to_cdp_soc_t(pdev->soc);

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if ((qdf_atomic_read(&vdev->os_q_paused) &&
		     (vdev->tx_fl_hwm != 0)) ||
		     ol_txrx_flow_control_is_pause(vdev)) {
			qdf_spin_lock(&pdev->tx_mutex);
			if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) {
				qdf_atomic_set(&vdev->os_q_paused, 0);
				qdf_spin_unlock(&pdev->tx_mutex);
				ol_txrx_flow_control_cb(soc_hdl,
							vdev->vdev_id, true);
			} else {
				qdf_spin_unlock(&pdev->tx_mutex);
			}
		}
	}
}