1 /*
2 * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * This file was originally distributed by Qualcomm Atheros, Inc.
22 * under proprietary terms before Copyright ownership was assigned
23 * to the Linux Foundation.
24 */
25
26 #include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
27 #include <qdf_atomic.h> /* qdf_atomic_read, etc. */
28 #include <ol_cfg.h> /* ol_cfg_addba_retry */
29 #include <htt.h> /* HTT_TX_EXT_TID_MGMT */
30 #include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
31 #include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
32 #include <ol_txrx_ctrl_api.h> /* ol_txrx_sync, ol_tx_addba_conf */
33 #include <cdp_txrx_tx_throttle.h>
34 #include <ol_ctrl_txrx_api.h> /* ol_ctrl_addba_req */
35 #include <ol_txrx_internal.h> /* TXRX_ASSERT1, etc. */
36 #include <ol_tx_desc.h> /* ol_tx_desc, ol_tx_desc_frame_list_free */
37 #include <ol_tx.h> /* ol_tx_vdev_ll_pause_queue_send */
38 #include <ol_tx_sched.h> /* ol_tx_sched_notify, etc. */
39 #include <ol_tx_queue.h>
40 #include <ol_txrx.h> /* ol_tx_desc_pool_size_hl */
41 #include <ol_txrx_dbg.h> /* ENABLE_TX_QUEUE_LOG */
42 #include <qdf_types.h> /* bool */
43 #include "cdp_txrx_flow_ctrl_legacy.h"
44 #include <ol_txrx_peer_find.h>
45 #include <cdp_txrx_handle.h>
46
47 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
48 /**
49 * ol_txrx_thermal_pause() - pause due to thermal mitigation
50 * @pdev: pdev handle
51 *
52 * Return: none
53 */
54 static inline
ol_txrx_thermal_pause(struct ol_txrx_pdev_t * pdev)55 void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
56 {
57 ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
58 }
59
60 /**
61 * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
62 * @pdev: pdev handle
63 *
64 * Return: none
65 */
66 static inline
ol_txrx_thermal_unpause(struct ol_txrx_pdev_t * pdev)67 void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
68 {
69 ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
70 }
71 #else
/**
 * ol_txrx_thermal_pause() - stop tx traffic for thermal mitigation
 * @pdev: pdev handle
 *
 * No-op when tx flow control V2 is not enabled; pausing is handled
 * through ol_txrx_throttle_pause() on this configuration.
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
{
}
82
/**
 * ol_txrx_thermal_unpause() - restart tx traffic after thermal mitigation
 * @pdev: pdev handle
 *
 * Without tx flow control V2, kick the legacy LL pause queues so any
 * frames queued while throttled get sent out.
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_pdev_ll_pause_queue_send_all(pdev);
}
94 #endif
95
ol_tx_pdev_throttle_phase_timer(void * context)96 static void ol_tx_pdev_throttle_phase_timer(void *context)
97 {
98 struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
99 int ms;
100 enum throttle_level cur_level;
101 enum throttle_phase cur_phase;
102
103 /* update the phase */
104 pdev->tx_throttle.current_throttle_phase++;
105
106 if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_MAX)
107 pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
108
109 if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF) {
110 /* Traffic is stopped */
111 ol_txrx_dbg(
112 "throttle phase --> OFF");
113 ol_txrx_throttle_pause(pdev);
114 ol_txrx_thermal_pause(pdev);
115 pdev->tx_throttle.prev_outstanding_num = 0;
116 cur_level = pdev->tx_throttle.current_throttle_level;
117 cur_phase = pdev->tx_throttle.current_throttle_phase;
118 ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
119 if (pdev->tx_throttle.current_throttle_level !=
120 THROTTLE_LEVEL_0) {
121 ol_txrx_dbg(
122 "start timer %d ms", ms);
123 qdf_timer_start(&pdev->tx_throttle.
124 phase_timer, ms);
125 }
126 } else {
127 /* Traffic can go */
128 ol_txrx_dbg(
129 "throttle phase --> ON");
130 ol_txrx_throttle_unpause(pdev);
131 ol_txrx_thermal_unpause(pdev);
132 cur_level = pdev->tx_throttle.current_throttle_level;
133 cur_phase = pdev->tx_throttle.current_throttle_phase;
134 ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
135 if (pdev->tx_throttle.current_throttle_level !=
136 THROTTLE_LEVEL_0) {
137 ol_txrx_dbg("start timer %d ms", ms);
138 qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
139 }
140 }
141 }
142
143 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * ol_tx_pdev_throttle_tx_timer() - flush the legacy LL pause queues
 * @context: opaque pointer to the pdev object
 */
static void ol_tx_pdev_throttle_tx_timer(void *context)
{
	ol_tx_pdev_ll_pause_queue_send_all((struct ol_txrx_pdev_t *)context);
}
150 #endif
151
152 #ifdef CONFIG_HL_SUPPORT
153
154 /**
155 * ol_tx_set_throttle_phase_time() - Set the thermal mitgation throttle phase
156 * and time
157 * @pdev: the peer device object
158 * @level: throttle phase level
159 * @ms: throttle time
160 *
161 * Return: None
162 */
163 static void
ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t * pdev,int level,int * ms)164 ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t *pdev, int level, int *ms)
165 {
166 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
167
168 /* Set the phase */
169 if (level != THROTTLE_LEVEL_0) {
170 pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
171 *ms = pdev->tx_throttle.throttle_time_ms[level]
172 [THROTTLE_PHASE_OFF];
173
174 /* pause all */
175 ol_txrx_throttle_pause(pdev);
176 } else {
177 pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_ON;
178 *ms = pdev->tx_throttle.throttle_time_ms[level]
179 [THROTTLE_PHASE_ON];
180
181 /* unpause all */
182 ol_txrx_throttle_unpause(pdev);
183 }
184 }
185 #else
186
187 static void
ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t * pdev,int level,int * ms)188 ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t *pdev, int level, int *ms)
189 {
190 int phase_on_time, phase_off_time;
191
192 qdf_timer_stop(&pdev->tx_throttle.phase_timer);
193
194 phase_on_time =
195 pdev->tx_throttle.throttle_time_ms[level][THROTTLE_PHASE_ON];
196 phase_off_time =
197 pdev->tx_throttle.throttle_time_ms[level][THROTTLE_PHASE_OFF];
198 if (phase_on_time && phase_off_time) {
199 pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
200 *ms =
201 pdev->tx_throttle.throttle_time_ms[level][THROTTLE_PHASE_OFF];
202 ol_txrx_throttle_pause(pdev);
203 ol_txrx_thermal_pause(pdev);
204 } else if (!phase_off_time) {
205 pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
206 *ms = 0;
207 ol_txrx_throttle_unpause(pdev);
208 ol_txrx_thermal_unpause(pdev);
209 } else {
210 pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
211 *ms = 0;
212 ol_txrx_throttle_pause(pdev);
213 ol_txrx_thermal_pause(pdev);
214 }
215 }
216 #endif
217
/**
 * ol_tx_throttle_set_level() - apply a thermal mitigation throttle level
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the data physical device object
 * @level: throttle level to apply (0 .. THROTTLE_LEVEL_MAX - 1)
 *
 * Records the new level, derives the starting phase and its duration,
 * and arms the phase timer when the duty cycle requires one.
 */
void ol_tx_throttle_set_level(struct cdp_soc_t *soc_hdl,
			      uint8_t pdev_id, int level)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	int ms = 0;

	/* level is signed: also reject negative values, which would index
	 * the throttle_time_ms table out of bounds
	 */
	if (level < 0 || level >= THROTTLE_LEVEL_MAX) {
		ol_txrx_dbg("invalid throttle level set %d, ignoring", level);
		return;
	}

	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	ol_txrx_info("Setting throttle level %d", level);

	/* Set the current throttle level */
	pdev->tx_throttle.current_throttle_level = (enum throttle_level)level;
	pdev->tx_throttle.prev_outstanding_num = 0;

	ol_tx_set_throttle_phase_time(pdev, level, &ms);

	if (ms)
		qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
}
246
/**
 * ol_tx_throttle_init_period() - configure the throttle duty-cycle table
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the data physical device object
 * @period: full duty-cycle period in ms
 * @dutycycle_level: per-level percentage of the period spent throttled;
 *	the OFF time for level i is dutycycle_level[i] percent of @period
 *	and the ON time is the remainder
 */
void ol_tx_throttle_init_period(struct cdp_soc_t *soc_hdl,
				uint8_t pdev_id, int period,
				uint8_t *dutycycle_level)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev;
	int i;

	if (qdf_unlikely(!soc)) {
		ol_txrx_err("soc is NULL");
		return;
	}

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	/* guard the table pointer like the other parameters; it is
	 * dereferenced for every throttle level below
	 */
	if (qdf_unlikely(!dutycycle_level)) {
		ol_txrx_err("dutycycle_level is NULL");
		return;
	}

	/* Set the current throttle level */
	pdev->tx_throttle.throttle_period_ms = period;

	ol_txrx_dbg("level  OFF  ON");
	for (i = 0; i < THROTTLE_LEVEL_MAX; i++) {
		pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_ON] =
			pdev->tx_throttle.throttle_period_ms -
			((dutycycle_level[i] *
			  pdev->tx_throttle.throttle_period_ms) / 100);
		pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_OFF] =
			pdev->tx_throttle.throttle_period_ms -
			pdev->tx_throttle.throttle_time_ms[
				i][THROTTLE_PHASE_ON];
		ol_txrx_dbg("%d      %d    %d", i,
			    pdev->tx_throttle.
			    throttle_time_ms[i][THROTTLE_PHASE_OFF],
			    pdev->tx_throttle.
			    throttle_time_ms[i][THROTTLE_PHASE_ON]);
	}
}
286
ol_tx_throttle_init(struct ol_txrx_pdev_t * pdev)287 void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev)
288 {
289 uint32_t throttle_period;
290 uint8_t dutycycle_level[THROTTLE_LEVEL_MAX];
291 int i;
292
293 pdev->tx_throttle.current_throttle_level = THROTTLE_LEVEL_0;
294 pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
295 qdf_spinlock_create(&pdev->tx_throttle.mutex);
296
297 throttle_period = ol_cfg_throttle_period_ms(pdev->ctrl_pdev);
298
299 for (i = 0; i < THROTTLE_LEVEL_MAX; i++)
300 dutycycle_level[i] =
301 ol_cfg_throttle_duty_cycle_level(pdev->ctrl_pdev, i);
302
303 ol_tx_throttle_init_period(cds_get_context(QDF_MODULE_ID_SOC), pdev->id,
304 throttle_period, &dutycycle_level[0]);
305
306 qdf_timer_init(pdev->osdev, &pdev->tx_throttle.phase_timer,
307 ol_tx_pdev_throttle_phase_timer, pdev,
308 QDF_TIMER_TYPE_SW);
309
310 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
311 qdf_timer_init(pdev->osdev, &pdev->tx_throttle.tx_timer,
312 ol_tx_pdev_throttle_tx_timer, pdev, QDF_TIMER_TYPE_SW);
313 #endif
314
315 pdev->tx_throttle.tx_threshold = THROTTLE_TX_THRESHOLD;
316 }
317
318 void
ol_txrx_throttle_pause(ol_txrx_pdev_handle pdev)319 ol_txrx_throttle_pause(ol_txrx_pdev_handle pdev)
320 {
321 qdf_spin_lock_bh(&pdev->tx_throttle.mutex);
322
323 if (pdev->tx_throttle.is_paused) {
324 qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
325 return;
326 }
327
328 pdev->tx_throttle.is_paused = true;
329 qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
330 ol_txrx_pdev_pause(pdev, 0);
331 }
332
333 void
ol_txrx_throttle_unpause(ol_txrx_pdev_handle pdev)334 ol_txrx_throttle_unpause(ol_txrx_pdev_handle pdev)
335 {
336 qdf_spin_lock_bh(&pdev->tx_throttle.mutex);
337
338 if (!pdev->tx_throttle.is_paused) {
339 qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
340 return;
341 }
342
343 pdev->tx_throttle.is_paused = false;
344 qdf_spin_unlock_bh(&pdev->tx_throttle.mutex);
345 ol_txrx_pdev_unpause(pdev, 0);
346 }
347