1 /*
2  * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: wlan_dp_bus_bandwidth.c
21  *
22  * Bus Bandwidth Manager implementation
23  */
24 
25 #include "wlan_dp_bus_bandwidth.h"
26 #include "wlan_dp_main.h"
27 #include <wlan_objmgr_psoc_obj_i.h>
28 #include "pld_common.h"
29 #include "cds_api.h"
30 #include <wlan_nlink_common.h>
31 #include "wlan_ipa_ucfg_api.h"
32 #include "wlan_dp_rx_thread.h"
33 #include "wlan_mlme_vdev_mgr_interface.h"
34 #include "hif.h"
35 #include "qdf_trace.h"
36 #include <wlan_cm_api.h>
37 #include <qdf_threads.h>
38 #include <qdf_net_stats.h>
39 #include "wlan_dp_periodic_sta_stats.h"
40 #include "wlan_mlme_api.h"
41 #include "wlan_dp_txrx.h"
42 #include "cdp_txrx_host_stats.h"
43 #include "wlan_cm_roam_api.h"
44 #include "hif_main.h"
45 
46 #ifdef FEATURE_BUS_BANDWIDTH_MGR
47 /*
48  * bus_bw_table_default: default table which provides bus
49  * bandwidth level corresponding to a given connection mode and throughput
50  * level.
51  */
static bus_bw_table_type bus_bw_table_default = {
	/*
	 * Rows are indexed by connection dot11 mode, columns by
	 * enum tput_level (idle/none first, rising throughput after).
	 * 11B/11N/11AC/11AX/11BE rows scale the bus vote with the
	 * throughput level; the OFDM-legacy 11G/11A rows pin every
	 * non-idle level to BUS_BW_LEVEL_5 (the low-latency vote).
	 */
	[QCA_WLAN_802_11_MODE_11B] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
				      BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
				      BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
				      BUS_BW_LEVEL_7, BUS_BW_LEVEL_8,
				      BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11G] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_5,
				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
				      BUS_BW_LEVEL_5},
	[QCA_WLAN_802_11_MODE_11A] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_5,
				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
				      BUS_BW_LEVEL_5},
	[QCA_WLAN_802_11_MODE_11N] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
				      BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
				      BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
				      BUS_BW_LEVEL_7, BUS_BW_LEVEL_8,
				      BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11AC] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
				       BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
				       BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
				       BUS_BW_LEVEL_7, BUS_BW_LEVEL_8,
				       BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11AX] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
				       BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
				       BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
				       BUS_BW_LEVEL_7, BUS_BW_LEVEL_8,
				       BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11BE] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
				       BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
				       BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
				       BUS_BW_LEVEL_7, BUS_BW_LEVEL_8,
				       BUS_BW_LEVEL_9},
};
89 
90 /*
91  * bus_bw_table_low_latency: table which provides bus
92  * bandwidth level corresponding to a given connection mode and throughput
93  * level in low latency setting.
94  */
static bus_bw_table_type bus_bw_table_low_latency = {
	/*
	 * Low-latency variant: regardless of connection mode, every
	 * non-idle throughput level votes the maximum bus bandwidth
	 * (BUS_BW_LEVEL_9); only the first column (idle) stays NONE.
	 */
	[QCA_WLAN_802_11_MODE_11B] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11G] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11A] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11N] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				      BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11AC] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11AX] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9},
	[QCA_WLAN_802_11_MODE_11BE] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9, BUS_BW_LEVEL_9,
				       BUS_BW_LEVEL_9},
};
132 
133 /**
134  * bbm_convert_to_pld_bus_lvl() - Convert from internal bus vote level to
135  *  PLD bus vote level
136  * @vote_lvl: internal bus bw vote level
137  *
138  * Returns: PLD bus vote level
139  */
140 static enum pld_bus_width_type
bbm_convert_to_pld_bus_lvl(enum bus_bw_level vote_lvl)141 bbm_convert_to_pld_bus_lvl(enum bus_bw_level vote_lvl)
142 {
143 	switch (vote_lvl) {
144 	case BUS_BW_LEVEL_1:
145 		return PLD_BUS_WIDTH_IDLE;
146 	case BUS_BW_LEVEL_2:
147 		return PLD_BUS_WIDTH_LOW;
148 	case BUS_BW_LEVEL_3:
149 		return PLD_BUS_WIDTH_MEDIUM;
150 	case BUS_BW_LEVEL_4:
151 		return PLD_BUS_WIDTH_HIGH;
152 	case BUS_BW_LEVEL_5:
153 		return PLD_BUS_WIDTH_LOW_LATENCY;
154 	case BUS_BW_LEVEL_6:
155 		return PLD_BUS_WIDTH_MID_HIGH;
156 	case BUS_BW_LEVEL_7:
157 		return PLD_BUS_WIDTH_VERY_HIGH;
158 	case BUS_BW_LEVEL_8:
159 		return PLD_BUS_WIDTH_ULTRA_HIGH;
160 	case BUS_BW_LEVEL_9:
161 		return PLD_BUS_WIDTH_MAX;
162 	case BUS_BW_LEVEL_NONE:
163 	default:
164 		return PLD_BUS_WIDTH_NONE;
165 	}
166 }
167 
168 /**
169  * bbm_get_bus_bw_level_vote() - Select bus bw vote level per interface based
170  *  on connection mode and throughput level
171  * @dp_intf: DP Interface, caller assure that interface is valid.
172  * @tput_level: throughput level
173  *
174  * Returns: Bus bw level
175  */
static enum bus_bw_level
bbm_get_bus_bw_level_vote(struct wlan_dp_intf *dp_intf,
			  enum tput_level tput_level)
{
	enum qca_wlan_802_11_mode i;
	enum qca_wlan_802_11_mode dot11_mode;
	enum bus_bw_level vote_lvl = BUS_BW_LEVEL_NONE;
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
	struct bbm_context *bbm_ctx = dp_ctx->bbm_ctx;
	bus_bw_table_type *lkp_table = bbm_ctx->curr_bus_bw_lookup_table;
	/* Per-dot11-mode connected-client counts, filled by the SAP/GO
	 * callback below; only entries with a non-zero count are read.
	 */
	uint16_t client_count[QCA_WLAN_802_11_MODE_INVALID];
	struct wlan_dp_psoc_callbacks *cb_obj = &dp_ctx->dp_ops;
	hdd_cb_handle ctx = cb_obj->callback_ctx;

	/* Guard the lookup-table column index */
	if (tput_level >= TPUT_LEVEL_MAX) {
		dp_err("invalid tput level %d", tput_level);
		return  BUS_BW_LEVEL_NONE;
	}

	/* The vote depends on the role of this interface; any callback
	 * failure below falls through to the default NONE vote.
	 */
	switch (dp_intf->device_mode) {
	case QDF_STA_MODE:
	case QDF_P2P_CLIENT_MODE:
		if (!cb_obj->wlan_dp_sta_get_dot11mode(ctx,
						       dp_intf->dev,
						       &dot11_mode))
			break;

		if (dot11_mode >= QCA_WLAN_802_11_MODE_INVALID)
			break;

		/* Direct lookup by the STA link's dot11 mode */
		return (*lkp_table)[dot11_mode][tput_level];
	case QDF_SAP_MODE:
	case QDF_P2P_GO_MODE:
		if (!cb_obj->wlan_dp_get_ap_client_count(ctx,
							 dp_intf->dev,
							 client_count))
			break;

		/* Vote the max level across all dot11 modes that have at
		 * least one connected client.
		 */
		for (i = QCA_WLAN_802_11_MODE_11B;
		     i < QCA_WLAN_802_11_MODE_INVALID; i++) {
			if (client_count[i] &&
			    (*lkp_table)[i][tput_level] > vote_lvl)
				vote_lvl = (*lkp_table)[i][tput_level];
		}

		return vote_lvl;
	case QDF_NDI_MODE:
		if (!cb_obj->wlan_dp_sta_ndi_connected(ctx,
						       dp_intf->dev))
			break;

		/*
		 * If the tput levels are between mid to high range, then
		 * apply next SNOC voting level BUS_BW_LEVEL_5 which maps
		 * to PLD_BUS_WIDTH_LOW_LATENCY.
		 *
		 * NDI dot11mode is currently hardcoded to 11AC in driver and
		 * since the bus bw levels in table do not differ between 11AC
		 * and 11AX, using max supported mode instead. Dot11mode of the
		 * peers are not saved in driver and legacy modes are not
		 * supported in NAN.
		 */
		if (tput_level <= TPUT_LEVEL_HIGH)
			return BUS_BW_LEVEL_5;
		else
			return (*lkp_table)[QCA_WLAN_802_11_MODE_11AX][tput_level];
	default:
		/* Other device modes do not contribute a tput vote */
		break;
	}

	return vote_lvl;
}
248 
249 /**
250  * bbm_apply_tput_policy() - Apply tput BBM policy by considering
251  *  throughput level and connection modes across adapters
252  * @dp_ctx: DP context
253  * @tput_level: throughput level
254  *
255  * Returns: None
256  */
static void
bbm_apply_tput_policy(struct wlan_dp_psoc_context *dp_ctx,
		      enum tput_level tput_level)
{
	struct wlan_dp_intf *dp_intf;
	struct wlan_dp_intf *dp_intf_next;
	struct wlan_objmgr_psoc *psoc;
	enum bus_bw_level next_vote = BUS_BW_LEVEL_NONE;
	enum bus_bw_level tmp_vote;
	struct bbm_context *bbm_ctx = dp_ctx->bbm_ctx;
	hdd_cb_handle ctx = dp_ctx->dp_ops.callback_ctx;

	if (tput_level == TPUT_LEVEL_NONE) {
		/*
		 * This is to handle the scenario where bus bw periodic work
		 * is force cancelled
		 */
		/*
		 * NOTE(review): the tput vote is cleared only while some
		 * adapter is still connected; with no adapter connected the
		 * previous vote is left in place — confirm this asymmetry
		 * is intentional.
		 */
		if (dp_ctx->dp_ops.dp_any_adapter_connected(ctx))
			bbm_ctx->per_policy_vote[BBM_TPUT_POLICY] = next_vote;
		return;
	}

	psoc = dp_ctx->psoc;
	if (!psoc) {
		dp_err("psoc is NULL");
		return;
	}

	/* Aggregate: highest per-interface vote across all active links */
	dp_for_each_intf_held_safe(dp_ctx, dp_intf, dp_intf_next) {
		if (dp_intf->num_links == 0)
			continue;

		tmp_vote = bbm_get_bus_bw_level_vote(dp_intf, tput_level);
		if (tmp_vote > next_vote)
			next_vote = tmp_vote;
	}

	bbm_ctx->per_policy_vote[BBM_TPUT_POLICY] = next_vote;
}
296 
297 /**
298  * bbm_apply_driver_mode_policy() - Apply driver mode BBM policy
299  * @bbm_ctx: bus bw mgr context
300  * @driver_mode: global driver mode
301  *
302  * Returns: None
303  */
304 static void
bbm_apply_driver_mode_policy(struct bbm_context * bbm_ctx,enum QDF_GLOBAL_MODE driver_mode)305 bbm_apply_driver_mode_policy(struct bbm_context *bbm_ctx,
306 			     enum QDF_GLOBAL_MODE driver_mode)
307 {
308 	switch (driver_mode) {
309 	case QDF_GLOBAL_MONITOR_MODE:
310 	case QDF_GLOBAL_FTM_MODE:
311 		bbm_ctx->per_policy_vote[BBM_DRIVER_MODE_POLICY] =
312 							    BUS_BW_LEVEL_7;
313 		return;
314 	default:
315 		bbm_ctx->per_policy_vote[BBM_DRIVER_MODE_POLICY] =
316 							 BUS_BW_LEVEL_NONE;
317 		return;
318 	}
319 }
320 
321 /**
322  * bbm_apply_non_persistent_policy() - Apply non persistent policy and set
323  *  the bus bandwidth
324  * @dp_ctx: DP context
325  * @flag: flag
326  *
327  * Returns: None
328  */
329 static void
bbm_apply_non_persistent_policy(struct wlan_dp_psoc_context * dp_ctx,enum bbm_non_per_flag flag)330 bbm_apply_non_persistent_policy(struct wlan_dp_psoc_context *dp_ctx,
331 				enum bbm_non_per_flag flag)
332 {
333 	hdd_cb_handle ctx = dp_ctx->dp_ops.callback_ctx;
334 
335 	switch (flag) {
336 	case BBM_APPS_RESUME:
337 		if (dp_ctx->dp_ops.dp_any_adapter_connected(ctx)) {
338 			dp_ctx->bbm_ctx->curr_vote_level = BUS_BW_LEVEL_RESUME;
339 			pld_request_bus_bandwidth(dp_ctx->qdf_dev->dev,
340 			       bbm_convert_to_pld_bus_lvl(BUS_BW_LEVEL_RESUME));
341 		} else {
342 			dp_ctx->bbm_ctx->curr_vote_level = BUS_BW_LEVEL_NONE;
343 			pld_request_bus_bandwidth(dp_ctx->qdf_dev->dev,
344 				 bbm_convert_to_pld_bus_lvl(BUS_BW_LEVEL_NONE));
345 		}
346 		return;
347 	case BBM_APPS_SUSPEND:
348 		dp_ctx->bbm_ctx->curr_vote_level = BUS_BW_LEVEL_NONE;
349 		pld_request_bus_bandwidth(dp_ctx->qdf_dev->dev,
350 			    bbm_convert_to_pld_bus_lvl(BUS_BW_LEVEL_NONE));
351 		return;
352 	default:
353 		dp_info("flag %d not handled in res/sus BBM policy", flag);
354 		return;
355 	}
356 }
357 
358 /**
359  * bbm_apply_wlm_policy() - Apply WLM based BBM policy by selecting
360  *  lookup tables based on the latency level
361  * @bbm_ctx: Bus BW mgr context
362  * @wlm_level: WLM latency level
363  *
364  * Returns: None
365  */
366 static void
bbm_apply_wlm_policy(struct bbm_context * bbm_ctx,enum wlm_ll_level wlm_level)367 bbm_apply_wlm_policy(struct bbm_context *bbm_ctx, enum wlm_ll_level wlm_level)
368 {
369 	switch (wlm_level) {
370 	case WLM_LL_NORMAL:
371 		bbm_ctx->curr_bus_bw_lookup_table = &bus_bw_table_default;
372 		break;
373 	case WLM_LL_LOW:
374 		bbm_ctx->curr_bus_bw_lookup_table = &bus_bw_table_low_latency;
375 		break;
376 	default:
377 		dp_info("wlm level %d not handled in BBM WLM policy",
378 			wlm_level);
379 		break;
380 	}
381 }
382 
383 /**
384  * bbm_apply_user_policy() - Apply user specified bus voting
385  *  level
386  * @bbm_ctx: Bus BW mgr context
387  * @set: set or reset flag
388  * @user_level: user bus vote level
389  *
390  * Returns: qdf status
391  */
392 static QDF_STATUS
bbm_apply_user_policy(struct bbm_context * bbm_ctx,bool set,enum bus_bw_level user_level)393 bbm_apply_user_policy(struct bbm_context *bbm_ctx, bool set,
394 		      enum bus_bw_level user_level)
395 {
396 	if (user_level >= BUS_BW_LEVEL_MAX) {
397 		dp_err("Invalid user vote level %d", user_level);
398 		return QDF_STATUS_E_FAILURE;
399 	}
400 
401 	if (set)
402 		bbm_ctx->per_policy_vote[BBM_USER_POLICY] = user_level;
403 	else
404 		bbm_ctx->per_policy_vote[BBM_USER_POLICY] = BUS_BW_LEVEL_NONE;
405 
406 	return QDF_STATUS_SUCCESS;
407 }
408 
409 /**
410  * bbm_request_bus_bandwidth() - Set bus bandwidth level
411  * @dp_ctx: DP context
412  *
413  * Returns: None
414  */
415 static void
bbm_request_bus_bandwidth(struct wlan_dp_psoc_context * dp_ctx)416 bbm_request_bus_bandwidth(struct wlan_dp_psoc_context *dp_ctx)
417 {
418 	enum bbm_policy i;
419 	enum bus_bw_level next_vote = BUS_BW_LEVEL_NONE;
420 	enum pld_bus_width_type pld_vote;
421 	struct bbm_context *bbm_ctx = dp_ctx->bbm_ctx;
422 
423 	for (i = BBM_DRIVER_MODE_POLICY; i < BBM_MAX_POLICY; i++) {
424 		if (bbm_ctx->per_policy_vote[i] > next_vote)
425 			next_vote = bbm_ctx->per_policy_vote[i];
426 	}
427 
428 	if (next_vote != bbm_ctx->curr_vote_level) {
429 		pld_vote = bbm_convert_to_pld_bus_lvl(next_vote);
430 		dp_info("Bus bandwidth vote level change from %d to %d pld_vote: %d",
431 			bbm_ctx->curr_vote_level, next_vote, pld_vote);
432 		bbm_ctx->curr_vote_level = next_vote;
433 		pld_request_bus_bandwidth(dp_ctx->qdf_dev->dev, pld_vote);
434 	}
435 }
436 
/**
 * dp_bbm_apply_independent_policy() - Apply one BBM policy and re-vote
 *  the aggregate bus bandwidth if that policy affects it
 * @psoc: psoc object
 * @params: policy identifier and its policy-specific parameters
 *
 * Serialized against concurrent policy updates with bbm_lock. Policies
 * that only record a per-policy vote (TPUT, DRIVER_MODE, USER) are
 * followed by a bbm_request_bus_bandwidth() re-vote; NON_PERSISTENT
 * applies the bus vote directly and SELECT_TABLE only swaps the lookup
 * table, so both skip the re-vote.
 *
 * Return: None
 */
void dp_bbm_apply_independent_policy(struct wlan_objmgr_psoc *psoc,
				     struct bbm_params *params)
{
	struct wlan_dp_psoc_context *dp_ctx;
	struct bbm_context *bbm_ctx;
	QDF_STATUS status;

	dp_ctx = dp_psoc_get_priv(psoc);
	if (!dp_ctx || !params)
		return;

	bbm_ctx = dp_ctx->bbm_ctx;

	qdf_mutex_acquire(&bbm_ctx->bbm_lock);

	switch (params->policy) {
	case BBM_TPUT_POLICY:
		bbm_apply_tput_policy(dp_ctx, params->policy_info.tput_level);
		break;
	case BBM_NON_PERSISTENT_POLICY:
		/* Applies the bus vote itself; no aggregate re-vote needed */
		bbm_apply_non_persistent_policy(dp_ctx,
						params->policy_info.flag);
		goto done;
	case BBM_DRIVER_MODE_POLICY:
		bbm_apply_driver_mode_policy(bbm_ctx,
					     params->policy_info.driver_mode);
		break;
	case BBM_SELECT_TABLE_POLICY:
		/* Only switches the lookup table; vote unchanged here */
		bbm_apply_wlm_policy(bbm_ctx, params->policy_info.wlm_level);
		goto done;
	case BBM_USER_POLICY:
		/*
		 * This policy is not used currently.
		 */
		status = bbm_apply_user_policy(bbm_ctx,
					       params->policy_info.usr.set,
					       params->policy_info.usr.user_level);
		if (QDF_IS_STATUS_ERROR(status))
			goto done;
		break;
	default:
		dp_info("BBM policy %d not handled", params->policy);
		goto done;
	}

	bbm_request_bus_bandwidth(dp_ctx);

done:
	qdf_mutex_release(&bbm_ctx->bbm_lock);
}
487 
dp_bbm_context_init(struct wlan_objmgr_psoc * psoc)488 int dp_bbm_context_init(struct wlan_objmgr_psoc *psoc)
489 {
490 	struct wlan_dp_psoc_context *dp_ctx;
491 	struct bbm_context *bbm_ctx;
492 	QDF_STATUS status;
493 
494 	dp_ctx = dp_psoc_get_priv(psoc);
495 	if (!dp_ctx)
496 		return -EINVAL;
497 	bbm_ctx = qdf_mem_malloc(sizeof(*bbm_ctx));
498 	if (!bbm_ctx)
499 		return -ENOMEM;
500 
501 	bbm_ctx->curr_bus_bw_lookup_table = &bus_bw_table_default;
502 
503 	status = qdf_mutex_create(&bbm_ctx->bbm_lock);
504 	if (QDF_IS_STATUS_ERROR(status))
505 		goto free_ctx;
506 
507 	dp_ctx->bbm_ctx = bbm_ctx;
508 
509 	return 0;
510 
511 free_ctx:
512 	qdf_mem_free(bbm_ctx);
513 
514 	return qdf_status_to_os_return(status);
515 }
516 
dp_bbm_context_deinit(struct wlan_objmgr_psoc * psoc)517 void dp_bbm_context_deinit(struct wlan_objmgr_psoc *psoc)
518 {
519 	struct wlan_dp_psoc_context *dp_ctx;
520 	struct bbm_context *bbm_ctx;
521 
522 	dp_ctx = dp_psoc_get_priv(psoc);
523 	if (!dp_ctx)
524 		return;
525 	bbm_ctx = dp_ctx->bbm_ctx;
526 	if (!bbm_ctx)
527 		return;
528 
529 	dp_ctx->bbm_ctx = NULL;
530 	qdf_mutex_destroy(&bbm_ctx->bbm_lock);
531 
532 	qdf_mem_free(bbm_ctx);
533 }
534 #endif /* FEATURE_BUS_BANDWIDTH_MGR */
535 #ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
536 #ifdef FEATURE_RUNTIME_PM
dp_rtpm_tput_policy_init(struct wlan_objmgr_psoc * psoc)537 void dp_rtpm_tput_policy_init(struct wlan_objmgr_psoc *psoc)
538 {
539 	struct wlan_dp_psoc_context *dp_ctx;
540 	struct dp_rtpm_tput_policy_context *ctx;
541 
542 	dp_ctx = dp_psoc_get_priv(psoc);
543 	if (!dp_ctx) {
544 		dp_err("Unable to get DP context");
545 		return;
546 	}
547 
548 	ctx = &dp_ctx->rtpm_tput_policy_ctx;
549 	qdf_runtime_lock_init(&ctx->rtpm_lock);
550 	ctx->curr_state = DP_RTPM_TPUT_POLICY_STATE_REQUIRED;
551 	qdf_atomic_init(&ctx->high_tput_vote);
552 }
553 
dp_rtpm_tput_policy_deinit(struct wlan_objmgr_psoc * psoc)554 void dp_rtpm_tput_policy_deinit(struct wlan_objmgr_psoc *psoc)
555 {
556 	struct wlan_dp_psoc_context *dp_ctx;
557 	struct dp_rtpm_tput_policy_context *ctx;
558 
559 	dp_ctx = dp_psoc_get_priv(psoc);
560 	if (!dp_ctx) {
561 		dp_err("Unable to get DP context");
562 		return;
563 	}
564 
565 	ctx = &dp_ctx->rtpm_tput_policy_ctx;
566 	ctx->curr_state = DP_RTPM_TPUT_POLICY_STATE_INVALID;
567 	qdf_runtime_lock_deinit(&ctx->rtpm_lock);
568 }
569 
570 /**
571  * dp_rtpm_tput_policy_prevent() - prevent a runtime bus suspend
572  * @dp_ctx: DP handle
573  *
574  * return: None
575  */
dp_rtpm_tput_policy_prevent(struct wlan_dp_psoc_context * dp_ctx)576 static void dp_rtpm_tput_policy_prevent(struct wlan_dp_psoc_context *dp_ctx)
577 {
578 	struct dp_rtpm_tput_policy_context *ctx;
579 
580 	ctx = &dp_ctx->rtpm_tput_policy_ctx;
581 	qdf_runtime_pm_prevent_suspend(&ctx->rtpm_lock);
582 }
583 
584 /**
585  * dp_rtpm_tput_policy_allow() - allow a runtime bus suspend
586  * @dp_ctx: DP handle
587  *
588  * return: None
589  */
dp_rtpm_tput_policy_allow(struct wlan_dp_psoc_context * dp_ctx)590 static void dp_rtpm_tput_policy_allow(struct wlan_dp_psoc_context *dp_ctx)
591 {
592 	struct dp_rtpm_tput_policy_context *ctx;
593 
594 	ctx = &dp_ctx->rtpm_tput_policy_ctx;
595 	qdf_runtime_pm_allow_suspend(&ctx->rtpm_lock);
596 }
597 
598 #define DP_RTPM_POLICY_HIGH_TPUT_THRESH TPUT_LEVEL_MEDIUM
599 
/**
 * dp_rtpm_tput_policy_apply() - Block or allow runtime bus suspend based
 *  on the current throughput level
 * @dp_ctx: DP context
 * @tput_level: current throughput level
 *
 * At or above DP_RTPM_POLICY_HIGH_TPUT_THRESH the policy prevents runtime
 * suspend (state NOT_REQUIRED) and below it allows suspend again (state
 * REQUIRED). Transitions are edge-triggered: nothing is done when the
 * computed state matches the current one, so the prevent/allow calls and
 * the high_tput_vote counter stay balanced (expected to read 0 or 1).
 *
 * Return: None
 */
void dp_rtpm_tput_policy_apply(struct wlan_dp_psoc_context *dp_ctx,
			       enum tput_level tput_level)
{
	int vote;
	enum dp_rtpm_tput_policy_state temp_state;
	struct dp_rtpm_tput_policy_context *ctx;
	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (qdf_unlikely(!soc))
		return;

	ctx = &dp_ctx->rtpm_tput_policy_ctx;

	if (tput_level >= DP_RTPM_POLICY_HIGH_TPUT_THRESH)
		temp_state = DP_RTPM_TPUT_POLICY_STATE_NOT_REQUIRED;
	else
		temp_state = DP_RTPM_TPUT_POLICY_STATE_REQUIRED;

	/* No state change: keep the existing prevent/allow balance */
	if (ctx->curr_state == temp_state)
		return;

	if (temp_state == DP_RTPM_TPUT_POLICY_STATE_REQUIRED) {
		/* Tput dropped: let runtime suspend happen again */
		cdp_set_rtpm_tput_policy_requirement(soc, false);
		qdf_atomic_dec(&ctx->high_tput_vote);
		dp_rtpm_tput_policy_allow(dp_ctx);
	} else {
		/* Tput rose: keep the bus awake while traffic is high */
		cdp_set_rtpm_tput_policy_requirement(soc, true);
		qdf_atomic_inc(&ctx->high_tput_vote);
		dp_rtpm_tput_policy_prevent(dp_ctx);
	}

	ctx->curr_state = temp_state;
	vote = qdf_atomic_read(&ctx->high_tput_vote);

	/* Sanity: edge-triggered transitions must keep the vote at 0/1 */
	if (vote < 0 || vote > 1) {
		dp_alert_rl("Incorrect vote!");
		QDF_BUG(0);
	}
}
639 
dp_rtpm_tput_policy_get_vote(struct wlan_dp_psoc_context * dp_ctx)640 int dp_rtpm_tput_policy_get_vote(struct wlan_dp_psoc_context *dp_ctx)
641 {
642 	struct dp_rtpm_tput_policy_context *ctx;
643 
644 	ctx = &dp_ctx->rtpm_tput_policy_ctx;
645 	return qdf_atomic_read(&ctx->high_tput_vote);
646 }
647 #endif /* FEATURE_RUNTIME_PM */
648 
dp_reset_tcp_delack(struct wlan_objmgr_psoc * psoc)649 void dp_reset_tcp_delack(struct wlan_objmgr_psoc *psoc)
650 {
651 	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
652 
653 	enum wlan_tp_level next_level = WLAN_SVC_TP_LOW;
654 	struct wlan_rx_tp_data rx_tp_data = {0};
655 
656 	if (!dp_ctx) {
657 		dp_err("Unable to get DP context");
658 		return;
659 	}
660 
661 	if (!dp_ctx->en_tcp_delack_no_lro)
662 		return;
663 
664 	rx_tp_data.rx_tp_flags |= TCP_DEL_ACK_IND;
665 	rx_tp_data.level = next_level;
666 	dp_ctx->rx_high_ind_cnt = 0;
667 	wlan_dp_update_tcp_rx_param(dp_ctx, &rx_tp_data);
668 }
669 
670 /**
671  * dp_reset_tcp_adv_win_scale() - Reset TCP advance window scaling
672  * value to default
673  * @dp_ctx: pointer to DP context (Should not be NULL)
674  *
675  * Function used to reset TCP advance window scaling
676  * value to its default value
677  *
678  * Return: None
679  */
dp_reset_tcp_adv_win_scale(struct wlan_dp_psoc_context * dp_ctx)680 static void dp_reset_tcp_adv_win_scale(struct wlan_dp_psoc_context *dp_ctx)
681 {
682 	enum wlan_tp_level next_level = WLAN_SVC_TP_NONE;
683 	struct wlan_rx_tp_data rx_tp_data = {0};
684 
685 	if (!dp_ctx->dp_cfg.enable_tcp_adv_win_scale)
686 		return;
687 
688 	rx_tp_data.rx_tp_flags |= TCP_ADV_WIN_SCL;
689 	rx_tp_data.level = next_level;
690 	dp_ctx->cur_rx_level = WLAN_SVC_TP_NONE;
691 	wlan_dp_update_tcp_rx_param(dp_ctx, &rx_tp_data);
692 }
693 
wlan_dp_update_tcp_rx_param(struct wlan_dp_psoc_context * dp_ctx,struct wlan_rx_tp_data * data)694 void wlan_dp_update_tcp_rx_param(struct wlan_dp_psoc_context *dp_ctx,
695 				 struct wlan_rx_tp_data *data)
696 {
697 	struct wlan_dp_psoc_callbacks *dp_ops = &dp_ctx->dp_ops;
698 
699 	if (!dp_ctx) {
700 		dp_err("psoc is null");
701 		return;
702 	}
703 
704 	if (!data) {
705 		dp_err("Data is null");
706 		return;
707 	}
708 
709 	if (dp_ctx->dp_cfg.enable_tcp_param_update)
710 		dp_ops->osif_dp_send_tcp_param_update_event(dp_ctx->psoc,
711 							    (union wlan_tp_data *)data,
712 							    1);
713 	else
714 		dp_ops->dp_send_svc_nlink_msg(cds_get_radio_index(),
715 					      WLAN_SVC_WLAN_TP_IND,
716 					      (void *)data,
717 					      sizeof(struct wlan_rx_tp_data));
718 }
719 
720 /**
721  * wlan_dp_update_tcp_tx_param() - update TCP param in Tx dir
722  * @dp_ctx: Pointer to DP context
723  * @data: Parameters to update
724  *
725  * Return: None
726  */
wlan_dp_update_tcp_tx_param(struct wlan_dp_psoc_context * dp_ctx,struct wlan_tx_tp_data * data)727 static void wlan_dp_update_tcp_tx_param(struct wlan_dp_psoc_context *dp_ctx,
728 					struct wlan_tx_tp_data *data)
729 {
730 	enum wlan_tp_level next_tx_level;
731 	struct wlan_tx_tp_data *tx_tp_data;
732 	struct wlan_dp_psoc_callbacks *dp_ops = &dp_ctx->dp_ops;
733 
734 	if (!dp_ctx) {
735 		dp_err("psoc is null");
736 		return;
737 	}
738 
739 	if (!data) {
740 		dp_err("Data is null");
741 		return;
742 	}
743 
744 	tx_tp_data = (struct wlan_tx_tp_data *)data;
745 	next_tx_level = tx_tp_data->level;
746 
747 	if (dp_ctx->dp_cfg.enable_tcp_param_update)
748 		dp_ops->osif_dp_send_tcp_param_update_event(dp_ctx->psoc,
749 							    (union wlan_tp_data *)data,
750 							    0);
751 	else
752 		dp_ops->dp_send_svc_nlink_msg(cds_get_radio_index(),
753 					      WLAN_SVC_WLAN_TP_TX_IND,
754 					      &next_tx_level,
755 					      sizeof(next_tx_level));
756 }
757 
758 /**
759  * dp_low_tput_gro_flush_skip_handler() - adjust GRO flush for low tput
760  * @dp_ctx: dp_ctx object
761  * @next_vote_level: next bus bandwidth level
762  * @legacy_client: legacy connection mode active
763  *
764  * If bus bandwidth level is PLD_BUS_WIDTH_LOW consistently and hit
765  * the bus_low_cnt_threshold, set flag to skip GRO flush.
766  * If bus bandwidth keeps going to PLD_BUS_WIDTH_IDLE, perform a GRO
767  * flush to avoid TCP traffic stall
768  *
769  * Return: none
770  */
static inline void dp_low_tput_gro_flush_skip_handler(
			struct wlan_dp_psoc_context *dp_ctx,
			enum pld_bus_width_type next_vote_level,
			bool legacy_client)
{
	uint32_t threshold = dp_ctx->dp_cfg.bus_low_cnt_threshold;
	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);
	int i;

	/* Count consecutive LOW votes from legacy clients; once the
	 * threshold is hit, enable the GRO-flush-skip mode.
	 */
	if (next_vote_level == PLD_BUS_WIDTH_LOW && legacy_client) {
		if (++dp_ctx->bus_low_vote_cnt >= threshold)
			qdf_atomic_set(&dp_ctx->low_tput_gro_enable, 1);
	} else {
		/* Leaving LOW: if skip mode was active and Rx threads are
		 * in use, flush whatever GRO packets are still queued so
		 * TCP traffic does not stall.
		 * NOTE(review): soc is not NULL-checked before
		 * cdp_get_num_rx_contexts() on this path — confirm the
		 * SOC context is guaranteed here.
		 */
		if (qdf_atomic_read(&dp_ctx->low_tput_gro_enable) &&
		    dp_ctx->enable_dp_rx_threads) {
			/* flush pending rx pkts when LOW->IDLE */
			dp_info("flush queued GRO pkts");
			for (i = 0; i < cdp_get_num_rx_contexts(soc); i++) {
				dp_rx_gro_flush_ind(soc, i,
						    DP_RX_GRO_NORMAL_FLUSH);
			}
		}

		/* Reset the streak counter and leave skip mode */
		dp_ctx->bus_low_vote_cnt = 0;
		qdf_atomic_set(&dp_ctx->low_tput_gro_enable, 0);
	}
}
798 
799 #ifdef WDI3_STATS_UPDATE
800 /**
801  * dp_ipa_set_perf_level() - set IPA perf level
802  * @dp_ctx: handle to dp context
803  * @tx_pkts: transmit packet count
804  * @rx_pkts: receive packet count
805  * @ipa_tx_pkts: IPA transmit packet count
806  * @ipa_rx_pkts: IPA receive packet count
807  *
808  * Return: none
809  */
static inline
void dp_ipa_set_perf_level(struct wlan_dp_psoc_context *dp_ctx,
			   uint64_t *tx_pkts, uint64_t *rx_pkts,
			   uint32_t *ipa_tx_pkts, uint32_t *ipa_rx_pkts)
{
	/* No-op: with WDI3_STATS_UPDATE enabled the host does not adjust
	 * the IPA perf level from the bus-bandwidth work.
	 */
}
816 #else
dp_ipa_set_perf_level(struct wlan_dp_psoc_context * dp_ctx,uint64_t * tx_pkts,uint64_t * rx_pkts,uint32_t * ipa_tx_pkts,uint32_t * ipa_rx_pkts)817 static void dp_ipa_set_perf_level(struct wlan_dp_psoc_context *dp_ctx,
818 				  uint64_t *tx_pkts, uint64_t *rx_pkts,
819 				  uint32_t *ipa_tx_pkts, uint32_t *ipa_rx_pkts)
820 {
821 	if (ucfg_ipa_is_fw_wdi_activated(dp_ctx->pdev)) {
822 		ucfg_ipa_uc_stat_query(dp_ctx->pdev, ipa_tx_pkts,
823 				       ipa_rx_pkts);
824 		*tx_pkts += *ipa_tx_pkts;
825 		*rx_pkts += *ipa_rx_pkts;
826 
827 		ucfg_ipa_set_perf_level(dp_ctx->pdev, *tx_pkts, *rx_pkts);
828 		ucfg_ipa_uc_stat_request(dp_ctx->pdev, 2);
829 	}
830 }
831 #endif /* WDI3_STATS_UPDATE */
832 
833 #ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
834 /**
835  * dp_set_vdev_bundle_require_flag() - set vdev bundle require flag
836  * @vdev_id: vdev id
837  * @dp_ctx: handle to dp context
838  * @tx_bytes: Tx bytes
839  *
840  * Return: none
841  */
842 static inline
dp_set_vdev_bundle_require_flag(uint16_t vdev_id,struct wlan_dp_psoc_context * dp_ctx,uint64_t tx_bytes)843 void dp_set_vdev_bundle_require_flag(uint16_t vdev_id,
844 				     struct wlan_dp_psoc_context *dp_ctx,
845 				     uint64_t tx_bytes)
846 {
847 	struct wlan_dp_psoc_cfg *cfg = dp_ctx->dp_cfg;
848 
849 	cdp_vdev_set_bundle_require_flag(cds_get_context(QDF_MODULE_ID_SOC),
850 					 vdev_id, tx_bytes,
851 					 cfg->bus_bw_compute_interval,
852 					 cfg->pkt_bundle_threshold_high,
853 					 cfg->pkt_bundle_threshold_low);
854 }
855 #else
static inline
void dp_set_vdev_bundle_require_flag(uint16_t vdev_id,
				     struct wlan_dp_psoc_context *dp_ctx,
				     uint64_t tx_bytes)
{
	/* No-op when WLAN_SUPPORT_TXRX_HL_BUNDLE is not enabled */
}
862 #endif /* WLAN_SUPPORT_TXRX_HL_BUNDLE */
863 
864 #ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
865 /**
866  * dp_set_driver_del_ack_enable() - set driver delayed ack enabled flag
867  * @vdev_id: vdev id
868  * @dp_ctx: handle to dp context
869  * @rx_packets: receive packet count
870  *
871  * Return: none
872  */
873 static inline
dp_set_driver_del_ack_enable(uint16_t vdev_id,struct wlan_dp_psoc_context * dp_ctx,uint64_t rx_packets)874 void dp_set_driver_del_ack_enable(uint16_t vdev_id,
875 				  struct wlan_dp_psoc_context *dp_ctx,
876 				  uint64_t rx_packets)
877 {
878 	struct wlan_dp_psoc_cfg *cfg = dp_ctx->dp_cfg;
879 
880 	cdp_vdev_set_driver_del_ack_enable(cds_get_context(QDF_MODULE_ID_SOC),
881 					   vdev_id, rx_packets,
882 					   cfg->bus_bw_compute_interval,
883 					   cfg->del_ack_threshold_high,
884 					   cfg->del_ack_threshold_low);
885 }
886 #else
static inline
void dp_set_driver_del_ack_enable(uint16_t vdev_id,
				  struct wlan_dp_psoc_context *dp_ctx,
				  uint64_t rx_packets)
{
	/* No-op when QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK is not enabled */
}
893 #endif /* QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK */
894 
/* Delta between two unsigned counter samples, written to stay correct
 * when the counter wraps: (ULONG_MAX - _y) + _x + 1 == (_x - _y) modulo
 * ULONG_MAX + 1.
 */
#define DP_BW_GET_DIFF(_x, _y) ((unsigned long)((ULONG_MAX - (_y)) + (_x) + 1))
896 
897 #ifdef RX_PERFORMANCE
dp_is_current_high_throughput(struct wlan_dp_psoc_context * dp_ctx)898 bool dp_is_current_high_throughput(struct wlan_dp_psoc_context *dp_ctx)
899 {
900 	if (dp_ctx->cur_vote_level < PLD_BUS_WIDTH_MEDIUM)
901 		return false;
902 	else
903 		return true;
904 }
905 #endif /* RX_PERFORMANCE */
906 
907 /**
908  * wlan_dp_validate_context() - check the DP context
909  * @dp_ctx: Global DP context pointer
910  *
911  * Return: 0 if the context is valid. Error code otherwise
912  */
wlan_dp_validate_context(struct wlan_dp_psoc_context * dp_ctx)913 static int wlan_dp_validate_context(struct wlan_dp_psoc_context *dp_ctx)
914 {
915 	if (!dp_ctx) {
916 		dp_err("DP context is null");
917 		return -ENODEV;
918 	}
919 
920 	if (cds_is_driver_recovering()) {
921 		dp_info("Recovery in progress; state:0x%x",
922 			cds_get_driver_state());
923 		return -EAGAIN;
924 	}
925 
926 	if (cds_is_load_or_unload_in_progress()) {
927 		dp_info("Load/unload in progress; state:0x%x",
928 			cds_get_driver_state());
929 		return -EAGAIN;
930 	}
931 
932 	if (cds_is_driver_in_bad_state()) {
933 		dp_info("Driver in bad state; state:0x%x",
934 			cds_get_driver_state());
935 		return -EAGAIN;
936 	}
937 
938 	if (cds_is_fw_down()) {
939 		dp_info("FW is down; state:0x%x", cds_get_driver_state());
940 		return -EAGAIN;
941 	}
942 
943 	return 0;
944 }
945 
946 /**
947  * dp_tp_level_to_str() - Convert TPUT level to string
948  * @level: TPUT level
949  *
950  * Return: converted string
951  */
dp_tp_level_to_str(uint32_t level)952 static uint8_t *dp_tp_level_to_str(uint32_t level)
953 {
954 	switch (level) {
955 	/* initialize the wlan sub system */
956 	case WLAN_SVC_TP_NONE:
957 		return "NONE";
958 	case WLAN_SVC_TP_LOW:
959 		return "LOW";
960 	case WLAN_SVC_TP_MEDIUM:
961 		return "MED";
962 	case WLAN_SVC_TP_HIGH:
963 		return "HIGH";
964 	default:
965 		return "INVAL";
966 	}
967 }
968 
/**
 * wlan_dp_display_tx_rx_histogram() - dump the bus-bandwidth histogram
 * @psoc: psoc object whose DP private context holds the histogram
 *
 * Prints the bus-bandwidth configuration (compute interval, vote
 * thresholds, TCP delayed-ACK tuning) followed by every populated entry
 * of the circular tx/rx histogram. Entries whose qtime is still zero
 * (never written) are skipped.
 */
void wlan_dp_display_tx_rx_histogram(struct wlan_objmgr_psoc *psoc)
{
	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
	int i;

	if (!dp_ctx) {
		dp_err("Unable to get DP context");
		return;
	}

	dp_nofl_info("BW compute Interval: %d ms",
		     dp_ctx->dp_cfg.bus_bw_compute_interval);
	dp_nofl_info("BW TH - Very High: %d Mid High: %d High: %d Med: %d Low: %d DBS: %d",
		     dp_ctx->dp_cfg.bus_bw_very_high_threshold,
		     dp_ctx->dp_cfg.bus_bw_mid_high_threshold,
		     dp_ctx->dp_cfg.bus_bw_high_threshold,
		     dp_ctx->dp_cfg.bus_bw_medium_threshold,
		     dp_ctx->dp_cfg.bus_bw_low_threshold,
		     dp_ctx->dp_cfg.bus_bw_dbs_threshold);
	dp_nofl_info("Enable TCP DEL ACK: %d",
		     dp_ctx->en_tcp_delack_no_lro);
	dp_nofl_info("TCP DEL High TH: %d TCP DEL Low TH: %d",
		     dp_ctx->dp_cfg.tcp_delack_thres_high,
		     dp_ctx->dp_cfg.tcp_delack_thres_low);
	dp_nofl_info("TCP TX HIGH TP TH: %d (Use to set tcp_output_bytes_lim)",
		     dp_ctx->dp_cfg.tcp_tx_high_tput_thres);

	dp_nofl_info("Total entries: %d Current index: %d",
		     NUM_TX_RX_HISTOGRAM, dp_ctx->txrx_hist_idx);

	/* histogram buffer is optional; allocated by
	 * wlan_dp_init_tx_rx_histogram()
	 */
	if (dp_ctx->txrx_hist) {
		dp_nofl_info("[index][timestamp]: interval_rx, interval_tx, bus_bw_level, RX TP Level, TX TP Level, Rx:Tx pm_qos");

		for (i = 0; i < NUM_TX_RX_HISTOGRAM; i++) {
			struct tx_rx_histogram *hist;

			/* using dp_log to avoid printing function name */
			/* qtime of 0 marks a slot that was never recorded */
			if (dp_ctx->txrx_hist[i].qtime <= 0)
				continue;
			hist = &dp_ctx->txrx_hist[i];
			dp_nofl_info("[%3d][%15llu]: %6llu, %6llu, %s, %s, %s, %s:%s",
				     i, hist->qtime, hist->interval_rx,
				     hist->interval_tx,
				     pld_bus_width_type_to_str(hist->next_vote_level),
				     dp_tp_level_to_str(hist->next_rx_level),
				     dp_tp_level_to_str(hist->next_tx_level),
				     hist->is_rx_pm_qos_high ? "HIGH" : "LOW",
				     hist->is_tx_pm_qos_high ? "HIGH" : "LOW");
		}
	}
}
1020 
wlan_dp_clear_tx_rx_histogram(struct wlan_objmgr_psoc * psoc)1021 void wlan_dp_clear_tx_rx_histogram(struct wlan_objmgr_psoc *psoc)
1022 {
1023 	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
1024 
1025 	if (!dp_ctx) {
1026 		dp_err("Unable to get DP context");
1027 		return;
1028 	}
1029 
1030 	dp_ctx->txrx_hist_idx = 0;
1031 	if (dp_ctx->txrx_hist)
1032 		qdf_mem_zero(dp_ctx->txrx_hist,
1033 			     (sizeof(struct tx_rx_histogram) *
1034 			     NUM_TX_RX_HISTOGRAM));
1035 }
1036 
1037 /**
1038  * wlan_dp_init_tx_rx_histogram() - init tx/rx histogram stats
1039  * @dp_ctx: dp context
1040  *
1041  * Return: 0 for success or error code
1042  */
wlan_dp_init_tx_rx_histogram(struct wlan_dp_psoc_context * dp_ctx)1043 static int wlan_dp_init_tx_rx_histogram(struct wlan_dp_psoc_context *dp_ctx)
1044 {
1045 	dp_ctx->txrx_hist = qdf_mem_malloc(
1046 		(sizeof(struct tx_rx_histogram) * NUM_TX_RX_HISTOGRAM));
1047 	if (!dp_ctx->txrx_hist)
1048 		return -ENOMEM;
1049 
1050 	return 0;
1051 }
1052 
1053 /**
1054  * wlan_dp_deinit_tx_rx_histogram() - deinit tx/rx histogram stats
1055  * @dp_ctx: dp context
1056  *
1057  * Return: none
1058  */
wlan_dp_deinit_tx_rx_histogram(struct wlan_dp_psoc_context * dp_ctx)1059 static void wlan_dp_deinit_tx_rx_histogram(struct wlan_dp_psoc_context *dp_ctx)
1060 {
1061 	if (!dp_ctx || !dp_ctx->txrx_hist)
1062 		return;
1063 
1064 	qdf_mem_free(dp_ctx->txrx_hist);
1065 	dp_ctx->txrx_hist = NULL;
1066 }
1067 
/**
 * wlan_dp_display_txrx_stats() - Display tx/rx histogram stats
 * @dp_ctx: dp context
 *
 * For every DP interface that currently has at least one link, sums the
 * per-CPU tx/rx counters and logs per-CPU plus aggregate TX, multi-queue
 * and RX/GRO statistics.
 *
 * Return: none
 */
static void wlan_dp_display_txrx_stats(struct wlan_dp_psoc_context *dp_ctx)
{
	struct wlan_dp_intf *dp_intf = NULL, *next_dp_intf = NULL;
	struct dp_tx_rx_stats *stats;
	hdd_cb_handle ctx = dp_ctx->dp_ops.callback_ctx;
	int i = 0;
	uint32_t total_rx_pkt, total_rx_dropped,
		 total_rx_delv, total_rx_refused;
	uint32_t total_tx_pkt;
	uint32_t total_tx_dropped;
	uint32_t total_tx_orphaned;

	dp_for_each_intf_held_safe(dp_ctx, dp_intf, next_dp_intf) {
		/* reset the aggregates for each interface */
		total_rx_pkt = 0;
		total_rx_dropped = 0;
		total_rx_delv = 0;
		total_rx_refused = 0;
		total_tx_pkt = 0;
		total_tx_dropped = 0;
		total_tx_orphaned = 0;
		stats = &dp_intf->dp_stats.tx_rx_stats;

		/* skip interfaces with no active link */
		if (!dp_intf->num_links)
			continue;

		dp_info("dp_intf: " QDF_MAC_ADDR_FMT,
			QDF_MAC_ADDR_REF(dp_intf->mac_addr.bytes));
		/* accumulate per-CPU counters into interface totals */
		for (i = 0; i < NUM_CPUS; i++) {
			total_rx_pkt += stats->per_cpu[i].rx_packets;
			total_rx_dropped += stats->per_cpu[i].rx_dropped;
			total_rx_delv += stats->per_cpu[i].rx_delivered;
			total_rx_refused += stats->per_cpu[i].rx_refused;
			total_tx_pkt += stats->per_cpu[i].tx_called;
			total_tx_dropped += stats->per_cpu[i].tx_dropped;
			total_tx_orphaned += stats->per_cpu[i].tx_orphaned;
		}

		/* per-CPU TX lines, only for CPUs that transmitted */
		for (i = 0; i < NUM_CPUS; i++) {
			if (!stats->per_cpu[i].tx_called)
				continue;

			dp_info("Tx CPU[%d]: called %u, dropped %u, orphaned %u",
				i, stats->per_cpu[i].tx_called,
				stats->per_cpu[i].tx_dropped,
				stats->per_cpu[i].tx_orphaned);
		}

		dp_info("TX - called %u, dropped %u orphan %u",
			total_tx_pkt, total_tx_dropped,
			total_tx_orphaned);

		dp_ctx->dp_ops.wlan_dp_display_tx_multiq_stats(ctx,
							       dp_intf->dev);

		/* per-CPU RX lines, only for CPUs that received */
		for (i = 0; i < NUM_CPUS; i++) {
			if (stats->per_cpu[i].rx_packets == 0)
				continue;
			dp_info("Rx CPU[%d]: packets %u, dropped %u, delivered %u, refused %u",
				i, stats->per_cpu[i].rx_packets,
				stats->per_cpu[i].rx_dropped,
				stats->per_cpu[i].rx_delivered,
				stats->per_cpu[i].rx_refused);
		}

		dp_info("RX - packets %u, dropped %u, unsol_arp_mcast_drp %u, delivered %u, refused %u GRO - agg %u drop %u non-agg %u flush_skip %u low_tput_flush %u disabled(conc %u low-tput %u)",
			total_rx_pkt, total_rx_dropped,
			qdf_atomic_read(&stats->rx_usolict_arp_n_mcast_drp),
			total_rx_delv,
			total_rx_refused, stats->rx_aggregated,
			stats->rx_gro_dropped, stats->rx_non_aggregated,
			stats->rx_gro_flush_skip,
			stats->rx_gro_low_tput_flush,
			qdf_atomic_read(&dp_ctx->disable_rx_ol_in_concurrency),
			qdf_atomic_read(&dp_ctx->disable_rx_ol_in_low_tput));
	}
}
1150 
/**
 * dp_display_periodic_stats() - Function to display periodic stats
 * @dp_ctx: handle to dp context
 * @data_in_interval: true, if data detected in bw time interval
 *
 * The periodicity is determined by dp_ctx->dp_cfg->periodic_stats_disp_time.
 * Stats show up in wlan driver logs.
 *
 * Note: uses function-static state (@counter, @data_in_time_period) to
 * accumulate across successive bus-bandwidth work invocations, so this
 * is intended to be called from a single (bus BW work) context.
 *
 * Returns: None
 */
static void dp_display_periodic_stats(struct wlan_dp_psoc_context *dp_ctx,
				      bool data_in_interval)
{
	static uint32_t counter;
	static bool data_in_time_period;
	ol_txrx_soc_handle soc;
	uint32_t periodic_stats_disp_time = 0;
	hdd_cb_handle ctx = dp_ctx->dp_ops.callback_ctx;

	wlan_mlme_stats_get_periodic_display_time(dp_ctx->psoc,
						  &periodic_stats_disp_time);
	/* feature disabled when display time is configured as 0 */
	if (!periodic_stats_disp_time)
		return;

	soc = cds_get_context(QDF_MODULE_ID_SOC);
	if (!soc)
		return;

	counter++;
	/* remember that any data was seen within the display window */
	if (data_in_interval)
		data_in_time_period = data_in_interval;

	/* counter counts BW-compute intervals (ms); display time is in
	 * seconds, hence the * 1000
	 */
	if (counter * dp_ctx->dp_cfg.bus_bw_compute_interval >=
		periodic_stats_disp_time * 1000) {
		hif_rtpm_display_last_busy_hist(cds_get_context(QDF_MODULE_ID_HIF));
		/* dump detailed stats only if traffic was seen */
		if (data_in_time_period) {
			wlan_dp_display_txrx_stats(dp_ctx);
			dp_txrx_ext_dump_stats(soc, CDP_DP_RX_THREAD_STATS);
			cdp_display_stats(soc,
					  CDP_RX_RING_STATS,
					  QDF_STATS_VERBOSITY_LEVEL_LOW);
			cdp_display_stats(soc,
					  CDP_DP_NAPI_STATS,
					  QDF_STATS_VERBOSITY_LEVEL_LOW);
			cdp_display_stats(soc,
					  CDP_TXRX_PATH_STATS,
					  QDF_STATS_VERBOSITY_LEVEL_LOW);
			cdp_display_stats(soc,
					  CDP_DUMP_TX_FLOW_POOL_INFO,
					  QDF_STATS_VERBOSITY_LEVEL_LOW);
			cdp_display_stats(soc,
					  CDP_DP_SWLM_STATS,
					  QDF_STATS_VERBOSITY_LEVEL_LOW);
			dp_ctx->dp_ops.wlan_dp_display_netif_queue_history
				(ctx, QDF_STATS_VERBOSITY_LEVEL_LOW);
			cdp_display_txrx_hw_info(soc);
			qdf_dp_trace_dump_stats();
		}
		/* restart the window */
		counter = 0;
		data_in_time_period = false;
	}
}
1213 
1214 /**
1215  * dp_pm_qos_update_cpu_mask() - Prepare CPU mask for PM_qos voting
1216  * @mask: return variable of cpumask for the TPUT
1217  * @enable_perf_cluster: Enable PERF cluster or not
1218  *
1219  * By default, the function sets CPU mask for silver cluster unless
1220  * enable_perf_cluster is set as true.
1221  *
1222  * Return: none
1223  */
dp_pm_qos_update_cpu_mask(qdf_cpu_mask * mask,bool enable_perf_cluster)1224 static inline void dp_pm_qos_update_cpu_mask(qdf_cpu_mask *mask,
1225 					     bool enable_perf_cluster)
1226 {
1227 	int package_id;
1228 	unsigned int cpus;
1229 	int perf_cpu_cluster = hif_get_perf_cluster_bitmap();
1230 	int little_cpu_cluster = BIT(CPU_CLUSTER_TYPE_LITTLE);
1231 
1232 	qdf_cpumask_clear(mask);
1233 	qdf_for_each_online_cpu(cpus) {
1234 		package_id = qdf_topology_physical_package_id(cpus);
1235 		if (package_id >= 0 &&
1236 		    (BIT(package_id) & little_cpu_cluster ||
1237 		     (enable_perf_cluster &&
1238 		      BIT(package_id) & perf_cpu_cluster))) {
1239 			qdf_cpumask_set_cpu(cpus, mask);
1240 		}
1241 	}
1242 }
1243 
/**
 * dp_bus_bandwidth_work_tune_rx() - Function to tune for RX
 * @dp_ctx: handle to dp context
 * @rx_packets: receive packet count in last bus bandwidth interval
 * @diff_us: delta time since last invocation.
 * @next_rx_level: pointer to next_rx_level to be filled
 * @cpu_mask: pm_qos cpu_mask needed for RX, to be filled
 * @is_rx_pm_qos_high: pointer indicating if high qos is needed, to be filled
 *
 * The function tunes various aspects of driver based on a running average
 * of RX packets received in last bus bandwidth interval.
 *
 * Note: reads and updates the prev_* running-average state on @dp_ctx,
 * so the statement ordering here is significant.
 *
 * Returns: true if RX level has changed, else return false
 */
static
bool dp_bus_bandwidth_work_tune_rx(struct wlan_dp_psoc_context *dp_ctx,
				   const uint64_t rx_packets,
				   uint64_t diff_us,
				   enum wlan_tp_level *next_rx_level,
				   qdf_cpu_mask *cpu_mask,
				   bool *is_rx_pm_qos_high)
{
	bool rx_level_change = false;
	bool rxthread_high_tput_req;
	uint32_t bw_interval_us;
	uint32_t delack_timer_cnt = dp_ctx->dp_cfg.tcp_delack_timer_count;
	uint64_t avg_rx;
	uint64_t no_rx_offload_pkts, avg_no_rx_offload_pkts;
	uint64_t rx_offload_pkts, avg_rx_offload_pkts;

	bw_interval_us = dp_ctx->dp_cfg.bus_bw_compute_interval * 1000;
	/* snapshot and reset the non-offload RX counter for this interval */
	no_rx_offload_pkts = dp_ctx->no_rx_offload_pkt_cnt;
	dp_ctx->no_rx_offload_pkt_cnt = 0;

	/* adjust for any sched delays */
	no_rx_offload_pkts = no_rx_offload_pkts * bw_interval_us;
	no_rx_offload_pkts = qdf_do_div(no_rx_offload_pkts, (uint32_t)diff_us);

	/* average no-offload RX packets over last 2 BW intervals */
	avg_no_rx_offload_pkts = (no_rx_offload_pkts +
				  dp_ctx->prev_no_rx_offload_pkts) / 2;
	dp_ctx->prev_no_rx_offload_pkts = no_rx_offload_pkts;

	/* offloaded RX = total RX minus non-offloaded (clamped at 0) */
	if (rx_packets >= no_rx_offload_pkts)
		rx_offload_pkts = rx_packets - no_rx_offload_pkts;
	else
		rx_offload_pkts = 0;

	/* average offloaded RX packets over last 2 BW intervals */
	avg_rx_offload_pkts = (rx_offload_pkts +
			       dp_ctx->prev_rx_offload_pkts) / 2;
	dp_ctx->prev_rx_offload_pkts = rx_offload_pkts;

	avg_rx = avg_no_rx_offload_pkts + avg_rx_offload_pkts;

	qdf_cpumask_clear(cpu_mask);

	/* non-offloaded traffic above high threshold needs the most help:
	 * high PM QoS, perf-cluster CPU mask, high-TPUT RX thread affinity
	 */
	if (avg_no_rx_offload_pkts > dp_ctx->dp_cfg.bus_bw_high_threshold) {
		rxthread_high_tput_req = true;
		*is_rx_pm_qos_high = true;
		/*Todo: move hdd implementation to qdf */
		dp_pm_qos_update_cpu_mask(cpu_mask, true);
	} else if (avg_rx > dp_ctx->dp_cfg.bus_bw_high_threshold) {
		rxthread_high_tput_req = false;
		*is_rx_pm_qos_high = false;
		dp_pm_qos_update_cpu_mask(cpu_mask, false);
	} else {
		*is_rx_pm_qos_high = false;
		rxthread_high_tput_req = false;
	}

	/*
	 * Takes care to set Rx_thread affinity for below case
	 * 1)LRO/GRO not supported ROME case
	 * 2)when rx_ol is disabled in cases like concurrency etc
	 * 3)For UDP cases
	 */
	if (cds_sched_handle_throughput_req(rxthread_high_tput_req))
		dp_warn("Rx thread high_tput(%d) affinity request failed",
			rxthread_high_tput_req);

	/* fine-tuning parameters for RX Flows */
	if (avg_rx > dp_ctx->dp_cfg.tcp_delack_thres_high) {
		/* require delack_timer_cnt consecutive high intervals
		 * before promoting the RX level to HIGH
		 */
		if (dp_ctx->cur_rx_level != WLAN_SVC_TP_HIGH &&
		    ++dp_ctx->rx_high_ind_cnt == delack_timer_cnt) {
			*next_rx_level = WLAN_SVC_TP_HIGH;
		}
	} else {
		dp_ctx->rx_high_ind_cnt = 0;
		*next_rx_level = WLAN_SVC_TP_LOW;
	}

	if (dp_ctx->cur_rx_level != *next_rx_level) {
		struct wlan_rx_tp_data rx_tp_data = {0};

		dp_ctx->cur_rx_level = *next_rx_level;
		rx_level_change = true;
		/* Send throughput indication only if it is enabled.
		 * Disabling tcp_del_ack will revert the tcp stack behavior
		 * to default delayed ack. Note that this will disable the
		 * dynamic delayed ack mechanism across the system
		 */
		if (dp_ctx->en_tcp_delack_no_lro)
			rx_tp_data.rx_tp_flags |= TCP_DEL_ACK_IND;

		if (dp_ctx->dp_cfg.enable_tcp_adv_win_scale)
			rx_tp_data.rx_tp_flags |= TCP_ADV_WIN_SCL;

		rx_tp_data.level = *next_rx_level;
		wlan_dp_update_tcp_rx_param(dp_ctx, &rx_tp_data);
	}

	return rx_level_change;
}
1358 
/**
 * dp_bus_bandwidth_work_tune_tx() - Function to tune for TX
 * @dp_ctx: handle to dp context
 * @tx_packets: transmit packet count in last bus bandwidth interval
 * @diff_us: delta time since last invocation.
 * @next_tx_level: pointer to next_tx_level to be filled
 * @cpu_mask: pm_qos cpu_mask needed for TX, to be filled
 * @is_tx_pm_qos_high: pointer indicating if high qos is needed, to be filled
 *
 * The function tunes various aspects of the driver based on a running average
 * of TX packets received in last bus bandwidth interval.
 *
 * Note: reads and updates the prev_* running-average state on @dp_ctx,
 * so the statement ordering here is significant.
 *
 * Returns: true if TX level has changed, else return false
 */
static
bool dp_bus_bandwidth_work_tune_tx(struct wlan_dp_psoc_context *dp_ctx,
				   const uint64_t tx_packets,
				   uint64_t diff_us,
				   enum wlan_tp_level *next_tx_level,
				   qdf_cpu_mask *cpu_mask,
				   bool *is_tx_pm_qos_high)
{
	bool tx_level_change = false;
	uint32_t bw_interval_us;
	uint64_t no_tx_offload_pkts, avg_no_tx_offload_pkts;
	uint64_t tx_offload_pkts, avg_tx_offload_pkts;
	uint64_t avg_tx;

	bw_interval_us = dp_ctx->dp_cfg.bus_bw_compute_interval * 1000;
	no_tx_offload_pkts = dp_ctx->no_tx_offload_pkt_cnt;

	/* adjust for any sched delays */
	no_tx_offload_pkts = no_tx_offload_pkts * bw_interval_us;
	no_tx_offload_pkts = qdf_do_div(no_tx_offload_pkts, (uint32_t)diff_us);

	/* average no-offload TX packets over last 2 BW intervals */
	avg_no_tx_offload_pkts = (no_tx_offload_pkts +
				  dp_ctx->prev_no_tx_offload_pkts) / 2;
	/* counter reset is deferred until after the average is taken */
	dp_ctx->no_tx_offload_pkt_cnt = 0;
	dp_ctx->prev_no_tx_offload_pkts = no_tx_offload_pkts;

	/* offloaded TX = total TX minus non-offloaded (clamped at 0) */
	if (tx_packets >= no_tx_offload_pkts)
		tx_offload_pkts = tx_packets - no_tx_offload_pkts;
	else
		tx_offload_pkts = 0;

	/* average offloaded TX packets over last 2 BW intervals */
	avg_tx_offload_pkts = (tx_offload_pkts +
			       dp_ctx->prev_tx_offload_pkts) / 2;
	dp_ctx->prev_tx_offload_pkts = tx_offload_pkts;

	avg_tx = avg_no_tx_offload_pkts + avg_tx_offload_pkts;

	/* fine-tuning parameters for TX Flows */
	dp_ctx->prev_tx = tx_packets;

	qdf_cpumask_clear(cpu_mask);

	/* heavy non-offloaded TX gets the perf-cluster mask and high QoS */
	if (avg_no_tx_offload_pkts >
		dp_ctx->dp_cfg.bus_bw_very_high_threshold) {
		dp_pm_qos_update_cpu_mask(cpu_mask, true);
		*is_tx_pm_qos_high = true;
	} else if (avg_tx > dp_ctx->dp_cfg.bus_bw_high_threshold) {
		dp_pm_qos_update_cpu_mask(cpu_mask, false);
		*is_tx_pm_qos_high = false;
	} else {
		*is_tx_pm_qos_high = false;
	}

	if (avg_tx > dp_ctx->dp_cfg.tcp_tx_high_tput_thres)
		*next_tx_level = WLAN_SVC_TP_HIGH;
	else
		*next_tx_level = WLAN_SVC_TP_LOW;

	/* notify the TCP stack only when limit-output tuning is enabled
	 * and the level actually changed
	 */
	if (dp_ctx->dp_cfg.enable_tcp_limit_output &&
	    dp_ctx->cur_tx_level != *next_tx_level) {
		struct wlan_tx_tp_data tx_tp_data = {0};

		dp_ctx->cur_tx_level = *next_tx_level;
		tx_level_change = true;
		tx_tp_data.level = *next_tx_level;
		tx_tp_data.tcp_limit_output = true;
		wlan_dp_update_tcp_tx_param(dp_ctx, &tx_tp_data);
	}

	return tx_level_change;
}
1446 
1447 /**
1448  * dp_sap_p2p_update_mid_high_tput() - Update mid high BW for SAP and P2P mode
1449  * @dp_ctx: DP context
1450  * @total_pkts: Total Tx and Rx packets
1451  *
1452  * Return: True if mid high threshold is set and opmode is SAP or P2P GO
1453  */
1454 static inline
dp_sap_p2p_update_mid_high_tput(struct wlan_dp_psoc_context * dp_ctx,uint64_t total_pkts)1455 bool dp_sap_p2p_update_mid_high_tput(struct wlan_dp_psoc_context *dp_ctx,
1456 				     uint64_t total_pkts)
1457 {
1458 	struct wlan_dp_intf *dp_intf = NULL;
1459 	struct wlan_dp_intf *dp_intf_next = NULL;
1460 
1461 	if (dp_ctx->dp_cfg.bus_bw_mid_high_threshold &&
1462 	    total_pkts > dp_ctx->dp_cfg.bus_bw_mid_high_threshold) {
1463 		dp_for_each_intf_held_safe(dp_ctx, dp_intf, dp_intf_next) {
1464 			if (dp_intf->device_mode == QDF_SAP_MODE ||
1465 			    dp_intf->device_mode == QDF_P2P_GO_MODE)
1466 				return true;
1467 		}
1468 	}
1469 
1470 	return false;
1471 }
1472 
/**
 * dp_pld_request_bus_bandwidth() - Function to control bus bandwidth
 * @dp_ctx: handle to DP context
 * @tx_packets: transmit packet count received in BW interval
 * @rx_packets: receive packet count received in BW interval
 * @diff_us: delta time since last invocation.
 *
 * The function controls the bus bandwidth and dynamic control of
 * tcp delayed ack configuration.
 *
 * Note: uses function-static state (next_rx_level, prev_tput_level) to
 * persist across invocations of the bus BW work.
 *
 * Returns: None
 */
static void dp_pld_request_bus_bandwidth(struct wlan_dp_psoc_context *dp_ctx,
					 const uint64_t tx_packets,
					 const uint64_t rx_packets,
					 const uint64_t diff_us)
{
	uint16_t index;
	bool vote_level_change = false;
	bool rx_level_change;
	bool tx_level_change;
	bool dptrace_high_tput_req;
	u64 total_pkts = tx_packets + rx_packets;
	enum pld_bus_width_type next_vote_level = PLD_BUS_WIDTH_IDLE;
	static enum wlan_tp_level next_rx_level = WLAN_SVC_TP_NONE;
	enum wlan_tp_level next_tx_level = WLAN_SVC_TP_NONE;
	qdf_cpu_mask pm_qos_cpu_mask_tx, pm_qos_cpu_mask_rx, pm_qos_cpu_mask;
	bool is_rx_pm_qos_high;
	bool is_tx_pm_qos_high;
	bool pmqos_on_low_tput = false;
	enum tput_level tput_level;
	bool is_tput_level_high;
	struct bbm_params param = {0};
	bool legacy_client = false;
	void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);
	static enum tput_level prev_tput_level = TPUT_LEVEL_NONE;
	struct wlan_dp_psoc_callbacks *dp_ops = &dp_ctx->dp_ops;
	hdd_cb_handle ctx = dp_ops->callback_ctx;

	if (!soc)
		return;

	/* map total packet count to a bus vote level via the threshold
	 * ladder; an explicit high_bus_bw_request overrides the ladder
	 */
	if (dp_ctx->high_bus_bw_request) {
		next_vote_level = PLD_BUS_WIDTH_VERY_HIGH;
		tput_level = TPUT_LEVEL_VERY_HIGH;
	} else if (total_pkts > dp_ctx->dp_cfg.bus_bw_super_high_threshold) {
		next_vote_level = PLD_BUS_WIDTH_MAX;
		tput_level = TPUT_LEVEL_SUPER_HIGH;
	} else if (total_pkts > dp_ctx->dp_cfg.bus_bw_ultra_high_threshold) {
		next_vote_level = PLD_BUS_WIDTH_ULTRA_HIGH;
		tput_level = TPUT_LEVEL_ULTRA_HIGH;
	} else if (total_pkts > dp_ctx->dp_cfg.bus_bw_very_high_threshold) {
		next_vote_level = PLD_BUS_WIDTH_VERY_HIGH;
		tput_level = TPUT_LEVEL_VERY_HIGH;
	} else if (total_pkts > dp_ctx->dp_cfg.bus_bw_high_threshold) {
		next_vote_level = PLD_BUS_WIDTH_HIGH;
		tput_level = TPUT_LEVEL_HIGH;
		if (dp_sap_p2p_update_mid_high_tput(dp_ctx, total_pkts)) {
			next_vote_level = PLD_BUS_WIDTH_MID_HIGH;
			tput_level = TPUT_LEVEL_MID_HIGH;
		}
	} else if (total_pkts > dp_ctx->dp_cfg.bus_bw_medium_threshold) {
		next_vote_level = PLD_BUS_WIDTH_MEDIUM;
		tput_level = TPUT_LEVEL_MEDIUM;
	} else if (total_pkts > dp_ctx->dp_cfg.bus_bw_low_threshold) {
		next_vote_level = PLD_BUS_WIDTH_LOW;
		tput_level = TPUT_LEVEL_LOW;
	} else {
		next_vote_level = PLD_BUS_WIDTH_IDLE;
		tput_level = TPUT_LEVEL_IDLE;
	}

	/*
	 * DBS mode requires more DDR/SNOC resources, vote to ultra high
	 * only when TPUT can reach VHT80 KPI and IPA is disabled,
	 * for other cases, follow general voting logic
	 */
	if (!ucfg_ipa_is_fw_wdi_activated(dp_ctx->pdev) &&
	    policy_mgr_is_current_hwmode_dbs(dp_ctx->psoc) &&
	    (total_pkts > dp_ctx->dp_cfg.bus_bw_dbs_threshold) &&
	    (tput_level < TPUT_LEVEL_SUPER_HIGH)) {
		next_vote_level = PLD_BUS_WIDTH_ULTRA_HIGH;
		tput_level = TPUT_LEVEL_ULTRA_HIGH;
	}

	/* publish the TPUT level to the bus-bandwidth manager and the
	 * runtime-PM TPUT policy
	 */
	param.policy = BBM_TPUT_POLICY;
	param.policy_info.tput_level = tput_level;
	dp_bbm_apply_independent_policy(dp_ctx->psoc, &param);

	dp_rtpm_tput_policy_apply(dp_ctx, tput_level);

	dptrace_high_tput_req =
			next_vote_level > PLD_BUS_WIDTH_IDLE ? true : false;

	if (qdf_atomic_read(&dp_ctx->num_latency_critical_clients))
		legacy_client = true;

	dp_low_tput_gro_flush_skip_handler(dp_ctx, next_vote_level,
					   legacy_client);

	/* the block below runs only on a vote-level transition */
	if (dp_ctx->cur_vote_level != next_vote_level) {
		/* Set affinity for tx completion grp interrupts */
		if (tput_level >= TPUT_LEVEL_VERY_HIGH &&
		    prev_tput_level < TPUT_LEVEL_VERY_HIGH)
			hif_set_grp_intr_affinity(hif_ctx,
				cdp_get_tx_rings_grp_bitmap(soc), true);
		else if (tput_level < TPUT_LEVEL_VERY_HIGH &&
			 prev_tput_level >= TPUT_LEVEL_VERY_HIGH)
			hif_set_grp_intr_affinity(hif_ctx,
				cdp_get_tx_rings_grp_bitmap(soc),
				false);

		prev_tput_level = tput_level;
		dp_ctx->cur_vote_level = next_vote_level;
		vote_level_change = true;

		/* PM QoS and RPS follow the vote level */
		if ((next_vote_level == PLD_BUS_WIDTH_LOW) ||
		    (next_vote_level == PLD_BUS_WIDTH_IDLE)) {
			dp_ops->dp_pld_remove_pm_qos(ctx);
			if (dp_ctx->dynamic_rps)
				dp_clear_rps_cpu_mask(dp_ctx);
		} else {
			dp_ops->dp_pld_request_pm_qos(ctx);
			if (dp_ctx->dynamic_rps)
				/*Todo : check once hdd_set_rps_cpu_mask */
				dp_set_rps_cpu_mask(dp_ctx);
		}

		/* RX thread UL affinity: engaged only at HIGH vote with
		 * heavy TX and non-trivial RX
		 */
		if (dp_ctx->dp_cfg.rx_thread_ul_affinity_mask) {
			if (next_vote_level == PLD_BUS_WIDTH_HIGH &&
			    tx_packets >
			    dp_ctx->dp_cfg.bus_bw_high_threshold &&
			    rx_packets >
			    dp_ctx->dp_cfg.bus_bw_low_threshold)
				cds_sched_handle_rx_thread_affinity_req(true);
			else if (next_vote_level != PLD_BUS_WIDTH_HIGH)
				cds_sched_handle_rx_thread_affinity_req(false);
		}

		dp_ops->dp_napi_apply_throughput_policy(ctx,
							tx_packets,
							rx_packets);

		if (rx_packets < dp_ctx->dp_cfg.bus_bw_low_threshold)
			dp_disable_rx_ol_for_low_tput(dp_ctx, true);
		else
			dp_disable_rx_ol_for_low_tput(dp_ctx, false);

		/*
		 * force disable pktlog and only re-enable based
		 * on ini config
		 */
		if (next_vote_level >= PLD_BUS_WIDTH_HIGH)
			dp_ops->dp_pktlog_enable_disable(ctx,
							 false, 0, 0);
		else if (cds_is_packet_log_enabled())
			dp_ops->dp_pktlog_enable_disable(ctx,
							 true, 0, 0);
	}

	qdf_dp_trace_apply_tput_policy(dptrace_high_tput_req);

	rx_level_change = dp_bus_bandwidth_work_tune_rx(dp_ctx,
							rx_packets,
							diff_us,
							&next_rx_level,
							&pm_qos_cpu_mask_rx,
							&is_rx_pm_qos_high);

	tx_level_change = dp_bus_bandwidth_work_tune_tx(dp_ctx,
							tx_packets,
							diff_us,
							&next_tx_level,
							&pm_qos_cpu_mask_tx,
							&is_tx_pm_qos_high);

	index = dp_ctx->txrx_hist_idx;

	if (vote_level_change) {
		/* Clear mask if BW is not HIGH or more */
		if (next_vote_level < PLD_BUS_WIDTH_HIGH) {
			is_rx_pm_qos_high = false;
			is_tx_pm_qos_high = false;
			qdf_cpumask_clear(&pm_qos_cpu_mask);
			if (next_vote_level == PLD_BUS_WIDTH_LOW &&
			    rx_packets > tx_packets &&
			    !legacy_client) {
				pmqos_on_low_tput = true;
				dp_pm_qos_update_cpu_mask(&pm_qos_cpu_mask,
							  false);
			}
		} else {
			/* combine the RX and TX requested masks */
			qdf_cpumask_clear(&pm_qos_cpu_mask);
			qdf_cpumask_or(&pm_qos_cpu_mask,
				       &pm_qos_cpu_mask_tx,
				       &pm_qos_cpu_mask_rx);

			/* Default mask in case throughput is high */
			if (qdf_cpumask_empty(&pm_qos_cpu_mask))
				dp_pm_qos_update_cpu_mask(&pm_qos_cpu_mask,
							  false);
		}
		dp_ops->dp_pm_qos_update_request(ctx, &pm_qos_cpu_mask);
		is_tput_level_high =
			tput_level >= TPUT_LEVEL_HIGH ? true : false;
		cdp_set_bus_vote_lvl_high(soc, is_tput_level_high);
	}

	if (vote_level_change || tx_level_change || rx_level_change) {
		dp_info("tx:%llu[%llu(off)+%llu(no-off)] rx:%llu[%llu(off)+%llu(no-off)] next_level(vote %u rx %u tx %u rtpm %d) pm_qos(rx:%u,%*pb tx:%u,%*pb on_low_tput:%u)",
			tx_packets,
			dp_ctx->prev_tx_offload_pkts,
			dp_ctx->prev_no_tx_offload_pkts,
			rx_packets,
			dp_ctx->prev_rx_offload_pkts,
			dp_ctx->prev_no_rx_offload_pkts,
			next_vote_level, next_rx_level, next_tx_level,
			dp_rtpm_tput_policy_get_vote(dp_ctx),
			is_rx_pm_qos_high,
			qdf_cpumask_pr_args(&pm_qos_cpu_mask_rx),
			is_tx_pm_qos_high,
			qdf_cpumask_pr_args(&pm_qos_cpu_mask_tx),
			pmqos_on_low_tput);

		/* record this transition in the circular histogram */
		if (dp_ctx->txrx_hist) {
			dp_ctx->txrx_hist[index].next_tx_level = next_tx_level;
			dp_ctx->txrx_hist[index].next_rx_level = next_rx_level;
			dp_ctx->txrx_hist[index].is_rx_pm_qos_high =
				is_rx_pm_qos_high;
			dp_ctx->txrx_hist[index].is_tx_pm_qos_high =
				is_tx_pm_qos_high;
			dp_ctx->txrx_hist[index].next_vote_level =
				next_vote_level;
			dp_ctx->txrx_hist[index].interval_rx = rx_packets;
			dp_ctx->txrx_hist[index].interval_tx = tx_packets;
			dp_ctx->txrx_hist[index].qtime =
				qdf_get_log_timestamp();
			dp_ctx->txrx_hist_idx++;
			dp_ctx->txrx_hist_idx &= NUM_TX_RX_HISTOGRAM_MASK;
		}
	}

	/* Roaming is a high priority job but gets processed in scheduler
	 * thread, bypassing printing stats so that kworker exits quickly and
	 * scheduler thread can utilize CPU.
	 */
	if (!dp_ops->dp_is_roaming_in_progress(ctx)) {
		dp_display_periodic_stats(dp_ctx, (total_pkts > 0) ?
					  true : false);
		dp_periodic_sta_stats_display(dp_ctx);
	}

	hif_affinity_mgr_set_affinity(hif_ctx);
}
1728 
1729 #ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
1730 /**
1731  * dp_rx_check_qdisc_for_intf() - Check if any ingress qdisc is configured
1732  *  for given adapter
1733  * @dp_intf: pointer to DP interface context
1734  *
1735  * The function checks if ingress qdisc is registered for a given
1736  * net device.
1737  *
1738  * Return: None
1739  */
1740 static void
dp_rx_check_qdisc_for_intf(struct wlan_dp_intf * dp_intf)1741 dp_rx_check_qdisc_for_intf(struct wlan_dp_intf *dp_intf)
1742 {
1743 	struct wlan_dp_psoc_callbacks *dp_ops;
1744 	QDF_STATUS status;
1745 
1746 	dp_ops = &dp_intf->dp_ctx->dp_ops;
1747 	status = dp_ops->dp_rx_check_qdisc_configured(dp_intf->dev,
1748 				 dp_intf->dp_ctx->dp_agg_param.tc_ingress_prio);
1749 	if (QDF_IS_STATUS_SUCCESS(status)) {
1750 		if (qdf_likely(qdf_atomic_read(&dp_intf->gro_disallowed)))
1751 			return;
1752 
1753 		dp_debug("ingress qdisc/filter configured disable GRO");
1754 		qdf_atomic_set(&dp_intf->gro_disallowed, 1);
1755 
1756 		return;
1757 	} else if (status == QDF_STATUS_E_NOSUPPORT) {
1758 		if (qdf_unlikely(qdf_atomic_read(&dp_intf->gro_disallowed))) {
1759 			dp_debug("ingress qdisc/filter removed enable GRO");
1760 			qdf_atomic_set(&dp_intf->gro_disallowed, 0);
1761 		}
1762 	}
1763 }
1764 #else
/* Stub when WLAN_FEATURE_DYNAMIC_RX_AGGREGATION is not enabled:
 * ingress-qdisc detection is a no-op.
 */
static void
dp_rx_check_qdisc_for_intf(struct wlan_dp_intf *dp_intf)
{
}
1769 #endif
1770 
/* Number of consecutive bus-BW intervals with zero RX packets after which
 * the RX link speed is reported as poor to firmware (see dp_link_monitoring).
 */
#define NO_RX_PKT_LINK_SPEED_AGEOUT_COUNT 50
1772 static void
dp_link_monitoring(struct wlan_dp_psoc_context * dp_ctx,struct wlan_dp_intf * dp_intf)1773 dp_link_monitoring(struct wlan_dp_psoc_context *dp_ctx,
1774 		   struct wlan_dp_intf *dp_intf)
1775 {
1776 	struct cdp_peer_stats *peer_stats;
1777 	QDF_STATUS status;
1778 	ol_txrx_soc_handle soc;
1779 	struct wlan_objmgr_peer *bss_peer;
1780 	static uint32_t no_rx_times;
1781 	uint64_t  rx_packets;
1782 	uint32_t link_speed;
1783 	struct wlan_objmgr_psoc *psoc;
1784 	struct link_monitoring link_mon;
1785 	struct wlan_dp_link *def_link = dp_intf->def_link;
1786 
1787 	/*
1788 	 *  If throughput is high, link speed should be good,  don't check it
1789 	 *  to avoid performance penalty
1790 	 */
1791 	soc = cds_get_context(QDF_MODULE_ID_SOC);
1792 	if (cdp_get_bus_lvl_high(soc) == true)
1793 		return;
1794 
1795 	link_mon = dp_intf->link_monitoring;
1796 	if (!dp_ctx->dp_ops.link_monitoring_cb)
1797 		return;
1798 
1799 	psoc = dp_ctx->psoc;
1800 	/* If no rx packets received for N sec, set link speed to poor */
1801 	if (link_mon.is_rx_linkspeed_good) {
1802 		rx_packets = DP_BW_GET_DIFF(
1803 			qdf_net_stats_get_rx_pkts(&dp_intf->stats),
1804 			dp_intf->prev_rx_packets);
1805 		if (!rx_packets)
1806 			no_rx_times++;
1807 		else
1808 			no_rx_times = 0;
1809 		if (no_rx_times >= NO_RX_PKT_LINK_SPEED_AGEOUT_COUNT) {
1810 			no_rx_times = 0;
1811 			dp_ctx->dp_ops.link_monitoring_cb(psoc,
1812 							  def_link->link_id,
1813 							  false);
1814 			dp_intf->link_monitoring.is_rx_linkspeed_good = false;
1815 
1816 			return;
1817 		}
1818 	}
1819 	/* Get rx link speed from dp peer */
1820 	peer_stats = qdf_mem_malloc(sizeof(*peer_stats));
1821 	if (!peer_stats)
1822 		return;
1823 
1824 	/* TODO - Temp WAR, check what to do here */
1825 	/* Peer stats for any link peer is going to return the
1826 	 * stats from MLD peer, so its okay to query deflink
1827 	 */
1828 	bss_peer = wlan_vdev_get_bsspeer(def_link->vdev);
1829 	if (!bss_peer) {
1830 		dp_debug("Invalid bss peer");
1831 		qdf_mem_free(peer_stats);
1832 		return;
1833 	}
1834 
1835 	status = cdp_host_get_peer_stats(soc, def_link->link_id,
1836 					 bss_peer->macaddr,
1837 					 peer_stats);
1838 	if (QDF_IS_STATUS_ERROR(status)) {
1839 		qdf_mem_free(peer_stats);
1840 		return;
1841 	}
1842 	/* Convert rx linkspeed from kbps to mbps to compare with threshold */
1843 	link_speed = peer_stats->rx.last_rx_rate / 1000;
1844 
1845 	/*
1846 	 * When found current rx link speed becomes good(above threshold) or
1847 	 * poor, update to firmware.
1848 	 * If the current RX link speed is above the threshold, low rssi
1849 	 * roaming is not needed. If linkspeed_threshold is set to 0, the
1850 	 * firmware will not consider RX link speed in the roaming decision,
1851 	 * driver will send rx link speed poor state to firmware.
1852 	 */
1853 	if (!link_mon.rx_linkspeed_threshold) {
1854 		dp_ctx->dp_ops.link_monitoring_cb(psoc, def_link->link_id,
1855 						  false);
1856 		dp_intf->link_monitoring.is_rx_linkspeed_good = false;
1857 	} else if (link_speed > link_mon.rx_linkspeed_threshold &&
1858 	     !link_mon.is_rx_linkspeed_good) {
1859 		dp_ctx->dp_ops.link_monitoring_cb(psoc, def_link->link_id,
1860 						  true);
1861 		dp_intf->link_monitoring.is_rx_linkspeed_good = true;
1862 	} else if (link_speed < link_mon.rx_linkspeed_threshold &&
1863 		   link_mon.is_rx_linkspeed_good) {
1864 		dp_ctx->dp_ops.link_monitoring_cb(psoc, def_link->link_id,
1865 						  false);
1866 		dp_intf->link_monitoring.is_rx_linkspeed_good = false;
1867 	}
1868 
1869 	qdf_mem_free(peer_stats);
1870 }
1871 
/**
 * __dp_bus_bw_work_handler() - Bus bandwidth work handler
 * @dp_ctx: handle to DP context
 *
 * Computes tx/rx packet deltas accumulated over the last interval across
 * all active interfaces, folds in intra-bss forwarded and IPA packet
 * counts, normalizes the totals to the configured interval length, and
 * requests a bus bandwidth level accordingly. Stops the periodic work when
 * the context is invalid or no interface is connected.
 *
 * Returns: None
 */
static void __dp_bus_bw_work_handler(struct wlan_dp_psoc_context *dp_ctx)
{
	struct wlan_objmgr_vdev *vdev;
	struct wlan_dp_intf *dp_intf = NULL, *con_sap_dp_intf = NULL;
	struct wlan_dp_intf *dp_intf_next = NULL;
	struct wlan_dp_link *dp_link = NULL;
	struct wlan_dp_link *dp_link_next;
	uint64_t tx_packets = 0, rx_packets = 0, tx_bytes = 0;
	uint64_t fwd_tx_packets = 0, fwd_rx_packets = 0;
	uint64_t fwd_tx_packets_temp = 0, fwd_rx_packets_temp = 0;
	uint64_t fwd_tx_packets_diff = 0, fwd_rx_packets_diff = 0;
	uint64_t total_tx = 0, total_rx = 0;
	A_STATUS ret;
	bool connected = false;
	uint32_t ipa_tx_packets = 0, ipa_rx_packets = 0;
	uint64_t sta_tx_bytes = 0, sap_tx_bytes = 0;
	uint64_t diff_us;
	uint64_t curr_time_us;
	uint32_t bw_interval_us;
	hdd_cb_handle ctx = dp_ctx->dp_ops.callback_ctx;

	if (wlan_dp_validate_context(dp_ctx))
		goto stop_work;

	/* Skip this round entirely while suspended; do not stop the work */
	if (dp_ctx->is_suspend)
		return;

	bw_interval_us = dp_ctx->dp_cfg.bus_bw_compute_interval * 1000;

	/* Actual elapsed time since last vote, used to normalize deltas */
	curr_time_us = qdf_get_log_timestamp();
	diff_us = qdf_log_timestamp_to_usecs(
			curr_time_us - dp_ctx->bw_vote_time);
	dp_ctx->bw_vote_time = curr_time_us;

	dp_for_each_intf_held_safe(dp_ctx, dp_intf, dp_intf_next) {
		vdev = dp_objmgr_get_vdev_by_user(dp_intf->def_link,
						  WLAN_DP_ID);
		if (!vdev)
			continue;

		/* Skip STA/P2P-client interfaces that are not connected */
		if ((dp_intf->device_mode == QDF_STA_MODE ||
		     dp_intf->device_mode == QDF_P2P_CLIENT_MODE) &&
		    !wlan_cm_is_vdev_active(vdev)) {
			dp_objmgr_put_vdev_by_user(vdev, WLAN_DP_ID);
			continue;
		}

		/* Skip SAP/GO interfaces whose AP is not active */
		if ((dp_intf->device_mode == QDF_SAP_MODE ||
		     dp_intf->device_mode == QDF_P2P_GO_MODE) &&
		     !dp_ctx->dp_ops.dp_is_ap_active(ctx,
						     dp_intf->dev)) {
			dp_objmgr_put_vdev_by_user(vdev, WLAN_DP_ID);
			continue;
		}

		if (dp_ctx->dp_agg_param.tc_based_dyn_gro)
			dp_rx_check_qdisc_for_intf(dp_intf);

		tx_packets += DP_BW_GET_DIFF(
			qdf_net_stats_get_tx_pkts(&dp_intf->stats),
			dp_intf->prev_tx_packets);
		rx_packets += DP_BW_GET_DIFF(
			qdf_net_stats_get_rx_pkts(&dp_intf->stats),
			dp_intf->prev_rx_packets);
		/*
		 * NOTE(review): '=' not '+=' — tx_bytes holds only the
		 * current interface's delta; it is consumed per-link in the
		 * loop below before being overwritten next iteration.
		 * Confirm accumulation is intentionally not done here.
		 */
		tx_bytes = DP_BW_GET_DIFF(
			qdf_net_stats_get_tx_bytes(&dp_intf->stats),
			dp_intf->prev_tx_bytes);

		if (dp_intf->device_mode == QDF_STA_MODE &&
		    wlan_cm_is_vdev_active(vdev)) {
			dp_ctx->dp_ops.dp_send_mscs_action_frame(ctx,
							dp_intf->dev);
			if (dp_intf->link_monitoring.enabled)
				dp_link_monitoring(dp_ctx, dp_intf);
		}

		/* Sum intra-bss forwarded packets over all links of AP-like
		 * modes; any per-link failure discards this interface's
		 * forwarded-count contribution (ret != A_OK below).
		 */
		ret = A_ERROR;
		fwd_tx_packets = 0;
		fwd_rx_packets = 0;
		if (dp_intf->device_mode == QDF_SAP_MODE ||
		    dp_intf->device_mode == QDF_P2P_GO_MODE ||
		    dp_intf->device_mode == QDF_NDI_MODE) {
			dp_for_each_link_held_safe(dp_intf, dp_link,
						   dp_link_next) {
				ret = cdp_get_intra_bss_fwd_pkts_count(
					cds_get_context(QDF_MODULE_ID_SOC),
					dp_link->link_id,
					&fwd_tx_packets_temp,
					&fwd_rx_packets_temp);
				if (ret == A_OK) {
					fwd_tx_packets += fwd_tx_packets_temp;
					fwd_rx_packets += fwd_rx_packets_temp;
				} else {
					break;
				}
			}
		}

		if (ret == A_OK) {
			fwd_tx_packets_diff += DP_BW_GET_DIFF(
				fwd_tx_packets,
				dp_intf->prev_fwd_tx_packets);
			fwd_rx_packets_diff += DP_BW_GET_DIFF(
				fwd_rx_packets,
				dp_intf->prev_fwd_rx_packets);
		}

		/* Remember the (last) SAP interface for IPA stat crediting */
		if (dp_intf->device_mode == QDF_SAP_MODE) {
			con_sap_dp_intf = dp_intf;
			sap_tx_bytes =
				qdf_net_stats_get_tx_bytes(&dp_intf->stats);
		}

		if (dp_intf->device_mode == QDF_STA_MODE)
			sta_tx_bytes =
				qdf_net_stats_get_tx_bytes(&dp_intf->stats);

		dp_for_each_link_held_safe(dp_intf, dp_link, dp_link_next) {
			dp_set_driver_del_ack_enable(dp_link->link_id, dp_ctx,
						     rx_packets);

			dp_set_vdev_bundle_require_flag(dp_link->link_id,
							dp_ctx, tx_bytes);
		}

		total_rx += qdf_net_stats_get_rx_pkts(&dp_intf->stats);
		total_tx += qdf_net_stats_get_tx_pkts(&dp_intf->stats);

		/* Snapshot current counters as baseline for the next round */
		qdf_spin_lock_bh(&dp_ctx->bus_bw_lock);
		dp_intf->prev_tx_packets =
			qdf_net_stats_get_tx_pkts(&dp_intf->stats);
		dp_intf->prev_rx_packets =
			qdf_net_stats_get_rx_pkts(&dp_intf->stats);
		dp_intf->prev_fwd_tx_packets = fwd_tx_packets;
		dp_intf->prev_fwd_rx_packets = fwd_rx_packets;
		dp_intf->prev_tx_bytes =
			qdf_net_stats_get_tx_bytes(&dp_intf->stats);
		qdf_spin_unlock_bh(&dp_ctx->bus_bw_lock);
		connected = true;

		dp_objmgr_put_vdev_by_user(vdev, WLAN_DP_ID);
	}

	if (!connected) {
		dp_err("bus bandwidth timer running in disconnected state");
		goto stop_work;
	}

	/* add intra bss forwarded tx and rx packets */
	tx_packets += fwd_tx_packets_diff;
	rx_packets += fwd_rx_packets_diff;

	/* Send embedded Tx packet bytes on STA & SAP interface to IPA driver */
	ucfg_ipa_update_tx_stats(dp_ctx->pdev, sta_tx_bytes, sap_tx_bytes);

	dp_ipa_set_perf_level(dp_ctx, &tx_packets, &rx_packets,
			      &ipa_tx_packets, &ipa_rx_packets);
	if (con_sap_dp_intf) {
		qdf_net_stats_add_tx_pkts(&con_sap_dp_intf->stats,
					  ipa_tx_packets);
		qdf_net_stats_add_rx_pkts(&con_sap_dp_intf->stats,
					  ipa_rx_packets);
	}

	/*
	 * Scale measured deltas to the nominal interval so a late or early
	 * work execution does not skew the vote.
	 * NOTE(review): assumes diff_us is non-zero here — confirm the work
	 * cannot run twice within one log-timestamp tick.
	 */
	tx_packets = tx_packets * bw_interval_us;
	tx_packets = qdf_do_div(tx_packets, (uint32_t)diff_us);

	rx_packets = rx_packets * bw_interval_us;
	rx_packets = qdf_do_div(rx_packets, (uint32_t)diff_us);

	dp_pld_request_bus_bandwidth(dp_ctx, tx_packets, rx_packets, diff_us);

	return;

stop_work:
	qdf_periodic_work_stop_async(&dp_ctx->bus_bw_work);
}
2057 
/**
 * dp_bus_bw_work_handler() - Periodic-work entry point for bus bandwidth
 * @context: opaque handle, actually the DP psoc context
 *
 * Validates the context, takes the driver-op protection and runs the real
 * handler __dp_bus_bw_work_handler().
 *
 * Returns: None
 */
static void dp_bus_bw_work_handler(void *context)
{
	struct wlan_dp_psoc_context *dp_ctx = context;
	struct qdf_op_sync *op_sync;

	if (!dp_ctx) {
		dp_err("Unable to get DP context");
		return;
	}

	/* Run only when a driver operation can be safely started */
	if (!qdf_op_protect(&op_sync)) {
		__dp_bus_bw_work_handler(dp_ctx);
		qdf_op_unprotect(op_sync);
	}
}
2083 
dp_bus_bandwidth_init(struct wlan_objmgr_psoc * psoc)2084 int dp_bus_bandwidth_init(struct wlan_objmgr_psoc *psoc)
2085 {
2086 	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
2087 	hdd_cb_handle ctx = dp_ctx->dp_ops.callback_ctx;
2088 	QDF_STATUS status;
2089 
2090 	if (QDF_GLOBAL_FTM_MODE == cds_get_conparam())
2091 		return QDF_STATUS_SUCCESS;
2092 
2093 	dp_enter();
2094 
2095 	qdf_spinlock_create(&dp_ctx->bus_bw_lock);
2096 
2097 	dp_ctx->dp_ops.dp_pm_qos_add_request(ctx);
2098 
2099 	wlan_dp_init_tx_rx_histogram(dp_ctx);
2100 	status = qdf_periodic_work_create(&dp_ctx->bus_bw_work,
2101 					  dp_bus_bw_work_handler,
2102 					  dp_ctx);
2103 
2104 	dp_exit();
2105 
2106 	return qdf_status_to_os_return(status);
2107 }
2108 
dp_bus_bandwidth_deinit(struct wlan_objmgr_psoc * psoc)2109 void dp_bus_bandwidth_deinit(struct wlan_objmgr_psoc *psoc)
2110 {
2111 	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
2112 	hdd_cb_handle ctx;
2113 
2114 	if (!dp_ctx) {
2115 		dp_err("Unable to get DP context");
2116 		return;
2117 	}
2118 
2119 	ctx = dp_ctx->dp_ops.callback_ctx;
2120 
2121 	if (QDF_GLOBAL_FTM_MODE == cds_get_conparam())
2122 		return;
2123 
2124 	dp_enter();
2125 
2126 	/* it is expecting the timer has been stopped or not started
2127 	 * when coming deinit.
2128 	 */
2129 	QDF_BUG(!qdf_periodic_work_stop_sync(&dp_ctx->bus_bw_work));
2130 
2131 	qdf_periodic_work_destroy(&dp_ctx->bus_bw_work);
2132 	qdf_spinlock_destroy(&dp_ctx->bus_bw_lock);
2133 	wlan_dp_deinit_tx_rx_histogram(dp_ctx);
2134 	dp_ctx->dp_ops.dp_pm_qos_remove_request(ctx);
2135 
2136 	dp_exit();
2137 }
2138 
2139 /**
2140  * __dp_bus_bw_compute_timer_start() - start the bus bandwidth timer
2141  * @psoc: psoc handle
2142  *
2143  * Return: None
2144  */
__dp_bus_bw_compute_timer_start(struct wlan_objmgr_psoc * psoc)2145 static void __dp_bus_bw_compute_timer_start(struct wlan_objmgr_psoc *psoc)
2146 {
2147 	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
2148 
2149 	if (!dp_ctx) {
2150 		dp_err("Unable to get DP context");
2151 		return;
2152 	}
2153 
2154 	if (QDF_GLOBAL_FTM_MODE == cds_get_conparam())
2155 		return;
2156 
2157 	qdf_periodic_work_start(&dp_ctx->bus_bw_work,
2158 				dp_ctx->dp_cfg.bus_bw_compute_interval);
2159 	dp_ctx->bw_vote_time = qdf_get_log_timestamp();
2160 }
2161 
/**
 * dp_bus_bw_compute_timer_start() - start the bus bandwidth compute timer
 * @psoc: psoc handle
 *
 * Return: None
 */
void dp_bus_bw_compute_timer_start(struct wlan_objmgr_psoc *psoc)
{
	dp_enter();
	__dp_bus_bw_compute_timer_start(psoc);
	dp_exit();
}
2170 
dp_bus_bw_compute_timer_try_start(struct wlan_objmgr_psoc * psoc)2171 void dp_bus_bw_compute_timer_try_start(struct wlan_objmgr_psoc *psoc)
2172 {
2173 	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
2174 	hdd_cb_handle ctx;
2175 
2176 	dp_enter();
2177 
2178 	if (!dp_ctx) {
2179 		dp_err("Unable to get DP context");
2180 		return;
2181 	}
2182 
2183 	ctx = dp_ctx->dp_ops.callback_ctx;
2184 
2185 	if (dp_ctx->dp_ops.dp_any_adapter_connected(ctx))
2186 		__dp_bus_bw_compute_timer_start(psoc);
2187 
2188 	dp_exit();
2189 }
2190 
/**
 * __dp_bus_bw_compute_timer_stop() - stop the bus bandwidth timer
 * @psoc: psoc handle
 *
 * Stops the periodic bus bandwidth work and, when it was actually running,
 * resets bandwidth-related state (IPA perf level, TCP delack / advertised
 * window scale, driver del-ack and bundling flags, bus vote level). In all
 * cases, drops the bus bandwidth vote if no adapter remains connected and
 * resets the throughput BBM policy.
 *
 * Return: None
 */
static void __dp_bus_bw_compute_timer_stop(struct wlan_objmgr_psoc *psoc)
{
	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
	hdd_cb_handle ctx;
	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);

	struct bbm_params param = {0};
	bool is_any_adapter_conn;

	if (QDF_GLOBAL_FTM_MODE == cds_get_conparam())
		return;

	if (!dp_ctx || !soc)
		return;

	ctx = dp_ctx->dp_ops.callback_ctx;
	is_any_adapter_conn = dp_ctx->dp_ops.dp_any_adapter_connected(ctx);

	/* Work was not running: skip the reset of bandwidth-related state */
	if (!qdf_periodic_work_stop_sync(&dp_ctx->bus_bw_work))
		goto exit;

	ucfg_ipa_set_perf_level(dp_ctx->pdev, 0, 0);

	dp_reset_tcp_delack(psoc);

	if (!is_any_adapter_conn)
		dp_reset_tcp_adv_win_scale(dp_ctx);

	cdp_pdev_reset_driver_del_ack(cds_get_context(QDF_MODULE_ID_SOC),
				      OL_TXRX_PDEV_ID);
	cdp_pdev_reset_bundle_require_flag(cds_get_context(QDF_MODULE_ID_SOC),
					   OL_TXRX_PDEV_ID);

	cdp_set_bus_vote_lvl_high(soc, false);
	dp_ctx->bw_vote_time = 0;

exit:
	/*
	 * This check is for the case where the bus bw timer is forcibly
	 * stopped. We should remove the bus bw voting, if no adapter is
	 * connected.
	 */
	if (!is_any_adapter_conn) {
		uint64_t interval_us =
			dp_ctx->dp_cfg.bus_bw_compute_interval * 1000;
		qdf_atomic_set(&dp_ctx->num_latency_critical_clients, 0);
		dp_pld_request_bus_bandwidth(dp_ctx, 0, 0, interval_us);
	}
	param.policy = BBM_TPUT_POLICY;
	param.policy_info.tput_level = TPUT_LEVEL_NONE;
	dp_bbm_apply_independent_policy(psoc, &param);
}
2249 
/**
 * dp_bus_bw_compute_timer_stop() - stop the bus bandwidth compute timer
 * @psoc: psoc handle
 *
 * Return: None
 */
void dp_bus_bw_compute_timer_stop(struct wlan_objmgr_psoc *psoc)
{
	dp_enter();
	__dp_bus_bw_compute_timer_stop(psoc);
	dp_exit();
}
2258 
dp_bus_bw_compute_timer_try_stop(struct wlan_objmgr_psoc * psoc)2259 void dp_bus_bw_compute_timer_try_stop(struct wlan_objmgr_psoc *psoc)
2260 {
2261 	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
2262 	hdd_cb_handle ctx;
2263 
2264 	dp_enter();
2265 
2266 	if (!dp_ctx) {
2267 		dp_err("Unable to get DP context");
2268 		return;
2269 	}
2270 
2271 	ctx = dp_ctx->dp_ops.callback_ctx;
2272 
2273 	if (!dp_ctx->dp_ops.dp_any_adapter_connected(ctx))
2274 		__dp_bus_bw_compute_timer_stop(psoc);
2275 
2276 	dp_exit();
2277 }
2278 
/**
 * dp_bus_bw_compute_prev_txrx_stats() - snapshot current tx/rx counters
 * @vdev: objmgr vdev whose interface counters are snapshotted
 *
 * Records the interface's current tx/rx packet and byte counters, and the
 * intra-bss forwarded packet counts for this link, as the "previous" values
 * the bus bandwidth work uses to compute per-interval deltas. No-op in FTM
 * mode.
 *
 * Return: None
 */
void dp_bus_bw_compute_prev_txrx_stats(struct wlan_objmgr_vdev *vdev)
{
	struct wlan_objmgr_psoc *psoc = wlan_vdev_get_psoc(vdev);
	struct wlan_dp_link *dp_link = dp_get_vdev_priv_obj(vdev);
	struct wlan_dp_intf *dp_intf;
	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);

	if (!dp_link) {
		dp_err("No dp_link for objmgr vdev %pK", vdev);
		return;
	}

	dp_intf = dp_link->dp_intf;
	if (!dp_intf) {
		dp_err("Invalid dp_intf for dp_link %pK (" QDF_MAC_ADDR_FMT ")",
		       dp_link, QDF_MAC_ADDR_REF(dp_link->mac_addr.bytes));
		return;
	}

	if (QDF_GLOBAL_FTM_MODE == cds_get_conparam())
		return;

	qdf_spin_lock_bh(&dp_ctx->bus_bw_lock);
	dp_intf->prev_tx_packets = qdf_net_stats_get_tx_pkts(&dp_intf->stats);
	dp_intf->prev_rx_packets = qdf_net_stats_get_rx_pkts(&dp_intf->stats);
	dp_intf->prev_tx_bytes = qdf_net_stats_get_tx_bytes(&dp_intf->stats);

	/*
	 * TODO - Should the prev_fwd_tx_packets and
	 * such stats be per link ??
	 * NOTE(review): the return value is ignored here, so on failure the
	 * prev_fwd_* fields keep whatever they held before — confirm that
	 * is acceptable for the first-interval delta computation.
	 */
	cdp_get_intra_bss_fwd_pkts_count(cds_get_context(QDF_MODULE_ID_SOC),
					 dp_link->link_id,
					 &dp_intf->prev_fwd_tx_packets,
					 &dp_intf->prev_fwd_rx_packets);
	qdf_spin_unlock_bh(&dp_ctx->bus_bw_lock);
}
2316 
dp_bus_bw_compute_reset_prev_txrx_stats(struct wlan_objmgr_vdev * vdev)2317 void dp_bus_bw_compute_reset_prev_txrx_stats(struct wlan_objmgr_vdev *vdev)
2318 {
2319 	struct wlan_objmgr_psoc *psoc = wlan_vdev_get_psoc(vdev);
2320 	struct wlan_dp_link *dp_link = dp_get_vdev_priv_obj(vdev);
2321 	struct wlan_dp_intf *dp_intf;
2322 	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
2323 
2324 	if (!dp_link) {
2325 		dp_err("No dp_link for objmgr vdev %pK", vdev);
2326 		return;
2327 	}
2328 
2329 	dp_intf = dp_link->dp_intf;
2330 	if (!dp_intf) {
2331 		dp_err("Invalid dp_intf for dp_link %pK (" QDF_MAC_ADDR_FMT ")",
2332 		       dp_link, QDF_MAC_ADDR_REF(dp_link->mac_addr.bytes));
2333 		return;
2334 	}
2335 
2336 	if (QDF_GLOBAL_FTM_MODE == cds_get_conparam())
2337 		return;
2338 
2339 	qdf_spin_lock_bh(&dp_ctx->bus_bw_lock);
2340 	dp_intf->prev_tx_packets = 0;
2341 	dp_intf->prev_rx_packets = 0;
2342 	dp_intf->prev_fwd_tx_packets = 0;
2343 	dp_intf->prev_fwd_rx_packets = 0;
2344 	dp_intf->prev_tx_bytes = 0;
2345 	qdf_spin_unlock_bh(&dp_ctx->bus_bw_lock);
2346 }
2347 #endif /* WLAN_FEATURE_DP_BUS_BANDWIDTH */
2348