/*
 * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <ol_cfg.h>
#include <ol_if_athvar.h>
#include <cdp_txrx_cfg.h>
#include <cdp_txrx_handle.h>

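/*
 * VoW (Video over Wireless) configuration word: the upper 16 bits
 * encode the maximum number of VoW stations and the lower 16 bits the
 * number of tx descriptors reserved per station (see the decoding in
 * ol_cfg_target_tx_credit() below).
 */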
unsigned int vow_config;

#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/**
 * ol_tx_set_flow_control_parameters() - set flow control parameters
 * @cfg_pdev: cfg context
 * @cfg_param: cfg parameters
 *
 * Return: none
 */
void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
	struct txrx_pdev_cfg_param_t *cfg_param)
{
	struct txrx_pdev_cfg_t *cfg_ctx = (struct txrx_pdev_cfg_t *)cfg_pdev;

	cfg_ctx->tx_flow_start_queue_offset =
					cfg_param->tx_flow_start_queue_offset;
	cfg_ctx->tx_flow_stop_queue_th =
					cfg_param->tx_flow_stop_queue_th;
}
#endif

#ifdef CONFIG_HL_SUPPORT

#ifdef CONFIG_CREDIT_REP_THROUGH_CREDIT_UPDATE
static inline
void ol_pdev_cfg_credit_update(struct txrx_pdev_cfg_t *cfg_ctx)
{
	cfg_ctx->tx_free_at_download = 1;
	cfg_ctx->credit_update_enabled = 1;
}
#else
static inline
void ol_pdev_cfg_credit_update(struct txrx_pdev_cfg_t *cfg_ctx)
{
	cfg_ctx->tx_free_at_download = 0;
	cfg_ctx->credit_update_enabled = 0;
}
#endif /* CONFIG_CREDIT_REP_THROUGH_CREDIT_UPDATE */

/**
 * ol_pdev_cfg_param_update() - set the high-latency pdev config defaults,
 *				including the tx frame download size used
 *				across the datapath
 * @cfg_ctx: ptr to config parameter for txrx pdev
 *
 * Return: None
 */
static inline
void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
{
	cfg_ctx->is_high_latency = 1;
	/* 802.1Q and SNAP / LLC headers are accounted for elsewhere */
	cfg_ctx->tx_download_size = 1500;
	ol_pdev_cfg_credit_update(cfg_ctx);
}

#else /* CONFIG_HL_SUPPORT */
static inline
void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
{
	/*
	 * Download the payload up to the end of the UDP header in the
	 * IPv4 case; if this value changes, HTT_LL_TX_HDR_SIZE_IP must
	 * be changed accordingly.
	 */
	cfg_ctx->tx_download_size = 16;
}
#endif

#ifdef CONFIG_RX_PN_CHECK_OFFLOAD
static inline
void ol_pdev_cfg_rx_pn_check(struct txrx_pdev_cfg_t *cfg_ctx)
{
	/* Do not do pn check on host */
	cfg_ctx->rx_pn_check = 0;
}
#else
static inline
void ol_pdev_cfg_rx_pn_check(struct txrx_pdev_cfg_t *cfg_ctx)
{
	/* Do pn check on host */
	cfg_ctx->rx_pn_check = 1;
}
#endif /* CONFIG_RX_PN_CHECK_OFFLOAD */

#if CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
static inline
uint8_t ol_defrag_timeout_check(void)
{
	return 1;
}
#else
static inline
uint8_t ol_defrag_timeout_check(void)
{
	return 0;
}
#endif

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
/**
 * ol_cfg_update_del_ack_params() - update delayed ack params
 * @cfg_ctx: cfg context
 * @cfg_param: cfg parameters
 *
 * Return: none
 */
void ol_cfg_update_del_ack_params(struct txrx_pdev_cfg_t *cfg_ctx,
				  struct txrx_pdev_cfg_param_t *cfg_param)
{
	cfg_ctx->del_ack_enable = cfg_param->del_ack_enable;
	cfg_ctx->del_ack_timer_value = cfg_param->del_ack_timer_value;
	cfg_ctx->del_ack_pkt_count = cfg_param->del_ack_pkt_count;
}
#endif

#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
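/**
 * ol_cfg_update_bundle_params() - update tx bundling params
 * @cfg_ctx: cfg context
 * @cfg_param: cfg parameters
 *
 * Return: none
 */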
static inline
void ol_cfg_update_bundle_params(struct txrx_pdev_cfg_t *cfg_ctx,
				 struct txrx_pdev_cfg_param_t *cfg_param)
{
	cfg_ctx->bundle_timer_value = cfg_param->bundle_timer_value;
	cfg_ctx->bundle_size = cfg_param->bundle_size;
}
#else
static inline
void ol_cfg_update_bundle_params(struct txrx_pdev_cfg_t *cfg_ctx,
				 struct txrx_pdev_cfg_param_t *cfg_param)
{
}
#endif

/* FIX THIS -
 * For now, all these configuration parameters are hardcoded.
 * Many of these should actually be determined dynamically instead.
 */

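/**
 * ol_pdev_cfg_attach() - allocate and populate the txrx pdev config
 * @osdev: OS device handle
 * @pcfg_param: ptr to the struct txrx_pdev_cfg_param_t holding the
 *		caller-supplied configuration
 *
 * Return: opaque cdp_cfg handle, or NULL on allocation failure
 */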
struct cdp_cfg *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
{
	struct txrx_pdev_cfg_param_t *cfg_param = pcfg_param;
	struct txrx_pdev_cfg_t *cfg_ctx;
	int i;

	cfg_ctx = qdf_mem_malloc(sizeof(*cfg_ctx));
	if (!cfg_ctx)
		return NULL;

	ol_pdev_cfg_param_update(cfg_ctx);
	ol_pdev_cfg_rx_pn_check(cfg_ctx);

	cfg_ctx->defrag_timeout_check = ol_defrag_timeout_check();
	cfg_ctx->max_peer_id = 511;
	cfg_ctx->max_vdev = CFG_TGT_NUM_VDEV;
	cfg_ctx->pn_rx_fwd_check = 1;
	cfg_ctx->frame_type = wlan_frm_fmt_802_3;
	cfg_ctx->max_thruput_mbps = MAX_THROUGHPUT;
	cfg_ctx->max_nbuf_frags = 1;
	cfg_ctx->vow_config = vow_config;
	cfg_ctx->target_tx_credit = CFG_TGT_NUM_MSDU_DESC;
	cfg_ctx->throttle_period_ms = 40;
	cfg_ctx->dutycycle_level[0] = THROTTLE_DUTY_CYCLE_LEVEL0;
	cfg_ctx->dutycycle_level[1] = THROTTLE_DUTY_CYCLE_LEVEL1;
	cfg_ctx->dutycycle_level[2] = THROTTLE_DUTY_CYCLE_LEVEL2;
	cfg_ctx->dutycycle_level[3] = THROTTLE_DUTY_CYCLE_LEVEL3;
	cfg_ctx->rx_fwd_disabled = 0;
	cfg_ctx->is_packet_log_enabled = 0;
	cfg_ctx->is_full_reorder_offload = cfg_param->is_full_reorder_offload;
#ifdef WLAN_FEATURE_TSF_PLUS
	cfg_ctx->is_ptp_rx_opt_enabled = 0;
#endif
	cfg_ctx->ipa_uc_rsc.uc_offload_enabled =
		cfg_param->is_uc_offload_enabled;
	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param->uc_tx_buffer_count;
	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param->uc_tx_buffer_size;
	cfg_ctx->ipa_uc_rsc.rx_ind_ring_size =
		cfg_param->uc_rx_indication_ring_count;
	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param->uc_tx_partition_base;
	cfg_ctx->enable_rxthread = cfg_param->enable_rxthread;
	cfg_ctx->ip_tcp_udp_checksum_offload =
		cfg_param->ip_tcp_udp_checksum_offload;
	cfg_ctx->p2p_ip_tcp_udp_checksum_offload =
		cfg_param->p2p_ip_tcp_udp_checksum_offload;
	cfg_ctx->nan_tcp_udp_checksumoffload =
		cfg_param->nan_ip_tcp_udp_checksum_offload;
	cfg_ctx->ce_classify_enabled = cfg_param->ce_classify_enabled;
	cfg_ctx->gro_enable = cfg_param->gro_enable;
	cfg_ctx->tso_enable = cfg_param->tso_enable;
	cfg_ctx->lro_enable = cfg_param->lro_enable;
	cfg_ctx->sg_enable = cfg_param->sg_enable;
	cfg_ctx->enable_data_stall_detection =
		cfg_param->enable_data_stall_detection;
	cfg_ctx->enable_flow_steering = cfg_param->enable_flow_steering;
	cfg_ctx->disable_intra_bss_fwd = cfg_param->disable_intra_bss_fwd;
	cfg_ctx->pktlog_buffer_size = cfg_param->pktlog_buffer_size;

	ol_cfg_update_del_ack_params(cfg_ctx, cfg_param);

	ol_cfg_update_bundle_params(cfg_ctx, cfg_param);

	ol_tx_set_flow_control_parameters((struct cdp_cfg *)cfg_ctx, cfg_param);

	for (i = 0; i < QCA_WLAN_AC_ALL; i++) {
		cfg_ctx->ac_specs[i].wrr_skip_weight =
			cfg_param->ac_specs[i].wrr_skip_weight;
		cfg_ctx->ac_specs[i].credit_threshold =
			cfg_param->ac_specs[i].credit_threshold;
		cfg_ctx->ac_specs[i].send_limit =
			cfg_param->ac_specs[i].send_limit;
		cfg_ctx->ac_specs[i].credit_reserve =
			cfg_param->ac_specs[i].credit_reserve;
		cfg_ctx->ac_specs[i].discard_weight =
			cfg_param->ac_specs[i].discard_weight;
	}

	return (struct cdp_cfg *)cfg_ctx;
}

#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE

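/**
 * ol_cfg_get_bundle_timer_value() - get the tx bundling timer value
 * @cfg_pdev: pdev handle
 *
 * Return: tx bundle timer value
 */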
int ol_cfg_get_bundle_timer_value(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->bundle_timer_value;
}

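/**
 * ol_cfg_get_bundle_size() - get the tx bundle size
 * @cfg_pdev: pdev handle
 *
 * Return: tx bundle size
 */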
int ol_cfg_get_bundle_size(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->bundle_size;
}
#endif

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
/**
 * ol_cfg_get_del_ack_timer_value() - get delayed ack timer value
 * @cfg_pdev: pdev handle
 *
 * Return: timer value
 */
int ol_cfg_get_del_ack_timer_value(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->del_ack_timer_value;
}

/**
 * ol_cfg_get_del_ack_enable_value() - get delayed ack enable value
 * @cfg_pdev: pdev handle
 *
 * Return: enable/disable
 */
bool ol_cfg_get_del_ack_enable_value(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->del_ack_enable;
}

/**
 * ol_cfg_get_del_ack_count_value() - get delayed ack count value
 * @cfg_pdev: pdev handle
 *
 * Return: count value
 */
int ol_cfg_get_del_ack_count_value(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->del_ack_pkt_count;
}
#endif

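/**
 * ol_cfg_is_high_latency() - query whether the pdev is high latency
 * @cfg_pdev: pdev handle
 *
 * Return: 1 if the datapath is configured as high latency, 0 otherwise
 */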
int ol_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->is_high_latency;
}

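/**
 * ol_cfg_is_credit_update_enabled() - query whether tx credit update
 *				       reporting is enabled
 * @cfg_pdev: pdev handle
 *
 * Return: 1 if credit update is enabled, 0 otherwise
 */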
int ol_cfg_is_credit_update_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->credit_update_enabled;
}

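/**
 * ol_cfg_max_peer_id() - get the maximum peer ID
 * @cfg_pdev: pdev handle
 *
 * Return: the maximum peer ID supported by the configuration
 */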
int ol_cfg_max_peer_id(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
	/*
	 * TBDXXX - this value must match the peer table
	 * size allocated in FW
	 */
	return cfg->max_peer_id;
}

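/**
 * ol_cfg_max_vdevs() - get the maximum number of virtual devices
 * @cfg_pdev: pdev handle
 *
 * Return: maximum number of vdevs supported
 */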
int ol_cfg_max_vdevs(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->max_vdev;
}

int ol_cfg_rx_pn_check(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->rx_pn_check;
}

int ol_cfg_rx_fwd_check(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->pn_rx_fwd_check;
}

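/**
 * ol_set_cfg_rx_fwd_disabled() - set whether rx forwarding is disabled
 * @cfg_pdev: pdev handle
 * @disable_rx_fwd: 1 to disable rx forwarding, 0 to enable it
 *
 * Return: none
 */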
void ol_set_cfg_rx_fwd_disabled(struct cdp_cfg *cfg_pdev,
		uint8_t disable_rx_fwd)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	cfg->rx_fwd_disabled = disable_rx_fwd;
}

void ol_set_cfg_packet_log_enabled(struct cdp_cfg *cfg_pdev, uint8_t val)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	cfg->is_packet_log_enabled = val;
}

uint8_t ol_cfg_is_packet_log_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->is_packet_log_enabled;
}

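/**
 * ol_cfg_rx_fwd_disabled() - query whether rx forwarding is disabled
 * @cfg_pdev: pdev handle
 *
 * On Windows (ATHR_WIN_NWF) forwarding is always left to the OS, so
 * the driver unconditionally reports it as disabled.
 *
 * Return: 1 if rx forwarding is disabled, 0 otherwise
 */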
int ol_cfg_rx_fwd_disabled(struct cdp_cfg *cfg_pdev)
{
#if defined(ATHR_WIN_NWF)
	/* for Windows, let the OS handle the forwarding */
	return 1;
#else
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->rx_fwd_disabled;
#endif
}

int ol_cfg_rx_fwd_inter_bss(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->rx_fwd_inter_bss;
}

enum wlan_frm_fmt ol_cfg_frame_type(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->frame_type;
}

int ol_cfg_max_thruput_mbps(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->max_thruput_mbps;
}

int ol_cfg_netbuf_frags_max(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->max_nbuf_frags;
}

int ol_cfg_tx_free_at_download(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->tx_free_at_download;
}

void ol_cfg_set_tx_free_at_download(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	cfg->tx_free_at_download = 1;
}

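/**
 * ol_cfg_target_tx_credit() - get the target tx credit
 * @cfg_pdev: pdev handle
 *
 * For high-latency builds this is the configured credit as-is; for
 * low-latency builds, the credit reserved for VoW stations (decoded
 * from vow_config) is added on top of the base credit.
 *
 * Return: number of target tx credits
 */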
#ifdef CONFIG_HL_SUPPORT
uint16_t ol_cfg_target_tx_credit(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->target_tx_credit;
}
#else
uint16_t ol_cfg_target_tx_credit(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
	uint16_t rc;
	uint16_t vow_max_sta = (cfg->vow_config & 0xffff0000) >> 16;
	uint16_t vow_max_desc_persta = cfg->vow_config & 0x0000ffff;

	rc = (cfg->target_tx_credit + (vow_max_sta * vow_max_desc_persta));

	return rc;
}
#endif

int ol_cfg_tx_download_size(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->tx_download_size;
}

int ol_cfg_rx_host_defrag_timeout_duplicate_check(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->defrag_timeout_check;
}

int ol_cfg_throttle_period_ms(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->throttle_period_ms;
}

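/**
 * ol_cfg_throttle_duty_cycle_level() - get a throttle duty cycle level
 * @cfg_pdev: pdev handle
 * @level: duty cycle level index; only levels 0-3 are populated at
 *	   attach time, and the caller must pass a valid index
 *
 * Return: duty cycle value for the given level
 */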
int ol_cfg_throttle_duty_cycle_level(struct cdp_cfg *cfg_pdev, int level)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->dutycycle_level[level];
}

#ifdef CONFIG_HL_SUPPORT
int ol_cfg_is_full_reorder_offload(struct cdp_cfg *cfg_pdev)
{
	return 0;
}
#else
int ol_cfg_is_full_reorder_offload(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->is_full_reorder_offload;
}
#endif

#ifdef WLAN_FEATURE_TSF_PLUS
void ol_set_cfg_ptp_rx_opt_enabled(struct cdp_cfg *cfg_pdev, u_int8_t val)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	cfg->is_ptp_rx_opt_enabled = val;
}

u_int8_t ol_cfg_is_ptp_rx_opt_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->is_ptp_rx_opt_enabled;
}
#endif

/**
 * ol_cfg_is_rx_thread_enabled() - query whether the rx thread is enabled
 * @cfg_pdev: handle to the physical device
 *
 * Return: 1 - enable, 0 - disable
 */
int ol_cfg_is_rx_thread_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->enable_rxthread;
}

#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/**
 * ol_cfg_get_tx_flow_stop_queue_th() - return stop queue threshold
 * @cfg_pdev: handle to the physical device
 *
 * Return: stop queue threshold
 */
int ol_cfg_get_tx_flow_stop_queue_th(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->tx_flow_stop_queue_th;
}

/**
 * ol_cfg_get_tx_flow_start_queue_offset() - return start queue offset
 * @cfg_pdev: handle to the physical device
 *
 * Return: start queue offset
 */
int ol_cfg_get_tx_flow_start_queue_offset(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->tx_flow_start_queue_offset;
}
#endif

#ifdef IPA_OFFLOAD
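/**
 * ol_cfg_ipa_uc_offload_enabled() - query whether IPA uC offload is enabled
 * @cfg_pdev: pdev handle
 *
 * Return: 1 if IPA uC offload is enabled, 0 otherwise
 */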
unsigned int ol_cfg_ipa_uc_offload_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return (unsigned int)cfg->ipa_uc_rsc.uc_offload_enabled;
}

unsigned int ol_cfg_ipa_uc_tx_buf_size(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->ipa_uc_rsc.tx_buf_size;
}

unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->ipa_uc_rsc.tx_max_buf_cnt;
}

unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->ipa_uc_rsc.rx_ind_ring_size;
}

unsigned int ol_cfg_ipa_uc_tx_partition_base(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->ipa_uc_rsc.tx_partition_base;
}

void ol_cfg_set_ipa_uc_tx_partition_base(struct cdp_cfg *cfg_pdev, uint32_t val)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	cfg->ipa_uc_rsc.tx_partition_base = val;
}
#endif /* IPA_OFFLOAD */

/**
 * ol_cfg_is_ce_classify_enabled() - query whether CE classification is
 *				     enabled or disabled
 * @cfg_pdev: handle to the physical device
 *
 * Return: 1 - enabled, 0 - disabled
 */
bool ol_cfg_is_ce_classify_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->ce_classify_enabled;
}

/**
 * ol_cfg_get_wrr_skip_weight() - query the wrr_skip_weight parameter
 * @pdev: handle to the physical device
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: wrr_skip_weight for the specified ac
 */
int ol_cfg_get_wrr_skip_weight(struct cdp_cfg *pdev, int ac)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;

	if (ac >= QCA_WLAN_AC_BE && ac <= QCA_WLAN_AC_VO)
		return cfg->ac_specs[ac].wrr_skip_weight;

	return 0;
}

/**
 * ol_cfg_get_credit_threshold() - query the credit_threshold parameter
 * @pdev: handle to the physical device
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: credit_threshold for the specified ac
 */
uint32_t ol_cfg_get_credit_threshold(struct cdp_cfg *pdev, int ac)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;

	if (ac >= QCA_WLAN_AC_BE && ac <= QCA_WLAN_AC_VO)
		return cfg->ac_specs[ac].credit_threshold;

	return 0;
}

/**
 * ol_cfg_get_send_limit() - query the send_limit parameter
 * @pdev: handle to the physical device
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: send_limit for the specified ac
 */
uint16_t ol_cfg_get_send_limit(struct cdp_cfg *pdev, int ac)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;

	if (ac >= QCA_WLAN_AC_BE && ac <= QCA_WLAN_AC_VO)
		return cfg->ac_specs[ac].send_limit;

	return 0;
}

/**
 * ol_cfg_get_credit_reserve() - query the credit_reserve parameter
 * @pdev: handle to the physical device
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: credit_reserve for the specified ac
 */
int ol_cfg_get_credit_reserve(struct cdp_cfg *pdev, int ac)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;

	if (ac >= QCA_WLAN_AC_BE && ac <= QCA_WLAN_AC_VO)
		return cfg->ac_specs[ac].credit_reserve;

	return 0;
}

/**
 * ol_cfg_get_discard_weight() - query the discard_weight parameter
 * @pdev: handle to the physical device
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: discard_weight for the specified ac
 */
int ol_cfg_get_discard_weight(struct cdp_cfg *pdev, int ac)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;

	if (ac >= QCA_WLAN_AC_BE && ac <= QCA_WLAN_AC_VO)
		return cfg->ac_specs[ac].discard_weight;

	return 0;
}