xref: /wlan-dirver/qcacld-3.0/core/dp/txrx/ol_cfg.c (revision f65bd4cf8fca8a30dcc78601a42879626d6bc7ee)
/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <ol_cfg.h>
#include <ol_if_athvar.h>
#include <cdp_txrx_cfg.h>

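/*
 * Note (inferred from the mask/shift arithmetic in ol_cfg_target_tx_credit()
 * below, not stated elsewhere in this file): vow_config packs two 16-bit
 * fields, the VOW max station count in bits 31..16 and the per-station
 * descriptor count in bits 15..0.
 */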
unsigned int vow_config = 0;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_set_flow_control_parameters() - set flow control parameters
 * @cfg_ctx: cfg context
 * @cfg_param: cfg parameters
 *
 * Return: none
 */
void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
	struct txrx_pdev_cfg_param_t *cfg_param)
{
	cfg_ctx->tx_flow_start_queue_offset =
					cfg_param->tx_flow_start_queue_offset;
	cfg_ctx->tx_flow_stop_queue_th =
					cfg_param->tx_flow_stop_queue_th;
}
#else
void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
	struct txrx_pdev_cfg_param_t *cfg_param)
{
}
#endif
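
/*
 * A rough sketch of how these two values are consumed (hedged; the exact
 * pause/resume policy lives in the flow control code, not here): tx queues
 * are typically paused when the free tx descriptor count drops below
 * tx_flow_stop_queue_th, and resumed only after the pool recovers by a
 * further tx_flow_start_queue_offset descriptors, which provides
 * hysteresis. The datapath reads both values back through
 * ol_cfg_get_tx_flow_stop_queue_th() and
 * ol_cfg_get_tx_flow_start_queue_offset() below.
 */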

#ifdef CONFIG_HL_SUPPORT

/**
 * ol_pdev_cfg_param_update() - set the tx frame download size for the
 *				txrx pdev; used across the datapath
 * @cfg_ctx: ptr to config parameter for txrx pdev
 *
 * Return: None
 */
static inline
void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
{
	cfg_ctx->is_high_latency = 1;
	/* 802.1Q and SNAP / LLC headers are accounted for elsewhere */
	cfg_ctx->tx_download_size = 1500;
	cfg_ctx->tx_free_at_download = 0;
}
#else

static inline
void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
{
	/*
	 * If this size changes, HTT_LL_TX_HDR_SIZE_IP must be updated
	 * accordingly. It covers the payload up to the end of the UDP
	 * header in the IPv4 case.
	 */
	cfg_ctx->tx_download_size = 16;
}
#endif
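
/*
 * A minimal sketch of the intent behind the two variants above (hedged,
 * inferred from the values rather than stated in this file): in the
 * high-latency (HL) configuration the host downloads up to a full
 * 1500-byte Ethernet-MTU frame to the target, while in the low-latency
 * (LL) configuration only the first 16 bytes of each frame accompany the
 * tx descriptor, enough for the target to begin header processing while
 * the remainder of the payload is fetched from host memory.
 */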

#if CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
static inline
uint8_t ol_defrag_timeout_check(void)
{
	return 1;
}
#else
static inline
uint8_t ol_defrag_timeout_check(void)
{
	return 0;
}
#endif

/* FIX THIS -
 * For now, all these configuration parameters are hardcoded.
 * Many of these should actually be determined dynamically instead.
 */

/**
 * ol_pdev_cfg_attach() - setup configuration parameters
 * @osdev: OS handle needed as an argument for some OS primitives
 * @pcfg_param: configuration parameters
 *
 * Allocate the configuration context that will be used across the data path
 *
 * Return: the control device object
 */
void *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
{
	struct txrx_pdev_cfg_param_t *cfg_param = pcfg_param;
	struct txrx_pdev_cfg_t *cfg_ctx;

	cfg_ctx = qdf_mem_malloc(sizeof(*cfg_ctx));
	if (!cfg_ctx) {
		printk(KERN_ERR "cfg ctx allocation failed\n");
		return NULL;
	}

	ol_pdev_cfg_param_update(cfg_ctx);

	/* temporarily disabled PN check for Riva/Pronto */
	cfg_ctx->rx_pn_check = 1;
	cfg_ctx->defrag_timeout_check = ol_defrag_timeout_check();
	cfg_ctx->max_peer_id = 511;
	cfg_ctx->max_vdev = CFG_TGT_NUM_VDEV;
	cfg_ctx->pn_rx_fwd_check = 1;
	cfg_ctx->frame_type = wlan_frm_fmt_802_3;
	cfg_ctx->max_thruput_mbps = 800;
	cfg_ctx->max_nbuf_frags = 1;
	cfg_ctx->vow_config = vow_config;
	cfg_ctx->target_tx_credit = CFG_TGT_NUM_MSDU_DESC;
	cfg_ctx->throttle_period_ms = 40;
	cfg_ctx->dutycycle_level[0] = THROTTLE_DUTY_CYCLE_LEVEL0;
	cfg_ctx->dutycycle_level[1] = THROTTLE_DUTY_CYCLE_LEVEL1;
	cfg_ctx->dutycycle_level[2] = THROTTLE_DUTY_CYCLE_LEVEL2;
	cfg_ctx->dutycycle_level[3] = THROTTLE_DUTY_CYCLE_LEVEL3;
	cfg_ctx->rx_fwd_disabled = 0;
	cfg_ctx->is_packet_log_enabled = 0;
	cfg_ctx->is_full_reorder_offload = cfg_param->is_full_reorder_offload;
	cfg_ctx->ipa_uc_rsc.uc_offload_enabled =
		cfg_param->is_uc_offload_enabled;
	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param->uc_tx_buffer_count;
	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param->uc_tx_buffer_size;
	cfg_ctx->ipa_uc_rsc.rx_ind_ring_size =
		cfg_param->uc_rx_indication_ring_count;
	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param->uc_tx_partition_base;
	cfg_ctx->enable_rxthread = cfg_param->enable_rxthread;
	cfg_ctx->ip_tcp_udp_checksum_offload =
		cfg_param->ip_tcp_udp_checksum_offload;
	cfg_ctx->ce_classify_enabled = cfg_param->ce_classify_enabled;

	ol_tx_set_flow_control_parameters(cfg_ctx, cfg_param);
	return (void *)cfg_ctx;
}
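
/*
 * Illustrative usage sketch (hypothetical caller; the variable names and
 * surrounding error handling are assumptions, not taken from this driver):
 *
 *	struct txrx_pdev_cfg_param_t cfg_param = { 0 };
 *	void *pdev_cfg;
 *
 *	cfg_param.is_full_reorder_offload = 1;
 *	pdev_cfg = ol_pdev_cfg_attach(osdev, &cfg_param);
 *	if (!pdev_cfg)
 *		return -ENOMEM;	(allocation failed)
 *
 * The returned pointer is then passed back, as an ol_pdev_handle, to the
 * ol_cfg_*() accessors below.
 */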

int ol_cfg_is_high_latency(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->is_high_latency;
}

int ol_cfg_max_peer_id(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	/*
	 * TBDXXX - this value must match the peer table
	 * size allocated in FW
	 */
	return cfg->max_peer_id;
}

int ol_cfg_max_vdevs(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->max_vdev;
}

int ol_cfg_rx_pn_check(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->rx_pn_check;
}

int ol_cfg_rx_fwd_check(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->pn_rx_fwd_check;
}

/**
 * ol_set_cfg_rx_fwd_disabled() - set rx fwd disable/enable
 * @ppdev: handle to the physical device
 * @disable_rx_fwd: 1 -> no rx->tx forwarding, 0 -> rx->tx forwarding allowed
 *
 * Choose whether to forward rx frames to tx (where applicable) within the
 * WLAN driver, or to leave all forwarding up to the operating system.
 * Currently only intra-bss forwarding is supported.
 */
void ol_set_cfg_rx_fwd_disabled(void *ppdev, uint8_t disable_rx_fwd)
{
	ol_pdev_handle pdev = ppdev;
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	cfg->rx_fwd_disabled = disable_rx_fwd;
}

/**
 * ol_set_cfg_packet_log_enabled() - set the packet log config in the HTT
 * config based on the CFG ini configuration
 * @ppdev: handle to the physical device
 * @val: 0 - disable, 1 - enable
 */
void ol_set_cfg_packet_log_enabled(void *ppdev, uint8_t val)
{
	ol_pdev_handle pdev = ppdev;
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	cfg->is_packet_log_enabled = val;
}

uint8_t ol_cfg_is_packet_log_enabled(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->is_packet_log_enabled;
}

int ol_cfg_rx_fwd_disabled(ol_pdev_handle pdev)
{
#if defined(ATHR_WIN_NWF)
	/* for Windows, let the OS handle the forwarding */
	return 1;
#else
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->rx_fwd_disabled;
#endif
}

int ol_cfg_rx_fwd_inter_bss(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->rx_fwd_inter_bss;
}

enum wlan_frm_fmt ol_cfg_frame_type(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->frame_type;
}

int ol_cfg_max_thruput_mbps(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->max_thruput_mbps;
}

int ol_cfg_netbuf_frags_max(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->max_nbuf_frags;
}

int ol_cfg_tx_free_at_download(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->tx_free_at_download;
}

void ol_cfg_set_tx_free_at_download(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	cfg->tx_free_at_download = 1;
}

#ifdef CONFIG_HL_SUPPORT
uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->target_tx_credit;
}
#else

uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	uint16_t rc;
	uint16_t vow_max_sta = (cfg->vow_config & 0xffff0000) >> 16;
	uint16_t vow_max_desc_persta = cfg->vow_config & 0x0000ffff;

	rc = cfg->target_tx_credit + (vow_max_sta * vow_max_desc_persta);

	return rc;
}
#endif
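
/*
 * Worked example for the non-HL path above (illustrative numbers): with
 * vow_config = 0x00080010, vow_max_sta = 0x0008 = 8 stations and
 * vow_max_desc_persta = 0x0010 = 16 descriptors per station, so
 * 8 * 16 = 128 credits are added on top of target_tx_credit. With the
 * default vow_config of 0, the result is simply target_tx_credit.
 */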

int ol_cfg_tx_download_size(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->tx_download_size;
}

int ol_cfg_rx_host_defrag_timeout_duplicate_check(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->defrag_timeout_check;
}

int ol_cfg_throttle_period_ms(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->throttle_period_ms;
}

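/*
 * Note (added documentation; the bounds expectation is inferred from
 * ol_pdev_cfg_attach(), which populates exactly four duty cycle levels):
 * callers of ol_cfg_throttle_duty_cycle_level() are expected to pass a
 * level in the range 0..3, since the index is not range-checked here.
 */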
int ol_cfg_throttle_duty_cycle_level(ol_pdev_handle pdev, int level)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->dutycycle_level[level];
}

int ol_cfg_is_full_reorder_offload(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->is_full_reorder_offload;
}

/**
 * ol_cfg_is_rx_thread_enabled() - return whether the rx thread is enabled
 * @pdev: handle to the physical device
 *
 * Return: 1 - enabled, 0 - disabled
 */
int ol_cfg_is_rx_thread_enabled(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->enable_rxthread;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_cfg_get_tx_flow_stop_queue_th() - return stop queue threshold
 * @pdev: handle to the physical device
 *
 * Return: stop queue threshold
 */
int ol_cfg_get_tx_flow_stop_queue_th(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->tx_flow_stop_queue_th;
}

/**
 * ol_cfg_get_tx_flow_start_queue_offset() - return start queue offset
 * @pdev: handle to the physical device
 *
 * Return: start queue offset
 */
int ol_cfg_get_tx_flow_start_queue_offset(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->tx_flow_start_queue_offset;
}

#endif

#ifdef IPA_OFFLOAD
unsigned int ol_cfg_ipa_uc_offload_enabled(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return (unsigned int)cfg->ipa_uc_rsc.uc_offload_enabled;
}

unsigned int ol_cfg_ipa_uc_tx_buf_size(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ipa_uc_rsc.tx_buf_size;
}

unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ipa_uc_rsc.tx_max_buf_cnt;
}

unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ipa_uc_rsc.rx_ind_ring_size;
}

unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ipa_uc_rsc.tx_partition_base;
}

void ol_cfg_set_ipa_uc_tx_partition_base(void *pdev, uint32_t val)
{
	struct txrx_pdev_cfg_t *cfg = pdev;
	cfg->ipa_uc_rsc.tx_partition_base = val;
}
#endif /* IPA_OFFLOAD */

/**
 * ol_cfg_is_ce_classify_enabled() - return whether CE classification is
 *				     enabled or disabled
 * @pdev: handle to the physical device
 *
 * Return: true - enabled, false - disabled
 */
bool ol_cfg_is_ce_classify_enabled(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ce_classify_enabled;
}