xref: /wlan-dirver/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c (revision bb8e47c200751dd274982fa7d00566e04456aa23)
1 /*
2  * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: wlan_hdd_napi.c
21  *
22  * WLAN HDD NAPI interface implementation
23  */
24 #include <linux/smp.h> /* get_cpu */
25 
26 #include "wlan_hdd_napi.h"
27 #include "cds_api.h"       /* cds_get_context */
28 #include "hif.h"           /* hif_map_service...*/
29 #include "wlan_hdd_main.h" /* hdd_err/warn... */
30 #include "qdf_types.h"     /* QDF_MODULE_ID_... */
31 #include "ce_api.h"
32 
33 /*  guaranteed to be initialized to zero/NULL by the standard */
34 static struct qca_napi_data *hdd_napi_ctx;
35 
36 /**
37  * hdd_napi_get_all() - return the whole NAPI structure from HIF
38  *
39  * Gets to the data structure common to all NAPI instances.
40  *
41  * Return:
42  *  NULL  : probably NAPI not initialized yet.
43  *  <addr>: the address of the whole NAPI structure
44  */
45 struct qca_napi_data *hdd_napi_get_all(void)
46 {
47 	struct qca_napi_data *rp = NULL;
48 	struct hif_opaque_softc *hif;
49 
50 	NAPI_DEBUG("-->");
51 
52 	hif = cds_get_context(QDF_MODULE_ID_HIF);
53 	if (unlikely(NULL == hif))
54 		QDF_ASSERT(NULL != hif); /* WARN */
55 	else
56 		rp = hif_napi_get_all(hif);
57 
58 	NAPI_DEBUG("<-- [addr=%pK]", rp);
59 	return rp;
60 }
61 
62 /**
63  * hdd_napi_get_map() - get a copy of napi pipe map
64  *
65  * Return:
66  *  uint32_t  : copy of pipe map
67  */
68 static uint32_t hdd_napi_get_map(void)
69 {
70 	uint32_t map = 0;
71 
72 	NAPI_DEBUG("-->");
73 	/* cache once, use forever */
74 	if (hdd_napi_ctx == NULL)
75 		hdd_napi_ctx = hdd_napi_get_all();
76 	if (hdd_napi_ctx != NULL)
77 		map = hdd_napi_ctx->ce_map;
78 
79 	NAPI_DEBUG("<-- [map=0x%08x]", map);
80 	return map;
81 }
82 
83 /**
84  * hdd_napi_create() - creates the NAPI structures for a given netdev
85  *
86  * Creates NAPI instances. This function is called
87  * unconditionally during initialization. It creates
88  * napi structures through the proper HTC/HIF calls.
89  * The structures are disabled on creation.
90  *
91  * Return:
92  *   single-queue: <0: err, >0=id, 0 (should not happen)
93  *   multi-queue: bitmap of created instances (0: none)
94  */
95 int hdd_napi_create(void)
96 {
97 	struct  hif_opaque_softc *hif_ctx;
98 	int     rc = 0;
99 	struct hdd_context *hdd_ctx;
100 	uint8_t feature_flags = 0;
101 
102 	NAPI_DEBUG("-->");
103 
104 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
105 	if (unlikely(NULL == hif_ctx)) {
106 		QDF_ASSERT(NULL != hif_ctx);
107 		rc = -EFAULT;
108 	} else {
109 
110 		feature_flags = QCA_NAPI_FEATURE_CPU_CORRECTION |
111 				QCA_NAPI_FEATURE_IRQ_BLACKLISTING |
112 				QCA_NAPI_FEATURE_CORE_CTL_BOOST;
113 
114 		rc = hif_napi_create(hif_ctx, hdd_napi_poll,
115 				     QCA_NAPI_BUDGET,
116 				     QCA_NAPI_DEF_SCALE,
117 				     feature_flags);
118 		if (rc < 0) {
119 			hdd_err("ERR(%d) creating NAPI instances",
120 				rc);
121 		} else {
122 			hdd_info("napi instances were created. Map=0x%x", rc);
123 			hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
124 			if (unlikely(NULL == hdd_ctx)) {
125 				QDF_ASSERT(0);
126 				rc = -EFAULT;
127 			} else {
128 				rc = hdd_napi_event(NAPI_EVT_INI_FILE,
129 					(void *)hdd_ctx->napi_enable);
130 			}
131 		}
132 
133 	}
134 	NAPI_DEBUG("<-- [rc=%d]", rc);
135 
136 	return rc;
137 }
138 
139 /**
140  * hdd_napi_destroy() - destroys the NAPI structures for a given netdev
141  * @force: if set, will force-disable the instance before _del'ing
142  *
143  * Destroy NAPI instances. This function is called
144  * unconditionally during module removal. It destroy
145  * napi structures through the proper HTC/HIF calls.
146  *
147  * Return:
148  *    number of NAPI instances destroyed
149  */
150 int hdd_napi_destroy(int force)
151 {
152 	int rc = 0;
153 	int i;
154 	uint32_t hdd_napi_map = hdd_napi_get_map();
155 
156 	NAPI_DEBUG("--> (force=%d)", force);
157 	if (hdd_napi_map) {
158 		struct hif_opaque_softc *hif_ctx;
159 
160 		hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
161 		if (unlikely(NULL == hif_ctx))
162 			QDF_ASSERT(NULL != hif_ctx);
163 		else
164 			for (i = 0; i < CE_COUNT_MAX; i++)
165 				if (hdd_napi_map & (0x01 << i)) {
166 					if (0 <= hif_napi_destroy(
167 						    hif_ctx,
168 						    NAPI_PIPE2ID(i), force)) {
169 						rc++;
170 						hdd_napi_map &= ~(0x01 << i);
171 					} else
172 						hdd_err("cannot destroy napi %d: (pipe:%d), f=%d\n",
173 							i,
174 							NAPI_PIPE2ID(i), force);
175 				}
176 	} else {
177 		struct hif_opaque_softc *hif_ctx;
178 
179 		hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
180 
181 		if (unlikely(NULL == hif_ctx))
182 			QDF_ASSERT(NULL != hif_ctx);
183 		else
184 			rc = hif_napi_cpu_deinit(hif_ctx);
185 	}
186 
187 	/* if all instances are removed, it is likely that hif_context has been
188 	 * removed as well, so the cached value of the napi context also needs
189 	 * to be removed
190 	 */
191 	if (force)
192 		QDF_ASSERT(hdd_napi_map == 0);
193 	if (0 == hdd_napi_map)
194 		hdd_napi_ctx = NULL;
195 
196 	NAPI_DEBUG("<-- [rc=%d]", rc);
197 	return rc;
198 }
199 
200 /**
201  * hdd_napi_enabled() - checks if NAPI is enabled (for a given id)
202  * @id: the id of the NAPI to check (any= -1)
203  *
204  * Return:
205  *   int: 0  = false (NOT enabled)
206  *        !0 = true  (enabbled)
207  */
208 int hdd_napi_enabled(int id)
209 {
210 	struct hif_opaque_softc *hif;
211 	int rc = 0; /* NOT enabled */
212 
213 	hif = cds_get_context(QDF_MODULE_ID_HIF);
214 	if (unlikely(NULL == hif))
215 		QDF_ASSERT(hif != NULL); /* WARN_ON; rc = 0 */
216 	else if (-1 == id)
217 		rc = hif_napi_enabled(hif, id);
218 	else
219 		rc = hif_napi_enabled(hif, NAPI_ID2PIPE(id));
220 	return rc;
221 }
222 
223 /**
224  * hdd_napi_event() - relay the event detected by HDD to HIF NAPI event handler
225  * @event: event code
226  * @data : event-specific auxiliary data
227  *
228  * See function documentation in hif_napi.c::hif_napi_event for list of events
229  * and how each of them is handled.
230  *
231  * Return:
232  *  < 0: error code
233  *  = 0: event handled successfully
234  */
235 int hdd_napi_event(enum qca_napi_event event, void *data)
236 {
237 	int rc = -EFAULT;  /* assume err */
238 	struct hif_opaque_softc *hif;
239 
240 	NAPI_DEBUG("-->(event=%d, aux=%pK)", event, data);
241 
242 	hif = cds_get_context(QDF_MODULE_ID_HIF);
243 	if (unlikely(NULL == hif))
244 		QDF_ASSERT(hif != NULL);
245 	else
246 		rc = hif_napi_event(hif, event, data);
247 
248 	NAPI_DEBUG("<--[rc=%d]", rc);
249 	return rc;
250 }
251 
252 #if defined HELIUMPLUS && defined MSM_PLATFORM
253 /**
254  * hdd_napi_perfd_cpufreq() - set/reset min CPU freq for cores
255  * @req_state:  high/low
256  *
257  * Send a message to cnss-daemon through netlink. cnss-daemon,
258  * in turn, sends a message to perf-daemon.
259  * If freq > 0, this is a set request. It sets the min frequency of the
260  * cores of the specified cluster to provided freq value (in KHz).
261  * If freq == 0, then the freq lock is removed (and frequency returns to
262  * system default).
263  *
264  * Semantical Alert:
265  * There can be at most one lock active at a time. Each "set" request must
266  * be followed by a "reset" request. Perfd behaviour is undefined otherwise.
267  *
268  * Return: == 0: netlink message sent to cnss-daemon
269  *         <  0: failure to send the message
270  */
271 static int hdd_napi_perfd_cpufreq(enum qca_napi_tput_state req_state)
272 {
273 	int rc = 0;
274 	struct wlan_core_minfreq req;
275 	struct hdd_context *hdd_ctx;
276 
277 	NAPI_DEBUG("-> (%d)", req_state);
278 
279 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
280 	if (unlikely(hdd_ctx == NULL)) {
281 		hdd_err("cannot get hdd_context");
282 		rc = -EFAULT;
283 		goto hnpc_ret;
284 	}
285 
286 	switch (req_state) {
287 	case QCA_NAPI_TPUT_LO:
288 		req.magic    = WLAN_CORE_MINFREQ_MAGIC;
289 		req.reserved = 0; /* unused */
290 		req.coremask = 0; /* not valid */
291 		req.freq     = 0; /* reset */
292 		break;
293 	case QCA_NAPI_TPUT_HI:
294 		req.magic    = WLAN_CORE_MINFREQ_MAGIC;
295 		req.reserved = 0; /* unused */
296 		req.coremask = 0x0f0; /* perf cluster */
297 		req.freq     = 700;   /* KHz */
298 		break;
299 	default:
300 		hdd_err("invalid req_state (%d)", req_state);
301 		rc = -EINVAL;
302 		goto hnpc_ret;
303 	} /* switch */
304 
305 	NAPI_DEBUG("CPU min freq to %d",
306 		   (req.freq == 0)?"Resetting":"Setting", req.freq);
307 	/* the following service function returns void */
308 	wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index,
309 				WLAN_SVC_CORE_MINFREQ,
310 				&req, sizeof(struct wlan_core_minfreq));
311 hnpc_ret:
312 	NAPI_DEBUG("<--[rc=%d]", rc);
313 	return rc;
314 }
315 
/**
 * hdd_napi_apply_throughput_policy() - implement the throughput action policy
 * @hddctx:     HDD context
 * @tx_packets: number of tx packets in the last interval
 * @rx_packets: number of rx packets in the last interval
 *
 * Called by hdd_bus_bw_compute_cb, checks the number of packets in the last
 * interval, and determines the desired napi throughput state (HI/LO). If
 * the desired state is different from the current, then it invokes the
 * event handler to switch to the desired state.
 *
 * The policy implementation is limited to this function and
 * The current policy is: determine the NAPI mode based on the condition:
 *      (total number of packets > medium threshold)
 * - tx packets are included because:
 *   a- tx-completions arrive at one of the rx CEs
 *   b- in TCP, a lot of TX implies ~(tx/2) rx (ACKs)
 *   c- so that we can use the same normalized criteria in ini file
 * - medium-threshold (default: 500 packets / 10 ms), because
 *   we would like to be more reactive.
 *
 * Return: 0 : no action taken, or action return code
 *         !0: error, or action error code
 */
/* count of bus-bandwidth timer intervals to skip before applying the policy;
 * armed by hdd_napi_serialize() so the policy is deferred by one interval
 * right after de-serialization
 */
static int napi_tput_policy_delay;
int hdd_napi_apply_throughput_policy(struct hdd_context *hddctx,
				     uint64_t tx_packets,
				     uint64_t rx_packets)
{
	int rc = 0;
	uint64_t packets = tx_packets + rx_packets;
	enum qca_napi_tput_state req_state;
	struct qca_napi_data *napid = hdd_napi_get_all();
	int enabled;

	NAPI_DEBUG("-->%s(tx=%lld, rx=%lld)", __func__, tx_packets, rx_packets);

	/* defensively clamp: delay counter should never go negative */
	if (unlikely(napi_tput_policy_delay < 0))
		napi_tput_policy_delay = 0;
	if (napi_tput_policy_delay > 0) {
		NAPI_DEBUG("%s: delaying policy; delay-count=%d",
			  __func__, napi_tput_policy_delay);
		napi_tput_policy_delay--;

		/* make sure the next timer call calls us */
		hddctx->cur_vote_level = -1;

		return rc;
	}

	if (!napid) {
		hdd_err("ERR: napid NULL");
		return rc;
	}

	enabled = hdd_napi_enabled(HDD_NAPI_ANY);
	if (!enabled) {
		hdd_err("ERR: napi not enabled");
		return rc;
	}

	/* policy: HI when total packet count exceeds the high threshold */
	if (packets > hddctx->config->busBandwidthHighThreshold)
		req_state = QCA_NAPI_TPUT_HI;
	else
		req_state = QCA_NAPI_TPUT_LO;

	if (req_state != napid->napi_mode) {
		/* [re]set the floor frequency of high cluster */
		rc = hdd_napi_perfd_cpufreq(req_state);
		/* blacklist/boost_mode on/off
		 * NOTE(review): the rc from hdd_napi_perfd_cpufreq() above is
		 * overwritten here, so a cpufreq failure is silently ignored —
		 * confirm this is intended.
		 * The enum value is smuggled through the void* argument
		 * (value-in-pointer), not a real address.
		 */
		rc = hdd_napi_event(NAPI_EVT_TPUT_STATE, (void *)req_state);
	}
	return rc;
}
390 
391 /**
392  * hdd_napi_serialize() - serialize all NAPI activities
393  * @is_on: 1="serialize" or 0="de-serialize"
394  *
395  * Start/stop "serial-NAPI-mode".
396  * NAPI serial mode describes a state where all NAPI operations are forced to be
397  * run serially. This is achieved by ensuring all NAPI instances are run on the
398  * same CPU, so forced to be serial.
399  * NAPI life-cycle:
400  * - Interrupt is received for a given CE.
401  * - In the ISR, the interrupt is masked and corresponding NAPI instance
402  *   is scheduled, to be run as a bottom-half.
403  * - Bottom-half starts with a poll call (by the net_rx softirq). There may be
404  *   one of more subsequent calls until the work is complete.
405  * - Once the work is complete, the poll handler enables the interrupt and
406  *   the cycle re-starts.
407  *
408  * Return: <0: error-code (operation failed)
409  *         =0: success
410  *         >0: status (not used)
411  */
412 int hdd_napi_serialize(int is_on)
413 {
414 	int rc;
415 	struct hdd_context *hdd_ctx;
416 #define POLICY_DELAY_FACTOR (1)
417 	rc = hif_napi_serialize(cds_get_context(QDF_MODULE_ID_HIF), is_on);
418 	if ((rc == 0) && (is_on == 0)) {
419 		/* apply throughput policy after one timeout */
420 		napi_tput_policy_delay = POLICY_DELAY_FACTOR;
421 
422 		/* make sure that bus_bandwidth trigger is executed */
423 		hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
424 		if (hdd_ctx != NULL)
425 			hdd_ctx->cur_vote_level = -1;
426 
427 	}
428 	return rc;
429 }
430 #endif /* HELIUMPLUS && MSM_PLATFORM */
431 
432 /**
433  * hdd_napi_poll() - NAPI poll function
434  * @napi  : pointer to NAPI struct
435  * @budget: the pre-declared budget
436  *
437  * Implementation of poll function. This function is called
438  * by kernel during softirq processing.
439  *
440  * NOTE FOR THE MAINTAINER:
441  *   Make sure this is very close to the ce_tasklet code.
442  *
443  * Return:
444  *   int: the amount of work done ( <= budget )
445  */
446 int hdd_napi_poll(struct napi_struct *napi, int budget)
447 {
448 	return hif_napi_poll(cds_get_context(QDF_MODULE_ID_HIF), napi, budget);
449 }
450 
/**
 * hdd_display_napi_stats() - print NAPI stats
 *
 * Walks every NAPI instance in the CE pipe map and, for each possible CPU,
 * logs its scheduling/poll counters plus a per-bucket budget-usage histogram
 * (one line per NAPI/CPU pair with non-zero schedules).
 *
 * Return: == 0: success; !=0: failure
 */
int hdd_display_napi_stats(void)
{
	int i, j, k, n; /* NAPI, CPU, bucket indices, bucket buf write index*/
	int max;
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;
	struct qca_napi_stat *napis;
	/*
	 * Expecting each NAPI bucket item to need at max 5 numerals + space for
	 * formatting. For example "10000 " Thus the array needs to have
	 * (5 + 1) * QCA_NAPI_NUM_BUCKETS bytes of space. Leaving one space at
	 * the end of the "buf" arrary for end of string char.
	 */
	char buf[6 * QCA_NAPI_NUM_BUCKETS + 1] = {'\0'};

	napid = hdd_napi_get_all();
	if (NULL == napid) {
		hdd_err("%s unable to retrieve napi structure", __func__);
		return -EFAULT;
	}
	/* header line: column legend for the per-NAPI/CPU rows below */
	hdd_debug("[NAPI %u][BL %d]:  scheds   polls   comps    done t-lim p-lim  corr  max_time napi-buckets(%d)",
		  napid->napi_mode,
		  hif_napi_cpu_blacklist(napid, BLACKLIST_QUERY),
		  QCA_NAPI_NUM_BUCKETS);

	for (i = 0; i < CE_COUNT_MAX; i++)
		if (napid->ce_map & (0x01 << i)) {
			/* napis[] may be sparse: skip unallocated entries */
			napii = napid->napis[i];
			if (!napii)
				continue;

			for (j = 0; j < num_possible_cpus(); j++) {
				napis = &(napii->stats[j]);
				n = 0;
				max = sizeof(buf);
				/* render the budget-usage histogram into buf;
				 * scnprintf bounds each write to max - n
				 */
				for (k = 0; k < QCA_NAPI_NUM_BUCKETS; k++) {
					n += scnprintf(
						buf + n, max - n,
						" %d",
						napis->napi_budget_uses[k]);
				}

				/* only log CPUs this NAPI actually ran on */
				if (napis->napi_schedules != 0)
					hdd_debug("NAPI[%2d]CPU[%d]: %7d %7d %7d %7d %5d %5d %5d %9llu %s",
						  i, j,
						  napis->napi_schedules,
						  napis->napi_polls,
						  napis->napi_completes,
						  napis->napi_workdone,
						  napis->time_limit_reached,
						  napis->rxpkt_thresh_reached,
						  napis->cpu_corrected,
						  napis->napi_max_poll_time,
						  buf);
			}
		}

	hif_napi_stats(napid);
	return 0;
}
516 
517 /**
518  * hdd_clear_napi_stats() - clear NAPI stats
519  *
520  * Return: == 0: success; !=0: failure
521  */
522 int hdd_clear_napi_stats(void)
523 {
524 	int i, j;
525 	struct qca_napi_data *napid;
526 	struct qca_napi_info *napii;
527 	struct qca_napi_stat *napis;
528 
529 	napid = hdd_napi_get_all();
530 	if (NULL == napid) {
531 		hdd_err("%s unable to retrieve napi structure", __func__);
532 		return -EFAULT;
533 	}
534 
535 	for (i = 0; i < CE_COUNT_MAX; i++)
536 		if (napid->ce_map & (0x01 << i)) {
537 			napii = napid->napis[i];
538 			for (j = 0; j < NR_CPUS; j++) {
539 				napis = &(napii->stats[j]);
540 				qdf_mem_zero(napis,
541 					     sizeof(struct qca_napi_stat));
542 			}
543 		}
544 
545 	return 0;
546 }
547