/*
 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wlan_hdd_napi.c
 *
 * WLAN HDD NAPI interface implementation
 */
#include <linux/smp.h> /* get_cpu */

#include "wlan_hdd_napi.h"
#include "cds_api.h"       /* cds_get_context */
#include "hif.h"           /* hif_map_service...*/
#include "wlan_hdd_main.h" /* hdd_err/warn... */
#include "qdf_types.h"     /* QDF_MODULE_ID_... */
#include "ce_api.h"
#include "wlan_dp_ucfg_api.h"

/* guaranteed to be initialized to zero/NULL by the standard */
static struct qca_napi_data *hdd_napi_ctx;

/**
 * hdd_napi_get_all() - return the whole NAPI structure from HIF
 *
 * Gets the data structure common to all NAPI instances.
 *
 * Return:
 *  NULL  : probably NAPI not initialized yet.
 *  <addr>: the address of the whole NAPI structure
 */
struct qca_napi_data *hdd_napi_get_all(void)
{
	struct qca_napi_data *rp = NULL;
	struct hif_opaque_softc *hif;

	NAPI_DEBUG("-->");

	hif = cds_get_context(QDF_MODULE_ID_HIF);
	if (unlikely(!hif))
		QDF_ASSERT(hif); /* WARN */
	else
		rp = hif_napi_get_all(hif);

	NAPI_DEBUG("<-- [addr=%pK]", rp);
	return rp;
}

/**
 * hdd_napi_get_map() - get a copy of napi pipe map
 *
 * Return:
 *  uint32_t  : copy of pipe map
 */
static uint32_t hdd_napi_get_map(void)
{
	uint32_t map = 0;

	NAPI_DEBUG("-->");
	/* cache once, use forever */
	if (!hdd_napi_ctx)
		hdd_napi_ctx = hdd_napi_get_all();
	if (hdd_napi_ctx)
		map = hdd_napi_ctx->ce_map;

	NAPI_DEBUG("<-- [map=0x%08x]", map);
	return map;
}

/**
 * hdd_napi_create() - creates the NAPI structures for a given netdev
 *
 * Creates NAPI instances. This function is called
 * unconditionally during initialization. It creates
 * napi structures through the proper HTC/HIF calls.
 * The structures are disabled on creation.
 *
 * Return:
 *   single-queue: <0: err, >0=id, 0 (should not happen)
 *   multi-queue: bitmap of created instances (0: none)
 */
int hdd_napi_create(void)
{
	struct  hif_opaque_softc *hif_ctx;
	int     rc = 0;
	struct hdd_context *hdd_ctx;
	uint8_t feature_flags = 0;
	struct qca_napi_data *napid = hdd_napi_get_all();

	NAPI_DEBUG("-->");

	if (!napid) {
		hdd_err("unable to retrieve napi structure");
		rc = -EFAULT;
		goto exit;
	}

	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
	if (unlikely(!hif_ctx)) {
		QDF_ASSERT(hif_ctx);
		rc = -EFAULT;
		goto exit;
	}

	feature_flags = QCA_NAPI_FEATURE_CPU_CORRECTION |
		QCA_NAPI_FEATURE_IRQ_BLACKLISTING |
		QCA_NAPI_FEATURE_CORE_CTL_BOOST;

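	/*
	 * rc mirrors hif_napi_create(): <0 on error, otherwise the id
	 * (single-queue) or bitmap (multi-queue) of the created instances,
	 * per the Return description in the header above.
	 */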
	rc = hif_napi_create(hif_ctx, hdd_napi_poll,
			     QCA_NAPI_BUDGET,
			     QCA_NAPI_DEF_SCALE,
			     feature_flags);
	if (rc < 0) {
		hdd_err("ERR(%d) creating NAPI instances", rc);
		goto exit;
	}

	hdd_debug("napi instances were created. Map=0x%x", rc);
	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	if (unlikely(!hdd_ctx)) {
		QDF_ASSERT(0);
		rc = -EFAULT;
		goto exit;
	}

	rc = hdd_napi_event(NAPI_EVT_INI_FILE,
			    (void *)ucfg_dp_get_napi_enabled(hdd_ctx->psoc));
	napid->user_cpu_affin_mask =
		hdd_ctx->config->napi_cpu_affinity_mask;

 exit:
	NAPI_DEBUG("<-- [rc=%d]", rc);
	return rc;
}

/**
 * hdd_napi_destroy() - destroys the NAPI structures for a given netdev
 * @force: if set, will force-disable the instance before _del'ing
 *
 * Destroys NAPI instances. This function is called
 * unconditionally during module removal. It destroys
 * napi structures through the proper HTC/HIF calls.
 *
 * Return:
 *    number of NAPI instances destroyed
 */
int hdd_napi_destroy(int force)
{
	int rc = 0;
	int i;
	uint32_t hdd_napi_map = hdd_napi_get_map();

	NAPI_DEBUG("--> (force=%d)", force);
	if (hdd_napi_map) {
		struct hif_opaque_softc *hif_ctx;

		hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
		if (unlikely(!hif_ctx))
			QDF_ASSERT(hif_ctx);
		else
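			/* destroy one NAPI instance per bit set in the map */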
			for (i = 0; i < CE_COUNT_MAX; i++)
				if (hdd_napi_map & (0x01 << i)) {
					if (0 <= hif_napi_destroy(
						    hif_ctx,
						    NAPI_PIPE2ID(i), force)) {
						rc++;
						hdd_napi_map &= ~(0x01 << i);
					} else
						hdd_err("cannot destroy napi %d: (pipe:%d), f=%d\n",
							i,
							NAPI_PIPE2ID(i), force);
				}
	} else {
		struct hif_opaque_softc *hif_ctx;

		hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);

		if (unlikely(!hif_ctx))
			QDF_ASSERT(hif_ctx);
		else
			rc = hif_napi_cpu_deinit(hif_ctx);
	}

	/* if all instances are removed, it is likely that hif_context has been
	 * removed as well, so the cached value of the napi context also needs
	 * to be removed
	 */
	if (force)
		QDF_ASSERT(hdd_napi_map == 0);
	if (0 == hdd_napi_map)
		hdd_napi_ctx = NULL;

	NAPI_DEBUG("<-- [rc=%d]", rc);
	return rc;
}

/**
 * hdd_napi_enabled() - checks if NAPI is enabled (for a given id)
 * @id: the id of the NAPI to check (any= -1)
 *
 * Return:
 *   int: 0  = false (NOT enabled)
 *        !0 = true  (enabled)
 */
int hdd_napi_enabled(int id)
{
	struct hif_opaque_softc *hif;
	int rc = 0; /* NOT enabled */

	hif = cds_get_context(QDF_MODULE_ID_HIF);
	if (unlikely(!hif))
		QDF_ASSERT(hif); /* WARN_ON; rc = 0 */
	else if (-1 == id)
		rc = hif_napi_enabled(hif, id);
	else
		rc = hif_napi_enabled(hif, NAPI_ID2PIPE(id));
	return rc;
}

/**
 * hdd_napi_event() - relay the event detected by HDD to HIF NAPI event handler
 * @event: event code
 * @data : event-specific auxiliary data
 *
 * See function documentation in hif_napi.c::hif_napi_event for list of events
 * and how each of them is handled.
 *
 * Return:
 *  < 0: error code
 *  = 0: event handled successfully
 */
int hdd_napi_event(enum qca_napi_event event, void *data)
{
	int rc = -EFAULT;  /* assume err */
	struct hif_opaque_softc *hif;

	NAPI_DEBUG("-->(event=%d, aux=%pK)", event, data);

	hif = cds_get_context(QDF_MODULE_ID_HIF);
	if (unlikely(!hif))
		QDF_ASSERT(hif);
	else
		rc = hif_napi_event(hif, event, data);

	NAPI_DEBUG("<--[rc=%d]", rc);
	return rc;
}

#if defined HELIUMPLUS && defined MSM_PLATFORM

static int napi_tput_policy_delay;

/**
 * hdd_napi_perfd_cpufreq() - set/reset min CPU freq for cores
 * @req_state:  high/low
 *
 * Send a message to cnss-daemon through netlink. cnss-daemon,
 * in turn, sends a message to perf-daemon.
 * If freq > 0, this is a set request. It sets the min frequency of the
 * cores of the specified cluster to the provided freq value (in KHz).
 * If freq == 0, then the freq lock is removed (and frequency returns to
 * system default).
 *
 * Semantic alert:
 * There can be at most one lock active at a time. Each "set" request must
 * be followed by a "reset" request. Perfd behaviour is undefined otherwise.
 *
 * Return: == 0: netlink message sent to cnss-daemon
 *         <  0: failure to send the message
 */
static int hdd_napi_perfd_cpufreq(enum qca_napi_tput_state req_state)
{
	int rc = 0;
	struct wlan_core_minfreq req;
	struct hdd_context *hdd_ctx;

	NAPI_DEBUG("-> (%d)", req_state);

	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	if (unlikely(!hdd_ctx)) {
		rc = -EFAULT;
		goto hnpc_ret;
	}

	switch (req_state) {
	case QCA_NAPI_TPUT_LO:
		req.magic    = WLAN_CORE_MINFREQ_MAGIC;
		req.reserved = 0; /* unused */
		req.coremask = 0; /* not valid */
		req.freq     = 0; /* reset */
		break;
	case QCA_NAPI_TPUT_HI:
		req.magic    = WLAN_CORE_MINFREQ_MAGIC;
		req.reserved = 0; /* unused */
		req.coremask = 0x0f0; /* perf cluster */
		req.freq     = 700;   /* KHz */
		break;
	default:
		hdd_err("invalid req_state (%d)", req_state);
		rc = -EINVAL;
		goto hnpc_ret;
	} /* switch */

	NAPI_DEBUG("%s CPU min freq to %d",
		   (req.freq == 0) ? "Resetting" : "Setting", req.freq);
	/* the following service function returns void */
	wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index,
				WLAN_SVC_CORE_MINFREQ,
				&req, sizeof(struct wlan_core_minfreq));
hnpc_ret:
	NAPI_DEBUG("<--[rc=%d]", rc);
	return rc;
}

/**
 * hdd_napi_apply_throughput_policy() - implement the throughput action policy
 * @hddctx:     HDD context
 * @tx_packets: number of tx packets in the last interval
 * @rx_packets: number of rx packets in the last interval
 *
 * Called by hdd_bus_bw_compute_cb, checks the number of packets in the last
 * interval, and determines the desired napi throughput state (HI/LO). If
 * the desired state is different from the current, then it invokes the
 * event handler to switch to the desired state.
 *
 * The policy implementation is limited to this function.
 * The current policy is: determine the NAPI mode based on the condition:
 *      (total number of packets > medium threshold)
 * - tx packets are included because:
 *   a- tx-completions arrive at one of the rx CEs
 *   b- in TCP, a lot of TX implies ~(tx/2) rx (ACKs)
 *   c- so that we can use the same normalized criteria in ini file
 * - medium-threshold (default: 500 packets / 10 ms), because
 *   we would like to be more reactive.
 *
 * Return: 0 : no action taken, or action return code
 *         !0: error, or action error code
 */
int hdd_napi_apply_throughput_policy(struct hdd_context *hddctx,
				     uint64_t tx_packets,
				     uint64_t rx_packets)
{
	int rc = 0;
	uint64_t packets = tx_packets + rx_packets;
	enum qca_napi_tput_state req_state;
	struct qca_napi_data *napid = hdd_napi_get_all();
	int enabled;

	NAPI_DEBUG("-->(tx=%lld, rx=%lld)", tx_packets, rx_packets);

	if (unlikely(napi_tput_policy_delay < 0))
		napi_tput_policy_delay = 0;
	if (napi_tput_policy_delay > 0) {
		NAPI_DEBUG("delaying policy; delay-count=%d",
			   napi_tput_policy_delay);
		napi_tput_policy_delay--;

		/* make sure the next timer call calls us */
		ucfg_dp_set_current_throughput_level(hddctx->psoc, -1);

		return rc;
	}

	if (!napid) {
		hdd_err("ERR: napid NULL");
		return rc;
	}

	enabled = hdd_napi_enabled(HDD_NAPI_ANY);
	if (!enabled) {
		hdd_err("ERR: napi not enabled");
		return rc;
	}

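	/* decision point: total packets vs. the bus-bw high threshold */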
	if (packets > ucfg_dp_get_bus_bw_high_threshold(hddctx->psoc))
		req_state = QCA_NAPI_TPUT_HI;
	else
		req_state = QCA_NAPI_TPUT_LO;

	if (req_state != napid->napi_mode) {
		/* [re]set the floor frequency of high cluster */
		rc = hdd_napi_perfd_cpufreq(req_state);
		/* denylist/boost_mode on/off */
		rc = hdd_napi_event(NAPI_EVT_TPUT_STATE, (void *)req_state);
	}
	return rc;
}

/**
 * hdd_napi_serialize() - serialize all NAPI activities
 * @is_on: 1="serialize" or 0="de-serialize"
 *
 * Start/stop "serial-NAPI-mode".
 * NAPI serial mode describes a state where all NAPI operations are forced
 * to run serially. This is achieved by ensuring all NAPI instances are run
 * on the same CPU, so they are forced to be serial.
 * NAPI life-cycle:
 * - Interrupt is received for a given CE.
 * - In the ISR, the interrupt is masked and the corresponding NAPI instance
 *   is scheduled, to be run as a bottom-half.
 * - The bottom-half starts with a poll call (by the net_rx softirq). There
 *   may be one or more subsequent calls until the work is complete.
 * - Once the work is complete, the poll handler enables the interrupt and
 *   the cycle re-starts.
 *
 * Return: <0: error-code (operation failed)
 *         =0: success
 *         >0: status (not used)
 */
int hdd_napi_serialize(int is_on)
{
	int rc;
	struct hdd_context *hdd_ctx;
#define POLICY_DELAY_FACTOR (1)
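	/*
	 * When serialization is switched off, the throughput policy is
	 * skipped for POLICY_DELAY_FACTOR bus-bandwidth timer periods
	 * (see napi_tput_policy_delay in hdd_napi_apply_throughput_policy).
	 */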
	rc = hif_napi_serialize(cds_get_context(QDF_MODULE_ID_HIF), is_on);
	if ((rc == 0) && (is_on == 0)) {
		/* apply throughput policy after one timeout */
		napi_tput_policy_delay = POLICY_DELAY_FACTOR;

		/* make sure that bus_bandwidth trigger is executed */
		hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
		if (hdd_ctx)
			ucfg_dp_set_current_throughput_level(hdd_ctx->psoc,
							     -1);
	}
	return rc;
}
#endif /* HELIUMPLUS && MSM_PLATFORM */

/**
 * hdd_napi_poll() - NAPI poll function
 * @napi  : pointer to NAPI struct
 * @budget: the pre-declared budget
 *
 * Implementation of the poll function. This function is called
 * by the kernel during softirq processing.
 *
 * NOTE FOR THE MAINTAINER:
 *   Make sure this is very close to the ce_tasklet code.
 *
 * Return:
 *   int: the amount of work done ( <= budget )
 */
int hdd_napi_poll(struct napi_struct *napi, int budget)
{
	return hif_napi_poll(cds_get_context(QDF_MODULE_ID_HIF), napi, budget);
}

/**
 * hdd_display_napi_stats() - print NAPI stats
 *
 * Return: == 0: success; !=0: failure
 */
int hdd_display_napi_stats(void)
{
	int i, j, k, n; /* NAPI, CPU, bucket indices, bucket buf write index */
	int max;
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;
	struct qca_napi_stat *napis;
	/*
	 * Expecting each NAPI bucket item to need at most 5 numerals + a space
	 * for formatting, for example "10000 ". Thus the array needs
	 * (5 + 1) * QCA_NAPI_NUM_BUCKETS bytes of space, leaving one byte at
	 * the end of "buf" for the end-of-string char.
	 */
	char buf[6 * QCA_NAPI_NUM_BUCKETS + 1] = {'\0'};

	napid = hdd_napi_get_all();
	if (!napid) {
		hdd_err("unable to retrieve napi structure");
		return -EFAULT;
	}
	hdd_nofl_info("[NAPI %u][BL %d]:  scheds   polls   comps    done t-lim p-lim  corr  max_time napi-buckets(%d)",
		      napid->napi_mode,
		      hif_napi_cpu_denylist(napid, DENYLIST_QUERY),
		      QCA_NAPI_NUM_BUCKETS);

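	/* print one line per (NAPI, CPU) pair with at least one schedule */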
	for (i = 0; i < CE_COUNT_MAX; i++)
		if (napid->ce_map & (0x01 << i)) {
			napii = napid->napis[i];
			if (!napii)
				continue;

			for (j = 0; j < num_possible_cpus(); j++) {
				napis = &(napii->stats[j]);
				n = 0;
				max = sizeof(buf);
				for (k = 0; k < QCA_NAPI_NUM_BUCKETS; k++) {
					n += scnprintf(
						buf + n, max - n,
						" %d",
						napis->napi_budget_uses[k]);
				}

				if (napis->napi_schedules != 0)
					hdd_nofl_info("NAPI[%2d]CPU[%d]: %7d %7d %7d %7d %5d %5d %5d %9llu %s",
						      i, j,
						      napis->napi_schedules,
						      napis->napi_polls,
						      napis->napi_completes,
						      napis->napi_workdone,
						      napis->time_limit_reached,
						      napis->
							rxpkt_thresh_reached,
						      napis->cpu_corrected,
						      napis->napi_max_poll_time,
						      buf);
			}
		}

	hif_napi_stats(napid);
	return 0;
}

/**
 * hdd_clear_napi_stats() - clear NAPI stats
 *
 * Return: == 0: success; !=0: failure
 */
int hdd_clear_napi_stats(void)
{
	int i, j;
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;
	struct qca_napi_stat *napis;

	napid = hdd_napi_get_all();
	if (!napid) {
		hdd_err("unable to retrieve napi structure");
		return -EFAULT;
	}

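	/* zero the per-CPU stats of every NAPI instance in the CE map */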
	for (i = 0; i < CE_COUNT_MAX; i++)
		if (napid->ce_map & (0x01 << i)) {
			napii = napid->napis[i];
			for (j = 0; j < NR_CPUS; j++) {
				napis = &(napii->stats[j]);
				qdf_mem_zero(napis,
					     sizeof(struct qca_napi_stat));
			}
		}

	return 0;
}