xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_napi.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: hif_napi.c
21  *
22  * HIF NAPI interface implementation
23  */
24 
25 #include <linux/string.h> /* memset */
26 
27 /* Linux headers */
28 #include <linux/cpumask.h>
29 #include <linux/cpufreq.h>
30 #include <linux/cpu.h>
31 #include <linux/topology.h>
32 #include <linux/interrupt.h>
33 #include <linux/irq.h>
34 #ifdef CONFIG_SCHED_CORE_CTL
35 #include <linux/sched/core_ctl.h>
36 #endif
37 #include <pld_common.h>
38 #include <linux/pm.h>
39 
40 /* Driver headers */
41 #include <hif_napi.h>
42 #include <hif_debug.h>
43 #include <hif_io32.h>
44 #include <ce_api.h>
45 #include <ce_internal.h>
46 #include <hif_irq_affinity.h>
47 #include "qdf_cpuhp.h"
48 #include "qdf_module.h"
49 #include "qdf_net_if.h"
50 #include "qdf_dev.h"
51 
52 enum napi_decision_vector {
53 	HIF_NAPI_NOEVENT = 0,
54 	HIF_NAPI_INITED  = 1,
55 	HIF_NAPI_CONF_UP = 2
56 };
57 #define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
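
/*
 * Illustrative note (editor's sketch, not authoritative): napid->state is
 * treated as a bitmap of the values above.  A typical life cycle is:
 *
 *   state == HIF_NAPI_NOEVENT                   initial / after full destroy
 *   state == HIF_NAPI_INITED                    after hif_napi_create()
 *   state == (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
 *         == ENABLE_NAPI_MASK                   after an "on" config event;
 *                                               only then does hif_napi_event()
 *                                               actually enable the instances
 */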
58 
59 #ifdef RECEIVE_OFFLOAD
60 /**
61  * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI
62  * @napi: Rx_thread NAPI
63  * @budget: NAPI BUDGET
64  *
65  * Return: 0; it is not supposed to be polled at all, as it is never scheduled.
66  */
67 static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget)
68 {
69 	hif_err("This napi_poll should not be polled as we don't schedule it");
70 	QDF_ASSERT(0);
71 	return 0;
72 }
73 
74 /**
75  * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI
76  * @napii: Handle to napi_info holding rx_thread napi
77  *
78  * Return: None
79  */
80 static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
81 {
82 	init_dummy_netdev(&napii->rx_thread_netdev);
83 	netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi,
84 		       hif_rxthread_napi_poll, 64);
85 	napi_enable(&napii->rx_thread_napi);
86 }
87 
88 /**
89  * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI
90  * @napii: Handle to napi_info holding rx_thread napi
91  *
92  * Return: None
93  */
94 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
95 {
96 	netif_napi_del(&napii->rx_thread_napi);
97 }
98 #else /* RECEIVE_OFFLOAD */
99 static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
100 {
101 }
102 
103 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
104 {
105 }
106 #endif
107 
108 /**
109  * hif_napi_create() - creates NAPI structures for the relevant copy engines
110  * @hif_ctx: pointer to hif context
111  * @poll   : poll function to be used for the NAPI instances
112  * @budget : budget to be registered with the NAPI instances
113  * @scale  : scale factor on the weight
114  *           (used to scale the budget to 1000)
115  * @flags  : feature flags
116  *
117  * Description:
118  *    Creates NAPI instances. This function is called
119  *    unconditionally during initialization. It creates
120  *    napi structures through the proper HTC/HIF calls.
121  *    The structures are disabled on creation.
122  *    Note that for each NAPI instance a separate dummy netdev is used
123  *
124  * Return:
125  * < 0: error
126  * = 0: <should never happen>
127  * > 0: bitmap (ce_map) of the CEs for which NAPI instances were created
128  */
129 int hif_napi_create(struct hif_opaque_softc   *hif_ctx,
130 		    int (*poll)(struct napi_struct *, int),
131 		    int                budget,
132 		    int                scale,
133 		    uint8_t            flags)
134 {
135 	int i;
136 	struct qca_napi_data *napid;
137 	struct qca_napi_info *napii;
138 	struct CE_state      *ce_state;
139 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
140 	int    rc = 0;
141 
142 	NAPI_DEBUG("-->(budget=%d, scale=%d)",
143 		   budget, scale);
144 	NAPI_DEBUG("hif->napi_data.state = 0x%08x",
145 		   hif->napi_data.state);
146 	NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
147 		   hif->napi_data.ce_map);
148 
149 	napid = &(hif->napi_data);
150 	if (0 == (napid->state &  HIF_NAPI_INITED)) {
151 		memset(napid, 0, sizeof(struct qca_napi_data));
152 		qdf_spinlock_create(&(napid->lock));
153 
154 		napid->state |= HIF_NAPI_INITED;
155 		napid->flags = flags;
156 
157 		rc = hif_napi_cpu_init(hif_ctx);
158 		if (rc != 0 && rc != -EALREADY) {
159 			hif_err("NAPI_initialization failed(rc=%d)", rc);
160 			rc = napid->ce_map;
161 			goto hnc_err;
162 		} else
163 			rc = 0;
164 
165 		hif_debug("NAPI structures initialized, rc=%d", rc);
166 	}
167 	for (i = 0; i < hif->ce_count; i++) {
168 		ce_state = hif->ce_id_to_state[i];
169 		NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
170 			   i, ce_state->htt_rx_data,
171 			   ce_state->htt_tx_data);
172 		if (ce_srng_based(hif))
173 			continue;
174 
175 		if (!ce_state->htt_rx_data)
176 			continue;
177 
178 		/* Now this is a CE where we need NAPI on */
179 		NAPI_DEBUG("Creating NAPI on pipe %d", i);
180 		napii = qdf_mem_malloc(sizeof(*napii));
181 		napid->napis[i] = napii;
182 		if (!napii) {
183 			rc = -ENOMEM;
184 			goto napii_free;
185 		}
186 	}
187 
188 	for (i = 0; i < hif->ce_count; i++) {
189 		napii = napid->napis[i];
190 		if (!napii)
191 			continue;
192 
193 		NAPI_DEBUG("initializing NAPI for pipe %d", i);
194 		memset(napii, 0, sizeof(struct qca_napi_info));
195 		napii->scale = scale;
196 		napii->id    = NAPI_PIPE2ID(i);
197 		napii->hif_ctx = hif_ctx;
198 		napii->irq   = pld_get_irq(hif->qdf_dev->dev, i);
199 
200 		if (napii->irq < 0)
201 			hif_warn("bad IRQ value for CE %d: %d", i, napii->irq);
202 
203 		init_dummy_netdev(&(napii->netdev));
204 
205 		NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)",
206 			   &(napii->napi), &(napii->netdev), poll, budget);
207 		netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget);
208 
209 		NAPI_DEBUG("after napi_add");
210 		NAPI_DEBUG("napi=0x%pK, netdev=0x%pK",
211 			   &(napii->napi), &(napii->netdev));
212 		NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK",
213 			   napii->napi.dev_list.prev,
214 			   napii->napi.dev_list.next);
215 		NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK",
216 			   napii->netdev.napi_list.prev,
217 			   napii->netdev.napi_list.next);
218 
219 		hif_init_rx_thread_napi(napii);
220 		napii->lro_ctx = qdf_lro_init();
221 		NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n",
222 				i, napii->id, napii->lro_ctx);
223 
224 		/* It is OK to change the state variable below without
225 		 * protection as there should be no-one around yet
226 		 */
227 		napid->ce_map |= (0x01 << i);
228 		hif_debug("NAPI id %d created for pipe %d", napii->id, i);
229 	}
230 
231 	/* no ces registered with the napi */
232 	if (!ce_srng_based(hif) && napid->ce_map == 0) {
233 		hif_warn("no napis created for copy engines");
234 		rc = -EFAULT;
235 		goto napii_free;
236 	}
237 
238 	NAPI_DEBUG("napi map = %x", napid->ce_map);
239 	NAPI_DEBUG("NAPI ids created for all applicable pipes");
240 	return napid->ce_map;
241 
242 napii_free:
243 	for (i = 0; i < hif->ce_count; i++) {
244 		napii = napid->napis[i];
245 		napid->napis[i] = NULL;
246 		if (napii)
247 			qdf_mem_free(napii);
248 	}
249 
250 hnc_err:
251 	NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
252 	return rc;
253 }
254 qdf_export_symbol(hif_napi_create);
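
/*
 * Illustrative usage sketch (editor's example, not part of this driver): a
 * bus/HDD attach path might create the instances roughly as below.  The
 * hdd_napi_poll symbol and the scale value of 1 are assumptions made only for
 * this example; on success the return value is the ce_map bitmap of the CEs
 * that received a NAPI instance.
 *
 *   uint8_t feature_flags = QCA_NAPI_FEATURE_CPU_CORRECTION |
 *                           QCA_NAPI_FEATURE_IRQ_BLACKLISTING |
 *                           QCA_NAPI_FEATURE_CORE_CTL_BOOST;
 *   int ce_map = hif_napi_create(hif_ctx, hdd_napi_poll,
 *                                QCA_NAPI_BUDGET, 1, feature_flags);
 *   if (ce_map <= 0)
 *           return -EFAULT;
 */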
255 
256 #ifdef RECEIVE_OFFLOAD
257 void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
258 					 void (offld_flush_handler)(void *))
259 {
260 	int i;
261 	struct CE_state *ce_state;
262 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
263 	struct qca_napi_data *napid;
264 	struct qca_napi_info *napii;
265 
266 	if (!scn) {
267 		hif_err("hif_state NULL!");
268 		QDF_ASSERT(0);
269 		return;
270 	}
271 
272 	napid = hif_napi_get_all(hif_hdl);
273 	for (i = 0; i < scn->ce_count; i++) {
274 		ce_state = scn->ce_id_to_state[i];
275 		if (ce_state && (ce_state->htt_rx_data)) {
276 			napii = napid->napis[i];
277 			napii->offld_flush_cb = offld_flush_handler;
278 			hif_debug("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK",
279 				i, napii->id, napii->offld_flush_cb);
280 		}
281 	}
282 }
283 
284 void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
285 {
286 	int i;
287 	struct CE_state *ce_state;
288 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
289 	struct qca_napi_data *napid;
290 	struct qca_napi_info *napii;
291 
292 	if (!scn) {
293 		hif_err("hif_state NULL!");
294 		QDF_ASSERT(0);
295 		return;
296 	}
297 
298 	napid = hif_napi_get_all(hif_hdl);
299 	for (i = 0; i < scn->ce_count; i++) {
300 		ce_state = scn->ce_id_to_state[i];
301 		if (ce_state && (ce_state->htt_rx_data)) {
302 			napii = napid->napis[i];
303 			hif_debug("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK",
304 				 i, napii->id, napii->offld_flush_cb);
305 			/* Not required */
306 			napii->offld_flush_cb = NULL;
307 		}
308 	}
309 }
310 #endif /* RECEIVE_OFFLOAD */
311 
312 /**
313  *
314  * hif_napi_destroy() - destroys the NAPI structures for a given instance
315  * @hif_ctx: pointer to hif context
316  * @id     : the NAPI id (mapped to a CE pipe) of the instance to be destroyed
317  * @force : if set, will destroy even if entry is active (de-activates)
318  *
319  * Description:
320  *    Destroy a given NAPI instance. This function is called
321  *    unconditionally during cleanup.
322  *    Refuses to destroy an entry if it is still enabled (unless force=1)
323  *    Marks the whole napi_data invalid if all instances are destroyed.
324  *
325  * Return:
326  * -EINVAL: specific entry has not been created
327  * -EPERM : specific entry is still active
328  * < 0    : error
329  * = 0    : success
330  */
331 int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
332 		     uint8_t          id,
333 		     int              force)
334 {
335 	uint8_t ce = NAPI_ID2PIPE(id);
336 	int rc = 0;
337 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
338 
339 	NAPI_DEBUG("-->(id=%d, force=%d)", id, force);
340 
341 	if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
342 		hif_err("NAPI not initialized or entry %d not created", id);
343 		rc = -EINVAL;
344 	} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
345 		hif_err("NAPI instance %d (pipe %d) not created", id, ce);
346 		if (hif->napi_data.napis[ce])
347 			hif_err("memory allocated but ce_map not set %d (pipe %d)",
348 				id, ce);
349 		rc = -EINVAL;
350 	} else {
351 		struct qca_napi_data *napid;
352 		struct qca_napi_info *napii;
353 
354 		napid = &(hif->napi_data);
355 		napii = napid->napis[ce];
356 		if (!napii) {
357 			if (napid->ce_map & (0x01 << ce))
358 				hif_err("napii & ce_map out of sync(ce %d)", ce);
359 			return -EINVAL;
360 		}
361 
362 
363 		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
364 			if (force) {
365 				napi_disable(&(napii->napi));
366 				hif_debug("NAPI entry %d force disabled", id);
367 				NAPI_DEBUG("NAPI %d force disabled", id);
368 			} else {
369 				hif_err("Cannot destroy active NAPI %d", id);
370 				rc = -EPERM;
371 			}
372 		}
373 		if (0 == rc) {
374 			NAPI_DEBUG("before napi_del");
375 			NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK",
376 				  napii->napi.dev_list.prev,
377 				  napii->napi.dev_list.next);
378 			NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK",
379 				   napii->netdev.napi_list.prev,
380 				   napii->netdev.napi_list.next);
381 
382 			qdf_lro_deinit(napii->lro_ctx);
383 			netif_napi_del(&(napii->napi));
384 			hif_deinit_rx_thread_napi(napii);
385 
386 			napid->ce_map &= ~(0x01 << ce);
387 			napid->napis[ce] = NULL;
388 			napii->scale  = 0;
389 			qdf_mem_free(napii);
390 			hif_debug("NAPI %d destroyed", id);
391 
392 			/* if there are no active instances left and
393 			 * all of them have been destroyed,
394 			 * set the whole structure to uninitialized state
395 			 */
396 			if (napid->ce_map == 0) {
397 				rc = hif_napi_cpu_deinit(hif_ctx);
398 				/* caller is tolerant to receiving !=0 rc */
399 
400 				qdf_spinlock_destroy(&(napid->lock));
401 				memset(napid,
402 				       0, sizeof(struct qca_napi_data));
403 				hif_debug("no NAPI instances. Zapped");
404 			}
405 		}
406 	}
407 
408 	return rc;
409 }
410 qdf_export_symbol(hif_napi_destroy);
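
/*
 * Illustrative teardown sketch (editor's example, mirrors the create path):
 * a caller that saved the ce_map returned by hif_napi_create() could destroy
 * every instance during unload, forcing the tear-down if still enabled:
 *
 *   for (i = 0; i < CE_COUNT_MAX; i++)
 *           if (ce_map & (0x01 << i))
 *                   hif_napi_destroy(hif_ctx, NAPI_PIPE2ID(i), 1);
 */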
411 
412 #ifdef FEATURE_LRO
413 void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
414 {
415 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
416 	struct qca_napi_data *napid;
417 	struct qca_napi_info *napii;
418 
419 	napid = &(scn->napi_data);
420 	napii = napid->napis[NAPI_ID2PIPE(napi_id)];
421 
422 	if (napii)
423 		return napii->lro_ctx;
424 	return 0;
425 }
426 #endif
427 
428 /**
429  *
430  * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
431  * @hif_ctx: pointer to hif context
432  *
433  * Description:
434  *    Returns the address of the whole structure
435  *
436  * Return:
437  *  <addr>: address of the whole HIF NAPI structure
438  */
439 inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
440 {
441 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
442 
443 	return &(hif->napi_data);
444 }
445 
446 struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid)
447 {
448 	int id = NAPI_ID2PIPE(napi_id);
449 
450 	return napid->napis[id];
451 }
452 
453 /**
454  *
455  * hif_napi_event() - reacts to events that impact NAPI
456  * @hif_ctx: pointer to hif context
457  * @event  : event that has been detected
458  * @data: more data regarding the event
459  *
460  * Description:
461  *   This function handles two types of events:
462  *   1- Events that change the state of NAPI (enabled/disabled):
463  *      {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
464  *      The state is retrievable by "hdd_napi_enabled(-1)"
465  *    - NAPI will be on if either INI file is on and it has not been disabled
466  *                                by a subsequent vendor CMD,
467  *                         or     it has been enabled by a vendor CMD.
468  *   2- Events that change the CPU affinity of a NAPI instance/IRQ:
469  *      {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
470  *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
471  *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
472  *    - In LO tput mode, NAPI will yield control of its interrupts to the system
473  *      management functions. However, in HI throughput mode, NAPI will actively
474  *      manage its interrupts/instances (by trying to disperse them out to
475  *      separate performance cores).
476  *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
477  *
478  *    + In some cases (roaming peer management is the only case so far),
479  *      a client can trigger a "SERIALIZE" event. Basically, this means that the
480  *      user is asking NAPI to go into a truly single execution context state.
481  *      So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted,
482  *      (if called for the first time) and then moves all IRQs (for NAPI
483  *      instances) to be collapsed to a single core. If called multiple times,
484  *      it will just re-collapse the CPUs, since the blacklist-on() API is
485  *      reference-counted and has already taken effect on the first call.
486  *
487  *      Such a user should call the "DESERIALIZE" (NORMAL) event, to set NAPI to go
488  *      to its "normal" operation. Optionally, they can give a timeout value (in
489  *      multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this
490  *      case, NAPI will just set the current throughput state to uninitialized
491  *      and set the delay period. Once policy handler is called, it would skip
492  *      applying the policy delay period times, and otherwise apply the policy.
493  *
494  * Return:
495  *  < 0: some error
496  *  = 0: event handled successfully
497  */
498 int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
499 		   void *data)
500 {
501 	int      rc = 0;
502 	uint32_t prev_state;
503 	int      i;
504 	bool state_changed;
505 	struct napi_struct *napi;
506 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
507 	struct qca_napi_data *napid = &(hif->napi_data);
508 	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
509 	enum {
510 		BLACKLIST_NOT_PENDING,
511 		BLACKLIST_ON_PENDING,
512 		BLACKLIST_OFF_PENDING
513 	     } blacklist_pending = BLACKLIST_NOT_PENDING;
514 
515 	NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);
516 
517 	if (ce_srng_based(hif))
518 		return hif_exec_event(hif_ctx, event, data);
519 
520 	if ((napid->state & HIF_NAPI_INITED) == 0) {
521 		NAPI_DEBUG("%s: got event when NAPI not initialized",
522 			   __func__);
523 		return -EINVAL;
524 	}
525 	qdf_spin_lock_bh(&(napid->lock));
526 	prev_state = napid->state;
527 	switch (event) {
528 	case NAPI_EVT_INI_FILE:
529 	case NAPI_EVT_CMD_STATE:
530 	case NAPI_EVT_INT_STATE: {
531 		int on = (data != ((void *)0));
532 
533 		hif_debug("recved evnt: STATE_CMD %d; v = %d (state=0x%0x)",
534 			 event, on, prev_state);
535 		if (on)
536 			if (prev_state & HIF_NAPI_CONF_UP) {
537 				hif_debug("Duplicate NAPI conf ON msg");
538 			} else {
539 				hif_debug("Setting state to ON");
540 				napid->state |= HIF_NAPI_CONF_UP;
541 			}
542 		else /* off request */
543 			if (prev_state & HIF_NAPI_CONF_UP) {
544 				hif_debug("Setting state to OFF");
545 				napid->state &= ~HIF_NAPI_CONF_UP;
546 			} else {
547 				hif_debug("Duplicate NAPI conf OFF msg");
548 			}
549 		break;
550 	}
551 	/* case NAPI_EVT_INI_FILE/CMD_STATE */
552 
553 	case NAPI_EVT_CPU_STATE: {
554 		int cpu = ((unsigned long int)data >> 16);
555 		int val = ((unsigned long int)data & 0x0ff);
556 
557 		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
558 			   __func__, cpu, val);
559 
560 		/* state has already been set by hnc_cpu_notify_cb */
561 		if ((val == QCA_NAPI_CPU_DOWN) &&
562 		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
563 		    (napid->napi_cpu[cpu].napis != 0)) {
564 			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
565 				   __func__, cpu);
566 			rc = hif_napi_cpu_migrate(napid,
567 						  cpu,
568 						  HNC_ACT_RELOCATE);
569 			napid->napi_cpu[cpu].napis = 0;
570 		}
571 		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
572 		break;
573 	}
574 
575 	case NAPI_EVT_TPUT_STATE: {
576 		tput_mode = (enum qca_napi_tput_state)data;
577 		if (tput_mode == QCA_NAPI_TPUT_LO) {
578 			/* from TPUT_HI -> TPUT_LO */
579 			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
580 				   __func__);
581 			blacklist_pending = BLACKLIST_OFF_PENDING;
582 			/*
583 			 * Ideally we should "collapse" interrupts here, since
584 			 * we are "dispersing" interrupts in the "else" case.
585 			 * This allows the possibility that our interrupts may
586 			 * still be on the perf cluster the next time we enter
587 			 * high tput mode. However, the irq_balancer is free
588 			 * to move our interrupts to power cluster once
589 			 * blacklisting has been turned off in the "else" case.
590 			 */
591 		} else {
592 			/* from TPUT_LO -> TPUT_HI */
593 			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
594 				   __func__);
595 			rc = hif_napi_cpu_migrate(napid,
596 						  HNC_ANY_CPU,
597 						  HNC_ACT_DISPERSE);
598 
599 			blacklist_pending = BLACKLIST_ON_PENDING;
600 		}
601 		napid->napi_mode = tput_mode;
602 		break;
603 	}
604 
605 	case NAPI_EVT_USR_SERIAL: {
606 		unsigned long users = (unsigned long)data;
607 
608 		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
609 			   __func__, users);
610 
611 		rc = hif_napi_cpu_migrate(napid,
612 					  HNC_ANY_CPU,
613 					  HNC_ACT_COLLAPSE);
614 		if ((users == 0) && (rc == 0))
615 			blacklist_pending = BLACKLIST_ON_PENDING;
616 		break;
617 	}
618 	case NAPI_EVT_USR_NORMAL: {
619 		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
620 		if (!napid->user_cpu_affin_mask)
621 			blacklist_pending = BLACKLIST_OFF_PENDING;
622 		/*
623 		 * Deserialization timeout is handled at hdd layer;
624 		 * just mark current mode to uninitialized to ensure
625 		 * it will be set when the delay is over
626 		 */
627 		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
628 		break;
629 	}
630 	default: {
631 		hif_err("Unknown event: %d (data=0x%0lx)",
632 			event, (unsigned long) data);
633 		break;
634 	} /* default */
635 	}; /* switch */
636 
637 
638 	switch (blacklist_pending) {
639 	case BLACKLIST_ON_PENDING:
640 		/* assume the control of WLAN IRQs */
641 		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
642 		break;
643 	case BLACKLIST_OFF_PENDING:
644 		/* yield the control of WLAN IRQs */
645 		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
646 		break;
647 	default: /* nothing to do */
648 		break;
649 	} /* switch blacklist_pending */
650 
651 	/* we want to perform the comparison while holding the lock:
652 	 * there is a possibility of hif_napi_event getting called
653 	 * from two different contexts (driver unload and cpu hotplug
654 	 * notification); napid->state may be changed in the driver
655 	 * unload context, which can lead to a race condition in the
656 	 * cpu hotplug context. Therefore, perform the napid->state
657 	 * comparison before releasing the lock.
658 	 */
659 	state_changed = (prev_state != napid->state);
660 	qdf_spin_unlock_bh(&(napid->lock));
661 
662 	if (state_changed) {
663 		if (napid->state == ENABLE_NAPI_MASK) {
664 			rc = 1;
665 			for (i = 0; i < CE_COUNT_MAX; i++) {
666 				struct qca_napi_info *napii = napid->napis[i];
667 				if (napii) {
668 					napi = &(napii->napi);
669 					NAPI_DEBUG("%s: enabling NAPI %d",
670 						   __func__, i);
671 					napi_enable(napi);
672 				}
673 			}
674 		} else {
675 			rc = 0;
676 			for (i = 0; i < CE_COUNT_MAX; i++) {
677 				struct qca_napi_info *napii = napid->napis[i];
678 				if (napii) {
679 					napi = &(napii->napi);
680 					NAPI_DEBUG("%s: disabling NAPI %d",
681 						   __func__, i);
682 					napi_disable(napi);
683 					/* in case it is affined, remove it */
684 					qdf_dev_set_irq_affinity(napii->irq,
685 								 NULL);
686 				}
687 			}
688 		}
689 	} else {
690 		hif_debug("no change in hif napi state (still %d)", prev_state);
691 	}
692 
693 	NAPI_DEBUG("<--[rc=%d]", rc);
694 	return rc;
695 }
696 qdf_export_symbol(hif_napi_event);
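
/*
 * Illustrative event sequence (editor's sketch of how the upper layers are
 * expected to drive this API, based on the description above; the actual call
 * sites live in HDD and are not shown here):
 *
 *   enable NAPI as configured in the INI file:
 *       hif_napi_event(hif_ctx, NAPI_EVT_INI_FILE, (void *)1);
 *   switch to high-throughput handling when the bandwidth monitor fires:
 *       hif_napi_event(hif_ctx, NAPI_EVT_TPUT_STATE, (void *)QCA_NAPI_TPUT_HI);
 *   and back to low-throughput handling:
 *       hif_napi_event(hif_ctx, NAPI_EVT_TPUT_STATE, (void *)QCA_NAPI_TPUT_LO);
 */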
697 
698 /**
699  * hif_napi_enabled() - checks whether NAPI is enabled for a given CE or not
700  * @hif_ctx: hif context
701  * @ce : CE instance (or -1, to check if any CEs are enabled)
702  *
703  * Return: bool
704  */
705 int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
706 {
707 	int rc;
708 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
709 
710 	if (-1 == ce)
711 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
712 	else
713 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
714 		      (hif->napi_data.ce_map & (0x01 << ce)));
715 	return rc;
716 }
717 qdf_export_symbol(hif_napi_enabled);
718 
719 /**
720  * hif_napi_created() - checks whether NAPI is created for a given CE or not
721  * @hif_ctx: hif context
722  * @ce : CE instance
723  *
724  * Return: bool
725  */
726 bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce)
727 {
728 	int rc;
729 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
730 
731 	rc = (hif->napi_data.ce_map & (0x01 << ce));
732 
733 	return !!rc;
734 }
735 qdf_export_symbol(hif_napi_created);
736 
737 /**
738  * hif_napi_enable_irq() - enables bus interrupts after napi_complete
739  *
740  * @hif: hif context
741  * @id : id of NAPI instance calling this (used to determine the CE)
742  *
743  * Return: void
744  */
745 inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
746 {
747 	struct hif_softc *scn = HIF_GET_SOFTC(hif);
748 
749 	hif_irq_enable(scn, NAPI_ID2PIPE(id));
750 }
751 
752 
753 /**
754  * hif_napi_schedule() - schedules napi, updates stats
755  * @hif_ctx: hif context
756  * @ce_id: index of napi instance
757  *
758  * Return: false if the napi is not allocated or is already scheduled; true otherwise
759  */
760 bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
761 {
762 	int cpu = smp_processor_id();
763 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
764 	struct qca_napi_info *napii;
765 
766 	napii = scn->napi_data.napis[ce_id];
767 	if (qdf_unlikely(!napii)) {
768 		hif_err("scheduling unallocated napi (ce:%d)", ce_id);
769 		qdf_atomic_dec(&scn->active_tasklet_cnt);
770 		return false;
771 	}
772 
773 	if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) {
774 		NAPI_DEBUG("napi scheduled, return");
775 		qdf_atomic_dec(&scn->active_tasklet_cnt);
776 		return false;
777 	}
778 
779 	hif_record_ce_desc_event(scn,  ce_id, NAPI_SCHEDULE,
780 				 NULL, NULL, 0, 0);
781 	napii->stats[cpu].napi_schedules++;
782 	NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
783 	napi_schedule(&(napii->napi));
784 
785 	return true;
786 }
787 qdf_export_symbol(hif_napi_schedule);
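
/*
 * Illustrative caller sketch (editor's assumption about the interrupt path,
 * not a quote of ce_tasklet.c): the CE interrupt handling code is expected to
 * pick NAPI over the legacy tasklet when NAPI is enabled for that CE:
 *
 *   if (hif_napi_enabled(hif_ctx, ce_id))
 *           hif_napi_schedule(hif_ctx, ce_id);
 *   else
 *           (schedule the legacy CE tasklet instead)
 */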
788 
789 /**
790  * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
791  * @napi_info: pointer to qca_napi_info for the napi instance
792  *
793  * Return: true  => interrupt already on correct cpu, no correction needed
794  *         false => interrupt on wrong cpu, correction done for cpu affinity
795  *                   of the interrupt
796  */
797 static inline
798 bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
799 {
800 	bool right_cpu = true;
801 	int rc = 0;
802 	int cpu;
803 	struct qca_napi_data *napid;
804 	QDF_STATUS ret;
805 
806 	napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));
807 
808 	if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {
809 
810 		cpu = qdf_get_cpu();
811 		if (unlikely((hif_napi_cpu_blacklist(napid,
812 						BLACKLIST_QUERY) > 0) &&
813 						(cpu != napi_info->cpu))) {
814 			right_cpu = false;
815 
816 			NAPI_DEBUG("interrupt on wrong CPU, correcting");
817 			napi_info->cpumask.bits[0] = (0x01 << napi_info->cpu);
818 
819 			irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0);
820 			ret = qdf_dev_set_irq_affinity(napi_info->irq,
821 						       (struct qdf_cpu_mask *)
822 						       &napi_info->cpumask);
823 			rc = qdf_status_to_os_return(ret);
824 			irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING);
825 
826 			if (rc)
827 				hif_err("Setting irq affinity hint: %d", rc);
828 			else
829 				napi_info->stats[cpu].cpu_corrected++;
830 		}
831 	}
832 	return right_cpu;
833 }
834 
835 #ifdef RECEIVE_OFFLOAD
836 /**
837  * hif_napi_offld_flush_cb() - Call upper layer flush callback
838  * @napi_info: Handle to hif_napi_info
839  *
840  * Return: None
841  */
842 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
843 {
844 	if (napi_info->offld_flush_cb)
845 		napi_info->offld_flush_cb(napi_info);
846 }
847 #else
848 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
849 {
850 }
851 #endif
852 
853 /**
854  * hif_napi_poll() - NAPI poll routine
855  * @napi  : pointer to NAPI struct as kernel holds it
856  * @budget: maximum amount of work this poll is allowed to do
857  *
858  * This is the body of the poll function.
859  * The poll function is called by kernel. So, there is a wrapper
860  * function in HDD, which in turn calls this function.
861  * Two main reasons why the whole thing is not implemented in HDD:
862  * a) references to things like ce_service that HDD is not aware of
863  * b) proximity to the implementation of ce_tasklet, which the body
864  *    of this function should be very close to.
865  *
866  * NOTE TO THE MAINTAINER:
867  *  Consider this function and ce_tasklet very tightly coupled pairs.
868  *  Any changes to ce_tasklet or this function may likely need to be
869  *  reflected in the counterpart.
870  *
871  * Returns:
872  *  int: the amount of work done in this poll (<= budget)
873  */
874 int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
875 		  struct napi_struct *napi,
876 		  int budget)
877 {
878 	int    rc = 0; /* default: no work done, also takes care of error */
879 	int    normalized = 0;
880 	int    bucket;
881 	int    cpu = smp_processor_id();
882 	bool poll_on_right_cpu;
883 	struct hif_softc      *hif = HIF_GET_SOFTC(hif_ctx);
884 	struct qca_napi_info *napi_info;
885 	struct CE_state *ce_state = NULL;
886 
887 	if (unlikely(!hif)) {
888 		hif_err("hif context is NULL");
889 		QDF_ASSERT(0);
890 		goto out;
891 	}
892 
893 	napi_info = (struct qca_napi_info *)
894 		container_of(napi, struct qca_napi_info, napi);
895 
896 	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
897 		   __func__, napi_info->id, napi_info->irq, budget);
898 
899 	napi_info->stats[cpu].napi_polls++;
900 
901 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
902 				 NAPI_POLL_ENTER, NULL, NULL, cpu, 0);
903 
904 	rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
905 	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
906 		    __func__, rc);
907 
908 	hif_napi_offld_flush_cb(napi_info);
909 
910 	/* do not return 0, if there was some work done,
911 	 * even if it is below the scale
912 	 */
913 	if (rc) {
914 		napi_info->stats[cpu].napi_workdone += rc;
915 		normalized = (rc / napi_info->scale);
916 		if (normalized == 0)
917 			normalized++;
918 		bucket = (normalized - 1) /
919 				(QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
920 		if (bucket >= QCA_NAPI_NUM_BUCKETS) {
921 			bucket = QCA_NAPI_NUM_BUCKETS - 1;
922 			hif_err("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)"
923 				" normalized %d, napi budget %d",
924 				bucket, QCA_NAPI_NUM_BUCKETS,
925 				normalized, QCA_NAPI_BUDGET);
926 		}
927 		napi_info->stats[cpu].napi_budget_uses[bucket]++;
928 	} else {
929 	/* if ce_per_engine_service reports 0, then the poll should be terminated */
930 		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
931 			   __func__, __LINE__);
932 	}
933 
934 	ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
935 
936 	/*
937 	 * Not using the API hif_napi_correct_cpu directly in the if statement
938 	 * below since the API may not get evaluated if put at the end and any
939 	 * prior condition evaluates to true (short-circuit). The CPU correction
940 	 * check should kick in every poll.
941 	 */
942 #ifdef NAPI_YIELD_BUDGET_BASED
943 	if (ce_state && (ce_state->force_break || 0 == rc)) {
944 #else
945 	poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
946 	if ((ce_state) &&
947 	    (!ce_check_rx_pending(ce_state) || (0 == rc) ||
948 	     !poll_on_right_cpu)) {
949 #endif
950 		napi_info->stats[cpu].napi_completes++;
951 #ifdef NAPI_YIELD_BUDGET_BASED
952 		ce_state->force_break = 0;
953 #endif
954 
955 		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
956 					 NULL, NULL, 0, 0);
957 		if (normalized >= budget)
958 			normalized = budget - 1;
959 
960 		napi_complete(napi);
961 		/* enable interrupts */
962 		hif_napi_enable_irq(hif_ctx, napi_info->id);
963 		/* support suspend/resume */
964 		qdf_atomic_dec(&(hif->active_tasklet_cnt));
965 
966 		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
967 			   __func__, __LINE__);
968 	} else {
969 		/* 4.4 kernel NAPI implementation requires drivers to
970 		 * return full work when they ask to be re-scheduled,
971 		 * or napi_complete and re-start with a fresh interrupt
972 		 */
973 		normalized = budget;
974 	}
975 
976 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
977 				 NAPI_POLL_EXIT, NULL, NULL, normalized, 0);
978 
979 	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
980 	return normalized;
981 out:
982 	return rc;
983 }
984 qdf_export_symbol(hif_napi_poll);
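
/*
 * Worked example of the bucket accounting above (the real constants live in
 * hif_napi.h; the values 64 and 8 are assumed purely for illustration):
 * with QCA_NAPI_BUDGET == 64, QCA_NAPI_NUM_BUCKETS == 8 and scale == 1, a
 * poll in which ce_per_engine_service() processed rc = 30 messages gives
 * normalized = 30 and bucket = (30 - 1) / (64 / 8) = 3, so
 * stats[cpu].napi_budget_uses[3] is incremented.
 */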
985 
986 void hif_update_napi_max_poll_time(struct CE_state *ce_state,
987 				   int ce_id,
988 				   int cpu_id)
989 {
990 	struct hif_softc *hif;
991 	struct qca_napi_info *napi_info;
992 	unsigned long long napi_poll_time = sched_clock() -
993 					ce_state->ce_service_start_time;
994 
995 	hif = ce_state->scn;
996 	napi_info = hif->napi_data.napis[ce_id];
997 	if (napi_poll_time >
998 			napi_info->stats[cpu_id].napi_max_poll_time)
999 		napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time;
1000 }
1001 qdf_export_symbol(hif_update_napi_max_poll_time);
1002 
1003 #ifdef HIF_IRQ_AFFINITY
1004 /**
1005  *
1006  * hif_napi_update_yield_stats() - update NAPI yield related stats
1007  * @cpu_id: CPU ID for which stats needs to be updates
1008  * @ce_id: Copy Engine ID for which yield stats needs to be updates
1009  * @time_limit_reached: indicates whether the time limit was reached
1010  * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached
1011  *
1012  * Return: None
1013  */
1014 void hif_napi_update_yield_stats(struct CE_state *ce_state,
1015 				 bool time_limit_reached,
1016 				 bool rxpkt_thresh_reached)
1017 {
1018 	struct hif_softc *hif;
1019 	struct qca_napi_data *napi_data = NULL;
1020 	int ce_id = 0;
1021 	int cpu_id = 0;
1022 
1023 	if (unlikely(!ce_state)) {
1024 		QDF_ASSERT(ce_state);
1025 		return;
1026 	}
1027 
1028 	hif = ce_state->scn;
1029 
1030 	if (unlikely(!hif)) {
1031 		QDF_ASSERT(hif);
1032 		return;
1033 	}
1034 	napi_data = &(hif->napi_data);
1035 	if (unlikely(!napi_data)) {
1036 		QDF_ASSERT(napi_data);
1037 		return;
1038 	}
1039 
1040 	ce_id = ce_state->id;
1041 	cpu_id = qdf_get_cpu();
1042 
1043 	if (unlikely(!napi_data->napis[ce_id])) {
1044 		return;
1045 	}
1046 
1047 	if (time_limit_reached)
1048 		napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
1049 	else
1050 		napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;
1051 
1052 	hif_update_napi_max_poll_time(ce_state, ce_id,
1053 				      cpu_id);
1054 }
1055 
1056 /**
1057  *
1058  * hif_napi_stats() - display NAPI CPU statistics
1059  * @napid: pointer to qca_napi_data
1060  *
1061  * Description:
1062  *    Prints the various CPU cores on which the NAPI instances / CE interrupts
1063  *    are being executed. Can be called from outside the NAPI layer.
1064  *
1065  * Return: None
1066  */
1067 void hif_napi_stats(struct qca_napi_data *napid)
1068 {
1069 	int i;
1070 	struct qca_napi_cpu *cpu;
1071 
1072 	if (!napid) {
1073 		qdf_debug("%s: napid struct is null", __func__);
1074 		return;
1075 	}
1076 
1077 	cpu = napid->napi_cpu;
1078 	qdf_debug("NAPI CPU TABLE");
1079 	qdf_debug("lilclhead=%d, bigclhead=%d",
1080 		  napid->lilcl_head, napid->bigcl_head);
1081 	for (i = 0; i < NR_CPUS; i++) {
1082 		qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d",
1083 			  i,
1084 			  cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
1085 			  cpu[i].core_mask.bits[0],
1086 			  cpu[i].thread_mask.bits[0],
1087 			  cpu[i].max_freq, cpu[i].napis,
1088 			  cpu[i].cluster_nxt);
1089 	}
1090 }
1091 
1092 #ifdef FEATURE_NAPI_DEBUG
1093 /*
1094  * Local functions
1095  * - no argument checks, all internal/trusted callers
1096  */
1097 static void hnc_dump_cpus(struct qca_napi_data *napid)
1098 {
1099 	hif_napi_stats(napid);
1100 }
1101 #else
1102 static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ };
1103 #endif /* FEATURE_NAPI_DEBUG */
1104 /**
1105  * hnc_link_clusters() - partitions the cpu table into clusters
1106  * @napid: pointer to NAPI data
1107  *
1108  * Takes in a CPU topology table and builds two linked lists
1109  * (big cluster cores, list-head at bigcl_head, and little cluster
1110  * cores, list-head at lilcl_head) out of it.
1111  *
1112  * If there is more than one cluster:
1113  * - bigcl_head and lilcl_head will be different,
1114  * - the cluster with the highest cpufreq will be considered the "big" cluster.
1115  *   If more than one cluster shares the highest frequency, the *last* of such
1116  *   clusters will be designated as the "big cluster"
1117  * - the cluster with the lowest cpufreq will be considered the "li'l" cluster.
1118  *   If more than one cluster shares the lowest cpu freq, the *first*
1119  *   of such clusters will be designated as the "little cluster"
1120  * - We only support up to 32 clusters
1121  * Return: 0 : OK
1122  *         !0: error (at least one of lil/big clusters could not be found)
1123  */
1124 #define HNC_MIN_CLUSTER 0
1125 #define HNC_MAX_CLUSTER 1
1126 static int hnc_link_clusters(struct qca_napi_data *napid)
1127 {
1128 	int rc = 0;
1129 
1130 	int i;
1131 	int it = 0;
1132 	uint32_t cl_done = 0x0;
1133 	int cl, curcl, curclhead = 0;
1134 	int more;
1135 	unsigned int lilfrq = INT_MAX;
1136 	unsigned int bigfrq = 0;
1137 	unsigned int clfrq = 0;
1138 	int prev = 0;
1139 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1140 
1141 	napid->lilcl_head = napid->bigcl_head = -1;
1142 
1143 	do {
1144 		more = 0;
1145 		it++; curcl = -1;
1146 		for (i = 0; i < NR_CPUS; i++) {
1147 			cl = cpus[i].cluster_id;
1148 			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
1149 				   i, cl);
1150 			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
1151 				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
1152 				/* continue if ASSERTs are disabled */
1153 				continue;
1154 			};
1155 			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
1156 				NAPI_DEBUG("Core mask 0. SKIPPED\n");
1157 				continue;
1158 			}
1159 			if (cl_done & (0x01 << cl)) {
1160 				NAPI_DEBUG("Cluster already processed. SKIPPED\n");
1161 				continue;
1162 			} else {
1163 				if (more == 0) {
1164 					more = 1;
1165 					curcl = cl;
1166 					curclhead = i; /* row */
1167 					clfrq = cpus[i].max_freq;
1168 					prev = -1;
1169 				};
1170 				if ((curcl >= 0) && (curcl != cl)) {
1171 					NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
1172 						   cl, curcl);
1173 					continue;
1174 				}
1175 				if (cpus[i].max_freq != clfrq)
1176 					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
1177 						   cpus[i].max_freq, clfrq);
1178 				if (clfrq >= bigfrq) {
1179 					bigfrq = clfrq;
1180 					napid->bigcl_head  = curclhead;
1181 					NAPI_DEBUG("bigcl=%d\n", curclhead);
1182 				}
1183 				if (clfrq < lilfrq) {
1184 					lilfrq = clfrq;
1185 					napid->lilcl_head = curclhead;
1186 					NAPI_DEBUG("lilcl=%d\n", curclhead);
1187 				}
1188 				if (prev != -1)
1189 					cpus[prev].cluster_nxt = i;
1190 
1191 				prev = i;
1192 			}
1193 		}
1194 		if (curcl >= 0)
1195 			cl_done |= (0x01 << curcl);
1196 
1197 	} while (more);
1198 
1199 	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
1200 		rc = -EFAULT;
1201 
1202 	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
1203 	return rc;
1204 }
1205 #undef HNC_MIN_CLUSTER
1206 #undef HNC_MAX_CLUSTER
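
/*
 * Illustrative example of the linking above (a hypothetical 8-core
 * big.LITTLE topology, not a statement about any particular SoC): with CPUs
 * 0-3 in cluster 0 at a lower max_freq and CPUs 4-7 in cluster 1 at a higher
 * max_freq, hnc_link_clusters() ends up with:
 *
 *   napid->lilcl_head = 0, cluster_nxt chain: 0 -> 1 -> 2 -> 3 -> -1
 *   napid->bigcl_head = 4, cluster_nxt chain: 4 -> 5 -> 6 -> 7 -> -1
 */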
1207 
1208 /*
1209  * hotplug function group
1210  */
1211 
1212 /**
1213  * hnc_cpu_online_cb() - handles CPU hotplug "up" events
1214  * @context: the associated HIF context
1215  * @cpu: the CPU Id of the CPU the event happened on
1216  *
1217  * Return: None
1218  */
1219 static void hnc_cpu_online_cb(void *context, uint32_t cpu)
1220 {
1221 	struct hif_softc *hif = context;
1222 	struct qca_napi_data *napid = &hif->napi_data;
1223 
1224 	if (cpu >= NR_CPUS)
1225 		return;
1226 
1227 	NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);
1228 
1229 	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
1230 	NAPI_DEBUG("%s: CPU %u marked %d",
1231 		   __func__, cpu, napid->napi_cpu[cpu].state);
1232 
1233 	NAPI_DEBUG("<--%s", __func__);
1234 }
1235 
1236 /**
1237  * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
1238  * @context: the associated HIF context
1239  * @cpu: the CPU Id of the CPU the event happened on
1240  *
1241  * On transition to offline, we act on PREP events, because we may need to move
1242  * the irqs/NAPIs to another CPU before it is actually off-lined.
1243  *
1244  * Return: None
1245  */
1246 static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
1247 {
1248 	struct hif_softc *hif = context;
1249 	struct qca_napi_data *napid = &hif->napi_data;
1250 
1251 	if (cpu >= NR_CPUS)
1252 		return;
1253 
1254 	NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);
1255 
1256 	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1257 
1258 	NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
1259 		   __func__, cpu, napid->napi_cpu[cpu].state);
1260 
1261 	/*
1262 	 * we need to move any NAPIs on this CPU out.
1263 	 * if we are in LO throughput mode, then this is valid
1264 	 * only if the CPU is the designated low CPU.
1265 	 */
1266 	hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
1267 		       NAPI_EVT_CPU_STATE,
1268 		       (void *)
1269 		       ((size_t)cpu << 16 | napid->napi_cpu[cpu].state));
1270 
1271 	NAPI_DEBUG("<--%s", __func__);
1272 }
1273 
1274 static int hnc_hotplug_register(struct hif_softc *hif_sc)
1275 {
1276 	QDF_STATUS status;
1277 
1278 	NAPI_DEBUG("-->%s", __func__);
1279 
1280 	status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
1281 				    hif_sc,
1282 				    hnc_cpu_online_cb,
1283 				    hnc_cpu_before_offline_cb);
1284 
1285 	NAPI_DEBUG("<--%s [%d]", __func__, status);
1286 
1287 	return qdf_status_to_os_return(status);
1288 }
1289 
1290 static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
1291 {
1292 	NAPI_DEBUG("-->%s", __func__);
1293 
1294 	if (hif_sc->napi_data.cpuhp_handler)
1295 		qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);
1296 
1297 	NAPI_DEBUG("<--%s", __func__);
1298 }
1299 
1300 /**
1301  * hnc_tput_hook() - installs a callback in the throughput detector
1302  * @install: !0 => install the callback; =0: uninstall it
1303  *
1304  * installs a callback to be called when wifi driver throughput (tx+rx)
1305  * crosses a threshold. Currently, we are using the same criteria as
1306  * TCP ack suppression (500 packets/100ms by default).
1307  *
1308  * Return: 0 : success
1309  *         <0: failure
1310  */
1311 
1312 static int hnc_tput_hook(int install)
1313 {
1314 	int rc = 0;
1315 
1316 	/*
1317 	 * Nothing, until the bw_calculation accepts registration
1318 	 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk
1319 	 *   hdd_napi_throughput_policy(...)
1320 	 */
1321 	return rc;
1322 }
1323 
1324 /*
1325  * Implementation of hif_napi_cpu API
1326  */
1327 
1328 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1329 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1330 {
1331 	cpumask_copy(&(cpus[i].thread_mask),
1332 			     topology_sibling_cpumask(i));
1333 }
1334 #else
1335 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1336 {
1337 }
1338 #endif
1339 
1340 
1341 /**
1342  * hif_napi_cpu_init() - initialization of irq affinity block
1343  * @hif: pointer to the opaque hif context
1344  *
1345  * called by hif_napi_create, when the first NAPI instance is created
1346  * - builds napi_rss_cpus table from cpu topology
1347  * - links cores of the same clusters together
1348  * - installs hot-plug notifier
1349  * - installs throughput trigger notifier (when such mechanism exists)
1350  *
1351  * Return: 0: OK
1352  *         <0: error code
1353  */
1354 int hif_napi_cpu_init(struct hif_opaque_softc *hif)
1355 {
1356 	int rc = 0;
1357 	int i;
1358 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1359 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1360 
1361 	NAPI_DEBUG("--> ");
1362 
1363 	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
1364 		NAPI_DEBUG("NAPI RSS table already initialized.\n");
1365 		rc = -EALREADY;
1366 		goto lab_rss_init;
1367 	}
1368 
1369 	/* build CPU topology table */
1370 	for_each_possible_cpu(i) {
1371 		cpus[i].state       = ((cpumask_test_cpu(i, cpu_online_mask)
1372 					? QCA_NAPI_CPU_UP
1373 					: QCA_NAPI_CPU_DOWN));
1374 		cpus[i].core_id     = topology_core_id(i);
1375 		cpus[i].cluster_id  = topology_physical_package_id(i);
1376 		cpumask_copy(&(cpus[i].core_mask),
1377 			     topology_core_cpumask(i));
1378 		record_sibling_cpumask(cpus, i);
1379 		cpus[i].max_freq    = cpufreq_quick_get_max(i);
1380 		cpus[i].napis       = 0x0;
1381 		cpus[i].cluster_nxt = -1; /* invalid */
1382 	}
1383 
1384 	/* link clusters together */
1385 	rc = hnc_link_clusters(napid);
1386 	if (0 != rc)
1387 		goto lab_err_topology;
1388 
1389 	/* install hotplug notifier */
1390 	rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
1391 	if (0 != rc)
1392 		goto lab_err_hotplug;
1393 
1394 	/* install throughput notifier */
1395 	rc = hnc_tput_hook(1);
1396 	if (0 == rc)
1397 		goto lab_rss_init;
1398 
1399 lab_err_hotplug:
1400 	hnc_tput_hook(0);
1401 	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
1402 lab_err_topology:
1403 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1404 lab_rss_init:
1405 	NAPI_DEBUG("<-- [rc=%d]", rc);
1406 	return rc;
1407 }
1408 
1409 /**
1410  * hif_napi_cpu_deinit() - clean-up of irq affinity block
1411  * @hif: pointer to the opaque hif context
1412  * called by hif_napi_destroy, when the last instance is removed
1413  * - uninstalls throughput and hotplug notifiers
1414  * - clears cpu topology table
1415  * Return: 0: OK
1416  */
1417 int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
1418 {
1419 	int rc = 0;
1420 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1421 
1422 	NAPI_DEBUG("-->%s(...)", __func__);
1423 
1424 	/* uninstall tput notifier */
1425 	rc = hnc_tput_hook(0);
1426 
1427 	/* uninstall hotplug notifier */
1428 	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
1429 
1430 	/* clear the topology table */
1431 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1432 
1433 	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);
1434 
1435 	return rc;
1436 }
1437 
1438 /**
1439  * hncm_migrate_to() - migrates a NAPI to a CPU
1440  * @napid: pointer to NAPI block
1441  * @napi_ce: CE id of the NAPI instance to be migrated
1442  * @didx : index in the CPU topology table for the CPU to migrate to
1443  *
1444  * Migrates NAPI (identified by the CE_id) to the destination core
1445  * Updates the napi_map of the destination entry
1446  *
1447  * Return:
1448  *  =0 : success
1449  *  <0 : error
1450  */
1451 static int hncm_migrate_to(struct qca_napi_data *napid,
1452 			   int                   napi_ce,
1453 			   int                   didx)
1454 {
1455 	int rc = 0;
1456 	QDF_STATUS status;
1457 
1458 	NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);
1459 
1460 	if (!napid->napis[napi_ce])
1461 		return -EINVAL;
1462 
1463 	napid->napis[napi_ce]->cpumask.bits[0] = (1 << didx);
1464 
1465 	irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0);
1466 	status = qdf_dev_set_irq_affinity(napid->napis[napi_ce]->irq,
1467 					  (struct qdf_cpu_mask *)
1468 					  &napid->napis[napi_ce]->cpumask);
1469 	rc = qdf_status_to_os_return(status);
1470 
1471 	/* unmark the napis bitmap in the cpu table */
1472 	napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce);
1473 	/* mark the napis bitmap for the new designated cpu */
1474 	napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
1475 	napid->napis[napi_ce]->cpu = didx;
1476 
1477 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1478 	return rc;
1479 }
1480 /**
1481  * hncm_dest_cpu() - finds a destination CPU for NAPI
1482  * @napid: pointer to NAPI block
1483  * @act  : RELOCATE | COLLAPSE | DISPERSE
1484  *
1485  * Finds the designated destination for the next IRQ.
1486  * RELOCATE: translated to either COLLAPSE or DISPERSE based
1487  *           on napid->napi_mode (throughput state)
1488  * COLLAPSE: All have the same destination: the first online CPU in lilcl
1489  * DISPERSE: One of the CPU in bigcl, which has the smallest number of
1490  *           NAPIs on it
1491  *
1492  * Return: >=0 : index in the cpu topology table
1493  *       : < 0 : error
1494  */
1495 static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
1496 {
1497 	int destidx = -1;
1498 	int head, i;
1499 
1500 	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
1501 	if (act == HNC_ACT_RELOCATE) {
1502 		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
1503 			act = HNC_ACT_COLLAPSE;
1504 		else
1505 			act = HNC_ACT_DISPERSE;
1506 		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
1507 			   __func__, act);
1508 	}
1509 	if (act == HNC_ACT_COLLAPSE) {
1510 		head = i = napid->lilcl_head;
1511 retry_collapse:
1512 		while (i >= 0) {
1513 			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
1514 				destidx = i;
1515 				break;
1516 			}
1517 			i = napid->napi_cpu[i].cluster_nxt;
1518 		}
1519 		if ((destidx < 0) && (head == napid->lilcl_head)) {
1520 			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
1521 				__func__);
1522 			head = i = napid->bigcl_head;
1523 			goto retry_collapse;
1524 		}
1525 	} else { /* HNC_ACT_DISPERSE */
1526 		int smallest = 99; /* all 32 bits full */
1527 		int smallidx = -1;
1528 
1529 		head = i = napid->bigcl_head;
1530 retry_disperse:
1531 		while (i >= 0) {
1532 			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
1533 			    (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
1534 				smallest = hweight32(napid->napi_cpu[i].napis);
1535 				smallidx = i;
1536 			}
1537 			i = napid->napi_cpu[i].cluster_nxt;
1538 		}
1539 		/* Check if it matches the user-specified CPU mask */
1540 		smallidx = ((1 << smallidx) & napid->user_cpu_affin_mask) ?
1541 								smallidx : -1;
1542 
1543 		if ((smallidx < 0) && (head == napid->bigcl_head)) {
1544 			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
1545 				__func__);
1546 			head = i = napid->lilcl_head;
1547 			goto retry_disperse;
1548 		}
1549 		destidx = smallidx;
1550 	}
1551 	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
1552 	return destidx;
1553 }
1554 /**
1555  * hif_napi_cpu_migrate() - migrate IRQs away
1556  * @cpu   : -1: all CPUs; <n>: a specific CPU
1557  * @action: COLLAPSE | DISPERSE
1558  *
1559  * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
1560  * cores. Eligible cores are:
1561  * act=COLLAPSE -> the first online core of the little cluster
1562  * act=DISPERSE -> separate cores of the big cluster, so that each core will
1563  *                 host the minimum number of NAPIs/IRQs (napid->napi_cpu[cpu].napis)
1564  *
1565  * Note that this function is called with a spinlock acquired already.
1566  *
1567  * Return: =0: success
1568  *         <0: error
1569  */
1570 
1571 int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
1572 {
1573 	int      rc = 0;
1574 	struct qca_napi_cpu *cpup;
1575 	int      i, dind;
1576 	uint32_t napis;
1577 
1578 	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
1579 		   __func__, cpu, action);
1580 	/* the following is really: hif_napi_enabled() with less overhead */
1581 	if (napid->ce_map == 0) {
1582 		NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
1583 		goto hncm_return;
1584 	}
1585 
1586 	cpup = napid->napi_cpu;
1587 
1588 	switch (action) {
1589 	case HNC_ACT_RELOCATE:
1590 	case HNC_ACT_DISPERSE:
1591 	case HNC_ACT_COLLAPSE: {
1592 		/* first find the src napi set */
1593 		if (cpu == HNC_ANY_CPU)
1594 			napis = napid->ce_map;
1595 		else
1596 			napis = cpup[cpu].napis;
1597 		/* then clear the napi bitmap on each CPU */
1598 		for (i = 0; i < NR_CPUS; i++)
1599 			cpup[i].napis = 0;
1600 		/* then for each of the NAPIs to disperse: */
1601 		for (i = 0; i < CE_COUNT_MAX; i++)
1602 			if (napis & (1 << i)) {
1603 				/* find a destination CPU */
1604 				dind = hncm_dest_cpu(napid, action);
1605 				if (dind >= 0) {
1606 					NAPI_DEBUG("Migrating NAPI ce%d to %d",
1607 						   i, dind);
1608 					rc = hncm_migrate_to(napid, i, dind);
1609 				} else {
1610 					NAPI_DEBUG("No dest for NAPI ce%d", i);
1611 					hnc_dump_cpus(napid);
1612 					rc = -1;
1613 				}
1614 			}
1615 		break;
1616 	}
1617 	default: {
1618 		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
1619 		QDF_BUG(0);
1620 		break;
1621 	}
1622 	} /* switch action */
1623 
1624 hncm_return:
1625 	hnc_dump_cpus(napid);
1626 	return rc;
1627 }
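
/*
 * Illustrative migration example (editor's sketch with hypothetical values):
 * assume a single NAPI on CE 5 (ce_map == 0x20), a big cluster headed at an
 * online CPU 4, and CPU 4 allowed by napid->user_cpu_affin_mask.  Then
 * hif_napi_cpu_migrate(napid, HNC_ANY_CPU, HNC_ACT_DISPERSE) picks CE 5 from
 * the source set, hncm_dest_cpu() returns 4 (the big-cluster CPU with the
 * fewest NAPIs), and hncm_migrate_to() pins the CE 5 IRQ to CPU 4 and sets
 * napid->napi_cpu[4].napis |= (0x01 << 5).
 */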
1628 
1629 
1630 /**
1631  * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting
1632  * @napid: pointer to qca_napi_data structure
1633  * @bl_flag: blacklist flag to enable/disable blacklisting
1634  *
1635  * The function enables/disables blacklisting for all the copy engine
1636  * interrupts on which NAPI is enabled.
1637  *
1638  * Return: None
1639  */
1640 static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
1641 {
1642 	int i;
1643 	struct qca_napi_info *napii;
1644 
1645 	for (i = 0; i < CE_COUNT_MAX; i++) {
1646 		/* check if NAPI is enabled on the CE */
1647 		if (!(napid->ce_map & (0x01 << i)))
1648 			continue;
1649 
1650 		/* double check that NAPI is allocated for the CE */
1651 		napii = napid->napis[i];
1652 		if (!(napii))
1653 			continue;
1654 
1655 		if (bl_flag == true)
1656 			irq_modify_status(napii->irq,
1657 					  0, IRQ_NO_BALANCING);
1658 		else
1659 			irq_modify_status(napii->irq,
1660 					  IRQ_NO_BALANCING, 0);
1661 		hif_debug("bl_flag %d CE %d", bl_flag, i);
1662 	}
1663 }
1664 
1665 /**
1666  * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
1667  * @napid: pointer to qca_napi_data structure
1668  * @op: blacklist operation to perform
1669  *
1670  * The function enables/disables/queries blacklisting for all CE RX
1671  * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables
1672  * core_ctl_set_boost.
1673  * Once blacklisting is enabled, the interrupts will not be managed by the IRQ
1674  * balancer.
1675  *
1676  * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST are not enabled
1677  *         for BLACKLIST_QUERY op - blacklist refcount
1678  *         for BLACKLIST_ON op    - return value from core_ctl_set_boost API
1679  *         for BLACKLIST_OFF op   - return value from core_ctl_set_boost API
1680  */
1681 int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
1682 			   enum qca_blacklist_op op)
1683 {
1684 	int rc = 0;
1685 	static int ref_count; /* = 0 by the compiler */
1686 	uint8_t flags = napid->flags;
1687 	bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
1688 	bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;
1689 
1690 	NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);
1691 
1692 	if (!(bl_en && ccb_en)) {
1693 		rc = -EINVAL;
1694 		goto out;
1695 	}
1696 
1697 	switch (op) {
1698 	case BLACKLIST_QUERY:
1699 		rc = ref_count;
1700 		break;
1701 	case BLACKLIST_ON:
1702 		ref_count++;
1703 		rc = 0;
1704 		if (ref_count == 1) {
1705 			rc = hif_napi_core_ctl_set_boost(true);
1706 			NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
1707 				rc, ref_count);
1708 			hif_napi_bl_irq(napid, true);
1709 		}
1710 		break;
1711 	case BLACKLIST_OFF:
1712 		if (ref_count) {
1713 			ref_count--;
1714 			rc = 0;
1715 			if (ref_count == 0) {
1716 				rc = hif_napi_core_ctl_set_boost(false);
1717 				NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
1718 					   rc, ref_count);
1719 				hif_napi_bl_irq(napid, false);
1720 			}
1721 		}
1722 		break;
1723 	default:
1724 		NAPI_DEBUG("Invalid blacklist op: %d", op);
1725 		rc = -EINVAL;
1726 	} /* switch */
1727 out:
1728 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1729 	return rc;
1730 }
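
/*
 * Illustrative sketch of the reference counting above (hypothetical call
 * sequence): two independent BLACKLIST_ON requests need two BLACKLIST_OFF
 * requests before the IRQs are handed back to the irq balancer:
 *
 *   hif_napi_cpu_blacklist(napid, BLACKLIST_ON);    ref_count 0->1, bl on
 *   hif_napi_cpu_blacklist(napid, BLACKLIST_ON);    ref_count 1->2, no-op
 *   hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);   ref_count 2->1, no-op
 *   hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);   ref_count 1->0, bl off
 *   hif_napi_cpu_blacklist(napid, BLACKLIST_QUERY); returns 0
 */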
1731 
1732 /**
1733  * hif_napi_serialize() - [de-]serialize NAPI operations
1734  * @hif  : hif context
1735  * @is_on: 1: serialize, 0: deserialize
1736  *
1737  * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the
1738  * following steps (see hif_napi_event for code):
1739  * - put irqs of all NAPI instances on the same CPU
1740  * - only for the first serialize call: blacklist
1741  *
1742  * hif_napi_serialize(hif, 0):
1743  * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
1744  * - at the end of the timer, check the current throughput state and
1745  *   implement it.
1746  */
1747 static unsigned long napi_serialize_reqs;
1748 int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
1749 {
1750 	int rc = -EINVAL;
1751 
1752 	if (hif)
1753 		switch (is_on) {
1754 		case 0: { /* de-serialize */
1755 			rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
1756 					    (void *) 0);
1757 			napi_serialize_reqs = 0;
1758 			break;
1759 		} /* end de-serialize */
1760 		case 1: { /* serialize */
1761 			rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
1762 					    (void *)napi_serialize_reqs++);
1763 			break;
1764 		} /* end serialize */
1765 		default:
1766 			break; /* no-op */
1767 		} /* switch */
1768 	return rc;
1769 }
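
/*
 * Illustrative usage (editor's sketch of the roaming/peer-management flow
 * described in hif_napi_event(); the actual call sites are in upper layers):
 *
 *   hif_napi_serialize(hif_ctx, 1);   collapse NAPIs to one CPU, blacklist on
 *   ... critical single-context work ...
 *   hif_napi_serialize(hif_ctx, 0);   back to normal throughput handling
 */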
1770 
1771 #endif /* ifdef HIF_IRQ_AFFINITY */
1772