1 /*
2  * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: hif_napi.c
21  *
22  * HIF NAPI interface implementation
23  */
24 
25 #include <linux/string.h> /* memset */
26 
27 /* Linux headers */
28 #include <linux/cpumask.h>
29 #include <linux/cpufreq.h>
30 #include <linux/cpu.h>
31 #include <linux/topology.h>
32 #include <linux/interrupt.h>
33 #include <linux/irq.h>
34 #ifdef CONFIG_SCHED_CORE_CTL
35 #include <linux/sched/core_ctl.h>
36 #endif
37 #include <pld_common.h>
38 #include <linux/pm.h>
39 
40 /* Driver headers */
41 #include <hif_napi.h>
42 #include <hif_debug.h>
43 #include <hif_io32.h>
44 #include <ce_api.h>
45 #include <ce_internal.h>
46 #include <hif_irq_affinity.h>
47 #include "qdf_cpuhp.h"
48 #include "qdf_module.h"
49 
50 enum napi_decision_vector {
51 	HIF_NAPI_NOEVENT = 0,
52 	HIF_NAPI_INITED  = 1,
53 	HIF_NAPI_CONF_UP = 2
54 };
55 #define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
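/*
 * NAPI is operational only when both HIF_NAPI_INITED and HIF_NAPI_CONF_UP
 * are set, i.e. when napid->state equals ENABLE_NAPI_MASK. Illustrative
 * sketch of the check (hypothetical helper, not used by this file, which
 * compares against ENABLE_NAPI_MASK directly):
 *
 *	static bool hif_napi_state_is_on(struct qca_napi_data *napid)
 *	{
 *		return (napid->state & ENABLE_NAPI_MASK) == ENABLE_NAPI_MASK;
 *	}
 */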
56 
57 #ifdef RECEIVE_OFFLOAD
58 /**
59  * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI
60  * @napi: Rx_thread NAPI
61  * @budget: NAPI BUDGET
62  *
63  * Return: 0; this NAPI is never scheduled, so it should never be polled.
64  */
65 static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget)
66 {
67 	HIF_ERROR("This napi_poll should not be polled as we don't schedule it");
68 	QDF_ASSERT(0);
69 	return 0;
70 }
71 
72 /**
73  * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI
74  * @napii: Handle to napi_info holding rx_thread napi
75  *
76  * Return: None
77  */
78 static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
79 {
80 	init_dummy_netdev(&napii->rx_thread_netdev);
81 	netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi,
82 		       hif_rxthread_napi_poll, 64);
83 	napi_enable(&napii->rx_thread_napi);
84 }
85 
86 /**
87  * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI
88  * @napii: Handle to napi_info holding rx_thread napi
89  *
90  * Return: None
91  */
92 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
93 {
94 	netif_napi_del(&napii->rx_thread_napi);
95 }
96 #else /* RECEIVE_OFFLOAD */
97 static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
98 {
99 }
100 
101 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
102 {
103 }
104 #endif
105 
106 /**
107  * hif_napi_create() - creates the NAPI structures for the NAPI-capable CEs
108  * @hif_ctx: pointer to hif context
109  * @poll   : poll function to be used for the NAPI instances
110  * @budget : budget to be registered with the NAPI instances
111  * @scale  : scale factor on the weight (to scale the budget to 1000)
112  * @flags  : feature flags
113  *           (one instance is created for each htt_rx_data copy engine)
114  *
115  * Description:
116  *    Creates NAPI instances. This function is called
117  *    unconditionally during initialization. It creates
118  *    napi structures through the proper HTC/HIF calls.
119  *    The structures are disabled on creation.
120  *    Note that for each NAPI instance a separate dummy netdev is used
121  *
122  * Return:
123  * < 0: error
124  * = 0: <should never happen>
125  * > 0: bitmap (ce_map) of the CEs for which NAPI instances were created
126  */
127 int hif_napi_create(struct hif_opaque_softc   *hif_ctx,
128 		    int (*poll)(struct napi_struct *, int),
129 		    int                budget,
130 		    int                scale,
131 		    uint8_t            flags)
132 {
133 	int i;
134 	struct qca_napi_data *napid;
135 	struct qca_napi_info *napii;
136 	struct CE_state      *ce_state;
137 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
138 	int    rc = 0;
139 
140 	NAPI_DEBUG("-->(budget=%d, scale=%d)",
141 		   budget, scale);
142 	NAPI_DEBUG("hif->napi_data.state = 0x%08x",
143 		   hif->napi_data.state);
144 	NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
145 		   hif->napi_data.ce_map);
146 
147 	napid = &(hif->napi_data);
148 	if (0 == (napid->state &  HIF_NAPI_INITED)) {
149 		memset(napid, 0, sizeof(struct qca_napi_data));
150 		qdf_spinlock_create(&(napid->lock));
151 
152 		napid->state |= HIF_NAPI_INITED;
153 		napid->flags = flags;
154 
155 		rc = hif_napi_cpu_init(hif_ctx);
156 		if (rc != 0 && rc != -EALREADY) {
157 			HIF_ERROR("NAPI initialization failed: %d", rc);
158 			rc = napid->ce_map;
159 			goto hnc_err;
160 		} else
161 			rc = 0;
162 
163 		HIF_DBG("%s: NAPI structures initialized, rc=%d",
164 			 __func__, rc);
165 	}
166 	for (i = 0; i < hif->ce_count; i++) {
167 		ce_state = hif->ce_id_to_state[i];
168 		NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
169 			   i, ce_state->htt_rx_data,
170 			   ce_state->htt_tx_data);
171 		if (ce_srng_based(hif))
172 			continue;
173 
174 		if (!ce_state->htt_rx_data)
175 			continue;
176 
177 		/* Now this is a CE where we need NAPI on */
178 		NAPI_DEBUG("Creating NAPI on pipe %d", i);
179 		napii = qdf_mem_malloc(sizeof(*napii));
180 		napid->napis[i] = napii;
181 		if (!napii) {
182 			NAPI_DEBUG("NAPI alloc failure %d", i);
183 			rc = -ENOMEM;
184 			goto napii_free;
185 		}
186 	}
187 
188 	for (i = 0; i < hif->ce_count; i++) {
189 		napii = napid->napis[i];
190 		if (!napii)
191 			continue;
192 
193 		NAPI_DEBUG("initializing NAPI for pipe %d", i);
194 		memset(napii, 0, sizeof(struct qca_napi_info));
195 		napii->scale = scale;
196 		napii->id    = NAPI_PIPE2ID(i);
197 		napii->hif_ctx = hif_ctx;
198 		napii->irq   = pld_get_irq(hif->qdf_dev->dev, i);
199 
200 		if (napii->irq < 0)
201 			HIF_WARN("%s: bad IRQ value for CE %d: %d",
202 				 __func__, i, napii->irq);
203 
204 		init_dummy_netdev(&(napii->netdev));
205 
206 		NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)",
207 			   &(napii->napi), &(napii->netdev), poll, budget);
208 		netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget);
209 
210 		NAPI_DEBUG("after napi_add");
211 		NAPI_DEBUG("napi=0x%pK, netdev=0x%pK",
212 			   &(napii->napi), &(napii->netdev));
213 		NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK",
214 			   napii->napi.dev_list.prev,
215 			   napii->napi.dev_list.next);
216 		NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK",
217 			   napii->netdev.napi_list.prev,
218 			   napii->netdev.napi_list.next);
219 
220 		hif_init_rx_thread_napi(napii);
221 		napii->lro_ctx = qdf_lro_init();
222 		NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n",
223 				i, napii->id, napii->lro_ctx);
224 
225 		/* It is OK to change the state variable below without
226 		 * protection as there should be no-one around yet
227 		 */
228 		napid->ce_map |= (0x01 << i);
229 		HIF_DBG("%s: NAPI id %d created for pipe %d", __func__,
230 			 napii->id, i);
231 	}
232 
233 	/* no ces registered with the napi */
234 	if (!ce_srng_based(hif) && napid->ce_map == 0) {
235 		HIF_WARN("%s: no napis created for copy engines", __func__);
236 		rc = -EFAULT;
237 		goto napii_free;
238 	}
239 
240 	NAPI_DEBUG("napi map = %x", napid->ce_map);
241 	NAPI_DEBUG("NAPI ids created for all applicable pipes");
242 	return napid->ce_map;
243 
244 napii_free:
245 	for (i = 0; i < hif->ce_count; i++) {
246 		napii = napid->napis[i];
247 		napid->napis[i] = NULL;
248 		if (napii)
249 			qdf_mem_free(napii);
250 	}
251 
252 hnc_err:
253 	NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
254 	return rc;
255 }
256 qdf_export_symbol(hif_napi_create);
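/*
 * Typical call sketch (illustrative only; hdd_napi_poll, the scale value
 * and handle_create_failure() are placeholders, not definitions from this
 * driver):
 *
 *	int ce_map;
 *
 *	ce_map = hif_napi_create(hif_ctx, hdd_napi_poll,
 *				 QCA_NAPI_BUDGET,
 *				 1,
 *				 QCA_NAPI_FEATURE_CPU_CORRECTION);
 *	if (ce_map <= 0)
 *		handle_create_failure(ce_map);
 *
 * A positive return value is the bitmap (napid->ce_map) of copy engines
 * for which a NAPI instance was created.
 */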
257 
258 #ifdef RECEIVE_OFFLOAD
259 void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
260 					 void (offld_flush_handler)(void *))
261 {
262 	int i;
263 	struct CE_state *ce_state;
264 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
265 	struct qca_napi_data *napid;
266 	struct qca_napi_info *napii;
267 
268 	if (!scn) {
269 		HIF_ERROR("%s: hif_state NULL!", __func__);
270 		QDF_ASSERT(0);
271 		return;
272 	}
273 
274 	napid = hif_napi_get_all(hif_hdl);
275 	for (i = 0; i < scn->ce_count; i++) {
276 		ce_state = scn->ce_id_to_state[i];
277 		if (ce_state && (ce_state->htt_rx_data)) {
278 			napii = napid->napis[i];
279 			napii->offld_flush_cb = offld_flush_handler;
280 			HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %p\n",
281 				i, napii->id, napii->offld_flush_cb);
282 		}
283 	}
284 }
285 
286 void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
287 {
288 	int i;
289 	struct CE_state *ce_state;
290 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
291 	struct qca_napi_data *napid;
292 	struct qca_napi_info *napii;
293 
294 	if (!scn) {
295 		HIF_ERROR("%s: hif_state NULL!", __func__);
296 		QDF_ASSERT(0);
297 		return;
298 	}
299 
300 	napid = hif_napi_get_all(hif_hdl);
301 	for (i = 0; i < scn->ce_count; i++) {
302 		ce_state = scn->ce_id_to_state[i];
303 		if (ce_state && (ce_state->htt_rx_data)) {
304 			napii = napid->napis[i];
305 			HIF_DBG("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK\n",
306 				i, napii->id, napii->offld_flush_cb);
307 			/* Not required */
308 			napii->offld_flush_cb = NULL;
309 		}
310 	}
311 }
312 #endif /* RECEIVE_OFFLOAD */
313 
314 /**
315  *
316  * hif_napi_destroy() - destroys the NAPI structures for a given instance
317  * @hif_ctx: pointer to hif context
318  * @id     : the NAPI id whose instance will be destroyed
319  * @force : if set, will destroy even if entry is active (de-activates)
320  *
321  * Description:
322  *    Destroy a given NAPI instance. This function is called
323  *    unconditionally during cleanup.
324  *    Refuses to destroy an entry if it is still enabled (unless force=1)
325  *    Marks the whole napi_data invalid if all instances are destroyed.
326  *
327  * Return:
328  * -EINVAL: specific entry has not been created
329  * -EPERM : specific entry is still active
330  * < 0    : error
331  * = 0    : success
332  */
333 int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
334 		     uint8_t          id,
335 		     int              force)
336 {
337 	uint8_t ce = NAPI_ID2PIPE(id);
338 	int rc = 0;
339 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
340 
341 	NAPI_DEBUG("-->(id=%d, force=%d)", id, force);
342 
343 	if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
344 		HIF_ERROR("%s: NAPI not initialized or entry %d not created",
345 			  __func__, id);
346 		rc = -EINVAL;
347 	} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
348 		HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
349 			  __func__, id, ce);
350 		if (hif->napi_data.napis[ce])
351 			HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)",
352 				  __func__, id, ce);
353 		rc = -EINVAL;
354 	} else {
355 		struct qca_napi_data *napid;
356 		struct qca_napi_info *napii;
357 
358 		napid = &(hif->napi_data);
359 		napii = napid->napis[ce];
360 		if (!napii) {
361 			if (napid->ce_map & (0x01 << ce))
362 				HIF_ERROR("%s: napii & ce_map out of sync(ce %d)",
363 					  __func__, ce);
364 			return -EINVAL;
365 		}
366 
367 
368 		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
369 			if (force) {
370 				napi_disable(&(napii->napi));
371 				HIF_DBG("%s: NAPI entry %d force disabled",
372 					 __func__, id);
373 				NAPI_DEBUG("NAPI %d force disabled", id);
374 			} else {
375 				HIF_ERROR("%s: Cannot destroy active NAPI %d",
376 					  __func__, id);
377 				rc = -EPERM;
378 			}
379 		}
380 		if (0 == rc) {
381 			NAPI_DEBUG("before napi_del");
382 			NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK",
383 				  napii->napi.dev_list.prev,
384 				  napii->napi.dev_list.next);
385 			NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK",
386 				   napii->netdev.napi_list.prev,
387 				   napii->netdev.napi_list.next);
388 
389 			qdf_lro_deinit(napii->lro_ctx);
390 			netif_napi_del(&(napii->napi));
391 			hif_deinit_rx_thread_napi(napii);
392 
393 			napid->ce_map &= ~(0x01 << ce);
394 			napid->napis[ce] = NULL;
395 			napii->scale  = 0;
396 			qdf_mem_free(napii);
397 			HIF_DBG("%s: NAPI %d destroyed\n", __func__, id);
398 
399 			/* if there are no active instances and
400 			 * if they are all destroyed,
401 			 * set the whole structure to uninitialized state
402 			 */
403 			if (napid->ce_map == 0) {
404 				rc = hif_napi_cpu_deinit(hif_ctx);
405 				/* caller is tolerant to receiving !=0 rc */
406 
407 				qdf_spinlock_destroy(&(napid->lock));
408 				memset(napid,
409 				       0, sizeof(struct qca_napi_data));
410 				HIF_DBG("%s: no NAPI instances. Zapped.",
411 					 __func__);
412 			}
413 		}
414 	}
415 
416 	return rc;
417 }
418 qdf_export_symbol(hif_napi_destroy);
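/*
 * Teardown sketch (illustrative; assumes NAPI ids map to pipes through
 * NAPI_PIPE2ID(), as in hif_napi_create() above):
 *
 *	int pipe;
 *
 *	for (pipe = 0; pipe < CE_COUNT_MAX; pipe++)
 *		if (hif_napi_created(hif_ctx, pipe))
 *			hif_napi_destroy(hif_ctx, NAPI_PIPE2ID(pipe), force);
 *
 * With force == 0 the call returns -EPERM for an instance that is still
 * enabled; force == 1 disables and destroys it unconditionally.
 */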
419 
420 #ifdef FEATURE_LRO
421 void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
422 {
423 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
424 	struct qca_napi_data *napid;
425 	struct qca_napi_info *napii;
426 
427 	napid = &(scn->napi_data);
428 	napii = napid->napis[NAPI_ID2PIPE(napi_id)];
429 
430 	if (napii)
431 		return napii->lro_ctx;
432 	return 0;
433 }
434 #endif
435 
436 /**
437  *
438  * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
439  * @hif: pointer to hif context
440  *
441  * Description:
442  *    Returns the address of the whole structure
443  *
444  * Return:
445  *  <addr>: address of the whole HIF NAPI structure
446  */
447 inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
448 {
449 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
450 
451 	return &(hif->napi_data);
452 }
453 
454 struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid)
455 {
456 	int id = NAPI_ID2PIPE(napi_id);
457 
458 	return napid->napis[id];
459 }
460 
461 /**
462  *
463  * hif_napi_event() - reacts to events that impact NAPI
464  * @hif : pointer to hif context
465  * @evnt: event that has been detected
466  * @data: more data regarding the event
467  *
468  * Description:
469  *   This function handles two types of events:
470  *   1- Events that change the state of NAPI (enabled/disabled):
471  *      {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
472  *      The state is retrievable by "hdd_napi_enabled(-1)"
473  *    - NAPI will be on if either INI file is on and it has not been disabled
474  *                                by a subsequent vendor CMD,
475  *                         or     it has been enabled by a vendor CMD.
476  *   2- Events that change the CPU affinity of a NAPI instance/IRQ:
477  *      {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
478  *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
479  *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
480  *    - In LO tput mode, NAPI will yield control of its interrupts to the system
481  *      management functions. However, in HI throughput mode, NAPI will actively
482  *      manage its interrupts/instances (by trying to disperse them out to
483  *      separate performance cores).
484  *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
485  *
486  *    + In some cases (roaming peer management is the only case so far),
487  *      a client can trigger a "SERIALIZE" event. Basically, this means that the
488  *      user is asking NAPI to go into a truly single execution context state.
489  *      So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted,
490  *      (if called for the first time) and then moves all IRQs (for NAPI
491  *      instances) to be collapsed to a single core. If called multiple times,
492  *      it will just re-collapse the CPUs. This is because blacklist-on() API
493  *      is reference-counted, and because the API has already been called.
494  *
495  *      Such a user should call the "DESERIALIZE" (NORMAL) event to set NAPI to go
496  *      to its "normal" operation. Optionally, they can give a timeout value (in
497  *      multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this
498  *      case, NAPI will just set the current throughput state to uninitialized
499  *      and set the delay period. Once the policy handler is called, it will skip
500  *      applying the policy for that many periods, and apply it normally afterwards.
501  *
502  * Return:
503  *  < 0: some error
504  *  = 0: event handled successfully
505  */
506 int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
507 		   void *data)
508 {
509 	int      rc = 0;
510 	uint32_t prev_state;
511 	int      i;
512 	bool state_changed;
513 	struct napi_struct *napi;
514 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
515 	struct qca_napi_data *napid = &(hif->napi_data);
516 	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
517 	enum {
518 		BLACKLIST_NOT_PENDING,
519 		BLACKLIST_ON_PENDING,
520 		BLACKLIST_OFF_PENDING
521 	     } blacklist_pending = BLACKLIST_NOT_PENDING;
522 
523 	NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);
524 
525 	if (ce_srng_based(hif))
526 		return hif_exec_event(hif_ctx, event, data);
527 
528 	if ((napid->state & HIF_NAPI_INITED) == 0) {
529 		NAPI_DEBUG("%s: got event when NAPI not initialized",
530 			   __func__);
531 		return -EINVAL;
532 	}
533 	qdf_spin_lock_bh(&(napid->lock));
534 	prev_state = napid->state;
535 	switch (event) {
536 	case NAPI_EVT_INI_FILE:
537 	case NAPI_EVT_CMD_STATE:
538 	case NAPI_EVT_INT_STATE: {
539 		int on = (data != ((void *)0));
540 
541 		HIF_DBG("%s: received event: STATE_CMD %d; v = %d (state=0x%0x)",
542 			 __func__, event,
543 			 on, prev_state);
544 		if (on)
545 			if (prev_state & HIF_NAPI_CONF_UP) {
546 				HIF_DBG("%s: duplicate NAPI conf ON msg",
547 					 __func__);
548 			} else {
549 				HIF_DBG("%s: setting state to ON",
550 					 __func__);
551 				napid->state |= HIF_NAPI_CONF_UP;
552 			}
553 		else /* off request */
554 			if (prev_state & HIF_NAPI_CONF_UP) {
555 				HIF_DBG("%s: setting state to OFF",
556 				 __func__);
557 				napid->state &= ~HIF_NAPI_CONF_UP;
558 			} else {
559 				HIF_DBG("%s: duplicate NAPI conf OFF msg",
560 					 __func__);
561 			}
562 		break;
563 	}
564 	/* case NAPI_INIT_FILE/CMD_STATE */
565 
566 	case NAPI_EVT_CPU_STATE: {
567 		int cpu = ((unsigned long int)data >> 16);
568 		int val = ((unsigned long int)data & 0x0ff);
569 
570 		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
571 			   __func__, cpu, val);
572 
573 		/* state has already been set by hnc_cpu_notify_cb */
574 		if ((val == QCA_NAPI_CPU_DOWN) &&
575 		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
576 		    (napid->napi_cpu[cpu].napis != 0)) {
577 			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
578 				   __func__, cpu);
579 			rc = hif_napi_cpu_migrate(napid,
580 						  cpu,
581 						  HNC_ACT_RELOCATE);
582 			napid->napi_cpu[cpu].napis = 0;
583 		}
584 		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
585 		break;
586 	}
587 
588 	case NAPI_EVT_TPUT_STATE: {
589 		tput_mode = (enum qca_napi_tput_state)data;
590 		if (tput_mode == QCA_NAPI_TPUT_LO) {
591 			/* from TPUT_HI -> TPUT_LO */
592 			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
593 				   __func__);
594 			blacklist_pending = BLACKLIST_OFF_PENDING;
595 			/*
596 			 * Ideally we should "collapse" interrupts here, since
597 			 * we are "dispersing" interrupts in the "else" case.
598 			 * This allows the possibility that our interrupts may
599 			 * still be on the perf cluster the next time we enter
600 			 * high tput mode. However, the irq_balancer is free
601 			 * to move our interrupts to power cluster once
602 			 * blacklisting has been turned off in the "else" case.
603 			 */
604 		} else {
605 			/* from TPUT_LO -> TPUT_HI */
606 			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
607 				   __func__);
608 			rc = hif_napi_cpu_migrate(napid,
609 						  HNC_ANY_CPU,
610 						  HNC_ACT_DISPERSE);
611 
612 			blacklist_pending = BLACKLIST_ON_PENDING;
613 		}
614 		napid->napi_mode = tput_mode;
615 		break;
616 	}
617 
618 	case NAPI_EVT_USR_SERIAL: {
619 		unsigned long users = (unsigned long)data;
620 
621 		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
622 			   __func__, users);
623 
624 		rc = hif_napi_cpu_migrate(napid,
625 					  HNC_ANY_CPU,
626 					  HNC_ACT_COLLAPSE);
627 		if ((users == 0) && (rc == 0))
628 			blacklist_pending = BLACKLIST_ON_PENDING;
629 		break;
630 	}
631 	case NAPI_EVT_USR_NORMAL: {
632 		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
633 		/*
634 		 * Deserialization timeout is handled at hdd layer;
635 		 * just mark current mode to uninitialized to ensure
636 		 * it will be set when the delay is over
637 		 */
638 		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
639 		break;
640 	}
641 	default: {
642 		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
643 			  __func__, event, (unsigned long) data);
644 		break;
645 	} /* default */
646 	}; /* switch */
647 
648 
649 	switch (blacklist_pending) {
650 	case BLACKLIST_ON_PENDING:
651 		/* assume the control of WLAN IRQs */
652 		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
653 		break;
654 	case BLACKLIST_OFF_PENDING:
655 		/* yield the control of WLAN IRQs */
656 		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
657 		break;
658 	default: /* nothing to do */
659 		break;
660 	} /* switch blacklist_pending */
661 
662 	/* we want to perform the comparison under the lock:
663 	 * there is a possibility of hif_napi_event() being called
664 	 * from two different contexts (driver unload and cpu hotplug
665 	 * notification); napid->state may be changed in the driver
666 	 * unload context, which can lead to a race condition in the
667 	 * cpu hotplug context. Therefore, perform the napid->state
668 	 * comparison before releasing the lock.
669 	 */
670 	state_changed = (prev_state != napid->state);
671 	qdf_spin_unlock_bh(&(napid->lock));
672 
673 	if (state_changed) {
674 		if (napid->state == ENABLE_NAPI_MASK) {
675 			rc = 1;
676 			for (i = 0; i < CE_COUNT_MAX; i++) {
677 				struct qca_napi_info *napii = napid->napis[i];
678 				if (napii) {
679 					napi = &(napii->napi);
680 					NAPI_DEBUG("%s: enabling NAPI %d",
681 						   __func__, i);
682 					napi_enable(napi);
683 				}
684 			}
685 		} else {
686 			rc = 0;
687 			for (i = 0; i < CE_COUNT_MAX; i++) {
688 				struct qca_napi_info *napii = napid->napis[i];
689 				if (napii) {
690 					napi = &(napii->napi);
691 					NAPI_DEBUG("%s: disabling NAPI %d",
692 						   __func__, i);
693 					napi_disable(napi);
694 					/* in case it is affined, remove it */
695 					irq_set_affinity_hint(napii->irq, NULL);
696 				}
697 			}
698 		}
699 	} else {
700 		HIF_DBG("%s: no change in hif napi state (still %d)",
701 			 __func__, prev_state);
702 	}
703 
704 	NAPI_DEBUG("<--[rc=%d]", rc);
705 	return rc;
706 }
707 qdf_export_symbol(hif_napi_event);
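/*
 * Call sketches (illustrative values; the enable flag and throughput
 * level shown are examples only):
 *
 *	// vendor command or INI file turning NAPI on (any non-NULL data)
 *	hif_napi_event(hif_ctx, NAPI_EVT_CMD_STATE, (void *)(uintptr_t)1);
 *
 *	// bus-bandwidth monitor reporting a throughput change
 *	hif_napi_event(hif_ctx, NAPI_EVT_TPUT_STATE,
 *		       (void *)(uintptr_t)QCA_NAPI_TPUT_HI);
 *
 * The TPUT event carries the new qca_napi_tput_state cast into the data
 * pointer; the state events treat NULL as "off" and non-NULL as "on".
 */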
708 
709 /**
710  * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
711  * @hif: hif context
712  * @ce : CE instance (or -1, to check if any CEs are enabled)
713  *
714  * Return: bool
715  */
716 int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
717 {
718 	int rc;
719 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
720 
721 	if (-1 == ce)
722 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
723 	else
724 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
725 		      (hif->napi_data.ce_map & (0x01 << ce)));
726 	return rc;
727 }
728 qdf_export_symbol(hif_napi_enabled);
729 
730 /**
731  * hif_napi_created() - checks whether NAPI is created for given ce or not
732  * @hif: hif context
733  * @ce : CE instance
734  *
735  * Return: bool
736  */
737 bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce)
738 {
739 	int rc;
740 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
741 
742 	rc = (hif->napi_data.ce_map & (0x01 << ce));
743 
744 	return !!rc;
745 }
746 qdf_export_symbol(hif_napi_created);
747 
748 /**
749  * hif_napi_enable_irq() - enables bus interrupts after napi_complete
750  *
751  * @hif: hif context
752  * @id : id of NAPI instance calling this (used to determine the CE)
753  *
754  * Return: void
755  */
756 inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
757 {
758 	struct hif_softc *scn = HIF_GET_SOFTC(hif);
759 
760 	hif_irq_enable(scn, NAPI_ID2PIPE(id));
761 }
762 
763 
764 /**
765  * hif_napi_schedule() - schedules napi, updates stats
766  * @hif_ctx: hif context
767  * @ce_id: index of napi instance
768  *
769  * Return: false if NAPI is not enabled or is already scheduled, otherwise true
770  */
771 bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
772 {
773 	int cpu = smp_processor_id();
774 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
775 	struct qca_napi_info *napii;
776 
777 	napii = scn->napi_data.napis[ce_id];
778 	if (qdf_unlikely(!napii)) {
779 		HIF_ERROR("%s, scheduling unallocated napi (ce:%d)",
780 			      __func__, ce_id);
781 		qdf_atomic_dec(&scn->active_tasklet_cnt);
782 		return false;
783 	}
784 
785 	if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) {
786 		NAPI_DEBUG("napi scheduled, return");
787 		qdf_atomic_dec(&scn->active_tasklet_cnt);
788 		return false;
789 	}
790 
791 	hif_record_ce_desc_event(scn,  ce_id, NAPI_SCHEDULE,
792 				 NULL, NULL, 0, 0);
793 	napii->stats[cpu].napi_schedules++;
794 	NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
795 	napi_schedule(&(napii->napi));
796 
797 	return true;
798 }
799 qdf_export_symbol(hif_napi_schedule);
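/*
 * Call-site sketch (illustrative; tasklet_entry stands in for the CE
 * tasklet bookkeeping of the actual interrupt handler): after bumping
 * active_tasklet_cnt, the CE interrupt handler hands the CE to NAPI
 * instead of the tasklet:
 *
 *	if (hif_napi_enabled(hif_ctx, ce_id))
 *		hif_napi_schedule(hif_ctx, ce_id);
 *	else
 *		tasklet_schedule(&tasklet_entry->intr_tq);
 *
 * A false return means the instance is unallocated or already scheduled;
 * in both cases active_tasklet_cnt has already been decremented here on
 * behalf of the caller.
 */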
800 
801 /**
802  * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
803  * @napi_info: pointer to qca_napi_info for the napi instance
804  *
805  * Return: true  => interrupt already on correct cpu, no correction needed
806  *         false => interrupt on wrong cpu, correction done for cpu affinity
807  *                   of the interrupt
808  */
809 static inline
810 bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
811 {
812 	bool right_cpu = true;
813 	int rc = 0;
814 	cpumask_t cpumask;
815 	int cpu;
816 	struct qca_napi_data *napid;
817 
818 	napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));
819 
820 	if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {
821 
822 		cpu = qdf_get_cpu();
823 		if (unlikely((hif_napi_cpu_blacklist(napid,
824 						BLACKLIST_QUERY) > 0) &&
825 						(cpu != napi_info->cpu))) {
826 			right_cpu = false;
827 
828 			NAPI_DEBUG("interrupt on wrong CPU, correcting");
829 			cpumask.bits[0] = (0x01 << napi_info->cpu);
830 
831 			irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0);
832 			rc = irq_set_affinity_hint(napi_info->irq,
833 						   &cpumask);
834 			irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING);
835 
836 			if (rc)
837 				HIF_ERROR("error setting irq affinity hint: %d",
838 					  rc);
839 			else
840 				napi_info->stats[cpu].cpu_corrected++;
841 		}
842 	}
843 	return right_cpu;
844 }
845 
846 #ifdef RECEIVE_OFFLOAD
847 /**
848  * hif_napi_offld_flush_cb() - Call upper layer flush callback
849  * @napi_info: Handle to hif_napi_info
850  *
851  * Return: None
852  */
853 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
854 {
855 	if (napi_info->offld_flush_cb)
856 		napi_info->offld_flush_cb(napi_info);
857 }
858 #else
859 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
860 {
861 }
862 #endif
863 
864 /**
865  * hif_napi_poll() - NAPI poll routine
866  * @napi  : pointer to NAPI struct as kernel holds it
867  * @budget: maximum number of messages this poll may process
868  *
869  * This is the body of the poll function.
870  * The poll function is called by kernel. So, there is a wrapper
871  * function in HDD, which in turn calls this function.
872  * Two main reasons why the whole thing is not implemented in HDD:
873  * a) references to things like ce_service that HDD is not aware of
874  * b) proximity to the implementation of ce_tasklet, which the body
875  *    of this function should be very close to.
876  *
877  * NOTE TO THE MAINTAINER:
878  *  Consider this function and ce_tasklet very tightly coupled pairs.
879  *  Any changes to ce_tasklet or this function may likely need to be
880  *  reflected in the counterpart.
881  *
882  * Returns:
883  *  int: the amount of work done in this poll (<= budget)
884  */
885 int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
886 		  struct napi_struct *napi,
887 		  int budget)
888 {
889 	int    rc = 0; /* default: no work done, also takes care of error */
890 	int    normalized = 0;
891 	int    bucket;
892 	int    cpu = smp_processor_id();
893 	bool poll_on_right_cpu;
894 	struct hif_softc      *hif = HIF_GET_SOFTC(hif_ctx);
895 	struct qca_napi_info *napi_info;
896 	struct CE_state *ce_state = NULL;
897 
898 	if (unlikely(NULL == hif)) {
899 		HIF_ERROR("%s: hif context is NULL", __func__);
900 		QDF_ASSERT(0);
901 		goto out;
902 	}
903 
904 	napi_info = (struct qca_napi_info *)
905 		container_of(napi, struct qca_napi_info, napi);
906 
907 	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
908 		   __func__, napi_info->id, napi_info->irq, budget);
909 
910 	napi_info->stats[cpu].napi_polls++;
911 
912 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
913 				 NAPI_POLL_ENTER, NULL, NULL, cpu, 0);
914 
915 	rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
916 	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
917 		    __func__, rc);
918 
919 	hif_napi_offld_flush_cb(napi_info);
920 
921 	/* do not return 0, if there was some work done,
922 	 * even if it is below the scale
923 	 */
924 	if (rc) {
925 		napi_info->stats[cpu].napi_workdone += rc;
926 		normalized = (rc / napi_info->scale);
927 		if (normalized == 0)
928 			normalized++;
929 		bucket = normalized / (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
930 		if (bucket >= QCA_NAPI_NUM_BUCKETS) {
931 			bucket = QCA_NAPI_NUM_BUCKETS - 1;
932 			HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)",
933 				bucket, QCA_NAPI_NUM_BUCKETS);
934 		}
935 		napi_info->stats[cpu].napi_budget_uses[bucket]++;
936 	} else {
937 		/* if ce_per_engine_service() reports 0, the poll should terminate */
938 		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
939 			   __func__, __LINE__);
940 	}
941 
942 	ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
943 
944 	/*
945 	 * Not using the API hif_napi_correct_cpu directly in the if statement
946 	 * below, since it might not get evaluated due to short-circuiting if
947 	 * placed after a condition that evaluates to true. The CPU correction
948 	 * check should kick in every poll.
949 	 */
950 #ifdef NAPI_YIELD_BUDGET_BASED
951 	if (ce_state && (ce_state->force_break || 0 == rc)) {
952 #else
953 	poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
954 	if ((ce_state) &&
955 	    (!ce_check_rx_pending(ce_state) || (0 == rc) ||
956 	     !poll_on_right_cpu)) {
957 #endif
958 		napi_info->stats[cpu].napi_completes++;
959 #ifdef NAPI_YIELD_BUDGET_BASED
960 		ce_state->force_break = 0;
961 #endif
962 
963 		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
964 					 NULL, NULL, 0, 0);
965 		if (normalized >= budget)
966 			normalized = budget - 1;
967 
968 		napi_complete(napi);
969 		/* enable interrupts */
970 		hif_napi_enable_irq(hif_ctx, napi_info->id);
971 		/* support suspend/resume */
972 		qdf_atomic_dec(&(hif->active_tasklet_cnt));
973 
974 		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
975 			   __func__, __LINE__);
976 	} else {
977 		/* 4.4 kernel NAPI implementation requires drivers to
978 		 * return full work when they ask to be re-scheduled,
979 		 * or napi_complete and re-start with a fresh interrupt
980 		 */
981 		normalized = budget;
982 	}
983 
984 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
985 				 NAPI_POLL_EXIT, NULL, NULL, normalized, 0);
986 
987 	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
988 	return normalized;
989 out:
990 	return rc;
991 }
992 qdf_export_symbol(hif_napi_poll);
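/*
 * Worked example of the accounting above (the numbers are illustrative;
 * QCA_NAPI_BUDGET = 64 and QCA_NAPI_NUM_BUCKETS = 4 are assumed here):
 * with scale = 2 and ce_per_engine_service() reporting rc = 130 messages,
 * normalized = 130 / 2 = 65 and bucket = 65 / (64 / 4) = 4, which is
 * clamped to QCA_NAPI_NUM_BUCKETS - 1 = 3, so napi_budget_uses[3] is
 * incremented. If the poll then completes, normalized is further capped
 * to budget - 1 so the kernel does not keep the instance scheduled.
 */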
993 
994 void hif_update_napi_max_poll_time(struct CE_state *ce_state,
995 				   int ce_id,
996 				   int cpu_id)
997 {
998 	struct hif_softc *hif;
999 	struct qca_napi_info *napi_info;
1000 	unsigned long long napi_poll_time = sched_clock() -
1001 					ce_state->ce_service_start_time;
1002 
1003 	hif = ce_state->scn;
1004 	napi_info = hif->napi_data.napis[ce_id];
1005 	if (napi_poll_time >
1006 			napi_info->stats[cpu_id].napi_max_poll_time)
1007 		napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time;
1008 }
1009 
1010 #ifdef HIF_IRQ_AFFINITY
1011 /**
1012  *
1013  * hif_napi_update_yield_stats() - update NAPI yield related stats
1014  * @ce_state: Copy Engine state for which yield stats need to be updated
1015  *            (the CPU id is determined internally via qdf_get_cpu())
1016  * @time_limit_reached: indicates whether the time limit was reached
1017  * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached
1018  *
1019  * Return: None
1020  */
1021 void hif_napi_update_yield_stats(struct CE_state *ce_state,
1022 				 bool time_limit_reached,
1023 				 bool rxpkt_thresh_reached)
1024 {
1025 	struct hif_softc *hif;
1026 	struct qca_napi_data *napi_data = NULL;
1027 	int ce_id = 0;
1028 	int cpu_id = 0;
1029 
1030 	if (unlikely(NULL == ce_state)) {
1031 		QDF_ASSERT(NULL != ce_state);
1032 		return;
1033 	}
1034 
1035 	hif = ce_state->scn;
1036 
1037 	if (unlikely(NULL == hif)) {
1038 		QDF_ASSERT(NULL != hif);
1039 		return;
1040 	}
1041 	napi_data = &(hif->napi_data);
1042 	if (unlikely(NULL == napi_data)) {
1043 		QDF_ASSERT(NULL != napi_data);
1044 		return;
1045 	}
1046 
1047 	ce_id = ce_state->id;
1048 	cpu_id = qdf_get_cpu();
1049 
1050 	if (unlikely(!napi_data->napis[ce_id])) {
1051 		HIF_INFO("%s: NAPI info is NULL for ce id: %d",
1052 			 __func__, ce_id);
1053 		return;
1054 	}
1055 
1056 	if (time_limit_reached)
1057 		napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
1058 	else
1059 		napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;
1060 
1061 	hif_update_napi_max_poll_time(ce_state, ce_id,
1062 				      cpu_id);
1063 }
1064 
1065 /**
1066  *
1067  * hif_napi_stats() - display NAPI CPU statistics
1068  * @napid: pointer to qca_napi_data
1069  *
1070  * Description:
1071  *    Prints the various CPU cores on which the NAPI instances/CE interrupts
1072  *    are being executed. Can be called from outside NAPI layer.
1073  *
1074  * Return: None
1075  */
1076 void hif_napi_stats(struct qca_napi_data *napid)
1077 {
1078 	int i;
1079 	struct qca_napi_cpu *cpu;
1080 
1081 	if (napid == NULL) {
1082 		qdf_debug("%s: napid struct is null", __func__);
1083 		return;
1084 	}
1085 
1086 	cpu = napid->napi_cpu;
1087 	qdf_debug("NAPI CPU TABLE");
1088 	qdf_debug("lilclhead=%d, bigclhead=%d",
1089 		  napid->lilcl_head, napid->bigcl_head);
1090 	for (i = 0; i < NR_CPUS; i++) {
1091 		qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d",
1092 			  i,
1093 			  cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
1094 			  cpu[i].core_mask.bits[0],
1095 			  cpu[i].thread_mask.bits[0],
1096 			  cpu[i].max_freq, cpu[i].napis,
1097 			  cpu[i].cluster_nxt);
1098 	}
1099 }
1100 
1101 #ifdef FEATURE_NAPI_DEBUG
1102 /*
1103  * Local functions
1104  * - no argument checks, all internal/trusted callers
1105  */
1106 static void hnc_dump_cpus(struct qca_napi_data *napid)
1107 {
1108 	hif_napi_stats(napid);
1109 }
1110 #else
1111 static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ };
1112 #endif /* FEATURE_NAPI_DEBUG */
1113 /**
1114  * hnc_link_clusters() - partitions the cpu table into clusters
1115  * @napid: pointer to NAPI data
1116  *
1117  * Takes in a CPU topology table and builds two linked lists
1118  * (big cluster cores, list-head at bigcl_head, and little cluster
1119  * cores, list-head at lilcl_head) out of it.
1120  *
1121  * If there are more than two clusters:
1122  * - bigcl_head and lilcl_head will be different,
1123  * - the cluster with highest cpufreq will be considered the "big" cluster.
1124  *   If there is more than one with the highest frequency, the *last* of such
1125  *   clusters will be designated as the "big cluster"
1126  * - the cluster with lowest cpufreq will be considered the "li'l" cluster.
1127  *   If there is more than one cluster with the lowest cpu freq, the *first*
1128  *   of such clusters will be designated as the "little cluster"
1129  * - We only support up to 32 clusters
1130  * Return: 0 : OK
1131  *         !0: error (at least one of lil/big clusters could not be found)
1132  */
1133 #define HNC_MIN_CLUSTER 0
1134 #define HNC_MAX_CLUSTER 1
1135 static int hnc_link_clusters(struct qca_napi_data *napid)
1136 {
1137 	int rc = 0;
1138 
1139 	int i;
1140 	int it = 0;
1141 	uint32_t cl_done = 0x0;
1142 	int cl, curcl, curclhead = 0;
1143 	int more;
1144 	unsigned int lilfrq = INT_MAX;
1145 	unsigned int bigfrq = 0;
1146 	unsigned int clfrq = 0;
1147 	int prev = 0;
1148 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1149 
1150 	napid->lilcl_head = napid->bigcl_head = -1;
1151 
1152 	do {
1153 		more = 0;
1154 		it++; curcl = -1;
1155 		for (i = 0; i < NR_CPUS; i++) {
1156 			cl = cpus[i].cluster_id;
1157 			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
1158 				   i, cl);
1159 			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
1160 				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
1161 				/* continue if ASSERTs are disabled */
1162 				continue;
1163 			};
1164 			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
1165 				NAPI_DEBUG("Core mask 0. SKIPPED\n");
1166 				continue;
1167 			}
1168 			if (cl_done & (0x01 << cl)) {
1169 				NAPI_DEBUG("Cluster already processed. SKIPPED\n");
1170 				continue;
1171 			} else {
1172 				if (more == 0) {
1173 					more = 1;
1174 					curcl = cl;
1175 					curclhead = i; /* row */
1176 					clfrq = cpus[i].max_freq;
1177 					prev = -1;
1178 				};
1179 				if ((curcl >= 0) && (curcl != cl)) {
1180 					NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
1181 						   cl, curcl);
1182 					continue;
1183 				}
1184 				if (cpus[i].max_freq != clfrq)
1185 					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
1186 						   cpus[i].max_freq, clfrq);
1187 				if (clfrq >= bigfrq) {
1188 					bigfrq = clfrq;
1189 					napid->bigcl_head  = curclhead;
1190 					NAPI_DEBUG("bigcl=%d\n", curclhead);
1191 				}
1192 				if (clfrq < lilfrq) {
1193 					lilfrq = clfrq;
1194 					napid->lilcl_head = curclhead;
1195 					NAPI_DEBUG("lilcl=%d\n", curclhead);
1196 				}
1197 				if (prev != -1)
1198 					cpus[prev].cluster_nxt = i;
1199 
1200 				prev = i;
1201 			}
1202 		}
1203 		if (curcl >= 0)
1204 			cl_done |= (0x01 << curcl);
1205 
1206 	} while (more);
1207 
1208 	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
1209 		rc = -EFAULT;
1210 
1211 	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
1212 	return rc;
1213 }
1214 #undef HNC_MIN_CLUSTER
1215 #undef HNC_MAX_CLUSTER
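/*
 * Worked example (illustrative topology): on a 4+4 big.LITTLE system
 * where CPUs 0-3 report cluster_id 0 at a lower max frequency and
 * CPUs 4-7 report cluster_id 1 at a higher one, the loop above yields
 *
 *	lilcl_head = 0, cluster_nxt chain: 0 -> 1 -> 2 -> 3
 *	bigcl_head = 4, cluster_nxt chain: 4 -> 5 -> 6 -> 7
 *
 * which hncm_dest_cpu() walks when collapsing (little list) or
 * dispersing (big list) the NAPI instances.
 */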
1216 
1217 /*
1218  * hotplug function group
1219  */
1220 
1221 /**
1222  * hnc_cpu_online_cb() - handles CPU hotplug "up" events
1223  * @context: the associated HIF context
1224  * @cpu: the CPU Id of the CPU the event happened on
1225  *
1226  * Return: None
1227  */
1228 static void hnc_cpu_online_cb(void *context, uint32_t cpu)
1229 {
1230 	struct hif_softc *hif = context;
1231 	struct qca_napi_data *napid = &hif->napi_data;
1232 
1233 	if (cpu >= NR_CPUS)
1234 		return;
1235 
1236 	NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);
1237 
1238 	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
1239 	NAPI_DEBUG("%s: CPU %u marked %d",
1240 		   __func__, cpu, napid->napi_cpu[cpu].state);
1241 
1242 	NAPI_DEBUG("<--%s", __func__);
1243 }
1244 
1245 /**
1246  * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
1247  * @context: the associated HIF context
1248  * @cpu: the CPU Id of the CPU the event happened on
1249  *
1250  * On transition to offline, we act on PREP events, because we may need to move
1251  * the irqs/NAPIs to another CPU before it is actually off-lined.
1252  *
1253  * Return: None
1254  */
1255 static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
1256 {
1257 	struct hif_softc *hif = context;
1258 	struct qca_napi_data *napid = &hif->napi_data;
1259 
1260 	if (cpu >= NR_CPUS)
1261 		return;
1262 
1263 	NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);
1264 
1265 	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1266 
1267 	NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
1268 		   __func__, cpu, napid->napi_cpu[cpu].state);
1269 
1270 	/*
1271 	 * we need to move any NAPIs on this CPU out.
1272 	 * if we are in LO throughput mode, then this is valid
1273 	 * if the CPU is the low designated CPU.
1274 	 */
1275 	hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
1276 		       NAPI_EVT_CPU_STATE,
1277 		       (void *)
1278 		       ((size_t)cpu << 16 | napid->napi_cpu[cpu].state));
1279 
1280 	NAPI_DEBUG("<--%s", __func__);
1281 }
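/*
 * The NAPI_EVT_CPU_STATE payload packs the CPU id and its new state into
 * one pointer-sized value: cpu in the upper bits, state in the low byte.
 * For example (illustrative), for cpu 3 going down the value passed is
 * (3 << 16) | QCA_NAPI_CPU_DOWN, which hif_napi_event() unpacks as
 * cpu = data >> 16 and val = data & 0x0ff.
 */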
1282 
1283 static int hnc_hotplug_register(struct hif_softc *hif_sc)
1284 {
1285 	QDF_STATUS status;
1286 
1287 	NAPI_DEBUG("-->%s", __func__);
1288 
1289 	status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
1290 				    hif_sc,
1291 				    hnc_cpu_online_cb,
1292 				    hnc_cpu_before_offline_cb);
1293 
1294 	NAPI_DEBUG("<--%s [%d]", __func__, status);
1295 
1296 	return qdf_status_to_os_return(status);
1297 }
1298 
1299 static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
1300 {
1301 	NAPI_DEBUG("-->%s", __func__);
1302 
1303 	if (hif_sc->napi_data.cpuhp_handler)
1304 		qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);
1305 
1306 	NAPI_DEBUG("<--%s", __func__);
1307 }
1308 
1309 /**
1310  * hnc_tput_hook() - installs a callback in the throughput detector
1311  * @install: !0 => install; =0: uninstall
1312  *
1313  * installs a callback to be called when wifi driver throughput (tx+rx)
1314  * crosses a threshold. Currently, we are using the same criteria as
1315  * TCP ack suppression (500 packets/100ms by default).
1316  *
1317  * Return: 0 : success
1318  *         <0: failure
1319  */
1320 
1321 static int hnc_tput_hook(int install)
1322 {
1323 	int rc = 0;
1324 
1325 	/*
1326 	 * Nothing, until the bw_calculation accepts registration
1327 	 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk
1328 	 *   hdd_napi_throughput_policy(...)
1329 	 */
1330 	return rc;
1331 }
1332 
1333 /*
1334  * Implementation of hif_napi_cpu API
1335  */
1336 
1337 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1338 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1339 {
1340 	cpumask_copy(&(cpus[i].thread_mask),
1341 			     topology_sibling_cpumask(i));
1342 }
1343 #else
1344 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1345 {
1346 }
1347 #endif
1348 
1349 
1350 /**
1351  * hif_napi_cpu_init() - initialization of irq affinity block
1352  * @hif: opaque hif context
1353  *
1354  * called by hif_napi_create(), when the first instance is created
1355  * - builds napi_rss_cpus table from cpu topology
1356  * - links cores of the same clusters together
1357  * - installs hot-plug notifier
1358  * - installs throughput trigger notifier (when such mechanism exists)
1359  *
1360  * Return: 0: OK
1361  *         <0: error code
1362  */
1363 int hif_napi_cpu_init(struct hif_opaque_softc *hif)
1364 {
1365 	int rc = 0;
1366 	int i;
1367 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1368 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1369 
1370 	NAPI_DEBUG("--> ");
1371 
1372 	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
1373 		NAPI_DEBUG("NAPI RSS table already initialized.\n");
1374 		rc = -EALREADY;
1375 		goto lab_rss_init;
1376 	}
1377 
1378 	/* build CPU topology table */
1379 	for_each_possible_cpu(i) {
1380 		cpus[i].state       = ((cpumask_test_cpu(i, cpu_online_mask)
1381 					? QCA_NAPI_CPU_UP
1382 					: QCA_NAPI_CPU_DOWN));
1383 		cpus[i].core_id     = topology_core_id(i);
1384 		cpus[i].cluster_id  = topology_physical_package_id(i);
1385 		cpumask_copy(&(cpus[i].core_mask),
1386 			     topology_core_cpumask(i));
1387 		record_sibling_cpumask(cpus, i);
1388 		cpus[i].max_freq    = cpufreq_quick_get_max(i);
1389 		cpus[i].napis       = 0x0;
1390 		cpus[i].cluster_nxt = -1; /* invalid */
1391 	}
1392 
1393 	/* link clusters together */
1394 	rc = hnc_link_clusters(napid);
1395 	if (0 != rc)
1396 		goto lab_err_topology;
1397 
1398 	/* install hotplug notifier */
1399 	rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
1400 	if (0 != rc)
1401 		goto lab_err_hotplug;
1402 
1403 	/* install throughput notifier */
1404 	rc = hnc_tput_hook(1);
1405 	if (0 == rc)
1406 		goto lab_rss_init;
1407 
1408 lab_err_hotplug:
1409 	hnc_tput_hook(0);
1410 	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
1411 lab_err_topology:
1412 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1413 lab_rss_init:
1414 	NAPI_DEBUG("<-- [rc=%d]", rc);
1415 	return rc;
1416 }
1417 
1418 /**
1419  * hif_napi_cpu_deinit() - clean-up of irq affinity block
1420  *
1421  * called by hif_napi_destroy, when the last instance is removed
1422  * - uninstalls throughput and hotplug notifiers
1423  * - clears cpu topology table
1424  * Return: 0: OK
1425  */
1426 int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
1427 {
1428 	int rc = 0;
1429 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1430 
1431 	NAPI_DEBUG("-->%s(...)", __func__);
1432 
1433 	/* uninstall tput notifier */
1434 	rc = hnc_tput_hook(0);
1435 
1436 	/* uninstall hotplug notifier */
1437 	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
1438 
1439 	/* clear the topology table */
1440 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1441 
1442 	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);
1443 
1444 	return rc;
1445 }
1446 
1447 /**
1448  * hncm_migrate_to() - migrates a NAPI to a CPU
1449  * @napid: pointer to NAPI block
1450  * @ce_id: CE_id of the NAPI instance
1451  * @didx : index in the CPU topology table for the CPU to migrate to
1452  *
1453  * Migrates NAPI (identified by the CE_id) to the destination core
1454  * Updates the napi_map of the destination entry
1455  *
1456  * Return:
1457  *  =0 : success
1458  *  <0 : error
1459  */
1460 static int hncm_migrate_to(struct qca_napi_data *napid,
1461 			   int                   napi_ce,
1462 			   int                   didx)
1463 {
1464 	int rc = 0;
1465 	cpumask_t cpumask;
1466 
1467 	NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);
1468 
1469 	cpumask.bits[0] = (1 << didx);
1470 	if (!napid->napis[napi_ce])
1471 		return -EINVAL;
1472 
1473 	irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0);
1474 	rc = irq_set_affinity_hint(napid->napis[napi_ce]->irq, &cpumask);
1475 
1476 	/* unmark the napis bitmap in the cpu table */
1477 	napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce);
1478 	/* mark the napis bitmap for the new designated cpu */
1479 	napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
1480 	napid->napis[napi_ce]->cpu = didx;
1481 
1482 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1483 	return rc;
1484 }
1485 /**
1486  * hncm_dest_cpu() - finds a destination CPU for NAPI
1487  * @napid: pointer to NAPI block
1488  * @act  : RELOCATE | COLLAPSE | DISPERSE
1489  *
1490  * Finds the designated destination for the next IRQ.
1491  * RELOCATE: translated to either COLLAPSE or DISPERSE based
1492  *           on napid->napi_mode (throughput state)
1493  * COLLAPSE: All have the same destination: the first online CPU in lilcl
1494  * DISPERSE: One of the CPU in bigcl, which has the smallest number of
1495  *           NAPIs on it
1496  *
1497  * Return: >=0 : index in the cpu topology table
1498  *       : < 0 : error
1499  */
1500 static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
1501 {
1502 	int destidx = -1;
1503 	int head, i;
1504 
1505 	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
1506 	if (act == HNC_ACT_RELOCATE) {
1507 		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
1508 			act = HNC_ACT_COLLAPSE;
1509 		else
1510 			act = HNC_ACT_DISPERSE;
1511 		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
1512 			   __func__, act);
1513 	}
1514 	if (act == HNC_ACT_COLLAPSE) {
1515 		head = i = napid->lilcl_head;
1516 retry_collapse:
1517 		while (i >= 0) {
1518 			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
1519 				destidx = i;
1520 				break;
1521 			}
1522 			i = napid->napi_cpu[i].cluster_nxt;
1523 		}
1524 		if ((destidx < 0) && (head == napid->lilcl_head)) {
1525 			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
1526 				__func__);
1527 			head = i = napid->bigcl_head;
1528 			goto retry_collapse;
1529 		}
1530 	} else { /* HNC_ACT_DISPERSE */
1531 		int smallest = 99; /* larger than any hweight32() result */
1532 		int smallidx = -1;
1533 
1534 		head = i = napid->bigcl_head;
1535 retry_disperse:
1536 		while (i >= 0) {
1537 			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
1538 			    (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
1539 				smallest = hweight32(napid->napi_cpu[i].napis);
1540 				smallidx = i;
1541 			}
1542 			i = napid->napi_cpu[i].cluster_nxt;
1543 		}
1544 		/* Check if it matches the user-specified CPU mask */
1545 		smallidx = ((1 << smallidx) & napid->user_cpu_affin_mask) ?
1546 								smallidx : -1;
1547 
1548 		if ((smallidx < 0) && (head == napid->bigcl_head)) {
1549 			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
1550 				__func__);
1551 			head = i = napid->lilcl_head;
1552 			goto retry_disperse;
1553 		}
1554 		destidx = smallidx;
1555 	}
1556 	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
1557 	return destidx;
1558 }
1559 /**
1560  * hif_napi_cpu_migrate() - migrate IRQs away
1561  * @cpu   : -1: all CPUs; <n>: specific CPU
1562  * @action: COLLAPSE | DISPERSE
1563  *
1564  * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
1565  * cores. Eligible cores are:
1566  * act=COLLAPSE -> the first online core of the little cluster
1567  * act=DISPERSE -> separate cores of the big cluster, so that each core will
1568  *                 host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis)
1569  *
1570  * Note that this function is called with a spinlock acquired already.
1571  *
1572  * Return: =0: success
1573  *         <0: error
1574  */
1575 
1576 int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
1577 {
1578 	int      rc = 0;
1579 	struct qca_napi_cpu *cpup;
1580 	int      i, dind;
1581 	uint32_t napis;
1582 
1583 	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
1584 		   __func__, cpu, action);
1585 	/* the following is really: hif_napi_enabled() with less overhead */
1586 	if (napid->ce_map == 0) {
1587 		NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
1588 		goto hncm_return;
1589 	}
1590 
1591 	cpup = napid->napi_cpu;
1592 
1593 	switch (action) {
1594 	case HNC_ACT_RELOCATE:
1595 	case HNC_ACT_DISPERSE:
1596 	case HNC_ACT_COLLAPSE: {
1597 		/* first find the src napi set */
1598 		if (cpu == HNC_ANY_CPU)
1599 			napis = napid->ce_map;
1600 		else
1601 			napis = cpup[cpu].napis;
1602 		/* then clear the napi bitmap on each CPU */
1603 		for (i = 0; i < NR_CPUS; i++)
1604 			cpup[i].napis = 0;
1605 		/* then for each of the NAPIs to disperse: */
1606 		for (i = 0; i < CE_COUNT_MAX; i++)
1607 			if (napis & (1 << i)) {
1608 				/* find a destination CPU */
1609 				dind = hncm_dest_cpu(napid, action);
1610 				if (dind >= 0) {
1611 					NAPI_DEBUG("Migrating NAPI ce%d to %d",
1612 						   i, dind);
1613 					rc = hncm_migrate_to(napid, i, dind);
1614 				} else {
1615 					NAPI_DEBUG("No dest for NAPI ce%d", i);
1616 					hnc_dump_cpus(napid);
1617 					rc = -1;
1618 				}
1619 			}
1620 		break;
1621 	}
1622 	default: {
1623 		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
1624 		QDF_BUG(0);
1625 		break;
1626 	}
1627 	} /* switch action */
1628 
1629 hncm_return:
1630 	hnc_dump_cpus(napid);
1631 	return rc;
1632 }
1633 
1634 
1635 /**
1636  * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting
1637  * @napid: pointer to qca_napi_data structure
1638  * @bl_flag: blacklist flag to enable/disable blacklisting
1639  *
1640  * The function enables/disables blacklisting for all the copy engine
1641  * interrupts on which NAPI is enabled.
1642  *
1643  * Return: None
1644  */
1645 static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
1646 {
1647 	int i;
1648 	struct qca_napi_info *napii;
1649 
1650 	for (i = 0; i < CE_COUNT_MAX; i++) {
1651 		/* check if NAPI is enabled on the CE */
1652 		if (!(napid->ce_map & (0x01 << i)))
1653 			continue;
1654 
1655 		/* double-check that NAPI is allocated for the CE */
1656 		napii = napid->napis[i];
1657 		if (!(napii))
1658 			continue;
1659 
1660 		if (bl_flag == true)
1661 			irq_modify_status(napii->irq,
1662 					  0, IRQ_NO_BALANCING);
1663 		else
1664 			irq_modify_status(napii->irq,
1665 					  IRQ_NO_BALANCING, 0);
1666 		HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
1667 	}
1668 }
1669 
1670 #ifdef CONFIG_SCHED_CORE_CTL
1671 /* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */
1672 static inline int hif_napi_core_ctl_set_boost(bool boost)
1673 {
1674 	return core_ctl_set_boost(boost);
1675 }
1676 #else
1677 static inline int hif_napi_core_ctl_set_boost(bool boost)
1678 {
1679 	return 0;
1680 }
1681 #endif
1682 /**
1683  * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
1684  * @napid: pointer to qca_napi_data structure
1685  * @op: blacklist operation to perform
1686  *
1687  * The function enables/disables/queries blacklisting for all CE RX
1688  * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables
1689  * core_ctl_set_boost.
1690  * Once blacklisting is enabled, the interrupts will not be managed by the IRQ
1691  * balancer.
1692  *
1693  * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled
1694  *         for BLACKLIST_QUERY op - blacklist refcount
1695  *         for BLACKLIST_ON op    - return value from core_ctl_set_boost API
1696  *         for BLACKLIST_OFF op   - return value from core_ctl_set_boost API
1697  */
1698 int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
1699 			   enum qca_blacklist_op op)
1700 {
1701 	int rc = 0;
1702 	static int ref_count; /* = 0 by the compiler */
1703 	uint8_t flags = napid->flags;
1704 	bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
1705 	bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;
1706 
1707 	NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);
1708 
1709 	if (!(bl_en && ccb_en)) {
1710 		rc = -EINVAL;
1711 		goto out;
1712 	}
1713 
1714 	switch (op) {
1715 	case BLACKLIST_QUERY:
1716 		rc = ref_count;
1717 		break;
1718 	case BLACKLIST_ON:
1719 		ref_count++;
1720 		rc = 0;
1721 		if (ref_count == 1) {
1722 			rc = hif_napi_core_ctl_set_boost(true);
1723 			NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
1724 				rc, ref_count);
1725 			hif_napi_bl_irq(napid, true);
1726 		}
1727 		break;
1728 	case BLACKLIST_OFF:
1729 		if (ref_count) {
1730 			ref_count--;
1731 			rc = 0;
1732 			if (ref_count == 0) {
1733 				rc = hif_napi_core_ctl_set_boost(false);
1734 				NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
1735 					   rc, ref_count);
1736 				hif_napi_bl_irq(napid, false);
1737 			}
1738 		}
1739 		break;
1740 	default:
1741 		NAPI_DEBUG("Invalid blacklist op: %d", op);
1742 		rc = -EINVAL;
1743 	} /* switch */
1744 out:
1745 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1746 	return rc;
1747 }
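/*
 * Reference-counting sketch (illustrative call sequence):
 *
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_ON);    // 0->1: boost on, IRQ_NO_BALANCING set
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_ON);    // 1->2: no IRQ change
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_QUERY); // returns 2
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);   // 2->1: still blacklisted
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);   // 1->0: boost off, balancing restored
 *
 * Both QCA_NAPI_FEATURE_IRQ_BLACKLISTING and QCA_NAPI_FEATURE_CORE_CTL_BOOST
 * must be set in napid->flags; otherwise every op returns -EINVAL.
 */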
1748 
1749 /**
1750  * hif_napi_serialize() - [de-]serialize NAPI operations
1751  * @hif:   context
1752  * @is_on: 1: serialize, 0: deserialize
1753  *
1754  * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the
1755  * following steps (see hif_napi_event for code):
1756  * - put irqs of all NAPI instances on the same CPU
1757  * - only for the first serialize call: blacklist
1758  *
1759  * hif_napi_serialize(hif, 0):
1760  * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
1761  * - at the end of the timer, check the current throughput state and
1762  *   implement it.
1763  */
1764 static unsigned long napi_serialize_reqs;
1765 int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
1766 {
1767 	int rc = -EINVAL;
1768 
1769 	if (hif != NULL)
1770 		switch (is_on) {
1771 		case 0: { /* de-serialize */
1772 			rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
1773 					    (void *) 0);
1774 			napi_serialize_reqs = 0;
1775 			break;
1776 		} /* end de-serialize */
1777 		case 1: { /* serialize */
1778 			rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
1779 					    (void *)napi_serialize_reqs++);
1780 			break;
1781 		} /* end serialize */
1782 		default:
1783 			break; /* no-op */
1784 		} /* switch */
1785 	return rc;
1786 }
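/*
 * Caller sketch (illustrative; a latency-critical window such as roaming,
 * the only current user mentioned above):
 *
 *	hif_napi_serialize(hif_ctx, 1);	// collapse IRQs; blacklist on first call
 *	// ... latency-critical work ...
 *	hif_napi_serialize(hif_ctx, 0);	// request return to normal operation
 *
 * The deserialize path only resets napi_mode to UNINITIALIZED; the actual
 * re-dispersal happens the next time the throughput policy handler runs.
 */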
1787 
1788 #endif /* ifdef HIF_IRQ_AFFINITY */
1789