1 /*
2  * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /**
29  * DOC: hif_napi.c
30  *
31  * HIF NAPI interface implementation
32  */
33 
34 #include <string.h> /* memset */
35 
36 /* Linux headers */
37 #include <linux/cpumask.h>
38 #include <linux/cpufreq.h>
39 #include <linux/cpu.h>
40 #include <linux/topology.h>
41 #include <linux/interrupt.h>
42 #include <linux/irq.h>
43 #ifdef HELIUMPLUS
44 #ifdef CONFIG_SCHED_CORE_CTL
45 #include <linux/sched/core_ctl.h>
46 #endif
47 #include <pld_snoc.h>
48 #endif
49 #include <linux/pm.h>
50 
51 /* Driver headers */
52 #include <hif_napi.h>
53 #include <hif_debug.h>
54 #include <hif_io32.h>
55 #include <ce_api.h>
56 #include <ce_internal.h>
57 
58 enum napi_decision_vector {
59 	HIF_NAPI_NOEVENT = 0,
60 	HIF_NAPI_INITED  = 1,
61 	HIF_NAPI_CONF_UP = 2
62 };
63 #define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
64 
65 #ifdef HELIUMPLUS
66 static inline int hif_get_irq_for_ce(int ce_id)
67 {
68 	return pld_snoc_get_irq(ce_id);
69 }
70 #else /* HELIUMPLUS */
71 static inline int hif_get_irq_for_ce(int ce_id)
72 {
73 	return -EINVAL;
74 }
75 static int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu,
76 				int action)
77 {
78 	return 0;
79 }
80 
81 int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
82 					enum qca_blacklist_op op)
83 {
84 	return 0;
85 }
86 #endif /* HELIUMPLUS */
87 
88 /**
89  * hif_napi_create() - creates the NAPI structures for a given CE
90  * @hif_ctx: pointer to hif context
92  * @poll   : poll function to be used for this NAPI instance
93  * @budget : budget to be registered with the NAPI instance
94  * @scale  : scale factor on the weight (to scale the budget to 1000)
95  * @flags  : feature flags
96  *
97  * Description:
98  *    Creates NAPI instances. This function is called
99  *    unconditionally during initialization. It creates
100  *    napi structures through the proper HTC/HIF calls.
101  *    The structures are disabled on creation.
102  *    Note that for each NAPI instance a separate dummy netdev is used
103  *
104  * Return:
105  * < 0: error
106  * = 0: <should never happen>
107  * > 0: bitmap of the pipes for which NAPI instances were created (ce_map)
108  */
109 int hif_napi_create(struct hif_opaque_softc   *hif_ctx,
110 		    int (*poll)(struct napi_struct *, int),
111 		    int                budget,
112 		    int                scale,
113 		    uint8_t            flags)
114 {
115 	int i;
116 	struct qca_napi_data *napid;
117 	struct qca_napi_info *napii;
118 	struct CE_state      *ce_state;
119 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
120 	int    rc = 0;
121 
122 	NAPI_DEBUG("-->(budget=%d, scale=%d)",
123 		   budget, scale);
124 	NAPI_DEBUG("hif->napi_data.state = 0x%08x",
125 		   hif->napi_data.state);
126 	NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
127 		   hif->napi_data.ce_map);
128 
129 	napid = &(hif->napi_data);
130 	if (0 == (napid->state &  HIF_NAPI_INITED)) {
131 		memset(napid, 0, sizeof(struct qca_napi_data));
132 		qdf_spinlock_create(&(napid->lock));
133 
134 		napid->state |= HIF_NAPI_INITED;
135 		napid->flags = flags;
136 
137 		rc = hif_napi_cpu_init(hif_ctx);
138 		if (rc != 0) {
139 			HIF_ERROR("NAPI initialization failed, rc = %d", rc);
140 			goto hnc_err;
141 		}
142 
143 		HIF_DBG("%s: NAPI structures initialized, rc=%d",
144 			 __func__, rc);
145 	}
146 	for (i = 0; i < hif->ce_count; i++) {
147 		ce_state = hif->ce_id_to_state[i];
148 		NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
149 			   i, ce_state->htt_rx_data,
150 			   ce_state->htt_tx_data);
151 		if (!ce_state->htt_rx_data)
152 			continue;
153 
154 		/* Now this is a CE where we need NAPI on */
155 		NAPI_DEBUG("Creating NAPI on pipe %d", i);
156 
157 		napii = &(napid->napis[i]);
158 		memset(napii, 0, sizeof(struct qca_napi_info));
159 		napii->scale = scale;
160 		napii->id    = NAPI_PIPE2ID(i);
161 		napii->hif_ctx = hif_ctx;
162 		napii->irq   = hif_get_irq_for_ce(i);
163 
164 		if (napii->irq < 0)
165 			HIF_WARN("%s: bad IRQ value for CE %d: %d",
166 				 __func__, i, napii->irq);
167 
168 		qdf_spinlock_create(&napii->lro_unloading_lock);
169 		init_dummy_netdev(&(napii->netdev));
170 
171 		NAPI_DEBUG("adding napi=%p to netdev=%p (poll=%p, bdgt=%d)",
172 			   &(napii->napi), &(napii->netdev), poll, budget);
173 		netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget);
174 
175 		NAPI_DEBUG("after napi_add");
176 		NAPI_DEBUG("napi=0x%p, netdev=0x%p",
177 			   &(napii->napi), &(napii->netdev));
178 		NAPI_DEBUG("napi.dev_list.prev=0x%p, next=0x%p",
179 			   napii->napi.dev_list.prev,
180 			   napii->napi.dev_list.next);
181 		NAPI_DEBUG("dev.napi_list.prev=0x%p, next=0x%p",
182 			   napii->netdev.napi_list.prev,
183 			   napii->netdev.napi_list.next);
184 
185 		/* It is OK to change the state variable below without
186 		 * protection as there should be no-one around yet
187 		 */
188 		napid->ce_map |= (0x01 << i);
189 		HIF_DBG("%s: NAPI id %d created for pipe %d", __func__,
190 			 napii->id, i);
191 	}
192 	NAPI_DEBUG("NAPI ids created for all applicable pipes");
193 hnc_err:
194 	NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
195 	return napid->ce_map;
196 }
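
/*
 * Example (illustrative sketch only, not driver code): a bring-up path
 * in the CE/HTC layer could create the instances roughly as below; the
 * poll callback name "hdd_napi_poll" and the scale value of 1 are
 * hypothetical.
 *
 *	int map = hif_napi_create(hif_ctx, hdd_napi_poll,
 *				  QCA_NAPI_BUDGET, 1,
 *				  QCA_NAPI_FEATURE_CPU_CORRECTION);
 *	if (map <= 0)
 *		HIF_ERROR("NAPI creation failed: 0x%x", map);
 */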
197 
198 /**
199  *
200  * hif_napi_destroy() - destroys the NAPI structures for a given instance
201  * @hif_ctx: pointer to hif context
202  * @id     : the NAPI id (not the CE id) whose instance will be destroyed
203  * @force : if set, will destroy even if entry is active (de-activates)
204  *
205  * Description:
206  *    Destroy a given NAPI instance. This function is called
207  *    unconditionally during cleanup.
208  *    Refuses to destroy an entry if it is still enabled (unless force=1).
209  *    Marks the whole napi_data invalid if all instances are destroyed.
210  *
211  * Return:
212  * -EINVAL: specific entry has not been created
213  * -EPERM : specific entry is still active
214  * < 0    : error
215  * = 0    : success
216  */
217 int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
218 		     uint8_t          id,
219 		     int              force)
220 {
221 	uint8_t ce = NAPI_ID2PIPE(id);
222 	int rc = 0;
223 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
224 
225 	NAPI_DEBUG("-->(id=%d, force=%d)", id, force);
226 
227 	if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
228 		HIF_ERROR("%s: NAPI not initialized or entry %d not created",
229 			  __func__, id);
230 		rc = -EINVAL;
231 	} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
232 		HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
233 			  __func__, id, ce);
234 		rc = -EINVAL;
235 	} else {
236 		struct qca_napi_data *napid;
237 		struct qca_napi_info *napii;
238 
239 		napid = &(hif->napi_data);
240 		napii = &(napid->napis[ce]);
241 
242 		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
243 			if (force) {
244 				napi_disable(&(napii->napi));
245 				HIF_DBG("%s: NAPI entry %d force disabled",
246 					 __func__, id);
247 				NAPI_DEBUG("NAPI %d force disabled", id);
248 			} else {
249 				HIF_ERROR("%s: Cannot destroy active NAPI %d",
250 					  __func__, id);
251 				rc = -EPERM;
252 			}
253 		}
254 		if (0 == rc) {
255 			NAPI_DEBUG("before napi_del");
256 			NAPI_DEBUG("napi.dlist.prv=0x%p, next=0x%p",
257 				  napii->napi.dev_list.prev,
258 				  napii->napi.dev_list.next);
259 			NAPI_DEBUG("dev.napi_l.prv=0x%p, next=0x%p",
260 				   napii->netdev.napi_list.prev,
261 				   napii->netdev.napi_list.next);
262 
263 			qdf_spinlock_destroy(&napii->lro_unloading_lock);
264 			netif_napi_del(&(napii->napi));
265 
266 			napid->ce_map &= ~(0x01 << ce);
267 			napii->scale  = 0;
268 			HIF_DBG("%s: NAPI %d destroyed\n", __func__, id);
269 
270 			/* if there are no active instances and
271 			 * if they are all destroyed,
272 			 * set the whole structure to uninitialized state
273 			 */
274 			if (napid->ce_map == 0) {
275 				rc = hif_napi_cpu_deinit(hif_ctx);
276 				/* caller is tolerant to receiving !=0 rc */
277 
278 				qdf_spinlock_destroy(&(napid->lock));
279 				memset(napid,
280 				       0, sizeof(struct qca_napi_data));
281 				HIF_DBG("%s: no NAPI instances. Zapped.",
282 					 __func__);
283 			}
284 		}
285 	}
286 
287 	return rc;
288 }
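
/*
 * Example (illustrative sketch): a teardown path could walk the CE map
 * and destroy every created instance by its NAPI id; force is set here
 * because this hypothetical caller is shutting down unconditionally.
 *
 *	for (i = 0; i < CE_COUNT_MAX; i++)
 *		if (napid->ce_map & (0x01 << i))
 *			hif_napi_destroy(hif_ctx, NAPI_PIPE2ID(i), 1);
 */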
289 
290 /**
291  * hif_napi_lro_flush_cb_register() - init and register flush callback for LRO
292  * @hif_hdl: pointer to hif context
293  * @lro_flush_handler: LRO flush callback to be registered
294  * @lro_init_handler: Callback for initializing LRO
295  *
296  * Return: positive value on success and 0 on failure
297  */
298 int hif_napi_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
299 				   void (lro_flush_handler)(void *),
300 				   void *(lro_init_handler)(void))
301 {
302 	int rc = 0;
303 	int i;
304 	struct CE_state *ce_state;
305 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
306 	void *data = NULL;
307 	struct qca_napi_data *napid;
308 	struct qca_napi_info *napii;
309 
310 	QDF_ASSERT(scn != NULL);
311 
312 	napid = hif_napi_get_all(hif_hdl);
313 	if (scn != NULL) {
314 		for (i = 0; i < scn->ce_count; i++) {
315 			ce_state = scn->ce_id_to_state[i];
316 			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
317 				data = lro_init_handler();
318 				if (data == NULL) {
319 					HIF_ERROR("%s: Failed to init LRO for CE %d",
320 						  __func__, i);
321 					continue;
322 				}
323 				napii = &(napid->napis[i]);
324 				napii->lro_flush_cb = lro_flush_handler;
325 				napii->lro_ctx = data;
326 				HIF_DBG("Registering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
327 					i, napii->id, napii->lro_flush_cb,
328 					napii->lro_ctx);
329 				rc++;
330 			}
331 		}
332 	} else {
333 		HIF_ERROR("%s: hif_state NULL!", __func__);
334 	}
335 	return rc;
336 }
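
/*
 * Example (illustrative sketch): an upper layer registers its own LRO
 * init/flush handlers; the handler names below are hypothetical.
 *
 *	rc = hif_napi_lro_flush_cb_register(hif_hdl,
 *					    my_lro_flush, my_lro_init);
 *	if (rc == 0)
 *		HIF_ERROR("no LRO-capable CEs were registered");
 */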
337 
338 /**
339  * hif_napi_lro_flush_cb_deregister() - deregister flush callbacks and free LRO
340  * @hif_hdl: pointer to hif context
341  * @lro_deinit_cb: LRO deinit callback
342  *
343  * Return: NONE
344  */
345 void hif_napi_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
346 				     void (lro_deinit_cb)(void *))
347 {
348 	int i;
349 	struct CE_state *ce_state;
350 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
351 	struct qca_napi_data *napid;
352 	struct qca_napi_info *napii;
353 
354 	QDF_ASSERT(scn != NULL);
355 
356 	napid = hif_napi_get_all(hif_hdl);
357 	if (scn != NULL) {
358 		for (i = 0; i < scn->ce_count; i++) {
359 			ce_state = scn->ce_id_to_state[i];
360 			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
361 				napii = &(napid->napis[i]);
362 				HIF_DBG("deRegistering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
363 					i, napii->id, napii->lro_flush_cb,
364 					napii->lro_ctx);
365 				qdf_spin_lock_bh(&napii->lro_unloading_lock);
366 				napii->lro_flush_cb = NULL;
367 				lro_deinit_cb(napii->lro_ctx);
368 				napii->lro_ctx = NULL;
369 				qdf_spin_unlock_bh(
370 					&napii->lro_unloading_lock);
371 			}
372 		}
373 	} else {
374 		HIF_ERROR("%s: hif_state NULL!", __func__);
375 	}
376 }
377 
378 /**
379  * hif_napi_get_lro_info() - returns the address of the LRO data for napi_id
380  * @hif_hdl: pointer to hif context
381  * @napi_id: napi instance
382  *
383  * Description:
384  *    Returns the address of the LRO structure
385  *
386  * Return:
387  *  <addr>: address of the LRO structure
388  */
389 void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
390 {
391 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
392 	struct qca_napi_data *napid;
393 	struct qca_napi_info *napii;
394 
395 	napid = &(scn->napi_data);
396 	napii = &(napid->napis[NAPI_ID2PIPE(napi_id)]);
397 
398 	return napii->lro_ctx;
399 }
400 
401 /**
402  *
403  * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
404  * @hif: pointer to hif context
405  *
406  * Description:
407  *    Returns the address of the whole structure
408  *
409  * Return:
410  *  <addr>: address of the whole HIF NAPI structure
411  */
412 inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
413 {
414 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
415 
416 	return &(hif->napi_data);
417 }
418 
419 /**
420  *
421  * hif_napi_event() - reacts to events that impact NAPI
422  * @hif : pointer to hif context
423  * @evnt: event that has been detected
424  * @data: more data regarding the event
425  *
426  * Description:
427  *   This function handles two types of events:
428  *   1- Events that change the state of NAPI (enabled/disabled):
429  *      {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
430  *      The state is retrievable by "hdd_napi_enabled(-1)"
431  *    - NAPI will be on if either INI file is on and it has not been disabled
432  *                                by a subsequent vendor CMD,
433  *                         or     it has been enabled by a vendor CMD.
434  *   2- Events that change the CPU affinity of a NAPI instance/IRQ:
435  *      {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
436  *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
437  *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
438  *    - In LO tput mode, NAPI will yield control of its interrupts to the system
439  *      management functions. However in HI throughput mode, NAPI will actively
440  *      manage its interrupts/instances (by trying to disperse them out to
441  *      separate performance cores).
442  *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
443  *
444  *    + In some cases (roaming peer management is the only case so far), a
445  *      client can trigger a "SERIALIZE" event. Basically, this means that the
446  *      user is asking NAPI to go into a truly single execution context state.
447  *      So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted,
448  *      (if called for the first time) and then moves all IRQs (for NAPI
449  *      instances) to be collapsed to a single core. If called multiple times,
450  *      it will just re-collapse the CPUs: the blacklist-on() API is
451  *      reference-counted, so repeated calls are safe.
452  *
453  *      Such a user should post a "DESERIALIZE" (NORMAL) event to return NAPI to
454  *      its "normal" operation. Optionally, a timeout value can be given (in
455  *      multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this
456  *      case, NAPI will just set the current throughput state to uninitialized
457  *      and set the delay period. Once the policy handler is called, it will
458  *      skip applying the policy for that many periods, and apply it afterwards.
459  *
460  * Return:
461  *  < 0: some error
462  *  = 0: event handled successfully
463  */
464 int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
465 		   void *data)
466 {
467 	int      rc = 0;
468 	uint32_t prev_state;
469 	int      i;
470 	struct napi_struct *napi;
471 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
472 	struct qca_napi_data *napid = &(hif->napi_data);
473 	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
474 	enum {
475 		BLACKLIST_NOT_PENDING,
476 		BLACKLIST_ON_PENDING,
477 		BLACKLIST_OFF_PENDING
478 	     } blacklist_pending = BLACKLIST_NOT_PENDING;
479 
480 	NAPI_DEBUG("%s: -->(event=%d, aux=%p)", __func__, event, data);
481 
482 	if ((napid->state & HIF_NAPI_INITED) == 0) {
483 		NAPI_DEBUG("%s: got event when NAPI not initialized",
484 			   __func__);
485 		return -EINVAL;
486 	}
487 	qdf_spin_lock_bh(&(napid->lock));
488 	prev_state = napid->state;
489 	switch (event) {
490 	case NAPI_EVT_INI_FILE:
491 	case NAPI_EVT_CMD_STATE:
492 	case NAPI_EVT_INT_STATE: {
493 		int on = (data != ((void *)0));
494 
495 		HIF_DBG("%s: recved evnt: STATE_CMD %d; v = %d (state=0x%0x)",
496 			 __func__, event,
497 			 on, prev_state);
498 		if (on)
499 			if (prev_state & HIF_NAPI_CONF_UP) {
500 				HIF_DBG("%s: duplicate NAPI conf ON msg",
501 					 __func__);
502 			} else {
503 				HIF_DBG("%s: setting state to ON",
504 					 __func__);
505 				napid->state |= HIF_NAPI_CONF_UP;
506 			}
507 		else /* off request */
508 			if (prev_state & HIF_NAPI_CONF_UP) {
509 				HIF_DBG("%s: setting state to OFF",
510 				 __func__);
511 				napid->state &= ~HIF_NAPI_CONF_UP;
512 			} else {
513 				HIF_DBG("%s: duplicate NAPI conf OFF msg",
514 					 __func__);
515 			}
516 		break;
517 	}
518 	/* case NAPI_INIT_FILE/CMD_STATE */
519 
520 	case NAPI_EVT_CPU_STATE: {
521 		int cpu = ((unsigned long int)data >> 16);
522 		int val = ((unsigned long int)data & 0x0ff);
523 
524 		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
525 			   __func__, cpu, val);
526 
527 		/* state has already been set by hnc_cpu_notify_cb */
528 		if ((val == QCA_NAPI_CPU_DOWN) &&
529 		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
530 		    (napid->napi_cpu[cpu].napis != 0)) {
531 			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
532 				   __func__, cpu);
533 			rc = hif_napi_cpu_migrate(napid,
534 						  cpu,
535 						  HNC_ACT_RELOCATE);
536 			napid->napi_cpu[cpu].napis = 0;
537 		}
538 		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
539 		break;
540 	}
541 
542 	case NAPI_EVT_TPUT_STATE: {
543 		tput_mode = (enum qca_napi_tput_state)data;
544 		if (tput_mode == QCA_NAPI_TPUT_LO) {
545 			/* from TPUT_HI -> TPUT_LO */
546 			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
547 				   __func__);
548 			blacklist_pending = BLACKLIST_OFF_PENDING;
549 			/*
550 			 * Ideally we should "collapse" interrupts here, since
551 			 * we are "dispersing" interrupts in the "else" case.
552 			 * This allows the possibility that our interrupts may
553 			 * still be on the perf cluster the next time we enter
554 			 * high tput mode. However, the irq_balancer is free
555 			 * to move our interrupts to power cluster once
556 			 * blacklisting has been turned off in the "else" case.
557 			 */
558 		} else {
559 			/* from TPUT_LO -> TPUT_HI */
560 			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
561 				   __func__);
562 			rc = hif_napi_cpu_migrate(napid,
563 						  HNC_ANY_CPU,
564 						  HNC_ACT_DISPERSE);
565 
566 			blacklist_pending = BLACKLIST_ON_PENDING;
567 		}
568 		napid->napi_mode = tput_mode;
569 		break;
570 	}
571 
572 	case NAPI_EVT_USR_SERIAL: {
573 		unsigned long users = (unsigned long)data;
574 
575 		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
576 			   __func__, users);
577 
578 		rc = hif_napi_cpu_migrate(napid,
579 					  HNC_ANY_CPU,
580 					  HNC_ACT_COLLAPSE);
581 		if ((users == 0) && (rc == 0))
582 			blacklist_pending = BLACKLIST_ON_PENDING;
583 		break;
584 	}
585 	case NAPI_EVT_USR_NORMAL: {
586 		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
587 		/*
588 		 * Deserialization timeout is handled at hdd layer;
589 		 * just mark current mode to uninitialized to ensure
590 		 * it will be set when the delay is over
591 		 */
592 		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
593 		break;
594 	}
595 	default: {
596 		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
597 			  __func__, event, (unsigned long) data);
598 		break;
599 	} /* default */
600 	}; /* switch */
601 
602 
603 	switch (blacklist_pending) {
604 	case BLACKLIST_ON_PENDING:
605 		/* assume the control of WLAN IRQs */
606 		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
607 		break;
608 	case BLACKLIST_OFF_PENDING:
609 		/* yield the control of WLAN IRQs */
610 		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
611 		break;
612 	default: /* nothing to do */
613 		break;
614 	} /* switch blacklist_pending */
615 
616 	qdf_spin_unlock_bh(&(napid->lock));
617 
618 	if (prev_state != napid->state) {
619 		if (napid->state == ENABLE_NAPI_MASK) {
620 			rc = 1;
621 			for (i = 0; i < CE_COUNT_MAX; i++)
622 				if ((napid->ce_map & (0x01 << i))) {
623 					napi = &(napid->napis[i].napi);
624 					NAPI_DEBUG("%s: enabling NAPI %d",
625 						   __func__, i);
626 					napi_enable(napi);
627 				}
628 		} else {
629 			rc = 0;
630 			for (i = 0; i < CE_COUNT_MAX; i++)
631 				if (napid->ce_map & (0x01 << i)) {
632 					napi = &(napid->napis[i].napi);
633 					NAPI_DEBUG("%s: disabling NAPI %d",
634 						   __func__, i);
635 					napi_disable(napi);
636 					/* in case it is affined, remove it */
637 					irq_set_affinity_hint(
638 							napid->napis[i].irq,
639 							NULL);
640 				}
641 		}
642 	} else {
643 		HIF_DBG("%s: no change in hif napi state (still %d)",
644 			 __func__, prev_state);
645 	}
646 
647 	NAPI_DEBUG("<--[rc=%d]", rc);
648 	return rc;
649 }
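
/*
 * Example (illustrative sketch): typical event posts. The throughput
 * value would come from hdd_napi_throughput_policy(); the CPU_STATE
 * payload packs the cpu id in the upper 16 bits and the new state in
 * the lower bits, mirroring hnc_cpu_notify_cb() further below.
 *
 *	hif_napi_event(hif_ctx, NAPI_EVT_TPUT_STATE,
 *		       (void *)QCA_NAPI_TPUT_HI);
 *	hif_napi_event(hif_ctx, NAPI_EVT_CPU_STATE,
 *		       (void *)((cpu << 16) | QCA_NAPI_CPU_DOWN));
 */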
650 
651 /**
652  * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
653  * @hif: hif context
654  * @ce : CE instance (or -1, to check if any CEs are enabled)
655  *
656  * Return: bool
657  */
658 int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
659 {
660 	int rc;
661 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
662 
663 	if (-1 == ce)
664 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
665 	else
666 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
667 		      (hif->napi_data.ce_map & (0x01 << ce)));
668 	return rc;
669 };
670 
671 /**
672  * hif_napi_enable_irq() - enables bus interrupts after napi_complete
673  *
674  * @hif: hif context
675  * @id : id of NAPI instance calling this (used to determine the CE)
676  *
677  * Return: void
678  */
679 inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
680 {
681 	struct hif_softc *scn = HIF_GET_SOFTC(hif);
682 
683 	hif_irq_enable(scn, NAPI_ID2PIPE(id));
684 }
685 
686 
687 /**
688  * hif_napi_schedule() - schedules napi, updates stats
689  * @hif_ctx: hif context
690  * @ce_id  : index of napi instance
691  *
692  * Return: true (napi was scheduled)
693  */
694 int hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
695 {
696 	int cpu = smp_processor_id();
697 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
698 
699 	hif_record_ce_desc_event(scn,  ce_id, NAPI_SCHEDULE,
700 				 NULL, NULL, 0);
701 
702 	scn->napi_data.napis[ce_id].stats[cpu].napi_schedules++;
703 	NAPI_DEBUG("scheduling napi %d (ce:%d)",
704 		   scn->napi_data.napis[ce_id].id, ce_id);
705 	napi_schedule(&(scn->napi_data.napis[ce_id].napi));
706 
707 	return true;
708 }
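
/*
 * Example (illustrative sketch): the CE interrupt path checks whether
 * NAPI owns the pipe and, if so, hands it over here instead of
 * scheduling the CE tasklet (the tasklet fallback lives in ce_tasklet
 * and is not reproduced):
 *
 *	if (hif_napi_enabled(hif_ctx, ce_id))
 *		hif_napi_schedule(hif_ctx, ce_id);
 *	else
 *		... fall back to the CE tasklet ...
 */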
709 
710 /**
711  * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
712  * @napi_info: pointer to qca_napi_info for the napi instance
713  *
714  * Return: true  => interrupt already on correct cpu, no correction needed
715  *         false => interrupt on wrong cpu, correction done for cpu affinity
716  *                   of the interrupt
717  */
718 static inline
719 bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
720 {
721 	bool right_cpu = true;
722 	int rc = 0;
723 	cpumask_t cpumask;
724 	int cpu;
725 	struct qca_napi_data *napid;
726 
727 	napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));
728 
729 	if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {
730 
731 		cpu = qdf_get_cpu();
732 		if (unlikely((hif_napi_cpu_blacklist(napid,
733 						BLACKLIST_QUERY) > 0) &&
734 						(cpu != napi_info->cpu))) {
735 			right_cpu = false;
736 
737 			NAPI_DEBUG("interrupt on wrong CPU, correcting");
738 			cpumask.bits[0] = (0x01 << napi_info->cpu);
739 
740 			irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0);
741 			rc = irq_set_affinity_hint(napi_info->irq,
742 						   &cpumask);
743 			irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING);
744 
745 			if (rc)
746 				HIF_ERROR("error setting irq affinity hint: %d",
747 					  rc);
748 			else
749 				napi_info->stats[cpu].cpu_corrected++;
750 		}
751 	}
752 	return right_cpu;
753 }
754 
755 /**
756  * hif_napi_poll() - NAPI poll routine
757  * @napi  : pointer to NAPI struct as kernel holds it
758  * @budget: the maximum amount of work the kernel allows in this poll cycle
759  *
760  * This is the body of the poll function.
761  * The poll function is called by the kernel. So, there is a wrapper
762  * function in HDD, which in turn calls this function.
763  * Two main reasons why the whole thing is not implemented in HDD:
764  * a) references to things like ce_service that HDD is not aware of
765  * b) proximity to the implementation of ce_tasklet, which the body
766  *    of this function should be very close to.
767  *
768  * NOTE TO THE MAINTAINER:
769  *  Consider this function and ce_tasklet very tightly coupled pairs.
770  *  Any changes to ce_tasklet or this function may likely need to be
771  *  reflected in the counterpart.
772  *
773  * Returns:
774  *  int: the amount of work done in this poll (<= budget)
775  */
776 int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
777 		  struct napi_struct *napi,
778 		  int budget)
779 {
780 	int    rc = 0; /* default: no work done, also takes care of error */
781 	int    normalized = 0;
782 	int    bucket;
783 	int    cpu = smp_processor_id();
784 	bool poll_on_right_cpu;
785 	struct hif_softc      *hif = HIF_GET_SOFTC(hif_ctx);
786 	struct qca_napi_info *napi_info;
787 	struct CE_state *ce_state = NULL;
788 
789 	if (unlikely(NULL == hif)) {
790 		HIF_ERROR("%s: hif context is NULL", __func__);
791 		QDF_ASSERT(0);
792 		goto out;
793 	}
794 
795 	napi_info = (struct qca_napi_info *)
796 		container_of(napi, struct qca_napi_info, napi);
797 
798 	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
799 		   __func__, napi_info->id, napi_info->irq, budget);
800 
801 	napi_info->stats[cpu].napi_polls++;
802 
803 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
804 				 NAPI_POLL_ENTER, NULL, NULL, cpu);
805 
806 	qdf_spin_lock_bh(&napi_info->lro_unloading_lock);
807 
808 	rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
809 	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
810 		    __func__, rc);
811 
812 	if (napi_info->lro_flush_cb)
813 		napi_info->lro_flush_cb(napi_info->lro_ctx);
814 	qdf_spin_unlock_bh(&napi_info->lro_unloading_lock);
815 
816 	/* do not return 0, if there was some work done,
817 	 * even if it is below the scale
818 	 */
819 	if (rc) {
820 		napi_info->stats[cpu].napi_workdone += rc;
821 		normalized = (rc / napi_info->scale);
822 		if (normalized == 0)
823 			normalized++;
824 		bucket = normalized / (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
825 		if (bucket >= QCA_NAPI_NUM_BUCKETS) {
826 			bucket = QCA_NAPI_NUM_BUCKETS - 1;
827 			HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)",
828 				bucket, QCA_NAPI_NUM_BUCKETS);
829 		}
830 		napi_info->stats[cpu].napi_budget_uses[bucket]++;
831 	} else {
832 		/* if ce_per_engine_service() reports 0, the poll should terminate */
833 		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
834 			   __func__, __LINE__);
835 	}
836 
837 	ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
838 
839 	/*
840 	 * Not using the API hif_napi_correct_cpu directly in the if statement
841 	 * below: due to short-circuit evaluation it may not get evaluated if
842 	 * placed at the end and any prior condition evaluates to true. The CPU
843 	 * correction check should kick in on every poll.
844 	 */
845 #ifdef NAPI_YIELD_BUDGET_BASED
846 	if (ce_state && (ce_state->force_break || 0 == rc)) {
847 #else
848 	poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
849 	if ((ce_state) &&
850 	    (!ce_check_rx_pending(ce_state) || (0 == rc) ||
851 	     !poll_on_right_cpu)) {
852 #endif
853 		napi_info->stats[cpu].napi_completes++;
854 #ifdef NAPI_YIELD_BUDGET_BASED
855 		ce_state->force_break = 0;
856 #endif
857 
858 		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
859 					 NULL, NULL, 0);
860 		if (normalized >= budget)
861 			normalized = budget - 1;
862 
863 		/* enable interrupts */
864 		napi_complete(napi);
865 		hif_napi_enable_irq(hif_ctx, napi_info->id);
866 		/* support suspend/resume */
867 		qdf_atomic_dec(&(hif->active_tasklet_cnt));
868 
869 		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
870 			   __func__, __LINE__);
871 	} else {
872 		/* 4.4 kernel NAPI implementation requires drivers to
873 		 * return full work when they ask to be re-scheduled,
874 		 * or napi_complete and re-start with a fresh interrupt
875 		 */
876 		normalized = budget;
877 	}
878 
879 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
880 				 NAPI_POLL_EXIT, NULL, NULL, normalized);
881 
882 	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
883 	return normalized;
884 out:
885 	return rc;
886 }
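
/*
 * Illustrative sketch of the HDD-side wrapper mentioned in the comment
 * above: the kernel invokes a netdev poll routine which simply forwards
 * to hif_napi_poll(). The wrapper and context-lookup names here are
 * hypothetical.
 *
 *	static int hdd_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		return hif_napi_poll(hdd_hif_handle, napi, budget);
 *	}
 */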
887 
888 #ifdef HELIUMPLUS
889 /**
890  *
891  * hif_napi_update_yield_stats() - update NAPI yield related stats
892  * @ce_state: CE state for which the NAPI yield stats need to be updated
894  * @time_limit_reached: indicates whether the time limit was reached
895  * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached
896  *
897  * Return: None
898  */
899 void hif_napi_update_yield_stats(struct CE_state *ce_state,
900 				 bool time_limit_reached,
901 				 bool rxpkt_thresh_reached)
902 {
903 	struct hif_softc *hif;
904 	struct qca_napi_data *napi_data = NULL;
905 	int ce_id = 0;
906 	int cpu_id = 0;
907 
908 	if (unlikely(NULL == ce_state)) {
909 		QDF_ASSERT(NULL != ce_state);
910 		return;
911 	}
912 
913 	hif = ce_state->scn;
914 
915 	if (unlikely(NULL == hif)) {
916 		QDF_ASSERT(NULL != hif);
917 		return;
918 	}
919 	napi_data = &(hif->napi_data);
920 	if (unlikely(NULL == napi_data)) {
921 		QDF_ASSERT(NULL != napi_data);
922 		return;
923 	}
924 
925 	ce_id = ce_state->id;
926 	cpu_id = qdf_get_cpu();
927 
928 	if (time_limit_reached)
929 		napi_data->napis[ce_id].stats[cpu_id].time_limit_reached++;
930 	else
931 		napi_data->napis[ce_id].stats[cpu_id].rxpkt_thresh_reached++;
932 }
933 
934 /**
935  *
936  * hif_napi_stats() - display NAPI CPU statistics
937  * @napid: pointer to qca_napi_data
938  *
939  * Description:
940  *    Prints the various CPU cores on which the NAPI instances /CEs interrupts
941  *    are being executed. Can be called from outside NAPI layer.
942  *
943  * Return: None
944  */
945 void hif_napi_stats(struct qca_napi_data *napid)
946 {
947 	int i;
948 	struct qca_napi_cpu *cpu;
949 
950 	if (napid == NULL) {
951 		qdf_print("%s: napid struct is null", __func__);
952 		return;
953 	}
954 
955 	cpu = napid->napi_cpu;
956 	qdf_print("NAPI CPU TABLE");
957 	qdf_print("lilclhead=%d, bigclhead=%d",
958 		  napid->lilcl_head, napid->bigcl_head);
959 	for (i = 0; i < NR_CPUS; i++) {
960 		qdf_print("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d",
961 			  i,
962 			  cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
963 			  cpu[i].core_mask.bits[0],
964 			  cpu[i].thread_mask.bits[0],
965 			  cpu[i].max_freq, cpu[i].napis,
966 			  cpu[i].cluster_nxt);
967 	}
968 }
969 
970 #ifdef FEATURE_NAPI_DEBUG
971 /*
972  * Local functions
973  * - no argument checks, all internal/trusted callers
974  */
975 static void hnc_dump_cpus(struct qca_napi_data *napid)
976 {
977 	hif_napi_stats(napid);
978 }
979 #else
980 static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ };
981 #endif /* FEATURE_NAPI_DEBUG */
982 /**
983  * hnc_link_clusters() - partitions the cpu table into clusters
984  * @napid: pointer to NAPI data
985  *
986  * Takes in a CPU topology table and builds two linked lists
987  * (big cluster cores, list-head at bigcl_head, and little cluster
988  * cores, list-head at lilcl_head) out of it.
989  *
990  * If there are more than two clusters:
991  * - bigcl_head and lilcl_head will be different,
992  * - the cluster with highest cpufreq will be considered the "big" cluster.
993  *   If there are more than one with the highest frequency, the *last* of such
994  *   clusters will be designated as the "big cluster"
995  * - the cluster with lowest cpufreq will be considered the "li'l" cluster.
996  *   If there are more than one clusters with the lowest cpu freq, the *first*
997  *   of such clusters will be designated as the "little cluster"
998  * - We only support up to 32 clusters
999  * Return: 0 : OK
1000  *         !0: error (at least one of lil/big clusters could not be found)
1001  *         !0: error (at least one of lil/big clusters could not be found)
 */
1002 #define HNC_MIN_CLUSTER 0
1003 #define HNC_MAX_CLUSTER 31
1004 static int hnc_link_clusters(struct qca_napi_data *napid)
1005 {
1006 	int rc = 0;
1007 
1008 	int i;
1009 	int it = 0;
1010 	uint32_t cl_done = 0x0;
1011 	int cl, curcl, curclhead = 0;
1012 	int more;
1013 	unsigned int lilfrq = INT_MAX;
1014 	unsigned int bigfrq = 0;
1015 	unsigned int clfrq = 0;
1016 	int prev = 0;
1017 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1018 
1019 	napid->lilcl_head = napid->bigcl_head = -1;
1020 
1021 	do {
1022 		more = 0;
1023 		it++; curcl = -1;
1024 		for (i = 0; i < NR_CPUS; i++) {
1025 			cl = cpus[i].cluster_id;
1026 			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
1027 				   i, cl);
1028 			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
1029 				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
1030 				QDF_ASSERT(0);
1031 				/* continue if ASSERTs are disabled */
1032 				continue;
1033 			};
1034 			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
1035 				NAPI_DEBUG("Core mask 0. SKIPPED\n");
1036 				continue;
1037 			}
1038 			if (cl_done & (0x01 << cl)) {
1039 				NAPI_DEBUG("Cluster already processed. SKIPPED\n");
1040 				continue;
1041 			} else {
1042 				if (more == 0) {
1043 					more = 1;
1044 					curcl = cl;
1045 					curclhead = i; /* row */
1046 					clfrq = cpus[i].max_freq;
1047 					prev = -1;
1048 				};
1049 				if ((curcl >= 0) && (curcl != cl)) {
1050 					NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
1051 						   cl, curcl);
1052 					continue;
1053 				}
1054 				if (cpus[i].max_freq != clfrq)
1055 					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
1056 						   cpus[i].max_freq, clfrq);
1057 				if (clfrq >= bigfrq) {
1058 					bigfrq = clfrq;
1059 					napid->bigcl_head  = curclhead;
1060 					NAPI_DEBUG("bigcl=%d\n", curclhead);
1061 				}
1062 				if (clfrq < lilfrq) {
1063 					lilfrq = clfrq;
1064 					napid->lilcl_head = curclhead;
1065 					NAPI_DEBUG("lilcl=%d\n", curclhead);
1066 				}
1067 				if (prev != -1)
1068 					cpus[prev].cluster_nxt = i;
1069 
1070 				prev = i;
1071 			}
1072 		}
1073 		if (curcl >= 0)
1074 			cl_done |= (0x01 << curcl);
1075 
1076 	} while (more);
1077 
1078 	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
1079 		rc = -EFAULT;
1080 
1081 	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
1082 	return rc;
1083 }
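
/*
 * Worked example (hypothetical 4+4 big.LITTLE system): with CPUs 0-3 in
 * the little cluster (cluster_id 0, lower max_freq) and CPUs 4-7 in the
 * big cluster (cluster_id 1, higher max_freq), hnc_link_clusters()
 * produces:
 *	lilcl_head = 0, cluster_nxt chain: 0 -> 1 -> 2 -> 3 -> -1
 *	bigcl_head = 4, cluster_nxt chain: 4 -> 5 -> 6 -> 7 -> -1
 */
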
1084 #undef HNC_MIN_CLUSTER
1085 #undef HNC_MAX_CLUSTER
1086 
1087 /*
1088  * hotplug function group
1089  */
1090 
1091 /**
1092  * hnc_cpu_notify_cb() - handles CPU hotplug events
1093  *
1094  * On transitions to online, we only handle the ONLINE event,
1095  * and ignore the PREP events, because we don't want to act too
1096  * early.
1097  * On transition to offline, we act on PREP events, because
1098  * we may need to move the irqs/NAPIs to another CPU before
1099  * it is actually off-lined.
1100  *
1101  * Return: NOTIFY_OK (don't block action)
1102  */
1103 static int hnc_cpu_notify_cb(struct notifier_block *nb,
1104 			     unsigned long          action,
1105 			     void                  *hcpu)
1106 {
1107 	int rc = NOTIFY_OK;
1108 	unsigned long cpu = (unsigned long)hcpu;
1109 	struct hif_opaque_softc *hif;
1110 	struct qca_napi_data *napid = NULL;
1111 
1112 	NAPI_DEBUG("-->%s(act=%ld, cpu=%ld)", __func__, action, cpu);
1113 
1114 	napid = qdf_container_of(nb, struct qca_napi_data, hnc_cpu_notifier);
1115 	hif = &qdf_container_of(napid, struct hif_softc, napi_data)->osc;
1116 
1117 	switch (action) {
1118 	case CPU_ONLINE:
1119 		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
1120 		NAPI_DEBUG("%s: CPU %ld marked %d",
1121 			   __func__, cpu, napid->napi_cpu[cpu].state);
1122 		break;
1123 	case CPU_DEAD: /* already dead; we have marked it before, but ... */
1124 	case CPU_DEAD_FROZEN:
1125 		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1126 		NAPI_DEBUG("%s: CPU %ld marked %d",
1127 			   __func__, cpu, napid->napi_cpu[cpu].state);
1128 		break;
1129 	case CPU_DOWN_PREPARE:
1130 	case CPU_DOWN_PREPARE_FROZEN:
1131 		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1132 
1133 		NAPI_DEBUG("%s: CPU %ld marked %d; updating affinity",
1134 			   __func__, cpu, napid->napi_cpu[cpu].state);
1135 
1136 		/*
1137 		 * we need to move any NAPIs on this CPU out.
1138 		 * if we are in LO throughput mode, this only matters
1139 		 * if the CPU is the designated low-tput CPU.
1140 		 */
1141 		hif_napi_event(hif,
1142 			       NAPI_EVT_CPU_STATE,
1143 			       (void *)
1144 			       ((cpu << 16) | napid->napi_cpu[cpu].state));
1145 		break;
1146 	default:
1147 		NAPI_DEBUG("%s: ignored. action: %ld", __func__, action);
1148 		break;
1149 	} /* switch */
1150 	NAPI_DEBUG("<--%s [%d]", __func__, rc);
1151 	return rc;
1152 }
1153 
1154 /**
1155  * hnc_hotplug_hook() - installs a hotplug notifier
1156  * @hif_sc: hif_sc context
1157  * @install: !0 => register, =0 => deregister
1158  *
1159  * Because the callback relies on the data layout of
1160  * struct hif_softc & its napi_data member, this callback
1161  * registration requires that the hif_softc is passed in.
1162  *
1163  * Note that this is different from the cpu notifier used by
1164  * rx_thread (cds_schedule.c).
1165  * We may consider combining these notifiers in the future.
1166  *
1167  * Return: 0: success
1168  *        <0: error
1169  */
1170 static int hnc_hotplug_hook(struct hif_softc *hif_sc, int install)
1171 {
1172 	int rc = 0;
1173 
1174 	NAPI_DEBUG("-->%s(%d)", __func__, install);
1175 
1176 	if (install) {
1177 		hif_sc->napi_data.hnc_cpu_notifier.notifier_call
1178 			= hnc_cpu_notify_cb;
1179 		rc = register_hotcpu_notifier(
1180 			&hif_sc->napi_data.hnc_cpu_notifier);
1181 	} else {
1182 		unregister_hotcpu_notifier(
1183 			&hif_sc->napi_data.hnc_cpu_notifier);
1184 	}
1185 
1186 	NAPI_DEBUG("<--%s()[%d]", __func__, rc);
1187 	return rc;
1188 }
1189 
1190 /**
1191  * hnc_tput_hook() - installs a callback in the throughput detector
1192  * @install: !0 => register; =0 => unregister
1193  *
1194  * installs a callback to be called when wifi driver throughput (tx+rx)
1195  * crosses a threshold. Currently, we are using the same criteria as
1196  * TCP ack suppression (500 packets/100ms by default).
1197  *
1198  * Return: 0 : success
1199  *         <0: failure
1200  */
1201 
1202 static int hnc_tput_hook(int install)
1203 {
1204 	int rc = 0;
1205 
1206 	/*
1207 	 * Nothing, until the bw_calculation accepts registration
1208 	 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk
1209 	 *   hdd_napi_throughput_policy(...)
1210 	 */
1211 	return rc;
1212 }
1213 
1214 /*
1215  * Implementation of hif_napi_cpu API
1216  */
1217 
1218 /**
1219  * hif_napi_cpu_init() - initialization of irq affinity block
1220  * @hif: pointer to hif context
1221  *
1222  * called by hif_napi_create, when the first NAPI instance is created
1223  * - builds napi_rss_cpus table from cpu topology
1224  * - links cores of the same clusters together
1225  * - installs hot-plug notifier
1226  * - installs throughput trigger notifier (when such mechanism exists)
1227  *
1228  * Return: 0: OK
1229  *         <0: error code
1230  */
1231 int hif_napi_cpu_init(struct hif_opaque_softc *hif)
1232 {
1233 	int rc = 0;
1234 	int i;
1235 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1236 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1237 
1238 	NAPI_DEBUG("--> ");
1239 
1240 	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
1241 		NAPI_DEBUG("NAPI RSS table already initialized.\n");
1242 		rc = -EALREADY;
1243 		goto lab_rss_init;
1244 	}
1245 
1246 	/* build CPU topology table */
1247 	for_each_possible_cpu(i) {
1248 		cpus[i].state       = ((cpumask_test_cpu(i, cpu_online_mask)
1249 					? QCA_NAPI_CPU_UP
1250 					: QCA_NAPI_CPU_DOWN));
1251 		cpus[i].core_id     = topology_core_id(i);
1252 		cpus[i].cluster_id  = topology_physical_package_id(i);
1253 		cpumask_copy(&(cpus[i].core_mask),
1254 			     topology_core_cpumask(i));
1255 		cpumask_copy(&(cpus[i].thread_mask),
1256 			     topology_sibling_cpumask(i));
1257 		cpus[i].max_freq    = cpufreq_quick_get_max(i);
1258 		cpus[i].napis       = 0x0;
1259 		cpus[i].cluster_nxt = -1; /* invalid */
1260 	}
1261 
1262 	/* link clusters together */
1263 	rc = hnc_link_clusters(napid);
1264 	if (0 != rc)
1265 		goto lab_err_topology;
1266 
1267 	/* install hotplug notifier */
1268 	rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 1);
1269 	if (0 != rc)
1270 		goto lab_err_hotplug;
1271 
1272 	/* install throughput notifier */
1273 	rc = hnc_tput_hook(1);
1274 	if (0 == rc)
1275 		goto lab_rss_init;
1276 
1277 lab_err_hotplug:
1278 	hnc_tput_hook(0);
1279 	hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
1280 lab_err_topology:
1281 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1282 lab_rss_init:
1283 	NAPI_DEBUG("<-- [rc=%d]", rc);
1284 	return rc;
1285 }
1286 
1287 /**
1288  * hif_napi_cpu_deinit() - clean-up of irq affinity block
1289  *
1290  * called by hif_napi_destroy, when the last instance is removed
1291  * - uninstalls throughput and hotplug notifiers
1292  * - clears cpu topology table
1293  * Return: 0: OK
1294  */
1295 int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
1296 {
1297 	int rc = 0;
1298 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1299 
1300 	NAPI_DEBUG("-->%s(...)", __func__);
1301 
1302 	/* uninstall tput notifier */
1303 	rc = hnc_tput_hook(0);
1304 
1305 	/* uninstall hotplug notifier */
1306 	rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
1307 
1308 	/* clear the topology table */
1309 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1310 
1311 	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);
1312 
1313 	return rc;
1314 }
1315 
1316 /**
1317  * hncm_migrate_to() - migrates a NAPI to a CPU
1318  * @napid: pointer to NAPI block
1319  * @napi_ce: CE id of the NAPI instance
1320  * @didx : index in the CPU topology table for the CPU to migrate to
1321  *
1322  * Migrates NAPI (identified by the CE_id) to the destination core
1323  * Updates the napi_map of the destination entry
1324  *
1325  * Return:
1326  *  =0 : success
1327  *  <0 : error
1328  */
1329 static int hncm_migrate_to(struct qca_napi_data *napid,
1330 			   int                   napi_ce,
1331 			   int                   didx)
1332 {
1333 	int rc = 0;
1334 	cpumask_t cpumask;
1335 
1336 	NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);
1337 
1338 	cpumask.bits[0] = (1 << didx);
1339 
1340 	irq_modify_status(napid->napis[napi_ce].irq, IRQ_NO_BALANCING, 0);
1341 	rc = irq_set_affinity_hint(napid->napis[napi_ce].irq, &cpumask);
1342 
1343 	/* unmark the napis bitmap in the cpu table */
1344 	napid->napi_cpu[napid->napis[napi_ce].cpu].napis &= ~(0x01 << napi_ce);
1345 	/* mark the napis bitmap for the new designated cpu */
1346 	napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
1347 	napid->napis[napi_ce].cpu = didx;
1348 
1349 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1350 	return rc;
1351 }
1352 /**
1353  * hncm_dest_cpu() - finds a destination CPU for NAPI
1354  * @napid: pointer to NAPI block
1355  * @act  : RELOCATE | COLLAPSE | DISPERSE
1356  *
1357  * Finds the designated destination for the next IRQ.
1358  * RELOCATE: translated to either COLLAPSE or DISPERSE based
1359  *           on napid->napi_mode (throughput state)
1360  * COLLAPSE: All have the same destination: the first online CPU in lilcl
1361  * DISPERSE: One of the CPU in bigcl, which has the smallest number of
1362  *           NAPIs on it
1363  *
1364  * Return: >=0 : index in the cpu topology table
1365  *       : < 0 : error
1366  */
1367 static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
1368 {
1369 	int destidx = -1;
1370 	int head, i;
1371 
1372 	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
1373 	if (act == HNC_ACT_RELOCATE) {
1374 		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
1375 			act = HNC_ACT_COLLAPSE;
1376 		else
1377 			act = HNC_ACT_DISPERSE;
1378 		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
1379 			   __func__, act);
1380 	}
1381 	if (act == HNC_ACT_COLLAPSE) {
1382 		head = i = napid->lilcl_head;
1383 retry_collapse:
1384 		while (i >= 0) {
1385 			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
1386 				destidx = i;
1387 				break;
1388 			}
1389 			i = napid->napi_cpu[i].cluster_nxt;
1390 		}
1391 		if ((destidx < 0) && (head == napid->lilcl_head)) {
1392 			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
1393 				__func__);
1394 			head = i = napid->bigcl_head;
1395 			goto retry_collapse;
1396 		}
1397 	} else { /* HNC_ACT_DISPERSE */
1398 		int smallest = 99; /* greater than any possible hweight32() value */
1399 		int smallidx = -1;
1400 
1401 		head = i = napid->bigcl_head;
1402 retry_disperse:
1403 		while (i >= 0) {
1404 			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
1405 			    (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
1406 				smallest = hweight32(napid->napi_cpu[i].napis);
1407 				smallidx = i;
1408 			}
1409 			i = napid->napi_cpu[i].cluster_nxt;
1410 		}
1411 		destidx = smallidx;
1412 		if ((destidx < 0) && (head == napid->bigcl_head)) {
1413 			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
1414 				__func__);
1415 			head = i = napid->lilcl_head;
1416 			goto retry_disperse;
1417 		}
1418 	}
1419 	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
1420 	return destidx;
1421 }
1422 /**
1423  * hif_napi_cpu_migrate() - migrate IRQs away
1424  * @cpu: -1 => all CPUs; <n> => that specific CPU
1425  * @act: COLLAPSE | DISPERSE
1426  *
1427  * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
1428  * cores. Eligible cores are:
1429  * act=COLLAPSE -> the first online core of the little cluster
1430  * act=DISPERSE -> separate cores of the big cluster, so that each core will
1431  *                 host a minimum number of NAPIs/IRQs (napid->napi_cpu[cpu].napis)
1432  *
1433  * Note that this function is called with a spinlock acquired already.
1434  *
1435  * Return: =0: success
1436  *         <0: error
1437  */
1438 
1439 int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
1440 {
1441 	int      rc = 0;
1442 	struct qca_napi_cpu *cpup;
1443 	int      i, dind;
1444 	uint32_t napis;
1445 
1446 	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
1447 		   __func__, cpu, action);
1448 	/* the following is really: hif_napi_enabled() with less overhead */
1449 	if (napid->ce_map == 0) {
1450 		NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
1451 		goto hncm_return;
1452 	}
1453 
1454 	cpup = napid->napi_cpu;
1455 
1456 	switch (action) {
1457 	case HNC_ACT_RELOCATE:
1458 	case HNC_ACT_DISPERSE:
1459 	case HNC_ACT_COLLAPSE: {
1460 		/* first find the src napi set */
1461 		if (cpu == HNC_ANY_CPU)
1462 			napis = napid->ce_map;
1463 		else
1464 			napis = cpup[cpu].napis;
1465 		/* then clear the napi bitmap on each CPU */
1466 		for (i = 0; i < NR_CPUS; i++)
1467 			cpup[i].napis = 0;
1468 		/* then for each of the NAPIs to disperse: */
1469 		for (i = 0; i < CE_COUNT_MAX; i++)
1470 			if (napis & (1 << i)) {
1471 				/* find a destination CPU */
1472 				dind = hncm_dest_cpu(napid, action);
1473 				if (dind >= 0) {
1474 					NAPI_DEBUG("Migrating NAPI ce%d to %d",
1475 						   i, dind);
1476 					rc = hncm_migrate_to(napid, i, dind);
1477 				} else {
1478 					NAPI_DEBUG("No dest for NAPI ce%d", i);
1479 					hnc_dump_cpus(napid);
1480 					rc = -1;
1481 				}
1482 			}
1483 		break;
1484 	}
1485 	default: {
1486 		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
1487 		QDF_BUG(0);
1488 		break;
1489 	}
1490 	} /* switch action */
1491 
1492 hncm_return:
1493 	hnc_dump_cpus(napid);
1494 	return rc;
1495 }
1496 
1497 
1498 /**
1499  * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting
1500  * @napid: pointer to qca_napi_data structure
1501  * @bl_flag: blacklist flag to enable/disable blacklisting
1502  *
1503  * The function enables/disables blacklisting for all the copy engine
1504  * interrupts on which NAPI is enabled.
1505  *
1506  * Return: None
1507  */
1508 static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
1509 {
1510 	int i;
1511 
1512 	for (i = 0; i < CE_COUNT_MAX; i++) {
1513 		/* check if NAPI is enabled on the CE */
1514 		if (!(napid->ce_map & (0x01 << i)))
1515 			continue;
1516 
1517 		if (bl_flag == true)
1518 			irq_modify_status(napid->napis[i].irq,
1519 					  0, IRQ_NO_BALANCING);
1520 		else
1521 			irq_modify_status(napid->napis[i].irq,
1522 					  IRQ_NO_BALANCING, 0);
1523 		HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
1524 	}
1525 }
1526 
1527 #ifdef CONFIG_SCHED_CORE_CTL
1528 /* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */
1529 static inline int hif_napi_core_ctl_set_boost(bool boost)
1530 {
1531 	return core_ctl_set_boost(boost);
1532 }
1533 #else
1534 static inline int hif_napi_core_ctl_set_boost(bool boost)
1535 {
1536 	return 0;
1537 }
1538 #endif
1539 /**
1540  * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
1541  * @napid: pointer to qca_napi_data structure
1542  * @op: blacklist operation to perform
1543  *
1544  * The function enables/disables/queries blacklisting for all CE RX
1545  * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables
1546  * core_ctl_set_boost.
1547  * Once blacklisting is enabled, the interrupts will not be managed by the IRQ
1548  * balancer.
1549  *
1550  * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST are not both enabled
1551  *         for BLACKLIST_QUERY op - blacklist refcount
1552  *         for BLACKLIST_ON op    - return value from core_ctl_set_boost API
1553  *         for BLACKLIST_OFF op   - return value from core_ctl_set_boost API
1554  */
1555 int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
1556 			   enum qca_blacklist_op op)
1557 {
1558 	int rc = 0;
1559 	static int ref_count; /* = 0 by the compiler */
1560 	uint8_t flags = napid->flags;
1561 	bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
1562 	bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;
1563 
1564 	NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);
1565 
1566 	if (!(bl_en && ccb_en)) {
1567 		rc = -EINVAL;
1568 		goto out;
1569 	}
1570 
1571 	switch (op) {
1572 	case BLACKLIST_QUERY:
1573 		rc = ref_count;
1574 		break;
1575 	case BLACKLIST_ON:
1576 		ref_count++;
1577 		rc = 0;
1578 		if (ref_count == 1) {
1579 			rc = hif_napi_core_ctl_set_boost(true);
1580 			NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
1581 				rc, ref_count);
1582 			hif_napi_bl_irq(napid, true);
1583 		}
1584 		break;
1585 	case BLACKLIST_OFF:
1586 		if (ref_count)
1587 			ref_count--;
1588 		rc = 0;
1589 		if (ref_count == 0) {
1590 			rc = hif_napi_core_ctl_set_boost(false);
1591 			NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
1592 				   rc, ref_count);
1593 			hif_napi_bl_irq(napid, false);
1594 		}
1595 		break;
1596 	default:
1597 		NAPI_DEBUG("Invalid blacklist op: %d", op);
1598 		rc = -EINVAL;
1599 	} /* switch */
1600 out:
1601 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1602 	return rc;
1603 }
1604 
1605 /**
1606  * hif_napi_serialize() - [de-]serialize NAPI operations
1607  * @hif:   context
1608  * @is_on: 1: serialize, 0: deserialize
1609  *
1610  * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the
1611  * following steps (see hif_napi_event for code):
1612  * - put irqs of all NAPI instances on the same CPU
1613  * - only for the first serialize call: blacklist
1614  *
1615  * hif_napi_serialize(hif, 0):
1616  * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
1617  * - at the end of the timer, check the current throughput state and
1618  *   implement it.
1619  */
1620 static unsigned long napi_serialize_reqs;
1621 int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
1622 {
1623 	int rc = -EINVAL;
1624 
1625 	if (hif != NULL)
1626 		switch (is_on) {
1627 		case 0: { /* de-serialize */
1628 			rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
1629 					    (void *) 0);
1630 			napi_serialize_reqs = 0;
1631 			break;
1632 		} /* end de-serialize */
1633 		case 1: { /* serialize */
1634 			rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
1635 					    (void *)napi_serialize_reqs++);
1636 			break;
1637 		} /* end serialize */
1638 		default:
1639 			break; /* no-op */
1640 		} /* switch */
1641 	return rc;
1642 }
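
/*
 * Example (illustrative sketch): a roaming/peer-management path would
 * bracket its time-critical section as below; the second call kicks off
 * the hdd-layer de-serialization timer described above.
 *
 *	hif_napi_serialize(hif_ctx, 1);
 *	... time-critical, single-core work ...
 *	hif_napi_serialize(hif_ctx, 0);
 */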
1643 
1644 #endif /* ifdef HELIUMPLUS */
1645