xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_napi.c (revision 928e3ecad0fabf5320100a0d8fbde785757aa071)
1 /*
2  * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /**
29  * DOC: hif_napi.c
30  *
31  * HIF NAPI interface implementation
32  */
33 
34 #include <string.h> /* memset */
35 
36 /* Linux headers */
37 #include <linux/cpumask.h>
38 #include <linux/cpufreq.h>
39 #include <linux/cpu.h>
40 #include <linux/topology.h>
41 #include <linux/interrupt.h>
42 #include <linux/irq.h>
43 #ifdef HELIUMPLUS
44 #ifdef CONFIG_SCHED_CORE_CTL
45 #include <linux/sched/core_ctl.h>
46 #endif
47 #include <pld_snoc.h>
48 #endif
49 #include <linux/pm.h>
50 
51 /* Driver headers */
52 #include <hif_napi.h>
53 #include <hif_debug.h>
54 #include <hif_io32.h>
55 #include <ce_api.h>
56 #include <ce_internal.h>
57 
58 enum napi_decision_vector {
59 	HIF_NAPI_NOEVENT = 0,
60 	HIF_NAPI_INITED  = 1,
61 	HIF_NAPI_CONF_UP = 2
62 };
63 #define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
64 
65 #ifdef HELIUMPLUS
66 static inline int hif_get_irq_for_ce(int ce_id)
67 {
68 	return pld_snoc_get_irq(ce_id);
69 }
70 #else /* HELIUMPLUS */
71 static inline int hif_get_irq_for_ce(int ce_id)
72 {
73 	return -EINVAL;
74 }
75 static int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu,
76 				int action)
77 {
78 	return 0;
79 }
80 
81 int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
82 					enum qca_blacklist_op op)
83 {
84 	return 0;
85 }
86 #endif /* HELIUMPLUS */
87 
88 /**
89  * hif_napi_create() - creates the NAPI structures for all htt_rx-capable CEs
90  * @hif_ctx: pointer to hif context
92  * @poll   : poll function to be used for the NAPI instances
93  * @budget : budget to be registered with the NAPI instances
94  * @scale  : scale factor on the weight (to scale the budget to 1000)
95  * @flags  : feature flags
96  *
97  * Description:
98  *    Creates NAPI instances. This function is called
99  *    unconditionally during initialization. It creates
100  *    napi structures through the proper HTC/HIF calls.
101  *    The structures are disabled on creation.
102  *    Note that for each NAPI instance a separate dummy netdev is used
103  *
104  * Return:
105  * < 0: error
106  * = 0: <should never happen>
107  * > 0: id of the created object (for multi-NAPI, number of objects created)
108  */
109 int hif_napi_create(struct hif_opaque_softc   *hif_ctx,
110 		    int (*poll)(struct napi_struct *, int),
111 		    int                budget,
112 		    int                scale,
113 		    uint8_t            flags)
114 {
115 	int i;
116 	struct qca_napi_data *napid;
117 	struct qca_napi_info *napii;
118 	struct CE_state      *ce_state;
119 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
120 	int    rc = 0;
121 
122 	NAPI_DEBUG("-->(budget=%d, scale=%d)",
123 		   budget, scale);
124 	NAPI_DEBUG("hif->napi_data.state = 0x%08x",
125 		   hif->napi_data.state);
126 	NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
127 		   hif->napi_data.ce_map);
128 
129 	napid = &(hif->napi_data);
130 	if (0 == (napid->state &  HIF_NAPI_INITED)) {
131 		memset(napid, 0, sizeof(struct qca_napi_data));
132 		qdf_spinlock_create(&(napid->lock));
133 
134 		napid->state |= HIF_NAPI_INITED;
135 		napid->flags = flags;
136 
137 		rc = hif_napi_cpu_init(hif_ctx);
138 		if (rc != 0) {
139 			HIF_ERROR("NAPI initialization failed. rc=%d", rc);
140 			goto hnc_err;
141 		}
142 
143 		HIF_INFO("%s: NAPI structures initialized, rc=%d",
144 			 __func__, rc);
145 	}
146 	for (i = 0; i < hif->ce_count; i++) {
147 		ce_state = hif->ce_id_to_state[i];
148 		NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
149 			   i, ce_state->htt_rx_data,
150 			   ce_state->htt_tx_data);
151 		if (!ce_state->htt_rx_data)
152 			continue;
153 
154 		/* Now this is a CE where we need NAPI on */
155 		NAPI_DEBUG("Creating NAPI on pipe %d", i);
156 
157 		napii = &(napid->napis[i]);
158 		memset(napii, 0, sizeof(struct qca_napi_info));
159 		napii->scale = scale;
160 		napii->id    = NAPI_PIPE2ID(i);
161 		napii->hif_ctx = hif_ctx;
162 		napii->irq   = hif_get_irq_for_ce(i);
163 
164 		if (napii->irq < 0)
165 			HIF_WARN("%s: bad IRQ value for CE %d: %d",
166 				 __func__, i, napii->irq);
167 
168 		qdf_spinlock_create(&napii->lro_unloading_lock);
169 		init_dummy_netdev(&(napii->netdev));
170 
171 		NAPI_DEBUG("adding napi=%p to netdev=%p (poll=%p, bdgt=%d)",
172 			   &(napii->napi), &(napii->netdev), poll, budget);
173 		netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget);
174 
175 		NAPI_DEBUG("after napi_add");
176 		NAPI_DEBUG("napi=0x%p, netdev=0x%p",
177 			   &(napii->napi), &(napii->netdev));
178 		NAPI_DEBUG("napi.dev_list.prev=0x%p, next=0x%p",
179 			   napii->napi.dev_list.prev,
180 			   napii->napi.dev_list.next);
181 		NAPI_DEBUG("dev.napi_list.prev=0x%p, next=0x%p",
182 			   napii->netdev.napi_list.prev,
183 			   napii->netdev.napi_list.next);
184 
185 		/* It is OK to change the state variable below without
186 		 * protection as there should be no-one around yet
187 		 */
188 		napid->ce_map |= (0x01 << i);
189 		HIF_INFO("%s: NAPI id %d created for pipe %d", __func__,
190 			 napii->id, i);
191 	}
192 	NAPI_DEBUG("NAPI ids created for all applicable pipes");
193 hnc_err:
194 	NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
195 	return napid->ce_map;
196 }
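
/*
 * Illustrative sketch (not part of the driver): a bus/attach layer would
 * typically create the NAPI instances once the copy engines exist and then
 * turn them on through the event interface when configuration allows it.
 * The caller name and the scale value below are hypothetical examples.
 *
 *	static void example_napi_bringup(struct hif_opaque_softc *hif_ctx,
 *			int (*poll)(struct napi_struct *, int))
 *	{
 *		int scale = 16;	// example scale value
 *		int ce_map = hif_napi_create(hif_ctx, poll, QCA_NAPI_BUDGET,
 *					     scale,
 *					     QCA_NAPI_FEATURE_CPU_CORRECTION);
 *
 *		if (ce_map <= 0)
 *			return;	// no htt_rx (NAPI-capable) pipes found
 *
 *		// instances are created disabled; an INI/CMD "on" event
 *		// moves the state machine to CONF_UP and enables them
 *		hif_napi_event(hif_ctx, NAPI_EVT_INI_FILE, (void *)1);
 *	}
 */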
197 
198 /**
199  *
200  * hif_napi_destroy() - destroys the NAPI structures for a given instance
201  * @hif_ctx: pointer to hif context
202  * @id    : napi instance id (maps to a CE pipe) to be destroyed
203  * @force : if set, will destroy even if entry is active (de-activates)
204  *
205  * Description:
206  *    Destroy a given NAPI instance. This function is called
207  *    unconditionally during cleanup.
208  *    Refuses to destroy an entry if it is still enabled (unless force=1)
209  *    Marks the whole napi_data invalid if all instances are destroyed.
210  *
211  * Return:
212  * -EINVAL: specific entry has not been created
213  * -EPERM : specific entry is still active
214  * < 0    : error
215  * = 0    : success
216  */
217 int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
218 		     uint8_t          id,
219 		     int              force)
220 {
221 	uint8_t ce = NAPI_ID2PIPE(id);
222 	int rc = 0;
223 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
224 
225 	NAPI_DEBUG("-->(id=%d, force=%d)", id, force);
226 
227 	if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
228 		HIF_ERROR("%s: NAPI not initialized or entry %d not created",
229 			  __func__, id);
230 		rc = -EINVAL;
231 	} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
232 		HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
233 			  __func__, id, ce);
234 		rc = -EINVAL;
235 	} else {
236 		struct qca_napi_data *napid;
237 		struct qca_napi_info *napii;
238 
239 		napid = &(hif->napi_data);
240 		napii = &(napid->napis[ce]);
241 
242 		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
243 			if (force) {
244 				napi_disable(&(napii->napi));
245 				HIF_INFO("%s: NAPI entry %d force disabled",
246 					 __func__, id);
247 				NAPI_DEBUG("NAPI %d force disabled", id);
248 			} else {
249 				HIF_ERROR("%s: Cannot destroy active NAPI %d",
250 					  __func__, id);
251 				rc = -EPERM;
252 			}
253 		}
254 		if (0 == rc) {
255 			NAPI_DEBUG("before napi_del");
256 			NAPI_DEBUG("napi.dlist.prv=0x%p, next=0x%p",
257 				  napii->napi.dev_list.prev,
258 				  napii->napi.dev_list.next);
259 			NAPI_DEBUG("dev.napi_l.prv=0x%p, next=0x%p",
260 				   napii->netdev.napi_list.prev,
261 				   napii->netdev.napi_list.next);
262 
263 			qdf_spinlock_destroy(&napii->lro_unloading_lock);
264 			netif_napi_del(&(napii->napi));
265 
266 			napid->ce_map &= ~(0x01 << ce);
267 			napii->scale  = 0;
268 			HIF_INFO("%s: NAPI %d destroyed\n", __func__, id);
269 
270 			/* if all instances have been destroyed,
271 			 * reset the whole structure to the
272 			 * uninitialized state
273 			 */
274 			if (napid->ce_map == 0) {
275 				rc = hif_napi_cpu_deinit(hif_ctx);
276 				/* caller is tolerant to receiving !=0 rc */
277 
278 				qdf_spinlock_destroy(&(napid->lock));
279 				memset(napid,
280 				       0, sizeof(struct qca_napi_data));
281 				HIF_INFO("%s: no NAPI instances. Zapped.",
282 					 __func__);
283 			}
284 		}
285 	}
286 
287 	return rc;
288 }
289 
290 /**
291  * hif_napi_lro_flush_cb_register() - init and register flush callback for LRO
292  * @hif_hdl: pointer to hif context
293  * @lro_flush_handler: LRO flush callback to be registered
294  * @lro_init_handler: callback for initializing LRO
295  *
296  * Return: positive value on success and 0 on failure
297  */
298 int hif_napi_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
299 				   void (lro_flush_handler)(void *),
300 				   void *(lro_init_handler)(void))
301 {
302 	int rc = 0;
303 	int i;
304 	struct CE_state *ce_state;
305 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
306 	void *data = NULL;
307 	struct qca_napi_data *napid;
308 	struct qca_napi_info *napii;
309 
310 	QDF_ASSERT(scn != NULL);
311 
312 	napid = hif_napi_get_all(hif_hdl);
313 	if (scn != NULL) {
314 		for (i = 0; i < scn->ce_count; i++) {
315 			ce_state = scn->ce_id_to_state[i];
316 			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
317 				data = lro_init_handler();
318 				if (data == NULL) {
319 					HIF_ERROR("%s: Failed to init LRO for CE %d",
320 						  __func__, i);
321 					continue;
322 				}
323 				napii = &(napid->napis[i]);
324 				napii->lro_flush_cb = lro_flush_handler;
325 				napii->lro_ctx = data;
326 				HIF_ERROR("Registering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
327 					i, napii->id, napii->lro_flush_cb,
328 					napii->lro_ctx);
329 				rc++;
330 			}
331 		}
332 	} else {
333 		HIF_ERROR("%s: hif_state NULL!", __func__);
334 	}
335 	return rc;
336 }
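
/*
 * Illustrative sketch (not part of the driver): the LRO layer is expected to
 * register a flush callback at init time and deregister it on unload, so
 * that hif_napi_poll() can flush aggregated frames at the end of each poll.
 * The handler names below are hypothetical.
 *
 *	count = hif_napi_lro_flush_cb_register(hif_hdl, example_lro_flush,
 *					       example_lro_init);
 *	...
 *	hif_napi_lro_flush_cb_deregister(hif_hdl, example_lro_deinit);
 */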
337 
338 /**
339  * hif_napi_lro_flush_cb_deregister() - deregister the LRO flush callback and free LRO
340  * @hif_hdl: pointer to hif context
341  * @lro_deinit_cb: LRO deinit callback
342  *
343  * Return: NONE
344  */
345 void hif_napi_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
346 				     void (lro_deinit_cb)(void *))
347 {
348 	int i;
349 	struct CE_state *ce_state;
350 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
351 	struct qca_napi_data *napid;
352 	struct qca_napi_info *napii;
353 
354 	QDF_ASSERT(scn != NULL);
355 
356 	napid = hif_napi_get_all(hif_hdl);
357 	if (scn != NULL) {
358 		for (i = 0; i < scn->ce_count; i++) {
359 			ce_state = scn->ce_id_to_state[i];
360 			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
361 				napii = &(napid->napis[i]);
362 				HIF_ERROR("deRegistering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
363 					i, napii->id, napii->lro_flush_cb,
364 					napii->lro_ctx);
365 				qdf_spin_lock_bh(&napii->lro_unloading_lock);
366 				napii->lro_flush_cb = NULL;
367 				lro_deinit_cb(napii->lro_ctx);
368 				napii->lro_ctx = NULL;
369 				qdf_spin_unlock_bh(
370 					&napii->lro_unloading_lock);
371 			}
372 		}
373 	} else {
374 		HIF_ERROR("%s: hif_state NULL!", __func__);
375 	}
376 }
377 
378 /**
379  * hif_napi_get_lro_info() - returns the address of the LRO data for napi_id
380  * @hif_hdl: pointer to hif context
381  * @napi_id: napi instance
382  *
383  * Description:
384  *    Returns the address of the LRO structure
385  *
386  * Return:
387  *  <addr>: address of the LRO structure
388  */
389 void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
390 {
391 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
392 	struct qca_napi_data *napid;
393 	struct qca_napi_info *napii;
394 
395 	napid = &(scn->napi_data);
396 	napii = &(napid->napis[NAPI_ID2PIPE(napi_id)]);
397 
398 	return napii->lro_ctx;
399 }
400 
401 /**
402  *
403  * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
404  * @hif: pointer to hif context
405  *
406  * Description:
407  *    Returns the address of the whole structure
408  *
409  * Return:
410  *  <addr>: address of the whole HIF NAPI structure
411  */
412 inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
413 {
414 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
415 
416 	return &(hif->napi_data);
417 }
418 
419 /**
420  *
421  * hif_napi_event() - reacts to events that impact NAPI
422  * @hif_ctx: pointer to hif context
423  * @event  : event that has been detected
424  * @data: more data regarding the event
425  *
426  * Description:
427  *   This function handles two types of events:
428  *   1- Events that change the state of NAPI (enabled/disabled):
429  *      {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
430  *      The state is retrievable by "hdd_napi_enabled(-1)"
431  *    - NAPI will be on if either INI file is on and it has not been disabled
432  *                                by a subsequent vendor CMD,
433  *                         or     it has been enabled by a vendor CMD.
434  *   2- Events that change the CPU affinity of a NAPI instance/IRQ:
435  *      {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
436  *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
437  *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
438  *    - In LO tput mode, NAPI will yield control of its interrupts to the system
439  *      management functions. However, in HI throughput mode, NAPI will actively
440  *      manage its interrupts/instances (by trying to disperse them out to
441  *      separate performance cores).
442  *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
443  *
444  *    + In some cases (roaming peer management is the only case so far), a
445  *      client can trigger a "SERIALIZE" event. Basically, this means that the
446  *      user is asking NAPI to go into a truly single execution context state.
447  *      So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted
448  *      (if called for the first time) and then collapses all IRQs (for NAPI
449  *      instances) onto a single core. If called multiple times, it will just
450  *      re-collapse the CPUs; this is safe because the blacklist-on() API is
451  *      reference-counted and has already been called.
452  *
453  *      Such a user should later send a "DESERIALIZE" (NORMAL) event to return
454  *      NAPI to its normal operation. Optionally, they can give a timeout value
455  *      (in multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In
456  *      this case, NAPI will just set the current throughput state to
457  *      uninitialized and set the delay period. Once the policy handler is
458  *      called, it skips the policy for that many periods, then applies it.
459  *
460  * Return:
461  *  < 0: some error
462  *  = 0: event handled successfully
463  */
464 int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
465 		   void *data)
466 {
467 	int      rc = 0;
468 	uint32_t prev_state;
469 	int      i;
470 	struct napi_struct *napi;
471 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
472 	struct qca_napi_data *napid = &(hif->napi_data);
473 	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
474 	enum {
475 		BLACKLIST_NOT_PENDING,
476 		BLACKLIST_ON_PENDING,
477 		BLACKLIST_OFF_PENDING
478 	     } blacklist_pending = BLACKLIST_NOT_PENDING;
479 
480 	NAPI_DEBUG("%s: -->(event=%d, aux=%p)", __func__, event, data);
481 
482 	if ((napid->state & HIF_NAPI_INITED) == 0) {
483 		NAPI_DEBUG("%s: got event when NAPI not initialized",
484 			   __func__);
485 		return -EINVAL;
486 	}
487 	qdf_spin_lock_bh(&(napid->lock));
488 	prev_state = napid->state;
489 	switch (event) {
490 	case NAPI_EVT_INI_FILE:
491 	case NAPI_EVT_CMD_STATE:
492 	case NAPI_EVT_INT_STATE: {
493 		int on = (data != ((void *)0));
494 
495 		HIF_INFO("%s: recved evnt: STATE_CMD %d; v = %d (state=0x%0x)",
496 			 __func__, event,
497 			 on, prev_state);
498 		if (on)
499 			if (prev_state & HIF_NAPI_CONF_UP) {
500 				HIF_INFO("%s: duplicate NAPI conf ON msg",
501 					 __func__);
502 			} else {
503 				HIF_INFO("%s: setting state to ON",
504 					 __func__);
505 				napid->state |= HIF_NAPI_CONF_UP;
506 			}
507 		else /* off request */
508 			if (prev_state & HIF_NAPI_CONF_UP) {
509 				HIF_INFO("%s: setting state to OFF",
510 				 __func__);
511 				napid->state &= ~HIF_NAPI_CONF_UP;
512 			} else {
513 				HIF_INFO("%s: duplicate NAPI conf OFF msg",
514 					 __func__);
515 			}
516 		break;
517 	}
518 	/* case NAPI_INIT_FILE/CMD_STATE */
519 
520 	case NAPI_EVT_CPU_STATE: {
521 		int cpu = ((unsigned long int)data >> 16);
522 		int val = ((unsigned long int)data & 0x0ff);
523 
524 		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
525 			   __func__, cpu, val);
526 
527 		/* state has already been set by hnc_cpu_notify_cb */
528 		if ((val == QCA_NAPI_CPU_DOWN) &&
529 		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
530 		    (napid->napi_cpu[cpu].napis != 0)) {
531 			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
532 				   __func__, cpu);
533 			rc = hif_napi_cpu_migrate(napid,
534 						  cpu,
535 						  HNC_ACT_RELOCATE);
536 			napid->napi_cpu[cpu].napis = 0;
537 		}
538 		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
539 		break;
540 	}
541 
542 	case NAPI_EVT_TPUT_STATE: {
543 		tput_mode = (enum qca_napi_tput_state)data;
544 		if (tput_mode == QCA_NAPI_TPUT_LO) {
545 			/* from TPUT_HI -> TPUT_LO */
546 			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
547 				   __func__);
548 			blacklist_pending = BLACKLIST_OFF_PENDING;
549 			/*
550 			 * Ideally we should "collapse" interrupts here, since
551 			 * we are "dispersing" interrupts in the "else" case.
552 			 * This allows the possibility that our interrupts may
553 			 * still be on the perf cluster the next time we enter
554 			 * high tput mode. However, the irq_balancer is free
555 			 * to move our interrupts to power cluster once
556 			 * blacklisting has been turned off in the "else" case.
557 			 */
558 		} else {
559 			/* from TPUT_LO -> TPUT_HI */
560 			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
561 				   __func__);
562 			rc = hif_napi_cpu_migrate(napid,
563 						  HNC_ANY_CPU,
564 						  HNC_ACT_DISPERSE);
565 
566 			blacklist_pending = BLACKLIST_ON_PENDING;
567 		}
568 		napid->napi_mode = tput_mode;
569 		break;
570 	}
571 
572 	case NAPI_EVT_USR_SERIAL: {
573 		unsigned long users = (unsigned long)data;
574 
575 		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
576 			   __func__, users);
577 
578 		rc = hif_napi_cpu_migrate(napid,
579 					  HNC_ANY_CPU,
580 					  HNC_ACT_COLLAPSE);
581 		if ((users == 0) && (rc == 0))
582 			blacklist_pending = BLACKLIST_ON_PENDING;
583 		break;
584 	}
585 	case NAPI_EVT_USR_NORMAL: {
586 		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
587 		/*
588 		 * Deserialization timeout is handled at hdd layer;
589 		 * just mark current mode to uninitialized to ensure
590 		 * it will be set when the delay is over
591 		 */
592 		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
593 		break;
594 	}
595 	default: {
596 		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
597 			  __func__, event, (unsigned long) data);
598 		break;
599 	} /* default */
600 	}; /* switch */
601 
602 
603 	switch (blacklist_pending) {
604 	case BLACKLIST_ON_PENDING:
605 		/* assume the control of WLAN IRQs */
606 		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
607 		break;
608 	case BLACKLIST_OFF_PENDING:
609 		/* yield the control of WLAN IRQs */
610 		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
611 		break;
612 	default: /* nothing to do */
613 		break;
614 	} /* switch blacklist_pending */
615 
616 	qdf_spin_unlock_bh(&(napid->lock));
617 
618 	if (prev_state != napid->state) {
619 		if (napid->state == ENABLE_NAPI_MASK) {
620 			rc = 1;
621 			for (i = 0; i < CE_COUNT_MAX; i++)
622 				if ((napid->ce_map & (0x01 << i))) {
623 					napi = &(napid->napis[i].napi);
624 					NAPI_DEBUG("%s: enabling NAPI %d",
625 						   __func__, i);
626 					napi_enable(napi);
627 				}
628 		} else {
629 			rc = 0;
630 			for (i = 0; i < CE_COUNT_MAX; i++)
631 				if (napid->ce_map & (0x01 << i)) {
632 					napi = &(napid->napis[i].napi);
633 					NAPI_DEBUG("%s: disabling NAPI %d",
634 						   __func__, i);
635 					napi_disable(napi);
636 					/* in case it is affined, remove it */
637 					irq_set_affinity_hint(
638 							napid->napis[i].irq,
639 							NULL);
640 				}
641 		}
642 	} else {
643 		HIF_INFO("%s: no change in hif napi state (still %d)",
644 			 __func__, prev_state);
645 	}
646 
647 	NAPI_DEBUG("<--[rc=%d]", rc);
648 	return rc;
649 }
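
/*
 * Illustrative sketch (not part of the driver): how a configuration layer
 * would drive the state machine above. For the INI/CMD events, a non-NULL
 * "data" pointer means "on" and NULL means "off"; the instances are actually
 * enabled only when both HIF_NAPI_INITED and HIF_NAPI_CONF_UP end up set.
 *
 *	// INI file says NAPI is allowed
 *	hif_napi_event(hif_ctx, NAPI_EVT_INI_FILE, (void *)1);
 *	// a vendor command later turns it off again
 *	hif_napi_event(hif_ctx, NAPI_EVT_CMD_STATE, (void *)0);
 *	// bus bandwidth monitor reports high throughput
 *	hif_napi_event(hif_ctx, NAPI_EVT_TPUT_STATE, (void *)QCA_NAPI_TPUT_HI);
 */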
650 
651 /**
652  * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
653  * @hif: hif context
654  * @ce : CE instance (or -1, to check if any CEs are enabled)
655  *
656  * Return: bool
657  */
658 int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
659 {
660 	int rc;
661 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
662 
663 	if (-1 == ce)
664 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
665 	else
666 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
667 		      (hif->napi_data.ce_map & (0x01 << ce)));
668 	return rc;
669 }
670 
671 /**
672  * hif_napi_enable_irq() - enables bus interrupts after napi_complete
673  *
674  * @hif: hif context
675  * @id : id of NAPI instance calling this (used to determine the CE)
676  *
677  * Return: void
678  */
679 inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
680 {
681 	struct hif_softc *scn = HIF_GET_SOFTC(hif);
682 
683 	hif_irq_enable(scn, NAPI_ID2PIPE(id));
684 }
685 
686 
687 /**
688  * hif_napi_schedule() - schedules napi, updates stats
689  * @hif_ctx: hif context
690  * @ce_id  : index of napi instance
691  *
692  * Return: true if napi was scheduled
693  */
694 int hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
695 {
696 	int cpu = smp_processor_id();
697 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
698 
699 	hif_record_ce_desc_event(scn,  ce_id, NAPI_SCHEDULE,
700 				 NULL, NULL, 0);
701 
702 	scn->napi_data.napis[ce_id].stats[cpu].napi_schedules++;
703 	NAPI_DEBUG("scheduling napi %d (ce:%d)",
704 		   scn->napi_data.napis[ce_id].id, ce_id);
705 	napi_schedule(&(scn->napi_data.napis[ce_id].napi));
706 
707 	return true;
708 }
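
/*
 * Illustrative sketch (not part of the driver): the CE interrupt path is
 * expected to mask the CE interrupt and hand the pipe over to NAPI roughly
 * as below; the example_* helpers are hypothetical, the real dispatch lives
 * in the ce_tasklet/ce_irq code.
 *
 *	if (hif_napi_enabled(hif_ctx, ce_id)) {
 *		example_disable_ce_irq(hif_ctx, ce_id);	// mask the source
 *		hif_napi_schedule(hif_ctx, ce_id);	// softirq will poll
 *	} else {
 *		example_schedule_ce_tasklet(hif_ctx, ce_id);	// legacy path
 *	}
 */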
709 
710 /**
711  * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
712  * @napi_info: pointer to qca_napi_info for the napi instance
713  *
714  * Return: true  => interrupt already on correct cpu, no correction needed
715  *         false => interrupt on wrong cpu, correction done for cpu affinity
716  *                   of the interrupt
717  */
718 static inline
719 bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
720 {
721 	bool right_cpu = true;
722 	int rc = 0;
723 	cpumask_t cpumask;
724 	int cpu;
725 	struct qca_napi_data *napid;
726 
727 	napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));
728 
729 	if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {
730 
731 		cpu = qdf_get_cpu();
732 		if (unlikely((hif_napi_cpu_blacklist(napid,
733 						BLACKLIST_QUERY) > 0) &&
734 						(cpu != napi_info->cpu))) {
735 			right_cpu = false;
736 
737 			NAPI_DEBUG("interrupt on wrong CPU, correcting");
738 			cpumask.bits[0] = (0x01 << napi_info->cpu);
739 
740 			irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0);
741 			rc = irq_set_affinity_hint(napi_info->irq,
742 						   &cpumask);
743 			irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING);
744 
745 			if (rc)
746 				HIF_ERROR("error setting irq affinity hint: %d", rc);
747 			else
748 				napi_info->stats[cpu].cpu_corrected++;
749 		}
750 	}
751 	return right_cpu;
752 }
753 
754 /**
755  * hif_napi_poll() - NAPI poll routine
756  * @napi  : pointer to NAPI struct as kernel holds it
757  * @budget:
758  * @budget: maximum amount of work this poll is allowed to do
759  * This is the body of the poll function.
760  * The poll function is called by kernel. So, there is a wrapper
761  * The poll function is called by the kernel. So, there is a wrapper
762  * Two main reasons why the whole thing is not implemented in HDD:
763  * a) references to things like ce_service that HDD is not aware of
764  * b) proximity to the implementation of ce_tasklet, which the body
765  *    of this function should be very close to.
766  *
767  * NOTE TO THE MAINTAINER:
768  *  Consider this function and ce_tasklet very tightly coupled pairs.
769  *  Any changes to ce_tasklet or this function may likely need to be
770  *  reflected in the counterpart.
771  *
772  * Returns:
773  *  int: the amount of work done in this poll (<= budget)
774  */
775 int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
776 		  struct napi_struct *napi,
777 		  int budget)
778 {
779 	int    rc = 0; /* default: no work done, also takes care of error */
780 	int    normalized = 0;
781 	int    bucket;
782 	int    cpu = smp_processor_id();
783 	bool poll_on_right_cpu;
784 	struct hif_softc      *hif = HIF_GET_SOFTC(hif_ctx);
785 	struct qca_napi_info *napi_info;
786 	struct CE_state *ce_state = NULL;
787 
788 	napi_info = (struct qca_napi_info *)
789 		container_of(napi, struct qca_napi_info, napi);
790 
791 	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
792 		   __func__, napi_info->id, napi_info->irq, budget);
793 
794 	if (unlikely(NULL == hif)) {
795 		HIF_ERROR("%s: hif context is NULL", __func__);
796 		QDF_ASSERT(0);
797 		goto out;
798 	}
799 
800 	napi_info->stats[cpu].napi_polls++;
801 
802 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
803 				 NAPI_POLL_ENTER, NULL, NULL, cpu);
804 
805 	qdf_spin_lock_bh(&napi_info->lro_unloading_lock);
806 
807 	rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
808 	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
809 		    __func__, rc);
810 
811 	if (napi_info->lro_flush_cb)
812 		napi_info->lro_flush_cb(napi_info->lro_ctx);
813 	qdf_spin_unlock_bh(&napi_info->lro_unloading_lock);
814 
815 	/* do not return 0, if there was some work done,
816 	 * even if it is below the scale
817 	 */
818 	if (rc) {
819 		napi_info->stats[cpu].napi_workdone += rc;
820 		normalized = (rc / napi_info->scale);
821 		if (normalized == 0)
822 			normalized++;
823 		bucket = normalized / (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
824 		if (bucket >= QCA_NAPI_NUM_BUCKETS) {
825 			bucket = QCA_NAPI_NUM_BUCKETS - 1;
826 			HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)",
827 				bucket, QCA_NAPI_NUM_BUCKETS);
828 		}
829 		napi_info->stats[cpu].napi_budget_uses[bucket]++;
830 	} else {
831 		/* if ce_per_engine_service() reports 0, terminate the poll */
832 		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
833 			   __func__, __LINE__);
834 	}
835 
836 	ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
837 
838 	/*
839 	 * Not using the API hif_napi_correct_cpu directly in the if statement
840 	 * below since the API may not get evaluated if put at the end and any
841 	 * prior condition evaluates to true. The CPU correction
842 	 * check should kick in every poll.
843 	 */
844 #ifdef NAPI_YIELD_BUDGET_BASED
845 	if (ce_state && (ce_state->force_break || 0 == rc)) {
846 #else
847 	poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
848 	if ((ce_state) &&
849 	    (!ce_check_rx_pending(ce_state) || (0 == rc) ||
850 	     !poll_on_right_cpu)) {
851 #endif
852 		napi_info->stats[cpu].napi_completes++;
853 #ifdef NAPI_YIELD_BUDGET_BASED
854 		ce_state->force_break = 0;
855 #endif
856 
857 		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
858 					 NULL, NULL, 0);
859 		if (normalized >= budget)
860 			normalized = budget - 1;
861 
862 		/* enable interrupts */
863 		napi_complete(napi);
864 		hif_napi_enable_irq(hif_ctx, napi_info->id);
865 		/* support suspend/resume */
866 		qdf_atomic_dec(&(hif->active_tasklet_cnt));
867 
868 		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
869 			   __func__, __LINE__);
870 	} else {
871 		/* 4.4 kernel NAPI implementation requires drivers to
872 		 * return full work when they ask to be re-scheduled,
873 		 * or napi_complete and re-start with a fresh interrupt
874 		 */
875 		normalized = budget;
876 	}
877 
878 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
879 				 NAPI_POLL_EXIT, NULL, NULL, normalized);
880 
881 	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
882 	return normalized;
883 out:
884 	return rc;
885 }
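
/*
 * Illustrative sketch (not part of the driver): the HDD wrapper mentioned in
 * the comment above simply forwards the kernel's poll call into this file;
 * the names below are hypothetical.
 *
 *	static int example_hdd_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		return hif_napi_poll(example_get_hif_ctx(), napi, budget);
 *	}
 *
 * It is this wrapper (passed as the "poll" argument of hif_napi_create())
 * that gets registered with the kernel via netif_napi_add().
 */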
886 
887 #ifdef HELIUMPLUS
888 /**
889  *
890  * hif_napi_update_yield_stats() - update NAPI yield related stats
891  * @ce_state: CE state for which the yield stats need to be updated
892  *            (the CPU ID is derived internally via qdf_get_cpu())
893  * @time_limit_reached: indicates whether the time limit was reached
894  * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached
895  *
896  * Return: None
897  */
898 void hif_napi_update_yield_stats(struct CE_state *ce_state,
899 				 bool time_limit_reached,
900 				 bool rxpkt_thresh_reached)
901 {
902 	struct hif_softc *hif;
903 	struct qca_napi_data *napi_data = NULL;
904 	int ce_id = 0;
905 	int cpu_id = 0;
906 
907 	if (unlikely(NULL == ce_state)) {
908 		QDF_ASSERT(NULL != ce_state);
909 		return;
910 	}
911 
912 	hif = ce_state->scn;
913 
914 	if (unlikely(NULL == hif)) {
915 		QDF_ASSERT(NULL != hif);
916 		return;
917 	} else {
918 		napi_data = &(hif->napi_data);
919 		if (unlikely(NULL == napi_data)) {
920 			QDF_ASSERT(NULL != napi_data);
921 			return;
922 		}
923 	}
924 
925 	ce_id = ce_state->id;
926 	cpu_id = qdf_get_cpu();
927 
928 	if (time_limit_reached)
929 		napi_data->napis[ce_id].stats[cpu_id].time_limit_reached++;
930 	else
931 		napi_data->napis[ce_id].stats[cpu_id].rxpkt_thresh_reached++;
932 }
933 
934 /**
935  *
936  * hif_napi_stats() - display NAPI CPU statistics
937  * @napid: pointer to qca_napi_data
938  *
939  * Description:
940  *    Prints the various CPU cores on which the NAPI instances /CEs interrupts
941  *    are being executed. Can be called from outside NAPI layer.
942  *
943  * Return: None
944  */
945 void hif_napi_stats(struct qca_napi_data *napid)
946 {
947 	int i;
948 	struct qca_napi_cpu *cpu;
949 
950 	if (napid == NULL) {
951 		qdf_print("%s: napid struct is null", __func__);
952 		return;
953 	}
954 
955 	cpu = napid->napi_cpu;
956 	qdf_print("NAPI CPU TABLE");
957 	qdf_print("lilclhead=%d, bigclhead=%d",
958 		  napid->lilcl_head, napid->bigcl_head);
959 	for (i = 0; i < NR_CPUS; i++) {
960 		qdf_print("CPU[%02d]: state:%d crid=%02d clid=%02d "
961 			  "crmk:0x%0lx thmk:0x%0lx frq:%d "
962 			  "napi = 0x%08x lnk:%d",
963 			  i,
964 			  cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
965 			  cpu[i].core_mask.bits[0],
966 			  cpu[i].thread_mask.bits[0],
967 			  cpu[i].max_freq, cpu[i].napis,
968 			  cpu[i].cluster_nxt);
969 	}
970 }
971 
972 #ifdef FEATURE_NAPI_DEBUG
973 /*
974  * Local functions
975  * - no argument checks, all internal/trusted callers
976  */
977 static void hnc_dump_cpus(struct qca_napi_data *napid)
978 {
979 	hif_napi_stats(napid);
980 }
981 #else
982 static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ };
983 #endif /* FEATURE_NAPI_DEBUG */
984 /**
985  * hnc_link_clusters() - partitions the cpu table into clusters
986  * @napid: pointer to NAPI data
987  *
988  * Takes in a CPU topology table and builds two linked lists
989  * (big cluster cores, list-head at bigcl_head, and little cluster
990  * cores, list-head at lilcl_head) out of it.
991  *
992  * If there is more than one cluster:
993  * - bigcl_head and lilcl_head will be different,
994  * - the cluster with the highest cpufreq will be considered the "big" cluster.
995  *   If more than one cluster shares the highest frequency, the *last* such
996  *   cluster will be designated as the "big cluster"
997  * - the cluster with the lowest cpufreq will be considered the "li'l" cluster.
998  *   If more than one cluster shares the lowest cpu freq, the *first* such
999  *   cluster will be designated as the "little cluster"
1000  * - We only support up to 32 clusters
1001  * Return: 0 : OK
1002  *         !0: error (at least one of lil/big clusters could not be found)
1003  */
1004 #define HNC_MIN_CLUSTER 0
1005 #define HNC_MAX_CLUSTER 31
1006 static int hnc_link_clusters(struct qca_napi_data *napid)
1007 {
1008 	int rc = 0;
1009 
1010 	int i;
1011 	int it = 0;
1012 	uint32_t cl_done = 0x0;
1013 	int cl, curcl, curclhead = 0;
1014 	int more;
1015 	unsigned int lilfrq = INT_MAX;
1016 	unsigned int bigfrq = 0;
1017 	unsigned int clfrq = 0;
1018 	int prev = 0;
1019 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1020 
1021 	napid->lilcl_head = napid->bigcl_head = -1;
1022 
1023 	do {
1024 		more = 0;
1025 		it++; curcl = -1;
1026 		for (i = 0; i < NR_CPUS; i++) {
1027 			cl = cpus[i].cluster_id;
1028 			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
1029 				   i, cl);
1030 			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
1031 				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
1032 				QDF_ASSERT(0);
1033 				/* continue if ASSERTs are disabled */
1034 				continue;
1035 			};
1036 			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
1037 				NAPI_DEBUG("Core mask 0. SKIPPED\n");
1038 				continue;
1039 			}
1040 			if (cl_done & (0x01 << cl)) {
1041 				NAPI_DEBUG("Cluster already processed. "
1042 					   "SKIPPED\n");
1043 				continue;
1044 			} else {
1045 				if (more == 0) {
1046 					more = 1;
1047 					curcl = cl;
1048 					curclhead = i; /* row */
1049 					clfrq = cpus[i].max_freq;
1050 					prev = -1;
1051 				};
1052 				if ((curcl >= 0) && (curcl != cl)) {
1053 					NAPI_DEBUG("Entry cl(%d) != curcl(%d). "
1054 						   "SKIPPED\n",
1055 						   cl, curcl);
1056 					continue;
1057 				}
1058 				if (cpus[i].max_freq != clfrq)
1059 					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
1060 						   cpus[i].max_freq, clfrq);
1061 				if (clfrq >= bigfrq) {
1062 					bigfrq = clfrq;
1063 					napid->bigcl_head  = curclhead;
1064 					NAPI_DEBUG("bigcl=%d\n", curclhead);
1065 				}
1066 				if (clfrq < lilfrq) {
1067 					lilfrq = clfrq;
1068 					napid->lilcl_head = curclhead;
1069 					NAPI_DEBUG("lilcl=%d\n", curclhead);
1070 				}
1071 				if (prev != -1)
1072 					cpus[prev].cluster_nxt = i;
1073 
1074 				prev = i;
1075 			}
1076 		}
1077 		if (curcl >= 0)
1078 			cl_done |= (0x01 << curcl);
1079 
1080 	} while (more);
1081 
1082 	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
1083 		rc = -EFAULT;
1084 
1085 	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
1086 	return rc;
1087 }
1088 #undef HNC_MIN_CLUSTER
1089 #undef HNC_MAX_CLUSTER
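
/*
 * Illustrative example of the linking above (values are hypothetical): on a
 * 4+4 big.LITTLE system with little cores 0-3 (cluster 0, 1.4 GHz) and big
 * cores 4-7 (cluster 1, 2.1 GHz), the loop produces
 *
 *	lilcl_head = 0, cluster_nxt: 0 -> 1 -> 2 -> 3 -> -1
 *	bigcl_head = 4, cluster_nxt: 4 -> 5 -> 6 -> 7 -> -1
 *
 * so that hncm_dest_cpu() can walk each cluster via cluster_nxt starting
 * from the respective list head.
 */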
1090 
1091 /*
1092  * hotplug function group
1093  */
1094 
1095 /**
1096  * hnc_cpu_notify_cb() - handles CPU hotplug events
1097  *
1098  * On transitions to online, we only handle the ONLINE event,
1099  * and ignore the PREP events, because we don't want to act too
1100  * early.
1101  * On transitions to offline, we act on PREP events, because
1102  * we may need to move the irqs/NAPIs to another CPU before
1103  * it is actually off-lined.
1104  *
1105  * Return: NOTIFY_OK (don't block action)
1106  */
1107 static int hnc_cpu_notify_cb(struct notifier_block *nb,
1108 			     unsigned long          action,
1109 			     void                  *hcpu)
1110 {
1111 	int rc = NOTIFY_OK;
1112 	unsigned long cpu = (unsigned long)hcpu;
1113 	struct hif_opaque_softc *hif;
1114 	struct qca_napi_data *napid = NULL;
1115 
1116 	NAPI_DEBUG("-->%s(act=%ld, cpu=%ld)", __func__, action, cpu);
1117 
1118 	napid = qdf_container_of(nb, struct qca_napi_data, hnc_cpu_notifier);
1119 	hif = &qdf_container_of(napid, struct hif_softc, napi_data)->osc;
1120 
1121 	switch (action) {
1122 	case CPU_ONLINE:
1123 		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
1124 		NAPI_DEBUG("%s: CPU %ld marked %d",
1125 			   __func__, cpu, napid->napi_cpu[cpu].state);
1126 		break;
1127 	case CPU_DEAD: /* already dead; we have marked it before, but ... */
1128 	case CPU_DEAD_FROZEN:
1129 		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1130 		NAPI_DEBUG("%s: CPU %ld marked %d",
1131 			   __func__, cpu, napid->napi_cpu[cpu].state);
1132 		break;
1133 	case CPU_DOWN_PREPARE:
1134 	case CPU_DOWN_PREPARE_FROZEN:
1135 		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1136 
1137 		NAPI_DEBUG("%s: CPU %ld marked %d; updating affinity",
1138 			   __func__, cpu, napid->napi_cpu[cpu].state);
1139 
1140 		/*
1141 		 * we need to move any NAPIs on this CPU out.
1142 		 * if we are in LO throughput mode, then this is valid
1143 		 * if the CPU is the designated low CPU.
1144 		 */
1145 		hif_napi_event(hif,
1146 			       NAPI_EVT_CPU_STATE,
1147 			       (void *)
1148 			       ((cpu << 16) | napid->napi_cpu[cpu].state));
1149 		break;
1150 	default:
1151 		NAPI_DEBUG("%s: ignored. action: %ld", __func__, action);
1152 		break;
1153 	} /* switch */
1154 	NAPI_DEBUG("<--%s [%d]", __func__, rc);
1155 	return rc;
1156 }
1157 
1158 /**
1159  * hnc_hotplug_hook() - installs a hotplug notifier
1160  * @hif_sc: hif_sc context
1161  * @install: !0 => register, 0 => deregister
1162  *
1163  * Because the callback relies on the data layout of
1164  * struct hif_softc & its napi_data member, this callback
1165  * registration requires that the hif_softc is passed in.
1166  *
1167  * Note that this is different from the cpu notifier used by
1168  * rx_thread (cds_schedule.c).
1169  * We may consider combining these notifiers in the future.
1170  *
1171  * Return: 0: success
1172  *        <0: error
1173  */
1174 static int hnc_hotplug_hook(struct hif_softc *hif_sc, int install)
1175 {
1176 	int rc = 0;
1177 
1178 	NAPI_DEBUG("-->%s(%d)", __func__, install);
1179 
1180 	if (install) {
1181 		hif_sc->napi_data.hnc_cpu_notifier.notifier_call
1182 			= hnc_cpu_notify_cb;
1183 		rc = register_hotcpu_notifier(
1184 			&hif_sc->napi_data.hnc_cpu_notifier);
1185 	} else {
1186 		unregister_hotcpu_notifier(
1187 			&hif_sc->napi_data.hnc_cpu_notifier);
1188 	}
1189 
1190 	NAPI_DEBUG("<--%s()[%d]", __func__, rc);
1191 	return rc;
1192 }
1193 
1194 /**
1195  * hnc_tput_hook() - installs a callback in the throughput detector
1196  * @install: !0 => register; 0 => unregister
1197  *
1198  * installs a callback to be called when wifi driver throughput (tx+rx)
1199  * crosses a threshold. Currently, we are using the same criteria as
1200  * TCP ack suppression (500 packets/100ms by default).
1201  *
1202  * Return: 0 : success
1203  *         <0: failure
1204  */
1205 
1206 static int hnc_tput_hook(int install)
1207 {
1208 	int rc = 0;
1209 
1210 	/*
1211 	 * Nothing to do until the bw_calculation framework accepts
1212 	 * registrations; the callback is currently hardcoded in
1213 	 * wlan_hdd_main.c::hdd_bus_bw_compute_cbk -> hdd_napi_throughput_policy(...)
1214 	 */
1215 	return rc;
1216 }
1217 
1218 /*
1219  * Implementation of hif_napi_cpu API
1220  */
1221 
1222 /**
1223  * hif_napi_cpu_init() - initialization of irq affinity block
1224  * @hif: pointer to opaque hif context
1225  *
1226  * called by hif_napi_create, when the first instance is created
1227  * - builds napi_rss_cpus table from cpu topology
1228  * - links cores of the same clusters together
1229  * - installs hot-plug notifier
1230  * - installs throughput trigger notifier (when such mechanism exists)
1231  *
1232  * Return: 0: OK
1233  *         <0: error code
1234  */
1235 int hif_napi_cpu_init(struct hif_opaque_softc *hif)
1236 {
1237 	int rc = 0;
1238 	int i;
1239 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1240 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1241 
1242 	NAPI_DEBUG("--> ");
1243 
1244 	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
1245 		NAPI_DEBUG("NAPI RSS table already initialized.\n");
1246 		rc = -EALREADY;
1247 		goto lab_rss_init;
1248 	}
1249 
1250 	/* build CPU topology table */
1251 	for_each_possible_cpu(i) {
1252 		cpus[i].state       = ((cpumask_test_cpu(i, cpu_online_mask)
1253 					? QCA_NAPI_CPU_UP
1254 					: QCA_NAPI_CPU_DOWN));
1255 		cpus[i].core_id     = topology_core_id(i);
1256 		cpus[i].cluster_id  = topology_physical_package_id(i);
1257 		cpumask_copy(&(cpus[i].core_mask),
1258 			     topology_core_cpumask(i));
1259 		cpumask_copy(&(cpus[i].thread_mask),
1260 			     topology_sibling_cpumask(i));
1261 		cpus[i].max_freq    = cpufreq_quick_get_max(i);
1262 		cpus[i].napis       = 0x0;
1263 		cpus[i].cluster_nxt = -1; /* invalid */
1264 	}
1265 
1266 	/* link clusters together */
1267 	rc = hnc_link_clusters(napid);
1268 	if (0 != rc)
1269 		goto lab_err_topology;
1270 
1271 	/* install hotplug notifier */
1272 	rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 1);
1273 	if (0 != rc)
1274 		goto lab_err_hotplug;
1275 
1276 	/* install throughput notifier */
1277 	rc = hnc_tput_hook(1);
1278 	if (0 == rc)
1279 		goto lab_rss_init;
1280 
1281 lab_err_hotplug:
1282 	hnc_tput_hook(0);
1283 	hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
1284 lab_err_topology:
1285 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1286 lab_rss_init:
1287 	NAPI_DEBUG("<-- [rc=%d]", rc);
1288 	return rc;
1289 }
1290 
1291 /**
1292  * hif_napi_cpu_deinit() - clean-up of irq affinity block
1293  *
1294  * called by hif_napi_destroy, when the last instance is removed
1295  * - uninstalls throughput and hotplug notifiers
1296  * - clears cpu topology table
1297  * Return: 0: OK
1298  */
1299 int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
1300 {
1301 	int rc = 0;
1302 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1303 
1304 	NAPI_DEBUG("-->%s(...)", __func__);
1305 
1306 	/* uninstall tput notifier */
1307 	rc = hnc_tput_hook(0);
1308 
1309 	/* uninstall hotplug notifier */
1310 	rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
1311 
1312 	/* clear the topology table */
1313 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1314 
1315 	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);
1316 
1317 	return rc;
1318 }
1319 
1320 /**
1321  * hncm_migrate_to() - migrates a NAPI to a CPU
1322  * @napid: pointer to NAPI block
1323  * @ce_id: CE_id of the NAPI instance
1324  * @didx : index in the CPU topology table for the CPU to migrate to
1325  *
1326  * Migrates NAPI (identified by the CE_id) to the destination core
1327  * Updates the napi_map of the destination entry
1328  *
1329  * Return:
1330  *  =0 : success
1331  *  <0 : error
1332  */
1333 static int hncm_migrate_to(struct qca_napi_data *napid,
1334 			   int                   napi_ce,
1335 			   int                   didx)
1336 {
1337 	int rc = 0;
1338 	cpumask_t cpumask;
1339 
1340 	NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);
1341 
1342 	cpumask.bits[0] = (1 << didx);
1343 
1344 	irq_modify_status(napid->napis[napi_ce].irq, IRQ_NO_BALANCING, 0);
1345 	rc = irq_set_affinity_hint(napid->napis[napi_ce].irq, &cpumask);
1346 
1347 	/* unmark the napis bitmap in the cpu table */
1348 	napid->napi_cpu[napid->napis[napi_ce].cpu].napis &= ~(0x01 << napi_ce);
1349 	/* mark the napis bitmap for the new designated cpu */
1350 	napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
1351 	napid->napis[napi_ce].cpu = didx;
1352 
1353 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1354 	return rc;
1355 }
1356 /**
1357  * hncm_dest_cpu() - finds a destination CPU for NAPI
1358  * @napid: pointer to NAPI block
1359  * @act  : RELOCATE | COLLAPSE | DISPERSE
1360  *
1361  * Finds the designated destination for the next IRQ.
1362  * RELOCATE: translated to either COLLAPSE or DISPERSE based
1363  *           on napid->napi_mode (throughput state)
1364  * COLLAPSE: All have the same destination: the first online CPU in lilcl
1365  * DISPERSE: One of the CPU in bigcl, which has the smallest number of
1366  *           NAPIs on it
1367  *
1368  * Return: >=0 : index in the cpu topology table
1369  *       : < 0 : error
1370  */
1371 static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
1372 {
1373 	int destidx = -1;
1374 	int head, i;
1375 
1376 	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
1377 	if (act == HNC_ACT_RELOCATE) {
1378 		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
1379 			act = HNC_ACT_COLLAPSE;
1380 		else
1381 			act = HNC_ACT_DISPERSE;
1382 		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
1383 			   __func__, act);
1384 	}
1385 	if (act == HNC_ACT_COLLAPSE) {
1386 		head = i = napid->lilcl_head;
1387 retry_collapse:
1388 		while (i >= 0) {
1389 			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
1390 				destidx = i;
1391 				break;
1392 			} else {
1393 				i = napid->napi_cpu[i].cluster_nxt;
1394 			}
1395 		}
1396 		if ((destidx < 0) && (head == napid->lilcl_head)) {
1397 			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
1398 				__func__);
1399 			head = i = napid->bigcl_head;
1400 			goto retry_collapse;
1401 		}
1402 	} else { /* HNC_ACT_DISPERSE */
1403 		int smallest = 99; /* larger than any possible hweight32() (max 32) */
1404 		int smallidx = -1;
1405 
1406 		head = i = napid->bigcl_head;
1407 retry_disperse:
1408 		while (i >= 0) {
1409 			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
1410 			    (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
1411 				smallest = hweight32(napid->napi_cpu[i].napis);
1412 				smallidx = i;
1413 			}
1414 			i = napid->napi_cpu[i].cluster_nxt;
1415 		}
1416 		destidx = smallidx;
1417 		if ((destidx < 0) && (head == napid->bigcl_head)) {
1418 			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
1419 				__func__);
1420 			head = i = napid->lilcl_head;
1421 			goto retry_disperse;
1422 		}
1423 	}
1424 	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
1425 	return destidx;
1426 }
1427 /**
1428  * hif_napi_cpu_migrate() - migrate IRQs away
1429  * @cpu   : -1: all CPUs; <n>: specific CPU
1430  * @action: COLLAPSE | DISPERSE
1431  *
1432  * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
1433  * cores. Eligible cores are:
1434  * act=COLLAPSE -> the first online core of the little cluster
1435  * act=DISPERSE -> separate cores of the big cluster, so that each core will
1436  *                 host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis)
1437  *
1438  * Note that this function is called with a spinlock acquired already.
1439  *
1440  * Return: =0: success
1441  *         <0: error
1442  */
1443 
1444 int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
1445 {
1446 	int      rc = 0;
1447 	struct qca_napi_cpu *cpup;
1448 	int      i, dind;
1449 	uint32_t napis;
1450 
1451 	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
1452 		   __func__, cpu, action);
1453 	/* the following is really: hif_napi_enabled() with less overhead */
1454 	if (napid->ce_map == 0) {
1455 		NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
1456 		goto hncm_return;
1457 	}
1458 
1459 	cpup = napid->napi_cpu;
1460 
1461 	switch (action) {
1462 	case HNC_ACT_RELOCATE:
1463 	case HNC_ACT_DISPERSE:
1464 	case HNC_ACT_COLLAPSE: {
1465 		/* first find the src napi set */
1466 		if (cpu == HNC_ANY_CPU)
1467 			napis = napid->ce_map;
1468 		else
1469 			napis = cpup[cpu].napis;
1470 		/* then clear the napi bitmap on each CPU */
1471 		for (i = 0; i < NR_CPUS; i++)
1472 			cpup[i].napis = 0;
1473 		/* then for each of the NAPIs to disperse: */
1474 		for (i = 0; i < CE_COUNT_MAX; i++)
1475 			if (napis & (1 << i)) {
1476 				/* find a destination CPU */
1477 				dind = hncm_dest_cpu(napid, action);
1478 				if (dind >= 0) {
1479 					NAPI_DEBUG("Migrating NAPI ce%d to %d",
1480 						   i, dind);
1481 					rc = hncm_migrate_to(napid, i, dind);
1482 				} else {
1483 					NAPI_DEBUG("No dest for NAPI ce%d", i);
1484 					hnc_dump_cpus(napid);
1485 					rc = -1;
1486 				}
1487 			}
1488 		break;
1489 	}
1490 	default: {
1491 		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
1492 		QDF_BUG(0);
1493 		break;
1494 	}
1495 	} /* switch action */
1496 
1497 hncm_return:
1498 	hnc_dump_cpus(napid);
1499 	return rc;
1500 }
1501 
1502 
1503 /**
1504  * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting
1505  * @napid: pointer to qca_napi_data structure
1506  * @bl_flag: blacklist flag to enable/disable blacklisting
1507  *
1508  * The function enables/disables blacklisting for all the copy engine
1509  * interrupts on which NAPI is enabled.
1510  *
1511  * Return: None
1512  */
1513 static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
1514 {
1515 	int i;
1516 	for (i = 0; i < CE_COUNT_MAX; i++) {
1517 		/* check if NAPI is enabled on the CE */
1518 		if (!(napid->ce_map & (0x01 << i)))
1519 			continue;
1520 
1521 		if (bl_flag == true)
1522 			irq_modify_status(napid->napis[i].irq,
1523 					  0, IRQ_NO_BALANCING);
1524 		else
1525 			irq_modify_status(napid->napis[i].irq,
1526 					  IRQ_NO_BALANCING, 0);
1527 		HIF_INFO("%s: bl_flag %d CE %d", __func__, bl_flag, i);
1528 	}
1529 }
1530 
1531 #ifdef CONFIG_SCHED_CORE_CTL
1532 /* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */
1533 static inline int hif_napi_core_ctl_set_boost(bool boost)
1534 {
1535 	return core_ctl_set_boost(boost);
1536 }
1537 #else
1538 static inline int hif_napi_core_ctl_set_boost(bool boost)
1539 {
1540 	return 0;
1541 }
1542 #endif
1543 /**
1544  * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
1545  * @napid: pointer to qca_napi_data structure
1546  * @op: blacklist operation to perform
1547  *
1548  * The function enables/disables/queries blacklisting for all CE RX
1549  * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables
1550  * core_ctl_set_boost.
1551  * Once blacklisting is enabled, the interrupts will not be managed by the IRQ
1552  * balancer.
1553  *
1554  * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST are not enabled
1555  *         for BLACKLIST_QUERY op - blacklist refcount
1556  *         for BLACKLIST_ON op    - return value from core_ctl_set_boost API
1557  *         for BLACKLIST_OFF op   - return value from core_ctl_set_boost API
1558  */
1559 int hif_napi_cpu_blacklist(struct qca_napi_data *napid, enum qca_blacklist_op op)
1560 {
1561 	int rc = 0;
1562 	static int ref_count; /* = 0 by the compiler */
1563 	uint8_t flags = napid->flags;
1564 	bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
1565 	bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;
1566 
1567 	NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);
1568 
1569 	if (!(bl_en && ccb_en)) {
1570 		rc = -EINVAL;
1571 		goto out;
1572 	}
1573 
1574 	switch (op) {
1575 	case BLACKLIST_QUERY:
1576 		rc = ref_count;
1577 		break;
1578 	case BLACKLIST_ON:
1579 		ref_count++;
1580 		rc = 0;
1581 		if (ref_count == 1) {
1582 			rc = hif_napi_core_ctl_set_boost(true);
1583 			NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
1584 				rc, ref_count);
1585 			hif_napi_bl_irq(napid, true);
1586 		}
1587 		break;
1588 	case BLACKLIST_OFF:
1589 		if (ref_count)
1590 			ref_count--;
1591 		rc = 0;
1592 		if (ref_count == 0) {
1593 			rc = hif_napi_core_ctl_set_boost(false);
1594 			NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
1595 				   rc, ref_count);
1596 			hif_napi_bl_irq(napid, false);
1597 		}
1598 		break;
1599 	default:
1600 		NAPI_DEBUG("Invalid blacklist op: %d", op);
1601 		rc = -EINVAL;
1602 	} /* switch */
1603 out:
1604 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1605 	return rc;
1606 }
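
/*
 * Illustrative sketch (not part of the driver): BLACKLIST_ON/OFF are
 * reference counted, so nested users compose safely; only the first ON and
 * the last OFF actually touch irq_modify_status()/core_ctl_set_boost().
 *
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_ON);	// 0->1: applied
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_ON);	// 1->2: no-op
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);	// 2->1: no-op
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);	// 1->0: removed
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_QUERY);	// returns 0
 */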
1607 
1608 /**
1609  * hif_napi_serialize() - [de-]serialize NAPI operations
1610  * @hif:   context
1611  * @is_on: 1: serialize, 0: deserialize
1612  *
1613  * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the
1614  * following steps (see hif_napi_event for code):
1615  * - put irqs of all NAPI instances on the same CPU
1616  * - only for the first serialize call: blacklist
1617  *
1618  * hif_napi_serialize(hif, 0):
1619  * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
1620  * - at the end of the timer, check the current throughput state and
1621  *   implement it.
1622  */
1623 static unsigned long napi_serialize_reqs;
1624 int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
1625 {
1626 	int rc = -EINVAL;
1627 
1628 	if (hif != NULL)
1629 		switch (is_on) {
1630 		case 0: { /* de-serialize */
1631 			rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
1632 					    (void *) 0);
1633 			napi_serialize_reqs = 0;
1634 			break;
1635 		} /* end de-serialize */
1636 		case 1: { /* serialize */
1637 			rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
1638 					    (void *)napi_serialize_reqs++);
1639 			break;
1640 		} /* end serialize */
1641 		default:
1642 			break; /* no-op */
1643 		} /* switch */
1644 	return rc;
1645 }
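
/*
 * Illustrative sketch (not part of the driver): a roaming/peer-management
 * client would bracket its latency-sensitive section as below; serialize may
 * be called multiple times and only the first call triggers blacklisting
 * (see the NAPI_EVT_USR_SERIAL handling in hif_napi_event()).
 *
 *	hif_napi_serialize(hif_ctx, 1);	// collapse NAPIs onto one core
 *	... latency-sensitive work ...
 *	hif_napi_serialize(hif_ctx, 0);	// back to throughput-driven policy
 */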
1646 
1647 #endif /* ifdef HELIUMPLUS */
1648