1 /*
2  * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: hif_napi.c
21  *
22  * HIF NAPI interface implementation
23  */
24 
25 #include <linux/string.h> /* memset */
26 
27 /* Linux headers */
28 #include <linux/cpumask.h>
29 #include <linux/cpufreq.h>
30 #include <linux/cpu.h>
31 #include <linux/topology.h>
32 #include <linux/interrupt.h>
33 #include <linux/irq.h>
34 #ifdef CONFIG_SCHED_CORE_CTL
35 #include <linux/sched/core_ctl.h>
36 #endif
37 #include <pld_common.h>
38 #include <linux/pm.h>
39 
40 /* Driver headers */
41 #include <hif_napi.h>
42 #include <hif_debug.h>
43 #include <hif_io32.h>
44 #include <ce_api.h>
45 #include <ce_internal.h>
46 #include <hif_irq_affinity.h>
47 #include "qdf_cpuhp.h"
48 #include "qdf_module.h"
49 
50 enum napi_decision_vector {
51 	HIF_NAPI_NOEVENT = 0,
52 	HIF_NAPI_INITED  = 1,
53 	HIF_NAPI_CONF_UP = 2
54 };
55 #define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
56 
57 #ifdef RECEIVE_OFFLOAD
58 /**
59  * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI
60  * @napi: Rx_thread NAPI
61  * @budget: NAPI budget
62  *
63  * Return: 0, as this NAPI is never scheduled and so should never be polled.
64  */
65 static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget)
66 {
67 	HIF_ERROR("This napi_poll should not be polled as we don't schedule it");
68 	QDF_ASSERT(0);
69 	return 0;
70 }
71 
72 /**
73  * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI
74  * @napii: Handle to napi_info holding rx_thread napi
75  *
76  * Return: None
77  */
78 static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
79 {
80 	init_dummy_netdev(&napii->rx_thread_netdev);
81 	netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi,
82 		       hif_rxthread_napi_poll, 64);
83 	napi_enable(&napii->rx_thread_napi);
84 }
85 #else /* RECEIVE_OFFLOAD */
86 static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
87 {
88 }
89 #endif
90 
91 /**
92  * hif_napi_create() - creates NAPI structures for the relevant (rx) CEs
93  * @hif_ctx: pointer to hif context
94  * @poll   : poll function to be used for the NAPI instances
95  * @budget : budget to be registered with the NAPI instances
96  * @scale  : scale factor on the weight (to scale the budget to 1000)
97  * @flags  : feature flags
99  *
100  * Description:
101  *    Creates NAPI instances. This function is called
102  *    unconditionally during initialization. It creates
103  *    napi structures through the proper HTC/HIF calls.
104  *    The structures are disabled on creation.
105  *    Note that for each NAPI instance a separate dummy netdev is used
106  *
107  * Return:
108  * < 0: error
109  * = 0: <should never happen>
110  * > 0: id of the created object (for multi-NAPI, number of objects created)
111  */
112 int hif_napi_create(struct hif_opaque_softc   *hif_ctx,
113 		    int (*poll)(struct napi_struct *, int),
114 		    int                budget,
115 		    int                scale,
116 		    uint8_t            flags)
117 {
118 	int i;
119 	struct qca_napi_data *napid;
120 	struct qca_napi_info *napii;
121 	struct CE_state      *ce_state;
122 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
123 	int    rc = 0;
124 
125 	NAPI_DEBUG("-->(budget=%d, scale=%d)",
126 		   budget, scale);
127 	NAPI_DEBUG("hif->napi_data.state = 0x%08x",
128 		   hif->napi_data.state);
129 	NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
130 		   hif->napi_data.ce_map);
131 
132 	napid = &(hif->napi_data);
133 	if (0 == (napid->state &  HIF_NAPI_INITED)) {
134 		memset(napid, 0, sizeof(struct qca_napi_data));
135 		qdf_spinlock_create(&(napid->lock));
136 
137 		napid->state |= HIF_NAPI_INITED;
138 		napid->flags = flags;
139 
140 		rc = hif_napi_cpu_init(hif_ctx);
141 		if (rc != 0 && rc != -EALREADY) {
142 			HIF_ERROR("NAPI initialization failed: %d", rc);
143 			rc = napid->ce_map;
144 			goto hnc_err;
145 		} else
146 			rc = 0;
147 
148 		HIF_DBG("%s: NAPI structures initialized, rc=%d",
149 			 __func__, rc);
150 	}
151 	for (i = 0; i < hif->ce_count; i++) {
152 		ce_state = hif->ce_id_to_state[i];
153 		NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
154 			   i, ce_state->htt_rx_data,
155 			   ce_state->htt_tx_data);
156 		if (ce_srng_based(hif))
157 			continue;
158 
159 		if (!ce_state->htt_rx_data)
160 			continue;
161 
162 		/* Now this is a CE where we need NAPI on */
163 		NAPI_DEBUG("Creating NAPI on pipe %d", i);
164 		napii = qdf_mem_malloc(sizeof(*napii));
165 		napid->napis[i] = napii;
166 		if (!napii) {
167 			NAPI_DEBUG("NAPI alloc failure %d", i);
168 			rc = -ENOMEM;
169 			goto napii_free;
170 		}
171 	}
172 
173 	for (i = 0; i < hif->ce_count; i++) {
174 		napii = napid->napis[i];
175 		if (!napii)
176 			continue;
177 
178 		NAPI_DEBUG("initializing NAPI for pipe %d", i);
179 		memset(napii, 0, sizeof(struct qca_napi_info));
180 		napii->scale = scale;
181 		napii->id    = NAPI_PIPE2ID(i);
182 		napii->hif_ctx = hif_ctx;
183 		napii->irq   = pld_get_irq(hif->qdf_dev->dev, i);
184 
185 		if (napii->irq < 0)
186 			HIF_WARN("%s: bad IRQ value for CE %d: %d",
187 				 __func__, i, napii->irq);
188 
189 		init_dummy_netdev(&(napii->netdev));
190 
191 		NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)",
192 			   &(napii->napi), &(napii->netdev), poll, budget);
193 		netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget);
194 
195 		NAPI_DEBUG("after napi_add");
196 		NAPI_DEBUG("napi=0x%pK, netdev=0x%pK",
197 			   &(napii->napi), &(napii->netdev));
198 		NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK",
199 			   napii->napi.dev_list.prev,
200 			   napii->napi.dev_list.next);
201 		NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK",
202 			   napii->netdev.napi_list.prev,
203 			   napii->netdev.napi_list.next);
204 
205 		hif_init_rx_thread_napi(napii);
206 		napii->lro_ctx = qdf_lro_init();
207 		NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n",
208 				i, napii->id, napii->lro_ctx);
209 
210 		/* It is OK to change the state variable below without
211 		 * protection as there should be no-one around yet
212 		 */
213 		napid->ce_map |= (0x01 << i);
214 		HIF_DBG("%s: NAPI id %d created for pipe %d", __func__,
215 			 napii->id, i);
216 	}
217 
218 	/* no ces registered with the napi */
219 	if (!ce_srng_based(hif) && napid->ce_map == 0) {
220 		HIF_WARN("%s: no napis created for copy engines", __func__);
221 		rc = -EFAULT;
222 		goto napii_free;
223 	}
224 
225 	NAPI_DEBUG("napi map = %x", napid->ce_map);
226 	NAPI_DEBUG("NAPI ids created for all applicable pipes");
227 	return napid->ce_map;
228 
229 napii_free:
230 	for (i = 0; i < hif->ce_count; i++) {
231 		napii = napid->napis[i];
232 		napid->napis[i] = NULL;
233 		if (napii)
234 			qdf_mem_free(napii);
235 	}
236 
237 hnc_err:
238 	NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
239 	return rc;
240 }
241 qdf_export_symbol(hif_napi_create);
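/*
 * Illustrative sketch (not part of the driver): how a HIF client might invoke
 * hif_napi_create() and interpret its return value.  The poll callback name
 * (my_napi_poll) and the budget/scale/flag values are assumptions made for
 * this example only; real callers supply their own configuration.
 *
 *	int ce_map = hif_napi_create(hif_ctx, my_napi_poll,
 *				     QCA_NAPI_BUDGET, 1,
 *				     QCA_NAPI_FEATURE_CPU_CORRECTION);
 *	if (ce_map < 0)
 *		return ce_map;	(creation/initialization error)
 *
 * A positive return is the bitmap of CE pipes that now own a NAPI instance
 * (napid->ce_map), which the caller can keep for later tear-down.
 */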
242 
243 #ifdef RECEIVE_OFFLOAD
244 void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
245 					 void (offld_flush_handler)(void *))
246 {
247 	int i;
248 	struct CE_state *ce_state;
249 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
250 	struct qca_napi_data *napid;
251 	struct qca_napi_info *napii;
252 
253 	if (!scn) {
254 		HIF_ERROR("%s: hif_state NULL!", __func__);
255 		QDF_ASSERT(0);
256 		return;
257 	}
258 
259 	napid = hif_napi_get_all(hif_hdl);
260 	for (i = 0; i < scn->ce_count; i++) {
261 		ce_state = scn->ce_id_to_state[i];
262 		if (ce_state && (ce_state->htt_rx_data)) {
263 			napii = napid->napis[i];
264 			napii->offld_flush_cb = offld_flush_handler;
265 			HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK\n",
266 				i, napii->id, napii->offld_flush_cb);
267 		}
268 	}
269 }
270 
271 void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
272 {
273 	int i;
274 	struct CE_state *ce_state;
275 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
276 	struct qca_napi_data *napid;
277 	struct qca_napi_info *napii;
278 
279 	if (!scn) {
280 		HIF_ERROR("%s: hif_state NULL!", __func__);
281 		QDF_ASSERT(0);
282 		return;
283 	}
284 
285 	napid = hif_napi_get_all(hif_hdl);
286 	for (i = 0; i < scn->ce_count; i++) {
287 		ce_state = scn->ce_id_to_state[i];
288 		if (ce_state && (ce_state->htt_rx_data)) {
289 			napii = napid->napis[i];
290 			HIF_DBG("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK\n",
291 				i, napii->id, napii->offld_flush_cb);
292 			/* Not required */
293 			napii->offld_flush_cb = NULL;
294 		}
295 	}
296 }
297 #endif /* RECEIVE_OFFLOAD */
298 
299 /**
300  *
301  * hif_napi_destroy() - destroys the NAPI structures for a given instance
302  * @hif_ctx: pointer to hif context
303  * @id     : the NAPI id of the instance to be destroyed
304  * @force  : if set, will destroy even if entry is active (de-activates)
305  *
306  * Description:
307  *    Destroy a given NAPI instance. This function is called
308  *    unconditionally during cleanup.
309  *    Refuses to destroy an entry if it is still enabled (unless force=1)
310  *    Marks the whole napi_data invalid if all instances are destroyed.
311  *
312  * Return:
313  * -EINVAL: specific entry has not been created
314  * -EPERM : specific entry is still active
315  * < 0    : error
316  * = 0    : success
317  */
318 int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
319 		     uint8_t          id,
320 		     int              force)
321 {
322 	uint8_t ce = NAPI_ID2PIPE(id);
323 	int rc = 0;
324 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
325 
326 	NAPI_DEBUG("-->(id=%d, force=%d)", id, force);
327 
328 	if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
329 		HIF_ERROR("%s: NAPI not initialized or entry %d not created",
330 			  __func__, id);
331 		rc = -EINVAL;
332 	} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
333 		HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
334 			  __func__, id, ce);
335 		if (hif->napi_data.napis[ce])
336 			HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)",
337 				  __func__, id, ce);
338 		rc = -EINVAL;
339 	} else {
340 		struct qca_napi_data *napid;
341 		struct qca_napi_info *napii;
342 
343 		napid = &(hif->napi_data);
344 		napii = napid->napis[ce];
345 		if (!napii) {
346 			if (napid->ce_map & (0x01 << ce))
347 				HIF_ERROR("%s: napii & ce_map out of sync(ce %d)",
348 					  __func__, ce);
349 			return -EINVAL;
350 		}
351 
352 
353 		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
354 			if (force) {
355 				napi_disable(&(napii->napi));
356 				HIF_DBG("%s: NAPI entry %d force disabled",
357 					 __func__, id);
358 				NAPI_DEBUG("NAPI %d force disabled", id);
359 			} else {
360 				HIF_ERROR("%s: Cannot destroy active NAPI %d",
361 					  __func__, id);
362 				rc = -EPERM;
363 			}
364 		}
365 		if (0 == rc) {
366 			NAPI_DEBUG("before napi_del");
367 			NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK",
368 				  napii->napi.dev_list.prev,
369 				  napii->napi.dev_list.next);
370 			NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK",
371 				   napii->netdev.napi_list.prev,
372 				   napii->netdev.napi_list.next);
373 
374 			qdf_lro_deinit(napii->lro_ctx);
375 			netif_napi_del(&(napii->napi));
376 
377 			napid->ce_map &= ~(0x01 << ce);
378 			napid->napis[ce] = NULL;
379 			napii->scale  = 0;
380 			qdf_mem_free(napii);
381 			HIF_DBG("%s: NAPI %d destroyed\n", __func__, id);
382 
383 			/* if there are no active instances and
384 			 * if they are all destroyed,
385 			 * set the whole structure to uninitialized state
386 			 */
387 			if (napid->ce_map == 0) {
388 				rc = hif_napi_cpu_deinit(hif_ctx);
389 				/* caller is tolerant to receiving !=0 rc */
390 
391 				qdf_spinlock_destroy(&(napid->lock));
392 				memset(napid,
393 				       0, sizeof(struct qca_napi_data));
394 				HIF_DBG("%s: no NAPI instances. Zapped.",
395 					 __func__);
396 			}
397 		}
398 	}
399 
400 	return rc;
401 }
402 qdf_export_symbol(hif_napi_destroy);
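/*
 * Illustrative sketch (assumption, not driver code): tearing down every NAPI
 * instance reported in the ce_map returned by hif_napi_create().  It relies
 * only on NAPI_PIPE2ID(), the same pipe-to-id mapping used during creation.
 *
 *	for (i = 0; i < CE_COUNT_MAX; i++)
 *		if (ce_map & (0x01 << i))
 *			hif_napi_destroy(hif_ctx, NAPI_PIPE2ID(i), force);
 *
 * With force=0 the call returns -EPERM for instances that are still active;
 * with force=1 they are napi_disable()d first and then destroyed.
 */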
403 
404 #ifdef FEATURE_LRO
405 void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
406 {
407 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
408 	struct qca_napi_data *napid;
409 	struct qca_napi_info *napii;
410 
411 	napid = &(scn->napi_data);
412 	napii = napid->napis[NAPI_ID2PIPE(napi_id)];
413 
414 	if (napii)
415 		return napii->lro_ctx;
416 	return 0;
417 }
418 #endif
419 
420 /**
421  *
422  * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
423  * @hif_ctx: pointer to hif context
424  *
425  * Description:
426  *    Returns the address of the whole structure
427  *
428  * Return:
429  *  <addr>: address of the whole HIF NAPI structure
430  */
431 inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
432 {
433 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
434 
435 	return &(hif->napi_data);
436 }
437 
438 struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid)
439 {
440 	int id = NAPI_ID2PIPE(napi_id);
441 
442 	return napid->napis[id];
443 }
444 
445 /**
446  *
447  * hif_napi_event() - reacts to events that impact NAPI
448  * @hif_ctx: pointer to hif context
449  * @event  : event that has been detected
450  * @data: more data regarding the event
451  *
452  * Description:
453  *   This function handles two types of events:
454  *   1- Events that change the state of NAPI (enabled/disabled):
455  *      {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
456  *      The state is retrievable by "hdd_napi_enabled(-1)"
457  *    - NAPI will be on if either INI file is on and it has not been disabled
458  *                                by a subsequent vendor CMD,
459  *                         or     it has been enabled by a vendor CMD.
460  *   2- Events that change the CPU affinity of a NAPI instance/IRQ:
461  *      {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
462  *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
463  *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
464  *    - In LO tput mode, NAPI will yield control of its interrupts to the system
465  *      management functions. However in HI throughput mode, NAPI will actively
466  *      manage its interrupts/instances (by trying to disperse them out to
467  *      separate performance cores).
468  *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
469  *
470  *    + In some cases (roaming peer management is the only case so far), a
471  *      client can trigger a "SERIALIZE" event. Basically, this means that the
472  *      user is asking NAPI to go into a truly single execution context state.
473  *      So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted,
474  *      (if called for the first time) and then moves all IRQs (for NAPI
475  *      instances) to be collapsed to a single core. If called multiple times,
476  *      it will just re-collapse the CPUs: the blacklist-on() API is
477  *      reference-counted and has already been called, so it is not re-invoked.
478  *
479  *      Such a user should issue the "DESERIALIZE" (NORMAL) event to set NAPI back
480  *      to its "normal" operation. Optionally, they can give a timeout value (in
481  *      multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this
482  *      case, NAPI will just set the current throughput state to uninitialized
483  *      and set the delay period. Once the policy handler is called, it will
484  *      skip applying the policy for delay-period invocations, and apply it afterwards.
485  *
486  * Return:
487  *  < 0: some error
488  *  = 0: event handled successfully
489  */
490 int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
491 		   void *data)
492 {
493 	int      rc = 0;
494 	uint32_t prev_state;
495 	int      i;
496 	bool state_changed;
497 	struct napi_struct *napi;
498 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
499 	struct qca_napi_data *napid = &(hif->napi_data);
500 	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
501 	enum {
502 		BLACKLIST_NOT_PENDING,
503 		BLACKLIST_ON_PENDING,
504 		BLACKLIST_OFF_PENDING
505 	     } blacklist_pending = BLACKLIST_NOT_PENDING;
506 
507 	NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);
508 
509 	if (ce_srng_based(hif))
510 		return hif_exec_event(hif_ctx, event, data);
511 
512 	if ((napid->state & HIF_NAPI_INITED) == 0) {
513 		NAPI_DEBUG("%s: got event when NAPI not initialized",
514 			   __func__);
515 		return -EINVAL;
516 	}
517 	qdf_spin_lock_bh(&(napid->lock));
518 	prev_state = napid->state;
519 	switch (event) {
520 	case NAPI_EVT_INI_FILE:
521 	case NAPI_EVT_CMD_STATE:
522 	case NAPI_EVT_INT_STATE: {
523 		int on = (data != ((void *)0));
524 
525 		HIF_DBG("%s: received event: STATE_CMD %d; v = %d (state=0x%0x)",
526 			 __func__, event,
527 			 on, prev_state);
528 		if (on)
529 			if (prev_state & HIF_NAPI_CONF_UP) {
530 				HIF_DBG("%s: duplicate NAPI conf ON msg",
531 					 __func__);
532 			} else {
533 				HIF_DBG("%s: setting state to ON",
534 					 __func__);
535 				napid->state |= HIF_NAPI_CONF_UP;
536 			}
537 		else /* off request */
538 			if (prev_state & HIF_NAPI_CONF_UP) {
539 				HIF_DBG("%s: setting state to OFF",
540 				 __func__);
541 				napid->state &= ~HIF_NAPI_CONF_UP;
542 			} else {
543 				HIF_DBG("%s: duplicate NAPI conf OFF msg",
544 					 __func__);
545 			}
546 		break;
547 	}
548 	/* case NAPI_INIT_FILE/CMD_STATE */
549 
550 	case NAPI_EVT_CPU_STATE: {
551 		int cpu = ((unsigned long int)data >> 16);
552 		int val = ((unsigned long int)data & 0x0ff);
553 
554 		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
555 			   __func__, cpu, val);
556 
557 		/* state has already been set by hnc_cpu_notify_cb */
558 		if ((val == QCA_NAPI_CPU_DOWN) &&
559 		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
560 		    (napid->napi_cpu[cpu].napis != 0)) {
561 			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
562 				   __func__, cpu);
563 			rc = hif_napi_cpu_migrate(napid,
564 						  cpu,
565 						  HNC_ACT_RELOCATE);
566 			napid->napi_cpu[cpu].napis = 0;
567 		}
568 		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
569 		break;
570 	}
571 
572 	case NAPI_EVT_TPUT_STATE: {
573 		tput_mode = (enum qca_napi_tput_state)data;
574 		if (tput_mode == QCA_NAPI_TPUT_LO) {
575 			/* from TPUT_HI -> TPUT_LO */
576 			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
577 				   __func__);
578 			blacklist_pending = BLACKLIST_OFF_PENDING;
579 			/*
580 			 * Ideally we should "collapse" interrupts here, since
581 			 * we are "dispersing" interrupts in the "else" case.
582 			 * This allows the possibility that our interrupts may
583 			 * still be on the perf cluster the next time we enter
584 			 * high tput mode. However, the irq_balancer is free
585 			 * to move our interrupts to power cluster once
586 			 * blacklisting has been turned off in the "else" case.
587 			 */
588 		} else {
589 			/* from TPUT_LO -> TPUT_HI */
590 			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
591 				   __func__);
592 			rc = hif_napi_cpu_migrate(napid,
593 						  HNC_ANY_CPU,
594 						  HNC_ACT_DISPERSE);
595 
596 			blacklist_pending = BLACKLIST_ON_PENDING;
597 		}
598 		napid->napi_mode = tput_mode;
599 		break;
600 	}
601 
602 	case NAPI_EVT_USR_SERIAL: {
603 		unsigned long users = (unsigned long)data;
604 
605 		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
606 			   __func__, users);
607 
608 		rc = hif_napi_cpu_migrate(napid,
609 					  HNC_ANY_CPU,
610 					  HNC_ACT_COLLAPSE);
611 		if ((users == 0) && (rc == 0))
612 			blacklist_pending = BLACKLIST_ON_PENDING;
613 		break;
614 	}
615 	case NAPI_EVT_USR_NORMAL: {
616 		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
617 		/*
618 		 * Deserialization timeout is handled at hdd layer;
619 		 * just mark current mode to uninitialized to ensure
620 		 * it will be set when the delay is over
621 		 */
622 		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
623 		break;
624 	}
625 	default: {
626 		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
627 			  __func__, event, (unsigned long) data);
628 		break;
629 	} /* default */
630 	} /* switch */
631 
632 
633 	switch (blacklist_pending) {
634 	case BLACKLIST_ON_PENDING:
635 		/* assume the control of WLAN IRQs */
636 		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
637 		break;
638 	case BLACKLIST_OFF_PENDING:
639 		/* yield the control of WLAN IRQs */
640 		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
641 		break;
642 	default: /* nothing to do */
643 		break;
644 	} /* switch blacklist_pending */
645 
646 	/* we want to perform the comparison under the lock:
647 	 * there is a possibility of hif_napi_event() getting called
648 	 * from two different contexts (driver unload and cpu hotplug
649 	 * notification); napid->state being changed in the driver
650 	 * unload context can then race with the cpu hotplug context.
651 	 * Therefore, perform the napid->state comparison before
652 	 * releasing the lock.
653 	 */
654 	state_changed = (prev_state != napid->state);
655 	qdf_spin_unlock_bh(&(napid->lock));
656 
657 	if (state_changed) {
658 		if (napid->state == ENABLE_NAPI_MASK) {
659 			rc = 1;
660 			for (i = 0; i < CE_COUNT_MAX; i++) {
661 				struct qca_napi_info *napii = napid->napis[i];
662 				if (napii) {
663 					napi = &(napii->napi);
664 					NAPI_DEBUG("%s: enabling NAPI %d",
665 						   __func__, i);
666 					napi_enable(napi);
667 				}
668 			}
669 		} else {
670 			rc = 0;
671 			for (i = 0; i < CE_COUNT_MAX; i++) {
672 				struct qca_napi_info *napii = napid->napis[i];
673 				if (napii) {
674 					napi = &(napii->napi);
675 					NAPI_DEBUG("%s: disabling NAPI %d",
676 						   __func__, i);
677 					napi_disable(napi);
678 					/* in case it is affined, remove it */
679 					irq_set_affinity_hint(napii->irq, NULL);
680 				}
681 			}
682 		}
683 	} else {
684 		HIF_DBG("%s: no change in hif napi state (still %d)",
685 			 __func__, prev_state);
686 	}
687 
688 	NAPI_DEBUG("<--[rc=%d]", rc);
689 	return rc;
690 }
691 qdf_export_symbol(hif_napi_event);
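/*
 * Illustrative sketch of the state-changing events (the caller is assumed to
 * be the HDD layer reacting to the INI setting and to a vendor command):
 *
 *	hif_napi_event(hif_ctx, NAPI_EVT_INI_FILE, (void *)1);	 set CONF_UP
 *	hif_napi_event(hif_ctx, NAPI_EVT_CMD_STATE, (void *)0); clear CONF_UP
 *
 * napid->state equals ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
 * only while an "on" request is in force, so the first call above ends up
 * napi_enable()-ing all instances and the second napi_disable()s them again.
 */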
692 
693 /**
694  * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
695  * @hif_ctx: hif context
696  * @ce : CE instance (or -1, to check if any CEs are enabled)
697  *
698  * Return: bool
699  */
700 int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
701 {
702 	int rc;
703 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
704 
705 	if (-1 == ce)
706 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
707 	else
708 		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
709 		      (hif->napi_data.ce_map & (0x01 << ce)));
710 	return rc;
711 }
712 qdf_export_symbol(hif_napi_enabled);
713 
714 /**
715  * hif_napi_created() - checks whether NAPI is created for given ce or not
716  * @hif_ctx: hif context
717  * @ce : CE instance
718  *
719  * Return: bool
720  */
721 bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce)
722 {
723 	int rc;
724 	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
725 
726 	rc = (hif->napi_data.ce_map & (0x01 << ce));
727 
728 	return !!rc;
729 }
730 qdf_export_symbol(hif_napi_created);
731 
732 /**
733  * hif_napi_enable_irq() - enables bus interrupts after napi_complete
734  *
735  * @hif: hif context
736  * @id : id of NAPI instance calling this (used to determine the CE)
737  *
738  * Return: void
739  */
740 inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
741 {
742 	struct hif_softc *scn = HIF_GET_SOFTC(hif);
743 
744 	hif_irq_enable(scn, NAPI_ID2PIPE(id));
745 }
746 
747 
748 /**
749  * hif_napi_schedule() - schedules napi, updates stats
750  * @hif_ctx: hif context
751  * @ce_id: index of napi instance
752  *
753  * Return: false if napi is not enabled or is already scheduled; true otherwise
754  */
755 bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
756 {
757 	int cpu = smp_processor_id();
758 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
759 	struct qca_napi_info *napii;
760 
761 	napii = scn->napi_data.napis[ce_id];
762 	if (qdf_unlikely(!napii)) {
763 		HIF_ERROR("%s, scheduling unallocated napi (ce:%d)",
764 			      __func__, ce_id);
765 		qdf_atomic_dec(&scn->active_tasklet_cnt);
766 		return false;
767 	}
768 
769 	if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) {
770 		NAPI_DEBUG("napi scheduled, return");
771 		qdf_atomic_dec(&scn->active_tasklet_cnt);
772 		return false;
773 	}
774 
775 	hif_record_ce_desc_event(scn,  ce_id, NAPI_SCHEDULE,
776 				 NULL, NULL, 0, 0);
777 	napii->stats[cpu].napi_schedules++;
778 	NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
779 	napi_schedule(&(napii->napi));
780 
781 	return true;
782 }
783 qdf_export_symbol(hif_napi_schedule);
784 
785 /**
786  * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
787  * @napi_info: pointer to qca_napi_info for the napi instance
788  *
789  * Return: true  => interrupt already on correct cpu, no correction needed
790  *         false => interrupt on wrong cpu, correction done for cpu affinity
791  *                   of the interrupt
792  */
793 static inline
794 bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
795 {
796 	bool right_cpu = true;
797 	int rc = 0;
798 	cpumask_t cpumask;
799 	int cpu;
800 	struct qca_napi_data *napid;
801 
802 	napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));
803 
804 	if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {
805 
806 		cpu = qdf_get_cpu();
807 		if (unlikely((hif_napi_cpu_blacklist(napid,
808 						BLACKLIST_QUERY) > 0) &&
809 						(cpu != napi_info->cpu))) {
810 			right_cpu = false;
811 
812 			NAPI_DEBUG("interrupt on wrong CPU, correcting");
813 			cpumask.bits[0] = (0x01 << napi_info->cpu);
814 
815 			irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0);
816 			rc = irq_set_affinity_hint(napi_info->irq,
817 						   &cpumask);
818 			irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING);
819 
820 			if (rc)
821 				HIF_ERROR("error setting irq affinity hint: %d",
822 					  rc);
823 			else
824 				napi_info->stats[cpu].cpu_corrected++;
825 		}
826 	}
827 	return right_cpu;
828 }
829 
830 #ifdef RECEIVE_OFFLOAD
831 /**
832  * hif_napi_offld_flush_cb() - Call upper layer flush callback
833  * @napi_info: Handle to hif_napi_info
834  *
835  * Return: None
836  */
837 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
838 {
839 	if (napi_info->offld_flush_cb)
840 		napi_info->offld_flush_cb(napi_info);
841 }
842 #else
843 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
844 {
845 }
846 #endif
847 
848 /**
849  * hif_napi_poll() - NAPI poll routine
850  * @napi  : pointer to NAPI struct as kernel holds it
851  * @budget: the NAPI budget for this poll pass
852  *
853  * This is the body of the poll function.
854  * The poll function is called by the kernel. So, there is a wrapper
855  * function in HDD, which in turn calls this function.
856  * Two main reasons why the whole thing is not implemented in HDD:
857  * a) references to things like ce_service that HDD is not aware of
858  * b) proximity to the implementation of ce_tasklet, which the body
859  *    of this function should be very close to.
860  *
861  * NOTE TO THE MAINTAINER:
862  *  Consider this function and ce_tasklet very tightly coupled pairs.
863  *  Any changes to ce_tasklet or this function may likely need to be
864  *  reflected in the counterpart.
865  *
866  * Returns:
867  *  int: the amount of work done in this poll (<= budget)
868  */
869 int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
870 		  struct napi_struct *napi,
871 		  int budget)
872 {
873 	int    rc = 0; /* default: no work done, also takes care of error */
874 	int    normalized = 0;
875 	int    bucket;
876 	int    cpu = smp_processor_id();
877 	bool poll_on_right_cpu;
878 	struct hif_softc      *hif = HIF_GET_SOFTC(hif_ctx);
879 	struct qca_napi_info *napi_info;
880 	struct CE_state *ce_state = NULL;
881 
882 	if (unlikely(NULL == hif)) {
883 		HIF_ERROR("%s: hif context is NULL", __func__);
884 		QDF_ASSERT(0);
885 		goto out;
886 	}
887 
888 	napi_info = (struct qca_napi_info *)
889 		container_of(napi, struct qca_napi_info, napi);
890 
891 	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
892 		   __func__, napi_info->id, napi_info->irq, budget);
893 
894 	napi_info->stats[cpu].napi_polls++;
895 
896 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
897 				 NAPI_POLL_ENTER, NULL, NULL, cpu, 0);
898 
899 	rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
900 	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
901 		    __func__, rc);
902 
903 	hif_napi_offld_flush_cb(napi_info);
904 
905 	/* do not return 0, if there was some work done,
906 	 * even if it is below the scale
907 	 */
908 	if (rc) {
909 		napi_info->stats[cpu].napi_workdone += rc;
910 		normalized = (rc / napi_info->scale);
911 		if (normalized == 0)
912 			normalized++;
913 		bucket = normalized / (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
914 		if (bucket >= QCA_NAPI_NUM_BUCKETS) {
915 			bucket = QCA_NAPI_NUM_BUCKETS - 1;
916 			HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)",
917 				bucket, QCA_NAPI_NUM_BUCKETS);
918 		}
919 		napi_info->stats[cpu].napi_budget_uses[bucket]++;
920 	} else {
921 		/* if ce_per_engine_service() reports 0, the poll should terminate */
922 		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
923 			   __func__, __LINE__);
924 	}
925 
926 	ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
927 
928 	/*
929 	 * Not using the API hif_napi_correct_cpu directly in the if statement
930 	 * below since, due to short-circuit evaluation, it may not run if any
931 	 * prior condition is already true. The CPU correction
932 	 * check should kick in every poll.
933 	 */
934 #ifdef NAPI_YIELD_BUDGET_BASED
935 	if (ce_state && (ce_state->force_break || 0 == rc)) {
936 #else
937 	poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
938 	if ((ce_state) &&
939 	    (!ce_check_rx_pending(ce_state) || (0 == rc) ||
940 	     !poll_on_right_cpu)) {
941 #endif
942 		napi_info->stats[cpu].napi_completes++;
943 #ifdef NAPI_YIELD_BUDGET_BASED
944 		ce_state->force_break = 0;
945 #endif
946 
947 		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
948 					 NULL, NULL, 0, 0);
949 		if (normalized >= budget)
950 			normalized = budget - 1;
951 
952 		napi_complete(napi);
953 		/* enable interrupts */
954 		hif_napi_enable_irq(hif_ctx, napi_info->id);
955 		/* support suspend/resume */
956 		qdf_atomic_dec(&(hif->active_tasklet_cnt));
957 
958 		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
959 			   __func__, __LINE__);
960 	} else {
961 		/* 4.4 kernel NAPI implementation requires drivers to
962 		 * return full work when they ask to be re-scheduled,
963 		 * or napi_complete and re-start with a fresh interrupt
964 		 */
965 		normalized = budget;
966 	}
967 
968 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
969 				 NAPI_POLL_EXIT, NULL, NULL, normalized, 0);
970 
971 	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
972 	return normalized;
973 out:
974 	return rc;
975 }
976 qdf_export_symbol(hif_napi_poll);
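/*
 * Worked example of the normalization above, assuming the usual defaults of
 * QCA_NAPI_BUDGET=64 and QCA_NAPI_NUM_BUCKETS=4 (illustrative numbers only):
 * if ce_per_engine_service() reports rc=120 messages and scale=2, then
 * normalized = 120 / 2 = 60 and bucket = 60 / (64 / 4) = 3, i.e. the poll is
 * accounted in the last (busiest) napi_budget_uses bucket.  Any non-zero rc
 * that normalizes to 0 is bumped to 1 so partial work is never lost, and a
 * bucket index beyond the last one is clamped to QCA_NAPI_NUM_BUCKETS - 1.
 */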
977 
978 void hif_update_napi_max_poll_time(struct CE_state *ce_state,
979 				   int ce_id,
980 				   int cpu_id)
981 {
982 	struct hif_softc *hif;
983 	struct qca_napi_info *napi_info;
984 	unsigned long long napi_poll_time = sched_clock() -
985 					ce_state->ce_service_start_time;
986 
987 	hif = ce_state->scn;
988 	napi_info = hif->napi_data.napis[ce_id];
989 	if (napi_poll_time >
990 			napi_info->stats[cpu_id].napi_max_poll_time)
991 		napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time;
992 }
993 
994 #ifdef HIF_IRQ_AFFINITY
995 /**
996  *
997  * hif_napi_update_yield_stats() - update NAPI yield related stats
998  * @ce_state: CE state for which the yield stats need to be updated
999  *            (the CE id and the current CPU id are derived from it)
1000  * @time_limit_reached: indicates whether the time limit was reached
1001  * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached
1002  *
1003  * Return: None
1004  */
1005 void hif_napi_update_yield_stats(struct CE_state *ce_state,
1006 				 bool time_limit_reached,
1007 				 bool rxpkt_thresh_reached)
1008 {
1009 	struct hif_softc *hif;
1010 	struct qca_napi_data *napi_data = NULL;
1011 	int ce_id = 0;
1012 	int cpu_id = 0;
1013 
1014 	if (unlikely(NULL == ce_state)) {
1015 		QDF_ASSERT(NULL != ce_state);
1016 		return;
1017 	}
1018 
1019 	hif = ce_state->scn;
1020 
1021 	if (unlikely(NULL == hif)) {
1022 		QDF_ASSERT(NULL != hif);
1023 		return;
1024 	}
1025 	napi_data = &(hif->napi_data);
1026 	if (unlikely(NULL == napi_data)) {
1027 		QDF_ASSERT(NULL != napi_data);
1028 		return;
1029 	}
1030 
1031 	ce_id = ce_state->id;
1032 	cpu_id = qdf_get_cpu();
1033 
1034 	if (unlikely(!napi_data->napis[ce_id])) {
1035 		HIF_INFO("%s: NAPI info is NULL for ce id: %d",
1036 			 __func__, ce_id);
1037 		return;
1038 	}
1039 
1040 	if (time_limit_reached)
1041 		napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
1042 	else
1043 		napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;
1044 
1045 	hif_update_napi_max_poll_time(ce_state, ce_id,
1046 				      cpu_id);
1047 }
1048 
1049 /**
1050  *
1051  * hif_napi_stats() - display NAPI CPU statistics
1052  * @napid: pointer to qca_napi_data
1053  *
1054  * Description:
1055  *    Prints the various CPU cores on which the NAPI instances/CE interrupts
1056  *    are being executed. Can be called from outside the NAPI layer.
1057  *
1058  * Return: None
1059  */
1060 void hif_napi_stats(struct qca_napi_data *napid)
1061 {
1062 	int i;
1063 	struct qca_napi_cpu *cpu;
1064 
1065 	if (napid == NULL) {
1066 		qdf_debug("%s: napid struct is null", __func__);
1067 		return;
1068 	}
1069 
1070 	cpu = napid->napi_cpu;
1071 	qdf_debug("NAPI CPU TABLE");
1072 	qdf_debug("lilclhead=%d, bigclhead=%d",
1073 		  napid->lilcl_head, napid->bigcl_head);
1074 	for (i = 0; i < NR_CPUS; i++) {
1075 		qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d",
1076 			  i,
1077 			  cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
1078 			  cpu[i].core_mask.bits[0],
1079 			  cpu[i].thread_mask.bits[0],
1080 			  cpu[i].max_freq, cpu[i].napis,
1081 			  cpu[i].cluster_nxt);
1082 	}
1083 }
1084 
1085 #ifdef FEATURE_NAPI_DEBUG
1086 /*
1087  * Local functions
1088  * - no argument checks, all internal/trusted callers
1089  */
1090 static void hnc_dump_cpus(struct qca_napi_data *napid)
1091 {
1092 	hif_napi_stats(napid);
1093 }
1094 #else
1095 static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }
1096 #endif /* FEATURE_NAPI_DEBUG */
1097 /**
1098  * hnc_link_clusters() - partitions the cpu table into clusters
1099  * @napid: pointer to NAPI data
1100  *
1101  * Takes in a CPU topology table and builds two linked lists
1102  * (big cluster cores, list-head at bigcl_head, and little cluster
1103  * cores, list-head at lilcl_head) out of it.
1104  *
1105  * If there is more than one cluster:
1106  * - bigcl_head and lilcl_head will be different,
1107  * - the cluster with the highest cpufreq will be considered the "big" cluster.
1108  *   If more than one cluster shares the highest frequency, the *last* such
1109  *   cluster will be designated as the "big cluster"
1110  * - the cluster with the lowest cpufreq will be considered the "li'l" cluster.
1111  *   If more than one cluster shares the lowest cpu freq, the *first* such
1112  *   cluster will be designated as the "little cluster"
1113  * - We only support up to 32 clusters
1114  * Return: 0 : OK
1115  *         !0: error (at least one of lil/big clusters could not be found)
1116  */
1117 #define HNC_MIN_CLUSTER 0
1118 #define HNC_MAX_CLUSTER 1
1119 static int hnc_link_clusters(struct qca_napi_data *napid)
1120 {
1121 	int rc = 0;
1122 
1123 	int i;
1124 	int it = 0;
1125 	uint32_t cl_done = 0x0;
1126 	int cl, curcl, curclhead = 0;
1127 	int more;
1128 	unsigned int lilfrq = INT_MAX;
1129 	unsigned int bigfrq = 0;
1130 	unsigned int clfrq = 0;
1131 	int prev = 0;
1132 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1133 
1134 	napid->lilcl_head = napid->bigcl_head = -1;
1135 
1136 	do {
1137 		more = 0;
1138 		it++; curcl = -1;
1139 		for (i = 0; i < NR_CPUS; i++) {
1140 			cl = cpus[i].cluster_id;
1141 			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
1142 				   i, cl);
1143 			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
1144 				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
1145 				QDF_ASSERT(0);
1146 				/* continue if ASSERTs are disabled */
1147 				continue;
1148 			}
1149 			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
1150 				NAPI_DEBUG("Core mask 0. SKIPPED\n");
1151 				continue;
1152 			}
1153 			if (cl_done & (0x01 << cl)) {
1154 				NAPI_DEBUG("Cluster already processed. SKIPPED\n");
1155 				continue;
1156 			} else {
1157 				if (more == 0) {
1158 					more = 1;
1159 					curcl = cl;
1160 					curclhead = i; /* row */
1161 					clfrq = cpus[i].max_freq;
1162 					prev = -1;
1163 				}
1164 				if ((curcl >= 0) && (curcl != cl)) {
1165 					NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
1166 						   cl, curcl);
1167 					continue;
1168 				}
1169 				if (cpus[i].max_freq != clfrq)
1170 					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
1171 						   cpus[i].max_freq, clfrq);
1172 				if (clfrq >= bigfrq) {
1173 					bigfrq = clfrq;
1174 					napid->bigcl_head  = curclhead;
1175 					NAPI_DEBUG("bigcl=%d\n", curclhead);
1176 				}
1177 				if (clfrq < lilfrq) {
1178 					lilfrq = clfrq;
1179 					napid->lilcl_head = curclhead;
1180 					NAPI_DEBUG("lilcl=%d\n", curclhead);
1181 				}
1182 				if (prev != -1)
1183 					cpus[prev].cluster_nxt = i;
1184 
1185 				prev = i;
1186 			}
1187 		}
1188 		if (curcl >= 0)
1189 			cl_done |= (0x01 << curcl);
1190 
1191 	} while (more);
1192 
1193 	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
1194 		rc = -EFAULT;
1195 
1196 	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
1197 	return rc;
1198 }
1199 #undef HNC_MIN_CLUSTER
1200 #undef HNC_MAX_CLUSTER
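/*
 * Worked example (hypothetical 4+4 big.LITTLE topology, frequencies assumed):
 * cpus 0-3 in cluster 0 at 1.8 GHz and cpus 4-7 in cluster 1 at 2.4 GHz.
 * After hnc_link_clusters():
 *	lilcl_head = 0, with cluster_nxt chain 0 -> 1 -> 2 -> 3 -> -1
 *	bigcl_head = 4, with cluster_nxt chain 4 -> 5 -> 6 -> 7 -> -1
 * hncm_dest_cpu() later walks these chains: from lilcl_head when collapsing
 * and from bigcl_head when dispersing NAPI IRQs.
 */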
1201 
1202 /*
1203  * hotplug function group
1204  */
1205 
1206 /**
1207  * hnc_cpu_online_cb() - handles CPU hotplug "up" events
1208  * @context: the associated HIF context
1209  * @cpu: the CPU Id of the CPU the event happened on
1210  *
1211  * Return: None
1212  */
1213 static void hnc_cpu_online_cb(void *context, uint32_t cpu)
1214 {
1215 	struct hif_softc *hif = context;
1216 	struct qca_napi_data *napid = &hif->napi_data;
1217 
1218 	if (cpu >= NR_CPUS)
1219 		return;
1220 
1221 	NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);
1222 
1223 	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
1224 	NAPI_DEBUG("%s: CPU %u marked %d",
1225 		   __func__, cpu, napid->napi_cpu[cpu].state);
1226 
1227 	NAPI_DEBUG("<--%s", __func__);
1228 }
1229 
1230 /**
1231  * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
1232  * @context: the associated HIF context
1233  * @cpu: the CPU Id of the CPU the event happened on
1234  *
1235  * On transition to offline, we act on PREP events, because we may need to move
1236  * the irqs/NAPIs to another CPU before it is actually off-lined.
1237  *
1238  * Return: None
1239  */
1240 static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
1241 {
1242 	struct hif_softc *hif = context;
1243 	struct qca_napi_data *napid = &hif->napi_data;
1244 
1245 	if (cpu >= NR_CPUS)
1246 		return;
1247 
1248 	NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);
1249 
1250 	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1251 
1252 	NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
1253 		   __func__, cpu, napid->napi_cpu[cpu].state);
1254 
1255 	/*
1256 	 * we need to move any NAPIs on this CPU out.
1257 	 * if we are in LO throughput mode, then this is valid
1258 	 * if the CPU is the low designated CPU.
1259 	 */
1260 	hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
1261 		       NAPI_EVT_CPU_STATE,
1262 		       (void *)
1263 		       ((size_t)cpu << 16 | napid->napi_cpu[cpu].state));
1264 
1265 	NAPI_DEBUG("<--%s", __func__);
1266 }
1267 
1268 static int hnc_hotplug_register(struct hif_softc *hif_sc)
1269 {
1270 	QDF_STATUS status;
1271 
1272 	NAPI_DEBUG("-->%s", __func__);
1273 
1274 	status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
1275 				    hif_sc,
1276 				    hnc_cpu_online_cb,
1277 				    hnc_cpu_before_offline_cb);
1278 
1279 	NAPI_DEBUG("<--%s [%d]", __func__, status);
1280 
1281 	return qdf_status_to_os_return(status);
1282 }
1283 
1284 static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
1285 {
1286 	NAPI_DEBUG("-->%s", __func__);
1287 
1288 	if (hif_sc->napi_data.cpuhp_handler)
1289 		qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);
1290 
1291 	NAPI_DEBUG("<--%s", __func__);
1292 }
1293 
1294 /**
1295  * hnc_tput_hook() - installs a callback in the throughput detector
1296  * @install: !0 => install; =0: uninstall
1297  *
1298  * installs a callback to be called when wifi driver throughput (tx+rx)
1299  * crosses a threshold. Currently, we are using the same criteria as
1300  * TCP ack suppression (500 packets/100ms by default).
1301  *
1302  * Return: 0 : success
1303  *         <0: failure
1304  */
1305 
1306 static int hnc_tput_hook(int install)
1307 {
1308 	int rc = 0;
1309 
1310 	/*
1311 	 * Nothing to do until the bw_calculation block accepts registrations;
1312 	 * the callback is currently hardcoded in wlan_hdd_main.c:
1313 	 *   hdd_bus_bw_compute_cbk() -> hdd_napi_throughput_policy(...)
1314 	 */
1315 	return rc;
1316 }
1317 
1318 /*
1319  * Implementation of hif_napi_cpu API
1320  */
1321 
1322 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1323 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1324 {
1325 	cpumask_copy(&(cpus[i].thread_mask),
1326 			     topology_sibling_cpumask(i));
1327 }
1328 #else
1329 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1330 {
1331 }
1332 #endif
1333 
1334 
1335 /**
1336  * hif_napi_cpu_init() - initialization of irq affinity block
1337  * @hif: pointer to opaque hif context
1338  *
1339  * called by hif_napi_create, when the first NAPI instance is created
1340  * - builds napi_rss_cpus table from cpu topology
1341  * - links cores of the same clusters together
1342  * - installs hot-plug notifier
1343  * - installs throughput trigger notifier (when such mechanism exists)
1344  *
1345  * Return: 0: OK
1346  *         <0: error code
1347  */
1348 int hif_napi_cpu_init(struct hif_opaque_softc *hif)
1349 {
1350 	int rc = 0;
1351 	int i;
1352 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1353 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1354 
1355 	NAPI_DEBUG("--> ");
1356 
1357 	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
1358 		NAPI_DEBUG("NAPI RSS table already initialized.\n");
1359 		rc = -EALREADY;
1360 		goto lab_rss_init;
1361 	}
1362 
1363 	/* build CPU topology table */
1364 	for_each_possible_cpu(i) {
1365 		cpus[i].state       = ((cpumask_test_cpu(i, cpu_online_mask)
1366 					? QCA_NAPI_CPU_UP
1367 					: QCA_NAPI_CPU_DOWN));
1368 		cpus[i].core_id     = topology_core_id(i);
1369 		cpus[i].cluster_id  = topology_physical_package_id(i);
1370 		cpumask_copy(&(cpus[i].core_mask),
1371 			     topology_core_cpumask(i));
1372 		record_sibling_cpumask(cpus, i);
1373 		cpus[i].max_freq    = cpufreq_quick_get_max(i);
1374 		cpus[i].napis       = 0x0;
1375 		cpus[i].cluster_nxt = -1; /* invalid */
1376 	}
1377 
1378 	/* link clusters together */
1379 	rc = hnc_link_clusters(napid);
1380 	if (0 != rc)
1381 		goto lab_err_topology;
1382 
1383 	/* install hotplug notifier */
1384 	rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
1385 	if (0 != rc)
1386 		goto lab_err_hotplug;
1387 
1388 	/* install throughput notifier */
1389 	rc = hnc_tput_hook(1);
1390 	if (0 == rc)
1391 		goto lab_rss_init;
1392 
1393 lab_err_hotplug:
1394 	hnc_tput_hook(0);
1395 	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
1396 lab_err_topology:
1397 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1398 lab_rss_init:
1399 	NAPI_DEBUG("<-- [rc=%d]", rc);
1400 	return rc;
1401 }
1402 
1403 /**
1404  * hif_napi_cpu_deinit() - clean-up of irq affinity block
1405  *
1406  * called by hif_napi_destroy, when the last instance is removed
1407  * - uninstalls throughput and hotplug notifiers
1408  * - clears cpu topology table
1409  * Return: 0: OK
1410  */
1411 int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
1412 {
1413 	int rc = 0;
1414 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1415 
1416 	NAPI_DEBUG("-->%s(...)", __func__);
1417 
1418 	/* uninstall tput notifier */
1419 	rc = hnc_tput_hook(0);
1420 
1421 	/* uninstall hotplug notifier */
1422 	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
1423 
1424 	/* clear the topology table */
1425 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1426 
1427 	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);
1428 
1429 	return rc;
1430 }
1431 
1432 /**
1433  * hncm_migrate_to() - migrates a NAPI to a CPU
1434  * @napid: pointer to NAPI block
1435  * @napi_ce: CE id of the NAPI instance
1436  * @didx : index in the CPU topology table for the CPU to migrate to
1437  *
1438  * Migrates NAPI (identified by the CE_id) to the destination core
1439  * Updates the napi_map of the destination entry
1440  *
1441  * Return:
1442  *  =0 : success
1443  *  <0 : error
1444  */
1445 static int hncm_migrate_to(struct qca_napi_data *napid,
1446 			   int                   napi_ce,
1447 			   int                   didx)
1448 {
1449 	int rc = 0;
1450 	cpumask_t cpumask;
1451 
1452 	NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);
1453 
1454 	cpumask.bits[0] = (1 << didx);
1455 	if (!napid->napis[napi_ce])
1456 		return -EINVAL;
1457 
1458 	irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0);
1459 	rc = irq_set_affinity_hint(napid->napis[napi_ce]->irq, &cpumask);
1460 
1461 	/* unmark the napis bitmap in the cpu table */
1462 	napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce);
1463 	/* mark the napis bitmap for the new designated cpu */
1464 	napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
1465 	napid->napis[napi_ce]->cpu = didx;
1466 
1467 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1468 	return rc;
1469 }
1470 /**
1471  * hncm_dest_cpu() - finds a destination CPU for NAPI
1472  * @napid: pointer to NAPI block
1473  * @act  : RELOCATE | COLLAPSE | DISPERSE
1474  *
1475  * Finds the designated destination for the next IRQ.
1476  * RELOCATE: translated to either COLLAPSE or DISPERSE based
1477  *           on napid->napi_mode (throughput state)
1478  * COLLAPSE: All have the same destination: the first online CPU in lilcl
1479  * DISPERSE: One of the CPU in bigcl, which has the smallest number of
1480  *           NAPIs on it
1481  *
1482  * Return: >=0 : index in the cpu topology table
1483  *       : < 0 : error
1484  */
1485 static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
1486 {
1487 	int destidx = -1;
1488 	int head, i;
1489 
1490 	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
1491 	if (act == HNC_ACT_RELOCATE) {
1492 		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
1493 			act = HNC_ACT_COLLAPSE;
1494 		else
1495 			act = HNC_ACT_DISPERSE;
1496 		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
1497 			   __func__, act);
1498 	}
1499 	if (act == HNC_ACT_COLLAPSE) {
1500 		head = i = napid->lilcl_head;
1501 retry_collapse:
1502 		while (i >= 0) {
1503 			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
1504 				destidx = i;
1505 				break;
1506 			}
1507 			i = napid->napi_cpu[i].cluster_nxt;
1508 		}
1509 		if ((destidx < 0) && (head == napid->lilcl_head)) {
1510 			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
1511 				__func__);
1512 			head = i = napid->bigcl_head;
1513 			goto retry_collapse;
1514 		}
1515 	} else { /* HNC_ACT_DISPERSE */
1516 		int smallest = 99; /* all 32 bits full */
1517 		int smallidx = -1;
1518 
1519 		head = i = napid->bigcl_head;
1520 retry_disperse:
1521 		while (i >= 0) {
1522 			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
1523 			    (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
1524 				smallest = hweight32(napid->napi_cpu[i].napis);
1525 				smallidx = i;
1526 			}
1527 			i = napid->napi_cpu[i].cluster_nxt;
1528 		}
1529 		/* Check if it matches the user-specified CPU mask */
1530 		smallidx = ((1 << smallidx) & napid->user_cpu_affin_mask) ?
1531 								smallidx : -1;
1532 
1533 		if ((smallidx < 0) && (head == napid->bigcl_head)) {
1534 			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
1535 				__func__);
1536 			head = i = napid->lilcl_head;
1537 			goto retry_disperse;
1538 		}
1539 		destidx = smallidx;
1540 	}
1541 	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
1542 	return destidx;
1543 }
1544 /**
1545  * hif_napi_cpu_migrate() - migrate IRQs away
1546  * @cpu: -1: all CPUs; otherwise, the specific CPU
1547  * @action: RELOCATE | COLLAPSE | DISPERSE
1548  *
1549  * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
1550  * cores. Eligible cores are:
1551  * act=COLLAPSE -> the first online core of the little cluster
1552  * act=DISPERSE -> separate cores of the big cluster, so that each core will
1553  *                 host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis)
1554  *
1555  * Note that this function is called with a spinlock acquired already.
1556  *
1557  * Return: =0: success
1558  *         <0: error
1559  */
1560 
1561 int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
1562 {
1563 	int      rc = 0;
1564 	struct qca_napi_cpu *cpup;
1565 	int      i, dind;
1566 	uint32_t napis;
1567 
1568 	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
1569 		   __func__, cpu, action);
1570 	/* the following is really: hif_napi_enabled() with less overhead */
1571 	if (napid->ce_map == 0) {
1572 		NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
1573 		goto hncm_return;
1574 	}
1575 
1576 	cpup = napid->napi_cpu;
1577 
1578 	switch (action) {
1579 	case HNC_ACT_RELOCATE:
1580 	case HNC_ACT_DISPERSE:
1581 	case HNC_ACT_COLLAPSE: {
1582 		/* first find the src napi set */
1583 		if (cpu == HNC_ANY_CPU)
1584 			napis = napid->ce_map;
1585 		else
1586 			napis = cpup[cpu].napis;
1587 		/* then clear the napi bitmap on each CPU */
1588 		for (i = 0; i < NR_CPUS; i++)
1589 			cpup[i].napis = 0;
1590 		/* then for each of the NAPIs to disperse: */
1591 		for (i = 0; i < CE_COUNT_MAX; i++)
1592 			if (napis & (1 << i)) {
1593 				/* find a destination CPU */
1594 				dind = hncm_dest_cpu(napid, action);
1595 				if (dind >= 0) {
1596 					NAPI_DEBUG("Migrating NAPI ce%d to %d",
1597 						   i, dind);
1598 					rc = hncm_migrate_to(napid, i, dind);
1599 				} else {
1600 					NAPI_DEBUG("No dest for NAPI ce%d", i);
1601 					hnc_dump_cpus(napid);
1602 					rc = -1;
1603 				}
1604 			}
1605 		break;
1606 	}
1607 	default: {
1608 		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
1609 		QDF_BUG(0);
1610 		break;
1611 	}
1612 	} /* switch action */
1613 
1614 hncm_return:
1615 	hnc_dump_cpus(napid);
1616 	return rc;
1617 }
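/*
 * Illustrative call sites (see hif_napi_event() above): the throughput and
 * CPU-hotplug handlers drive the migration, e.g.
 *
 *	hif_napi_cpu_migrate(napid, HNC_ANY_CPU, HNC_ACT_DISPERSE);  HI tput
 *	hif_napi_cpu_migrate(napid, cpu, HNC_ACT_RELOCATE);	      CPU down
 *
 * both issued while napid->lock is already held, as this function requires.
 */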
1618 
1619 
1620 /**
1621  * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting
1622  * @napid: pointer to qca_napi_data structure
1623  * @bl_flag: blacklist flag to enable/disable blacklisting
1624  *
1625  * The function enables/disables blacklisting for all the copy engine
1626  * interrupts on which NAPI is enabled.
1627  *
1628  * Return: None
1629  */
1630 static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
1631 {
1632 	int i;
1633 	struct qca_napi_info *napii;
1634 
1635 	for (i = 0; i < CE_COUNT_MAX; i++) {
1636 		/* check if NAPI is enabled on the CE */
1637 		if (!(napid->ce_map & (0x01 << i)))
1638 			continue;
1639 
1640 		/* double check that NAPI is allocated for the CE */
1641 		napii = napid->napis[i];
1642 		if (!(napii))
1643 			continue;
1644 
1645 		if (bl_flag == true)
1646 			irq_modify_status(napii->irq,
1647 					  0, IRQ_NO_BALANCING);
1648 		else
1649 			irq_modify_status(napii->irq,
1650 					  IRQ_NO_BALANCING, 0);
1651 		HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
1652 	}
1653 }
1654 
1655 #ifdef CONFIG_SCHED_CORE_CTL
1656 /* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */
1657 static inline int hif_napi_core_ctl_set_boost(bool boost)
1658 {
1659 	return core_ctl_set_boost(boost);
1660 }
1661 #else
1662 static inline int hif_napi_core_ctl_set_boost(bool boost)
1663 {
1664 	return 0;
1665 }
1666 #endif
1667 /**
1668  * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
1669  * @napid: pointer to qca_napi_data structure
1670  * @op: blacklist operation to perform
1671  *
1672  * The function enables/disables/queries blacklisting for all CE RX
1673  * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables
1674  * core_ctl_set_boost.
1675  * Once blacklisting is enabled, the interrupts will not be managed by the IRQ
1676  * balancer.
1677  *
1678  * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled
1679  *         for BLACKLIST_QUERY op - blacklist refcount
1680  *         for BLACKLIST_ON op    - return value from core_ctl_set_boost API
1681  *         for BLACKLIST_OFF op   - return value from core_ctl_set_boost API
1682  */
1683 int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
1684 			   enum qca_blacklist_op op)
1685 {
1686 	int rc = 0;
1687 	static int ref_count; /* = 0 by the compiler */
1688 	uint8_t flags = napid->flags;
1689 	bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
1690 	bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;
1691 
1692 	NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);
1693 
1694 	if (!(bl_en && ccb_en)) {
1695 		rc = -EINVAL;
1696 		goto out;
1697 	}
1698 
1699 	switch (op) {
1700 	case BLACKLIST_QUERY:
1701 		rc = ref_count;
1702 		break;
1703 	case BLACKLIST_ON:
1704 		ref_count++;
1705 		rc = 0;
1706 		if (ref_count == 1) {
1707 			rc = hif_napi_core_ctl_set_boost(true);
1708 			NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
1709 				rc, ref_count);
1710 			hif_napi_bl_irq(napid, true);
1711 		}
1712 		break;
1713 	case BLACKLIST_OFF:
1714 		if (ref_count) {
1715 			ref_count--;
1716 			rc = 0;
1717 			if (ref_count == 0) {
1718 				rc = hif_napi_core_ctl_set_boost(false);
1719 				NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
1720 					   rc, ref_count);
1721 				hif_napi_bl_irq(napid, false);
1722 			}
1723 		}
1724 		break;
1725 	default:
1726 		NAPI_DEBUG("Invalid blacklist op: %d", op);
1727 		rc = -EINVAL;
1728 	} /* switch */
1729 out:
1730 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1731 	return rc;
1732 }
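/*
 * Illustrative sketch of the reference counting above (call sequence assumed):
 * two BLACKLIST_ON requests followed by one BLACKLIST_OFF leave blacklisting
 * (and core_ctl boost) in force, since only the 0->1 and 1->0 transitions
 * actually touch the IRQs:
 *
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_ON);	 refcount 0->1, applied
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_ON);	 refcount 1->2
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);	 refcount 2->1
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_QUERY);	 returns 1 (still on)
 *
 * All of the above return -EINVAL instead if the IRQ_BLACKLISTING and
 * CORE_CTL_BOOST feature flags were not both set at hif_napi_create() time.
 */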
1733 
1734 /**
1735  * hif_napi_serialize() - [de-]serialize NAPI operations
1736  * @hif:   context
1737  * @is_on: 1: serialize, 0: deserialize
1738  *
1739  * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the
1740  * following steps (see hif_napi_event for code):
1741  * - put irqs of all NAPI instances on the same CPU
1742  * - only for the first serialize call: blacklist
1743  *
1744  * hif_napi_serialize(hif, 0):
1745  * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
1746  * - at the end of the timer, check the current throughput state and
1747  *   implement it.
1748  */
1749 static unsigned long napi_serialize_reqs;
1750 int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
1751 {
1752 	int rc = -EINVAL;
1753 
1754 	if (hif != NULL)
1755 		switch (is_on) {
1756 		case 0: { /* de-serialize */
1757 			rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
1758 					    (void *) 0);
1759 			napi_serialize_reqs = 0;
1760 			break;
1761 		} /* end de-serialize */
1762 		case 1: { /* serialize */
1763 			rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
1764 					    (void *)napi_serialize_reqs++);
1765 			break;
1766 		} /* end serialize */
1767 		default:
1768 			break; /* no-op */
1769 		} /* switch */
1770 	return rc;
1771 }
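/*
 * Illustrative usage (assumed caller, e.g. roaming peer management in HDD):
 *
 *	hif_napi_serialize(hif, 1);	collapse IRQs + blacklist (first call)
 *	...	critical single-context work
 *	hif_napi_serialize(hif, 0);	return to the normal throughput policy
 *
 * Repeated "1" calls only re-collapse the IRQs; the blacklist is requested
 * only when napi_serialize_reqs was still 0 on entry (see NAPI_EVT_USR_SERIAL
 * handling in hif_napi_event()).
 */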
1772 
1773 #endif /* ifdef HIF_IRQ_AFFINITY */
1774