xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/hif_exec.h (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __HIF_EXEC_H__
#define __HIF_EXEC_H__

#include <hif.h>
#include <hif_irq_affinity.h>
#include <linux/cpumask.h>

/* Number of buckets for the scheduling latency histogram */
#define HIF_SCHED_LATENCY_BUCKETS 8

/* Bucket for latency between 0 and 2 ms */
#define HIF_SCHED_LATENCY_BUCKET_0_2 2
/* Bucket for latency between 3 and 10 ms */
#define HIF_SCHED_LATENCY_BUCKET_3_10 10
/* Bucket for latency between 11 and 20 ms */
#define HIF_SCHED_LATENCY_BUCKET_11_20 20
/* Bucket for latency between 21 and 50 ms */
#define HIF_SCHED_LATENCY_BUCKET_21_50 50
/* Bucket for latency between 51 and 100 ms */
#define HIF_SCHED_LATENCY_BUCKET_51_100 100
/* Bucket for latency between 101 and 250 ms */
#define HIF_SCHED_LATENCY_BUCKET_101_250 250
/* Bucket for latency between 251 and 500 ms */
#define HIF_SCHED_LATENCY_BUCKET_251_500 500
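
/*
 * Illustrative sketch, not part of the driver interface: one way a measured
 * scheduling latency (in ms) could be folded into the buckets bounded by the
 * macros above. Eight buckets are declared but only seven upper bounds, so
 * the last bucket implicitly catches everything above 500 ms. The helper
 * name below is hypothetical; the driver's own accounting is implemented in
 * the HIF exec code, not here.
 */
static inline int hif_sched_latency_sample_bucket(uint64_t latency_ms)
{
	if (latency_ms <= HIF_SCHED_LATENCY_BUCKET_0_2)
		return 0;
	if (latency_ms <= HIF_SCHED_LATENCY_BUCKET_3_10)
		return 1;
	if (latency_ms <= HIF_SCHED_LATENCY_BUCKET_11_20)
		return 2;
	if (latency_ms <= HIF_SCHED_LATENCY_BUCKET_21_50)
		return 3;
	if (latency_ms <= HIF_SCHED_LATENCY_BUCKET_51_100)
		return 4;
	if (latency_ms <= HIF_SCHED_LATENCY_BUCKET_101_250)
		return 5;
	if (latency_ms <= HIF_SCHED_LATENCY_BUCKET_251_500)
		return 6;
	return HIF_SCHED_LATENCY_BUCKETS - 1;
}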

struct hif_exec_context;

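/**
 * struct hif_execution_ops - scheduling operations for an execution context
 * @context_type: printable name of the context type, for debugging
 * @schedule: schedule the execution context to run
 * @reschedule: schedule the context again when more work is pending
 * @kill: stop the execution context and prevent further scheduling
 *
 * Member descriptions are inferred from the member names and from how
 * struct hif_exec_context documents its scheduling flow below.
 */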
struct hif_execution_ops {
	char *context_type;
	void (*schedule)(struct hif_exec_context *);
	void (*reschedule)(struct hif_exec_context *);
	void (*kill)(struct hif_exec_context *);
};

/**
 * struct hif_exec_context - only ever allocated as a subtype, e.g.
 *			     hif_tasklet_exec_context or hif_napi_exec_context
 *
 * @sched_ops: scheduling operations (schedule/reschedule/kill) for this
 *	       context type
 * @hif: opaque HIF handle this context belongs to
 * @numirq: number of IRQs assigned to this context
 * @irq: irq handles corresponding to the hw block
 * @os_irq: irq handles used for irq_affinity
 * @cpumask: mask of CPUs this context may be affined to
 * @grp_id: id of the interrupt group serviced by this context
 * @scale_bin_shift: bin shift used when scaling statistics
 * @context_name: a pointer to a const string for debugging.
 *		This should help whenever there could be ambiguity
 *		in what type of context the void *context points to.
 * @context: context for the handler function to use
 * @handler: handler invoked to process this group's interrupt work
 * @work_complete: function called when leaving the execution context to
 *	determine if this context should reschedule or wait for an interrupt.
 *	This function may be used as a hook for post processing.
 * @irq_enable: called when the context leaves polling mode
 * @irq_disable: called before scheduling the context
 * @irq_name: function returning the irq name/string mapped to an irq number
 * @sched_latency_stats: schedule latency stats for the different latency
 *			 buckets
 * @tstamp: timestamp when napi poll happens
 * @cpu: the cpu this context should be affined to
 * @stats: napi statistics, indexed per CPU
 * @inited: set once the context has been initialized
 * @configured: set once the context has been configured
 * @irq_requested: set once the group IRQs have been requested
 * @irq_enabled: set while the group IRQs are enabled
 * @irq_lock: spinlock used while enabling/disabling IRQs
 * @type: type of execution context
 * @poll_start_time: hif napi poll start time in nanoseconds
 * @force_break: flag to indicate if HIF execution context was forced to return
 *		 to HIF. This means there is more work to be done. Hence do not
 *		 call napi_complete.
 * @new_cpu_mask: affinity hint mask for each WLAN IRQ
 */
struct hif_exec_context {
	struct hif_execution_ops *sched_ops;
	struct hif_opaque_softc *hif;
	uint32_t numirq;
	uint32_t irq[HIF_MAX_GRP_IRQ];
	uint32_t os_irq[HIF_MAX_GRP_IRQ];
	cpumask_t cpumask;
	uint32_t grp_id;
	uint32_t scale_bin_shift;
	const char *context_name;
	void *context;
	ext_intr_handler handler;

	bool (*work_complete)(struct hif_exec_context *, int work_done);
	void (*irq_enable)(struct hif_exec_context *);
	void (*irq_disable)(struct hif_exec_context *);
	const char* (*irq_name)(int irq_no);
	uint64_t sched_latency_stats[HIF_SCHED_LATENCY_BUCKETS];
	uint64_t tstamp;

	uint8_t cpu;
	struct qca_napi_stat stats[NR_CPUS];
	bool inited;
	bool configured;
	bool irq_requested;
	bool irq_enabled;
	qdf_spinlock_t irq_lock;
	enum hif_exec_type type;
	unsigned long long poll_start_time;
	bool force_break;
#if defined(HIF_CPU_PERF_AFFINE_MASK) || defined(HIF_CPU_CLEAR_AFFINITY)
	/* Stores the affinity hint mask for each WLAN IRQ */
	qdf_cpu_mask new_cpu_mask[HIF_MAX_GRP_IRQ];
#endif
};
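
/*
 * Illustrative sketch (hypothetical helper, not part of the HIF API) of the
 * completion flow the callbacks above are built around: after the handler
 * has run, work_complete() decides whether the context re-enables its group
 * IRQs and goes idle, or asks sched_ops to reschedule it because more work
 * is pending. The driver's real tasklet/NAPI completion paths are
 * implemented in the corresponding .c file, not here.
 */
static inline void hif_exec_example_complete(struct hif_exec_context *ctx,
					     int work_done)
{
	if (ctx->work_complete(ctx, work_done)) {
		/* all pending work drained: wait for the next interrupt */
		ctx->irq_enable(ctx);
	} else {
		/* more work to do: run the context again */
		ctx->sched_ops->reschedule(ctx);
	}
}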

/**
 * struct hif_tasklet_exec_context - exec_context for tasklets
 * @exec_ctx: inherited data type
 * @tasklet: tasklet structure for scheduling
 */
struct hif_tasklet_exec_context {
	struct hif_exec_context exec_ctx;
	struct tasklet_struct tasklet;
};

/**
 * struct hif_napi_exec_context - exec_context for NAPI
 * @exec_ctx: inherited data type
 * @netdev: dummy net device associated with the napi context
 * @napi: napi structure used in scheduling
 */
struct hif_napi_exec_context {
	struct hif_exec_context exec_ctx;
	struct net_device    netdev; /* dummy net_dev */
	struct napi_struct   napi;
};

static inline struct hif_napi_exec_context*
	hif_exec_get_napi(struct hif_exec_context *ctx)
{
	return (struct hif_napi_exec_context *) ctx;
}

static inline struct hif_tasklet_exec_context*
	hif_exec_get_tasklet(struct hif_exec_context *ctx)
{
	return (struct hif_tasklet_exec_context *) ctx;
}
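
/*
 * Illustrative sketch (hypothetical helper, not part of the HIF API): the
 * casts above are safe because exec_ctx is the first member of each subtype,
 * so a hif_exec_context pointer and its enclosing subtype share an address.
 * A NAPI-flavoured caller could therefore reach the embedded napi_struct as
 * shown below; this is only meaningful for contexts created as NAPI type.
 */
static inline struct napi_struct *
hif_exec_example_napi(struct hif_exec_context *ctx)
{
	return &hif_exec_get_napi(ctx)->napi;
}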

struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
						uint32_t scale);

void hif_exec_destroy(struct hif_exec_context *ctx);

int hif_grp_irq_configure(struct hif_softc *scn,
			  struct hif_exec_context *hif_exec);
void hif_grp_irq_deconfigure(struct hif_softc *scn);
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context);

struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *hif,
					  uint8_t id);
void hif_exec_kill(struct hif_opaque_softc *scn);
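
/*
 * Minimal lifecycle sketch (hypothetical helper, error handling trimmed):
 * how a caller might tie the declarations above together, assuming a zero
 * return from hif_grp_irq_configure() means success and a scale argument of
 * 0 is acceptable. The HIF layer performs the real wiring of handlers, IRQ
 * numbers and group registration internally.
 */
static inline struct hif_exec_context *
hif_exec_example_setup(struct hif_softc *scn, enum hif_exec_type type)
{
	struct hif_exec_context *ctx = hif_exec_create(type, 0);

	if (!ctx)
		return NULL;

	if (hif_grp_irq_configure(scn, ctx)) {
		hif_exec_destroy(ctx);
		return NULL;
	}

	return ctx;
}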

#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_pci_irq_set_affinity_hint() - API to set IRQ affinity
 * @hif_ext_group: hif_ext_group to extract the irq info
 *
 * This function will set the IRQ affinity to the gold cores
 * only for defconfig builds
 *
 * Return: none
 */
void hif_pci_irq_set_affinity_hint(
	struct hif_exec_context *hif_ext_group);

/**
 * hif_pci_ce_irq_set_affinity_hint() - API to set CE IRQ affinity
 * @scn: hif_softc to extract the CE irq info
 *
 * This function will set the CE IRQ affinity to the gold cores
 * only for defconfig builds
 *
 * Return: none
 */
void hif_pci_ce_irq_set_affinity_hint(
	struct hif_softc *scn);

/**
 * hif_ce_irq_remove_affinity_hint() - remove affinity for the irq
 * @irq: irq number to remove affinity from
 */
static inline void hif_ce_irq_remove_affinity_hint(int irq)
{
	hif_irq_affinity_remove(irq);
}
#else
static inline void hif_pci_irq_set_affinity_hint(
	struct hif_exec_context *hif_ext_group)
{
}

static inline void hif_pci_ce_irq_set_affinity_hint(
	struct hif_softc *scn)
{
}

static inline void hif_ce_irq_remove_affinity_hint(int irq)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */

#ifdef HIF_CPU_CLEAR_AFFINITY
/**
 * hif_pci_config_irq_clear_cpu_affinity() - Remove CPU affinity of an IRQ
 * @scn: HIF handle
 * @intr_ctxt_id: interrupt group index
 * @cpu: CPU core to clear
 *
 * Return: None
 */
void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
					   int intr_ctxt_id, int cpu);
#else
static inline
void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
					   int intr_ctxt_id, int cpu)
{
}
#endif /* HIF_CPU_CLEAR_AFFINITY */

#endif /* __HIF_EXEC_H__ */