1 /* 2 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #ifndef __HIF_NAPI_H__ 20 #define __HIF_NAPI_H__ 21 22 /** 23 * DOC: hif_napi.h 24 * 25 * Interface to HIF implemented functions of NAPI. 26 * These are used by hdd_napi. 27 */ 28 29 30 /* CLD headers */ 31 #include <hif.h> /* struct hif_opaque_softc; */ 32 33 /** 34 * common stuff 35 * The declarations until #ifdef FEATURE_NAPI below 36 * are valid whether or not FEATURE_NAPI has been 37 * defined. 38 */ 39 40 /** 41 * NAPI manages the following states: 42 * NAPI state: per NAPI instance, ENABLED/DISABLED 43 * CPU state: per CPU, DOWN/UP 44 * TPUT state: global, LOW/HI 45 * 46 * "Dynamic" changes to state of various NAPI structures are 47 * managed by NAPI events. The events may be produced by 48 * various detection points. With each event, some data is 49 * sent. The main event handler in hif_napi handles and makes 50 * the state changes. 51 * 52 * event : data : generated 53 * ---------------:------------------:------------------ 54 * EVT_INI_FILE : cfg->napi_enable : after ini file processed 55 * EVT_CMD_STATE : cmd arg : by the vendor cmd 56 * EVT_INT_STATE : 0 : internal - shut off/disable 57 * EVT_CPU_STATE : (cpu << 16)|state: CPU hotplug events 58 * EVT_TPUT_STATE : (high/low) : tput trigger 59 * EVT_USR_SERIAL : num-serial_calls : WMA/ROAMING-START/IND 60 * EVT_USR_NORMAL : N/A : WMA/ROAMING-END 61 */ 62 enum qca_napi_event { 63 NAPI_EVT_INVALID, 64 NAPI_EVT_INI_FILE, 65 NAPI_EVT_CMD_STATE, 66 NAPI_EVT_INT_STATE, 67 NAPI_EVT_CPU_STATE, 68 NAPI_EVT_TPUT_STATE, 69 NAPI_EVT_USR_SERIAL, 70 NAPI_EVT_USR_NORMAL 71 }; 72 /** 73 * Following are some of NAPI related features controlled using feature flag 74 * These flags need to be enabled in the qca_napi_data->flags variable for the 75 * feature to kick in. 76 .* QCA_NAPI_FEATURE_CPU_CORRECTION - controls CPU correction logic 77 .* QCA_NAPI_FEATURE_IRQ_BLACKLISTING - controls call to irq_blacklist_on API 78 .* QCA_NAPI_FEATURE_CORE_CTL_BOOST - controls call to core_ctl_set_boost API 79 */ 80 #define QCA_NAPI_FEATURE_CPU_CORRECTION BIT(1) 81 #define QCA_NAPI_FEATURE_IRQ_BLACKLISTING BIT(2) 82 #define QCA_NAPI_FEATURE_CORE_CTL_BOOST BIT(3) 83 84 /** 85 * Macros to map ids -returned by ...create()- to pipes and vice versa 86 */ 87 #define NAPI_ID2PIPE(i) ((i)-1) 88 #define NAPI_PIPE2ID(p) ((p)+1) 89 90 #ifdef RECEIVE_OFFLOAD 91 /** 92 * hif_napi_rx_offld_flush_cb_register() - Register flush callback for Rx offld 93 * @hif_hdl: pointer to hif context 94 * @offld_flush_handler: register offld flush callback 95 * 96 * Return: None 97 */ 98 void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl, 99 void (rx_ol_flush_handler)(void *arg)); 100 101 /** 102 * hif_napi_rx_offld_flush_cb_deregister() - Degregister offld flush_cb 103 * @hif_hdl: pointer to hif context 104 * 105 * Return: NONE 106 */ 107 void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl); 108 #endif /* RECEIVE_OFFLOAD */ 109 110 /** 111 * hif_napi_get_lro_info() - returns the address LRO data for napi_id 112 * @hif: pointer to hif context 113 * @napi_id: napi instance 114 * 115 * Description: 116 * Returns the address of the LRO structure 117 * 118 * Return: 119 * <addr>: address of the LRO structure 120 */ 121 void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id); 122 123 enum qca_blacklist_op { 124 BLACKLIST_QUERY, 125 BLACKLIST_OFF, 126 BLACKLIST_ON 127 }; 128 129 #ifdef FEATURE_NAPI 130 131 /** 132 * NAPI HIF API 133 * 134 * the declarations below only apply to the case 135 * where FEATURE_NAPI is defined 136 */ 137 138 int hif_napi_create(struct hif_opaque_softc *hif, 139 int (*poll)(struct napi_struct *, int), 140 int budget, 141 int scale, 142 uint8_t flags); 143 int hif_napi_destroy(struct hif_opaque_softc *hif, 144 uint8_t id, 145 int force); 146 147 struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif); 148 149 /** 150 * hif_get_napi() - get NAPI corresponding to napi_id 151 * @napi_id: NAPI instance 152 * @napid: Handle NAPI 153 * 154 * Return: napi corresponding napi_id 155 */ 156 struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid); 157 158 int hif_napi_event(struct hif_opaque_softc *hif, 159 enum qca_napi_event event, 160 void *data); 161 162 /* called from the ISR within hif, so, ce is known */ 163 int hif_napi_enabled(struct hif_opaque_softc *hif, int ce); 164 165 bool hif_napi_created(struct hif_opaque_softc *hif, int ce); 166 167 /* called from hdd (napi_poll), using napi id as a selector */ 168 void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id); 169 170 /* called by ce_tasklet.c::ce_dispatch_interrupt*/ 171 bool hif_napi_schedule(struct hif_opaque_softc *scn, int ce_id); 172 173 /* called by hdd_napi, which is called by kernel */ 174 int hif_napi_poll(struct hif_opaque_softc *hif_ctx, 175 struct napi_struct *napi, int budget); 176 177 #ifdef FEATURE_NAPI_DEBUG 178 #define NAPI_DEBUG(fmt, ...) \ 179 qdf_debug("wlan: NAPI: %s:%d "fmt, __func__, __LINE__, ##__VA_ARGS__) 180 #else 181 #define NAPI_DEBUG(fmt, ...) /* NO-OP */ 182 #endif /* FEATURE NAPI_DEBUG */ 183 184 #define HNC_ANY_CPU (-1) 185 #define HNC_ACT_RELOCATE (0) 186 #define HNC_ACT_COLLAPSE (1) 187 #define HNC_ACT_DISPERSE (-1) 188 189 /** 190 * hif_update_napi_max_poll_time() - updates NAPI max poll time 191 * @ce_state: ce state 192 * @ce_id: Copy engine ID 193 * @cpu_id: cpu id 194 * 195 * This API updates NAPI max poll time per CE per SPU. 196 * 197 * Return: void 198 */ 199 void hif_update_napi_max_poll_time(struct CE_state *ce_state, 200 int ce_id, 201 int cpu_id); 202 /** 203 * Local interface to HIF implemented functions of NAPI CPU affinity management. 204 * Note: 205 * 1- The symbols in this file are NOT supposed to be used by any 206 * entity other than hif_napi.c 207 * 2- The symbols are valid only if HELIUMPLUS is defined. They are otherwise 208 * mere wrappers. 209 * 210 */ 211 212 #else /* ! defined(FEATURE_NAPI) */ 213 214 /** 215 * Stub API 216 * 217 * The declarations in this section are valid only 218 * when FEATURE_NAPI has *not* been defined. 219 */ 220 221 #define NAPI_DEBUG(fmt, ...) /* NO-OP */ 222 223 static inline int hif_napi_create(struct hif_opaque_softc *hif, 224 uint8_t pipe_id, 225 int (*poll)(struct napi_struct *, int), 226 int budget, 227 int scale, 228 uint8_t flags) 229 { return -EPERM; } 230 231 static inline int hif_napi_destroy(struct hif_opaque_softc *hif, 232 uint8_t id, 233 int force) 234 { return -EPERM; } 235 236 static inline struct qca_napi_data *hif_napi_get_all( 237 struct hif_opaque_softc *hif) 238 { return NULL; } 239 240 static inline int hif_napi_event(struct hif_opaque_softc *hif, 241 enum qca_napi_event event, 242 void *data) 243 { return -EPERM; } 244 245 /* called from the ISR within hif, so, ce is known */ 246 static inline int hif_napi_enabled(struct hif_opaque_softc *hif, int ce) 247 { return 0; } 248 249 static inline bool hif_napi_created(struct hif_opaque_softc *hif, int ce) 250 { return false; } 251 252 /* called from hdd (napi_poll), using napi id as a selector */ 253 static inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id) 254 { return; } 255 256 static inline bool hif_napi_schedule(struct hif_opaque_softc *hif, int ce_id) 257 { return false; } 258 259 static inline int hif_napi_poll(struct napi_struct *napi, int budget) 260 { return -EPERM; } 261 262 /** 263 * hif_update_napi_max_poll_time() - updates NAPI max poll time 264 * @ce_state: ce state 265 * @ce_id: Copy engine ID 266 * @cpu_id: cpu id 267 * 268 * This API updates NAPI max poll time per CE per SPU. 269 * 270 * Return: void 271 */ 272 static inline void hif_update_napi_max_poll_time(struct CE_state *ce_state, 273 int ce_id, 274 int cpu_id) 275 { return; } 276 #endif /* FEATURE_NAPI */ 277 278 #if defined(HIF_IRQ_AFFINITY) && defined(FEATURE_NAPI) 279 /* 280 * prototype signatures 281 */ 282 int hif_napi_cpu_init(struct hif_opaque_softc *hif); 283 int hif_napi_cpu_deinit(struct hif_opaque_softc *hif); 284 285 int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action); 286 int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on); 287 288 int hif_napi_cpu_blacklist(struct qca_napi_data *napid, 289 enum qca_blacklist_op op); 290 291 /* not directly related to irq affinity, but oh well */ 292 void hif_napi_stats(struct qca_napi_data *napid); 293 void hif_napi_update_yield_stats(struct CE_state *ce_state, 294 bool time_limit_reached, 295 bool rxpkt_thresh_reached); 296 #else 297 struct qca_napi_data; 298 static inline int hif_napi_cpu_init(struct hif_opaque_softc *hif) 299 { return 0; } 300 301 static inline int hif_napi_cpu_deinit(struct hif_opaque_softc *hif) 302 { return 0; } 303 304 static inline int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, 305 int action) 306 { return 0; } 307 308 static inline int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on) 309 { return -EPERM; } 310 311 static inline void hif_napi_stats(struct qca_napi_data *napid) { } 312 static inline void hif_napi_update_yield_stats(struct CE_state *ce_state, 313 bool time_limit_reached, 314 bool rxpkt_thresh_reached) { } 315 316 static inline int hif_napi_cpu_blacklist(struct qca_napi_data *napid, 317 enum qca_blacklist_op op) 318 { return 0; } 319 #endif /* HIF_IRQ_AFFINITY */ 320 321 #endif /* __HIF_NAPI_H__ */ 322