/*
 * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __HIF_EXEC_H__
#define __HIF_EXEC_H__

#include <hif.h>
#include <hif_irq_affinity.h>
#include <linux/cpumask.h>

/* Number of buckets in the scheduling-latency histogram */
#define HIF_SCHED_LATENCY_BUCKETS 8

/* Upper bound (ms) of each latency bucket; anything above the last
 * bound falls into the final (8th) overflow bucket.
 */
/* Bucket for latency between 0 to 2 ms */
#define HIF_SCHED_LATENCY_BUCKET_0_2 2
/* Bucket for latency between 3 to 10 ms */
#define HIF_SCHED_LATENCY_BUCKET_3_10 10
/* Bucket for latency between 11 to 20 ms */
#define HIF_SCHED_LATENCY_BUCKET_11_20 20
/* Bucket for latency between 21 to 50 ms */
#define HIF_SCHED_LATENCY_BUCKET_21_50 50
/* Bucket for latency between 51 to 100 ms */
#define HIF_SCHED_LATENCY_BUCKET_51_100 100
/* Bucket for latency between 101 to 250 ms */
#define HIF_SCHED_LATENCY_BUCKET_101_250 250
/* Bucket for latency between 251 to 500 ms */
#define HIF_SCHED_LATENCY_BUCKET_251_500 500

struct hif_exec_context;

/**
 * struct hif_execution_ops - dispatch table that binds an execution
 *	context to its concrete scheduling mechanism (tasklet, NAPI, ...)
 * @context_type: human-readable name of the context type, for debugging
 * @schedule: queue the context for execution
 * @reschedule: queue the context again after it yielded with work pending
 * @kill: tear down / stop the context's scheduling mechanism
 */
struct hif_execution_ops {
	char *context_type;
	void (*schedule)(struct hif_exec_context *);
	void (*reschedule)(struct hif_exec_context *);
	void (*kill)(struct hif_exec_context *);
};

/**
 * struct hif_exec_context - per-interrupt-group execution context; only
 *	ever allocated as a subtype, e.g. hif_tasklet_exec_context or
 *	hif_napi_exec_context
 *
 * @sched_ops: schedule/reschedule/kill ops for this context's subtype
 * @hif: opaque handle to the owning HIF instance
 * @numirq: number of IRQs in this group
 *	(presumably the count of valid entries in @irq/@os_irq — verify)
 * @irq: irq handles corresponding to hw block
 * @os_irq: irq handles for irq_affinity
 * @cpumask: mask of CPUs this context may be affined to
 * @grp_id: identifier of the interrupt group serviced by this context
 * @scale_bin_shift: NOTE(review): name suggests a shift used when binning
 *	stats — confirm against the code that fills @sched_latency_stats
 * @context_name: a pointer to a const string for debugging.
 *	This should help whenever there could be ambiguity
 *	in what type of context the void *context points to
 * @context: context for the handler function to use
 * @handler: interrupt-group handler invoked when this context runs
 *	(ext_intr_handler — semantics defined where the handler is set)
 * @evt_hist: a pointer to the DP event history
 * @work_complete: function called when leaving the execution context to
 *	determine if this context should reschedule or wait for an
 *	interrupt. This function may be used as a hook for post processing.
 * @irq_enable: called when the context leaves polling mode
 * @irq_disable: called before scheduling the context
 * @irq_name: pointer to function to return irq name/string mapped to irq
 *	number
 * @sched_latency_stats: schedule latency stats for different latency
 *	buckets
 * @tstamp: timestamp when napi poll happens
 * @cpu: the cpu this context should be affined to
 * @stats: per-CPU NAPI statistics (indexed by CPU id, NR_CPUS entries)
 * @inited: set once the context has been initialized — TODO confirm setter
 * @configured: set once the context has been configured — TODO confirm
 * @irq_requested: whether the group's IRQs have been requested from the OS
 * @irq_enabled: current IRQ enable state tracked by this context
 * @irq_lock: spinlock used while enabling/disabling IRQs
 * @type: type of execution context
 * @poll_start_time: hif napi poll start time in nanoseconds
 * @force_break: flag to indicate if HIF execution context was forced to
 *	return to HIF. This means there is more work to be done. Hence do
 *	not call napi_complete.
 */
struct hif_exec_context {
	struct hif_execution_ops *sched_ops;
	struct hif_opaque_softc *hif;
	uint32_t numirq;
	uint32_t irq[HIF_MAX_GRP_IRQ];
	uint32_t os_irq[HIF_MAX_GRP_IRQ];
	cpumask_t cpumask;
	uint32_t grp_id;
	uint32_t scale_bin_shift;
	const char *context_name;
	void *context;
	ext_intr_handler handler;
	struct hif_event_history *evt_hist;

	bool (*work_complete)(struct hif_exec_context *, int work_done);
	void (*irq_enable)(struct hif_exec_context *);
	void (*irq_disable)(struct hif_exec_context *);
	const char* (*irq_name)(int irq_no);
	uint64_t sched_latency_stats[HIF_SCHED_LATENCY_BUCKETS];
	uint64_t tstamp;

	uint8_t cpu;
	struct qca_napi_stat stats[NR_CPUS];
	bool inited;
	bool configured;
	bool irq_requested;
	bool irq_enabled;
	qdf_spinlock_t irq_lock;
	enum hif_exec_type type;
	unsigned long long poll_start_time;
	bool force_break;
#ifdef HIF_CPU_PERF_AFFINE_MASK
	/* Stores the affinity hint mask for each WLAN IRQ */
	qdf_cpu_mask new_cpu_mask[HIF_MAX_GRP_IRQ];
#endif
};

/**
 * struct hif_tasklet_exec_context - exec_context for tasklets
 * @exec_ctx: inherited data type (must stay first so the subtype can be
 *	cast to/from struct hif_exec_context)
 * @tasklet: tasklet structure for scheduling
 */
struct hif_tasklet_exec_context {
	struct hif_exec_context exec_ctx;
	struct tasklet_struct tasklet;
};

/**
 * struct hif_napi_exec_context - exec_context for NAPI
 * @exec_ctx: inherited data type (must stay first so the subtype can be
 *	cast to/from struct hif_exec_context)
 * @netdev: dummy net device associated with the napi context
 * @napi: napi structure used in scheduling
 */
struct hif_napi_exec_context {
	struct hif_exec_context exec_ctx;
	struct net_device netdev; /* dummy net_dev */
	struct napi_struct napi;
};

/**
 * hif_exec_get_napi() - downcast a generic context to its NAPI subtype
 * @ctx: generic execution context
 *
 * Valid only when @ctx was allocated as a struct hif_napi_exec_context
 * (exec_ctx is the first member, so the pointer cast is well-defined).
 *
 * Return: the enclosing NAPI execution context
 */
static inline struct hif_napi_exec_context*
hif_exec_get_napi(struct hif_exec_context *ctx)
{
	return (struct hif_napi_exec_context *) ctx;
}

/**
 * hif_exec_get_tasklet() - downcast a generic context to its tasklet subtype
 * @ctx: generic execution context
 *
 * Valid only when @ctx was allocated as a struct hif_tasklet_exec_context
 * (exec_ctx is the first member, so the pointer cast is well-defined).
 *
 * Return: the enclosing tasklet execution context
 */
static inline struct hif_tasklet_exec_context*
hif_exec_get_tasklet(struct hif_exec_context *ctx)
{
	return (struct hif_tasklet_exec_context *) ctx;
}

/* Allocate an execution context of the given @type; @scale is
 * presumably the latency-stats bin scale (cf. scale_bin_shift) — verify
 * against the implementation.
 */
struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
						uint32_t scale);

/* Free a context created by hif_exec_create() */
void hif_exec_destroy(struct hif_exec_context *ctx);

/* Request/configure the group IRQs for @hif_exec on @scn.
 * Return: 0 on success, negative errno otherwise (convention — confirm).
 */
int hif_grp_irq_configure(struct hif_softc *scn,
			  struct hif_exec_context *hif_exec);
/* Top-half ISR shared by all IRQs of an interrupt group */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context);

/* Look up the execution context registered under group id @id */
struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *hif,
					  uint8_t id);
/* Stop scheduling for all execution contexts of @scn */
void hif_exec_kill(struct hif_opaque_softc *scn);

#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_pci_irq_set_affinity_hint() - API to set IRQ affinity
 * @hif_ext_group: hif_ext_group to extract the irq info
 *
 * This function will set the IRQ affinity to the gold cores
 * only for defconfig builds
 *
 * Return: none
 */
void hif_pci_irq_set_affinity_hint(
	struct hif_exec_context *hif_ext_group);
#else
/* No-op stub when perf-core affinity hinting is compiled out */
static inline void hif_pci_irq_set_affinity_hint(
	struct hif_exec_context *hif_ext_group)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
#endif