/*
 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __CE_H__
#define __CE_H__

#include "qdf_atomic.h"
#include "qdf_lock.h"
#include "hif_main.h"
#include "qdf_util.h"
#include "hif_exec.h"

#ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr) \
        (x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr))
#else
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
#endif

/*
 * Maximum number of times to check for pending Tx/Rx completions on a
 * copy engine within a single interrupt pass; the count should be large
 * enough for normal operation. Once this threshold is hit, no further
 * Tx/Rx completion checks are made in the same interrupt handling.
 * Note that this threshold is currently only used for Rx interrupt
 * processing; it can be applied to Tx as well if an infinite loop is
 * suspected while checking for pending Tx completions.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

#define CE_HTT_T2H_MSG 1
#define CE_HTT_H2T_MSG 4

#define CE_OFFSET 0x00000400
#define CE_USEFUL_SIZE 0x00000058
#define CE_ALL_BITMAP 0xFFFF

#define HIF_REQUESTED_EVENTS 20

/**
 * enum ce_id_type
 *
 * @ce_id_type: Copy engine ID
 */
enum ce_id_type {
        CE_ID_0,
        CE_ID_1,
        CE_ID_2,
        CE_ID_3,
        CE_ID_4,
        CE_ID_5,
        CE_ID_6,
        CE_ID_7,
        CE_ID_8,
        CE_ID_9,
        CE_ID_10,
        CE_ID_11,
        CE_ID_MAX
};

/**
 * enum ce_buckets
 *
 * @ce_buckets: CE tasklet time buckets
 * @CE_BUCKET_500_US: tasklet bucket to store 0-0.5ms
 * @CE_BUCKET_1_MS: tasklet bucket to store 0.5-1ms
 * @CE_BUCKET_2_MS: tasklet bucket to store 1-2ms
 * @CE_BUCKET_5_MS: tasklet bucket to store 2-5ms
 * @CE_BUCKET_10_MS: tasklet bucket to store 5-10ms
 * @CE_BUCKET_BEYOND: tasklet bucket to store > 10ms
 * @CE_BUCKET_MAX: enum max value
 */
#ifdef CE_TASKLET_DEBUG_ENABLE
enum ce_buckets {
        CE_BUCKET_500_US,
        CE_BUCKET_1_MS,
        CE_BUCKET_2_MS,
        CE_BUCKET_5_MS,
        CE_BUCKET_10_MS,
        CE_BUCKET_BEYOND,
        CE_BUCKET_MAX,
};
#endif

enum ce_target_type {
        CE_SVC_LEGACY,
        CE_SVC_SRNG,
        CE_MAX_TARGET_TYPE
};

enum ol_ath_hif_pkt_ecodes {
        HIF_PIPE_NO_RESOURCE = 0
};

struct HIF_CE_state;

/* Per-pipe state. */
struct HIF_CE_pipe_info {
        /* Handle of underlying Copy Engine */
        struct CE_handle *ce_hdl;

        /* Our pipe number; facilitates use of pipe_info ptrs. */
        uint8_t pipe_num;

        /* Convenience back pointer to HIF_CE_state. */
        struct HIF_CE_state *HIF_CE_state;

        /* Instantaneous number of receive buffers that should be posted */
        atomic_t recv_bufs_needed;
        qdf_size_t buf_sz;
        qdf_spinlock_t recv_bufs_needed_lock;

        qdf_spinlock_t completion_freeq_lock;
        /* Limit the number of outstanding send requests. */
        int num_sends_allowed;

        /* Counts for debugging ring buffer errors */
        uint32_t nbuf_alloc_err_count;
        uint32_t nbuf_dma_err_count;
        uint32_t nbuf_ce_enqueue_err_count;
        struct hif_msg_callbacks pipe_callbacks;
};
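
/*
 * Illustrative sketch only (compiled out by default): shows how the
 * CE_TXRX_COMP_CHECK_THRESHOLD bound defined above is meant to cap a
 * completion-drain loop so a single interrupt pass cannot spin forever
 * on a continuously refilling ring. The callback names and the
 * CE_H_USAGE_SKETCH guard are hypothetical and not part of the driver API.
 */
#ifdef CE_H_USAGE_SKETCH
static inline void ce_drain_completions_sketch(bool (*has_pending)(void *ctx),
                                               void (*process_one)(void *ctx),
                                               void *ctx)
{
        int checks = 0;

        while (has_pending(ctx) && checks < CE_TXRX_COMP_CHECK_THRESHOLD) {
                process_one(ctx);
                checks++;
        }
        /* Work still pending after the threshold is left for the next
         * interrupt (or for polling) rather than being drained here.
         */
}
#endif /* CE_H_USAGE_SKETCH */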

/**
 * struct ce_tasklet_entry
 *
 * @intr_tq: interrupt tasklet
 * @ce_id: copy engine ID
 * @inited: whether the tasklet has been initialized
 * @hif_ce_state: opaque pointer to the owning HIF CE state
 */
struct ce_tasklet_entry {
        struct tasklet_struct intr_tq;
        enum ce_id_type ce_id;
        bool inited;
        void *hif_ce_state;
};

static inline bool hif_dummy_grp_done(struct hif_exec_context *grp_entry,
                                      int work_done)
{
        return true;
}

extern struct hif_execution_ops tasklet_sched_ops;
extern struct hif_execution_ops napi_sched_ops;

/**
 * struct ce_stats
 *
 * @ce_per_cpu: stats of the CEs running per CPU
 * @record_index: current index into the time record arrays
 * @tasklet_sched_entry_ts: timestamp when the tasklet was scheduled
 * @tasklet_exec_entry_ts: timestamp when the tasklet started execution
 * @tasklet_exec_time_record: execution times of the last N tasklets
 * @tasklet_sched_time_record: scheduling times of the last N tasklets
 * @ce_tasklet_exec_bucket: tasklet execution time buckets
 * @ce_tasklet_sched_bucket: tasklet time-in-queue buckets
 * @ce_tasklet_exec_last_update: latest timestamp when an execution bucket was updated
 * @ce_tasklet_sched_last_update: latest timestamp when a scheduling bucket was updated
 */
struct ce_stats {
        uint32_t ce_per_cpu[CE_COUNT_MAX][QDF_MAX_AVAILABLE_CPU];
#ifdef CE_TASKLET_DEBUG_ENABLE
        uint32_t record_index[CE_COUNT_MAX];
        uint64_t tasklet_sched_entry_ts[CE_COUNT_MAX];
        uint64_t tasklet_exec_entry_ts[CE_COUNT_MAX];
        uint64_t tasklet_exec_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
        uint64_t tasklet_sched_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
        uint64_t ce_tasklet_exec_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
        uint64_t ce_tasklet_sched_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
        uint64_t ce_tasklet_exec_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
        uint64_t ce_tasklet_sched_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
#endif
};

struct HIF_CE_state {
        struct hif_softc ol_sc;
        bool started;
        struct ce_tasklet_entry tasklets[CE_COUNT_MAX];
        struct hif_exec_context *hif_ext_group[HIF_MAX_GROUP];
        uint32_t hif_num_extgroup;
        qdf_spinlock_t keep_awake_lock;
        qdf_spinlock_t irq_reg_lock;
        unsigned int keep_awake_count;
        bool verified_awake;
        bool fake_sleep;
        qdf_timer_t sleep_timer;
        bool sleep_timer_init;
        qdf_time_t sleep_ticks;
        uint32_t ce_register_irq_done;

        struct CE_pipe_config *target_ce_config;
        struct CE_attr *host_ce_config;
        uint32_t target_ce_config_sz;
        /* Per-pipe state. */
        struct HIF_CE_pipe_info pipe_info[CE_COUNT_MAX];
        /* to be activated after BMI_DONE */
        struct hif_msg_callbacks msg_callbacks_pending;
        /* current msg callbacks in use */
        struct hif_msg_callbacks msg_callbacks_current;

        /* Target address used to signal a pending firmware event */
        uint32_t fw_indicator_address;

        /* Copy Engine used for Diagnostic Accesses */
        struct CE_handle *ce_diag;
        struct ce_stats stats;
        struct ce_ops *ce_services;
};
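
/*
 * Illustrative sketch only (compiled out by default): one plausible way a
 * measured tasklet time, in microseconds, could be mapped to the
 * enum ce_buckets ranges documented above before the corresponding
 * counter in struct ce_stats is incremented. The helper name and the
 * CE_H_USAGE_SKETCH guard are hypothetical and not part of the driver API.
 */
#if defined(CE_H_USAGE_SKETCH) && defined(CE_TASKLET_DEBUG_ENABLE)
static inline enum ce_buckets ce_time_to_bucket_sketch(uint64_t time_us)
{
        if (time_us <= 500)
                return CE_BUCKET_500_US;        /* 0 - 0.5 ms */
        if (time_us <= 1000)
                return CE_BUCKET_1_MS;          /* 0.5 - 1 ms */
        if (time_us <= 2000)
                return CE_BUCKET_2_MS;          /* 1 - 2 ms */
        if (time_us <= 5000)
                return CE_BUCKET_5_MS;          /* 2 - 5 ms */
        if (time_us <= 10000)
                return CE_BUCKET_10_MS;         /* 5 - 10 ms */
        return CE_BUCKET_BEYOND;                /* > 10 ms */
}
#endif /* CE_H_USAGE_SKETCH && CE_TASKLET_DEBUG_ENABLE */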

/*
 * HIA Map Definition
 */
struct host_interest_area_t {
        uint32_t hi_interconnect_state;
        uint32_t hi_early_alloc;
        uint32_t hi_option_flag2;
        uint32_t hi_board_data;
        uint32_t hi_board_data_initialized;
        uint32_t hi_failure_state;
        uint32_t hi_rddi_msi_num;
        uint32_t hi_pcie_perst_couple_en;
        uint32_t hi_sw_protocol_version;
};

struct shadow_reg_cfg {
        uint16_t ce_id;
        uint16_t reg_offset;
};

struct shadow_reg_v2_cfg {
        uint32_t reg_value;
};

#ifdef CONFIG_BYPASS_QMI

#define FW_SHARED_MEM (2 * 1024 * 1024)

#ifdef QCN7605_SUPPORT
struct msi_cfg {
        u16 ce_id;
        u16 msi_vector;
} qdf_packed;

struct ce_info {
        u32 rri_over_ddr_low_paddr;
        u32 rri_over_ddr_high_paddr;
        struct msi_cfg cfg[CE_COUNT_MAX];
} qdf_packed;
#endif
#endif

void hif_ce_stop(struct hif_softc *scn);
int hif_dump_ce_registers(struct hif_softc *scn);
void
hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base,
                          uint32_t address, uint32_t size);

#ifdef IPA_OFFLOAD
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
                                qdf_shared_mem_t **ce_sr,
                                uint32_t *ce_sr_ring_size,
                                qdf_dma_addr_t *ce_reg_paddr);
#else
static inline
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
                                qdf_shared_mem_t **ce_sr,
                                uint32_t *ce_sr_ring_size,
                                qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif

int hif_wlan_enable(struct hif_softc *scn);
void ce_enable_polling(void *cestate);
void ce_disable_polling(void *cestate);
void hif_wlan_disable(struct hif_softc *scn);
void hif_get_target_ce_config(struct hif_softc *scn,
                struct CE_pipe_config **target_ce_config_ret,
                uint32_t *target_ce_config_sz_ret,
                struct service_to_pipe **target_service_to_ce_map_ret,
                uint32_t *target_service_to_ce_map_sz_ret,
                struct shadow_reg_cfg **target_shadow_reg_cfg_v1_ret,
                uint32_t *shadow_cfg_v1_sz_ret);

#ifdef WLAN_FEATURE_EPPING
void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state);
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
                                           **tgt_svc_map_to_use,
                                           uint32_t *sz_tgt_svc_map_to_use);
#else
static inline
void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
{ }
static inline
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
                                           **tgt_svc_map_to_use,
                                           uint32_t *sz_tgt_svc_map_to_use)
{ }
#endif

void ce_service_register_module(enum ce_target_type target_type,
                                struct ce_ops* (*ce_attach)(void));

#endif /* __CE_H__ */