/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __CE_H__
#define __CE_H__

#include "qdf_atomic.h"
#include "qdf_lock.h"
#include "hif_main.h"
#include "qdf_util.h"
#include "hif_exec.h"

/*
 * DATA_CE_UPDATE_SWINDEX - refresh a data-CE source-ring SW index.
 *
 * By default (DATA_CE_SW_INDEX_NO_INLINE_UPDATE not defined) this reads
 * the current SW read index back from its DDR shadow copy and stores it
 * into @x. When DATA_CE_SW_INDEX_NO_INLINE_UPDATE is defined the macro
 * expands to nothing and @x is left untouched.
 */
#ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr) \
	(x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr))
#else
#define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
#endif

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we'll not check for any Tx/Rx completion in the same
 * interrupt handling. Note that this threshold is only used for
 * Rx interrupt processing; it can be used for Tx as well if we
 * suspect any infinite loop in checking for pending Tx completion.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/* HTT message pipe IDs: target-to-host and host-to-target */
#define CE_HTT_T2H_MSG 1
#define CE_HTT_H2T_MSG 4

#define CE_OFFSET 0x00000400
#define CE_USEFUL_SIZE 0x00000058
/* Bitmap selecting all copy engines */
#define CE_ALL_BITMAP 0xFFFF

/* Depth of the per-CE tasklet execution/scheduling time history */
#define HIF_REQUESTED_EVENTS 20

/**
 * enum ce_id_type - Copy engine ID
 *
 * One enumerator per copy engine instance; CE_ID_12..CE_ID_15 exist
 * only on QCN9224 targets. CE_ID_MAX is the number of IDs, not an ID.
 */
enum ce_id_type {
	CE_ID_0,
	CE_ID_1,
	CE_ID_2,
	CE_ID_3,
	CE_ID_4,
	CE_ID_5,
	CE_ID_6,
	CE_ID_7,
	CE_ID_8,
	CE_ID_9,
	CE_ID_10,
	CE_ID_11,
#ifdef QCA_WIFI_QCN9224
	CE_ID_12,
	CE_ID_13,
	CE_ID_14,
	CE_ID_15,
#endif
	CE_ID_MAX
};

/**
 * enum ce_buckets - CE tasklet time buckets (histogram bins)
 *
 * @CE_BUCKET_500_US: tasklet bucket to store 0-0.5ms
 * @CE_BUCKET_1_MS: tasklet bucket to store 0.5-1ms
 * @CE_BUCKET_2_MS: tasklet bucket to store 1-2ms
 * @CE_BUCKET_5_MS: tasklet bucket to store 2-5ms
 * @CE_BUCKET_10_MS: tasklet bucket to store 5-10ms
 * @CE_BUCKET_BEYOND: tasklet bucket to store > 10ms
 * @CE_BUCKET_MAX: enum max value (number of buckets)
 */
#ifdef CE_TASKLET_DEBUG_ENABLE
enum ce_buckets {
	CE_BUCKET_500_US,
	CE_BUCKET_1_MS,
	CE_BUCKET_2_MS,
	CE_BUCKET_5_MS,
	CE_BUCKET_10_MS,
	CE_BUCKET_BEYOND,
	CE_BUCKET_MAX,
};
#endif

/*
 * Flavor of CE service implementation: legacy register-based rings or
 * SRNG-based rings. Used to register the matching ce_ops table.
 */
enum ce_target_type {
	CE_SVC_LEGACY,
	CE_SVC_SRNG,
	CE_MAX_TARGET_TYPE
};

/* HIF packet error codes */
enum ol_ath_hif_pkt_ecodes {
	HIF_PIPE_NO_RESOURCE = 0
};

struct HIF_CE_state;

/* Per-pipe state. */
struct HIF_CE_pipe_info {
	/* Handle of underlying Copy Engine */
	struct CE_handle *ce_hdl;

	/* Our pipe number; facilitates use of pipe_info ptrs. */
	uint8_t pipe_num;

	/* Convenience back pointer to HIF_CE_state. */
	struct HIF_CE_state *HIF_CE_state;

	/* Instantaneous number of receive buffers that should be posted */
	atomic_t recv_bufs_needed;
	qdf_size_t buf_sz;
	qdf_spinlock_t recv_bufs_needed_lock;

	qdf_spinlock_t completion_freeq_lock;
	/* Limit the number of outstanding send requests. */
	int num_sends_allowed;

	/* adding three counts for debugging ring buffer errors */
	uint32_t nbuf_alloc_err_count;
	uint32_t nbuf_dma_err_count;
	uint32_t nbuf_ce_enqueue_err_count;
	/* Message callbacks registered for this pipe */
	struct hif_msg_callbacks pipe_callbacks;
};

/**
 * struct ce_tasklet_entry - per-CE tasklet bookkeeping
 *
 * @intr_tq: tasklet servicing this copy engine's interrupts
 * @ce_id: ID of the copy engine this entry belongs to
 * @inited: true once the tasklet has been initialized
 * @hi_tasklet_ce: presumably marks this CE's tasklet as high-priority
 *                 scheduling — confirm against the tasklet init code
 * @hif_ce_state: opaque pointer back to the owning HIF CE state
 */
struct ce_tasklet_entry {
	struct tasklet_struct intr_tq;
	enum ce_id_type ce_id;
	bool inited;
	bool hi_tasklet_ce;
	void *hif_ce_state;
};

/*
 * Default "group done" callback: always reports the group as done,
 * regardless of how much work was performed.
 */
static inline bool hif_dummy_grp_done(struct hif_exec_context *grp_entry, int
				      work_done)
{
	return true;
}

extern struct hif_execution_ops tasklet_sched_ops;
extern struct hif_execution_ops napi_sched_ops;

/**
 * struct ce_stats - per-CE execution statistics
 *
 * @ce_per_cpu: Stats of the CEs running per CPU
 * @record_index: Current index to store in time record
 * @tasklet_sched_entry_ts: Timestamp when tasklet is scheduled
 * @tasklet_exec_entry_ts: Timestamp when tasklet execution is started
 * @tasklet_exec_time_record: Last N number of tasklets execution time
 * @tasklet_sched_time_record: Last N number of tasklets scheduled time
 * @ce_tasklet_exec_bucket: Tasklet execution time buckets
 * @ce_tasklet_sched_bucket: Tasklet time in queue buckets
 * @ce_tasklet_exec_last_update: Latest timestamp when bucket is updated
 * @ce_tasklet_sched_last_update: Latest timestamp when bucket is updated
 *
 * All fields except @ce_per_cpu exist only when CE_TASKLET_DEBUG_ENABLE
 * is defined.
 */
struct ce_stats {
	uint32_t ce_per_cpu[CE_COUNT_MAX][QDF_MAX_AVAILABLE_CPU];
#ifdef CE_TASKLET_DEBUG_ENABLE
	uint32_t record_index[CE_COUNT_MAX];
	uint64_t tasklet_sched_entry_ts[CE_COUNT_MAX];
	uint64_t tasklet_exec_entry_ts[CE_COUNT_MAX];
	uint64_t tasklet_exec_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
	uint64_t tasklet_sched_time_record[CE_COUNT_MAX][HIF_REQUESTED_EVENTS];
	uint64_t ce_tasklet_exec_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
	uint64_t ce_tasklet_sched_bucket[CE_COUNT_MAX][CE_BUCKET_MAX];
	uint64_t ce_tasklet_exec_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
	uint64_t ce_tasklet_sched_last_update[CE_COUNT_MAX][CE_BUCKET_MAX];
#endif
};

/*
 * Top-level HIF copy-engine state. Embeds the generic hif_softc as its
 * first member (so the two can be converted between each other).
 */
struct HIF_CE_state {
	struct hif_softc ol_sc;
	bool started;
	struct ce_tasklet_entry tasklets[CE_COUNT_MAX];
	struct hif_exec_context *hif_ext_group[HIF_MAX_GROUP];
	uint32_t hif_num_extgroup;
	qdf_spinlock_t keep_awake_lock;
	qdf_spinlock_t irq_reg_lock;
	unsigned int keep_awake_count;
	bool verified_awake;
	bool fake_sleep;
	qdf_timer_t sleep_timer;
	bool sleep_timer_init;
	qdf_time_t sleep_ticks;
	/* Bitmap of CEs whose IRQ registration has completed */
	uint32_t ce_register_irq_done;

	struct CE_pipe_config *target_ce_config;
	struct CE_attr *host_ce_config;
	uint32_t target_ce_config_sz;
	/* Per-pipe state. */
	struct HIF_CE_pipe_info pipe_info[CE_COUNT_MAX];
	/* to be activated after BMI_DONE */
	struct hif_msg_callbacks msg_callbacks_pending;
	/* current msg callbacks in use */
	struct hif_msg_callbacks msg_callbacks_current;

	/* Target address used to signal a pending firmware event */
	uint32_t fw_indicator_address;

	/* Copy Engine used for Diagnostic Accesses */
	struct CE_handle *ce_diag;
	struct ce_stats stats;
	/* Dispatch table of the active CE service implementation */
	struct ce_ops *ce_services;
	/* Service-to-pipe mapping table and its entry count */
	struct service_to_pipe *tgt_svc_map;
	int sz_tgt_svc_map;
};

/*
 * HIA Map Definition
 */
struct host_interest_area_t {
	uint32_t hi_interconnect_state;
	uint32_t hi_early_alloc;
	uint32_t hi_option_flag2;
	uint32_t hi_board_data;
	uint32_t hi_board_data_initialized;
	uint32_t hi_failure_state;
	uint32_t hi_rddi_msi_num;
	uint32_t hi_pcie_perst_couple_en;
	uint32_t hi_sw_protocol_version;
};

/* Maps a CE ID to the register offset it shadows */
struct shadow_reg_cfg {
	uint16_t ce_id;
	uint16_t reg_offset;
};

struct shadow_reg_v2_cfg {
	uint32_t reg_value;
};

#ifdef CONFIG_BYPASS_QMI

/* Size of the memory region shared with firmware when QMI is bypassed */
#define FW_SHARED_MEM (2 * 1024 * 1024)

#ifdef QCN7605_SUPPORT
/* Per-CE MSI vector assignment */
struct msi_cfg {
	u16 ce_id;
	u16 msi_vector;
} qdf_packed;

/* CE setup info shared with firmware: RRI-over-DDR address + MSI map */
struct ce_info {
	u32 rri_over_ddr_low_paddr;
	u32 rri_over_ddr_high_paddr;
	struct msi_cfg cfg[CE_COUNT_MAX];
} qdf_packed;
#endif
#endif

/**
 * struct ce_index - ring index snapshot of one CE (for hang events)
 *
 * @id: CE id
 * @sw_index: sw index (legacy CE)
 * @write_index: write index (legacy CE)
 * @hp: ring head pointer (SRNG CE)
 * @tp: ring tail pointer (SRNG CE)
 * @status_hp: status ring head pointer (SRNG CE)
 * @status_tp: status ring tail pointer (SRNG CE)
 *
 * Exactly one arm of the union is meaningful, depending on whether the
 * target uses legacy or SRNG copy engines.
 */
struct ce_index {
	uint8_t id;
	union {
		struct {
			uint16_t sw_index;
			uint16_t write_index;
		} legacy_info;
		struct {
			uint16_t hp;
			uint16_t tp;
			uint16_t status_hp;
			uint16_t status_tp;
		} srng_info;
	} u;
} qdf_packed;

/**
 * struct hang_event_info - CE snapshot recorded in a hang event TLV
 *
 * @tlv_header: tlv header
 * @active_tasklet_count: active tasklet count
 * @active_grp_tasklet_cnt: active grp tasklet count
 * @ce_count: number of valid entries in @ce_info
 * @ce_info: CE info (per-CE ring index snapshots)
 */
struct hang_event_info {
	uint16_t tlv_header;
	uint8_t active_tasklet_count;
	uint8_t active_grp_tasklet_cnt;
	uint8_t ce_count;
	struct ce_index ce_info[CE_COUNT_MAX];
} qdf_packed;

void hif_ce_stop(struct hif_softc *scn);
int hif_dump_ce_registers(struct hif_softc *scn);
void
hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base,
			  uint32_t address, uint32_t size);

#ifdef IPA_OFFLOAD
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_shared_mem_t **ce_sr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr);
#else
/* No-op stub when IPA offload is compiled out */
static inline
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
				qdf_shared_mem_t **ce_sr,
				uint32_t *ce_sr_ring_size,
				qdf_dma_addr_t *ce_reg_paddr)
{
}

#endif
int hif_wlan_enable(struct hif_softc *scn);
void ce_enable_polling(void *cestate);
void ce_disable_polling(void *cestate);
void hif_wlan_disable(struct hif_softc *scn);
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_v1_ret,
		uint32_t *shadow_cfg_v1_sz_ret);

#ifdef WLAN_FEATURE_EPPING
void hif_ce_prepare_epping_config(struct hif_softc *scn,
				  struct HIF_CE_state *hif_state);
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use);

#else
/* No-op stubs when the epping (echo/ping test) feature is compiled out */
static inline
void hif_ce_prepare_epping_config(struct hif_softc *scn,
				  struct HIF_CE_state *hif_state)
{ }
static inline
void
hif_select_epping_service_to_pipe_map(struct service_to_pipe
				      **tgt_svc_map_to_use,
				      uint32_t *sz_tgt_svc_map_to_use)
{ }
#endif

/* Register a ce_ops constructor for the given CE service flavor */
void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void));

#endif /* __CE_H__ */