1 /*
 * Copyright (c) 2011, 2014-2018, 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HTT_TYPES__H_
21 #define _HTT_TYPES__H_
22 
23 #include <osdep.h>              /* uint16_t, dma_addr_t */
24 #include <qdf_types.h>          /* qdf_device_t */
25 #include <qdf_lock.h>           /* qdf_spinlock_t */
26 #include <qdf_timer.h>		/* qdf_timer_t */
27 #include <qdf_atomic.h>         /* qdf_atomic_inc */
28 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
29 #include <htc_api.h>            /* HTC_PACKET */
30 #include <ol_htt_api.h>
31 #include <cdp_txrx_handle.h>
/* Enables the extra rx-ring debug counters (the dbg_* fields in rx_ring) */
#define DEBUG_DMA_DONE

/* Lock type used by the HTT tx path; a spinlock on this platform */
#define HTT_TX_MUTEX_TYPE qdf_spinlock_t

#ifdef QCA_TX_HTT2_SUPPORT
#ifndef HTC_TX_HTT2_MAX_SIZE
/* Should sync to the target's implementation. */
#define HTC_TX_HTT2_MAX_SIZE    (120)
#endif
#endif /* QCA_TX_HTT2_SUPPORT */

/*
 * Set the base misclist size to the size of the htt tx copy engine
 * to guarantee that a packet on the misclist won't be freed while it
 * is sitting in the copy engine.
 */
#define HTT_HTC_PKT_MISCLIST_SIZE          2048
49 
/**
 * struct htt_htc_pkt - HTT-level wrapper around an HTC packet
 * @pdev_ctxt: opaque pointer back to the owning HTT pdev context
 * @nbuf_paddr: target-visible ("physical") address of the network buffer
 * @htc_pkt: embedded HTC packet descriptor handed to the HTC layer
 * @msdu_id: host-assigned MSDU identifier carried with the packet
 */
struct htt_htc_pkt {
	void *pdev_ctxt;
	target_paddr_t nbuf_paddr;
	HTC_PACKET htc_pkt;
	uint16_t msdu_id;
};
56 
/**
 * struct htt_htc_pkt_union - freelist node overlay for htt_htc_pkt
 *
 * While in use, the object is accessed as @u.pkt; once recycled it is
 * linked into a singly-linked freelist (see htt_pdev_t's
 * htt_htc_pkt_freelist) through @u.next.
 */
struct htt_htc_pkt_union {
	union {
		struct htt_htc_pkt pkt;
		struct htt_htc_pkt_union *next;
	} u;
};
63 
64 /*
65  * HTT host descriptor:
66  * Include the htt_tx_msdu_desc that gets downloaded to the target,
67  * but also include the HTC_FRAME_HDR and alignment padding that
68  * precede the htt_tx_msdu_desc.
69  * htc_send_data_pkt expects this header space at the front of the
70  * initial fragment (i.e. tx descriptor) that is downloaded.
71  */
72 struct htt_host_tx_desc_t {
73 	uint8_t htc_header[HTC_HEADER_LEN];
74 	/* force the tx_desc field to begin on a 4-byte boundary */
75 	union {
76 		uint32_t dummy_force_align;
77 		struct htt_tx_msdu_desc_t tx_desc;
78 	} align32;
79 };
80 
/**
 * struct htt_list_node - minimal doubly-linked list link
 * @prev: previous node in the list
 * @next: next node in the list
 */
struct htt_list_node {
	struct htt_list_node *prev;
	struct htt_list_node *next;
};
85 
/**
 * struct htt_rx_hash_entry - one paddr -> netbuf mapping in the rx hash
 * @paddr: DMA ("physical") address of the rx buffer, used as the hash key
 * @netbuf: the network buffer object that owns the data buffer at @paddr
 * @fromlist: nonzero if this entry came from the bucket's preallocated
 *            entries array (freepool) rather than a separate allocation
 *            -- NOTE(review): inferred from the name; confirm in the
 *            hash insert/remove code
 * @listnode: link into either the bucket's listhead or its freepool
 * @cookie: debug-only sanity cookie (RX_HASH_DEBUG builds)
 */
struct htt_rx_hash_entry {
	qdf_dma_addr_t paddr;
	qdf_nbuf_t netbuf;
	A_UINT8 fromlist;
	struct htt_list_node listnode;
#ifdef RX_HASH_DEBUG
	A_UINT32 cookie;
#endif
};
95 
/**
 * struct htt_rx_hash_bucket - one bucket of the rx paddr hash table
 * @listhead: list of in-use htt_rx_hash_entry nodes in this bucket
 * @entries: preallocated entry storage for this bucket
 * @freepool: list of currently unused entries from @entries
 * @count: debug-only occupancy counter (RX_HASH_DEBUG builds)
 */
struct htt_rx_hash_bucket {
	struct htt_list_node listhead;
	struct htt_rx_hash_entry *entries;
	struct htt_list_node freepool;
#ifdef RX_HASH_DEBUG
	A_UINT32 count;
#endif
};
104 
105 /*
106  * Micro controller datapath offload
107  * WLAN TX resources
108  */
109 struct htt_ipa_uc_tx_resource_t {
110 	qdf_shared_mem_t *tx_ce_idx;
111 	qdf_shared_mem_t *tx_comp_ring;
112 
113 	qdf_dma_addr_t tx_comp_idx_paddr;
114 	qdf_shared_mem_t **tx_buf_pool_strg;
115 	uint32_t alloc_tx_buf_cnt;
116 	bool ipa_smmu_mapped;
117 };
118 
119 /**
120  * struct htt_ipa_uc_rx_resource_t
121  * @rx_rdy_idx_paddr: rx ready index physical address
122  * @rx_ind_ring: rx indication ring memory info
123  * @rx_ipa_prc_done_idx: rx process done index memory info
124  * @rx2_ind_ring: rx2 indication ring memory info
125  * @rx2_ipa_prc_done_idx: rx2 process done index memory info
126  */
127 struct htt_ipa_uc_rx_resource_t {
128 	qdf_dma_addr_t rx_rdy_idx_paddr;
129 	qdf_shared_mem_t *rx_ind_ring;
130 	qdf_shared_mem_t *rx_ipa_prc_done_idx;
131 
132 	/* 2nd RX ring */
133 	qdf_shared_mem_t *rx2_ind_ring;
134 	qdf_shared_mem_t *rx2_ipa_prc_done_idx;
135 };
136 
137 /**
138  * struct ipa_uc_rx_ring_elem_t
139  * @rx_packet_paddr: rx packet physical address
140  * @vdev_id: virtual interface id
141  * @rx_packet_leng: packet length
142  */
143 #if HTT_PADDR64
144 struct ipa_uc_rx_ring_elem_t {
145 	target_paddr_t rx_packet_paddr;
146 	uint32_t vdev_id;
147 	uint32_t rx_packet_leng;
148 };
149 #else
150 struct ipa_uc_rx_ring_elem_t {
151 	target_paddr_t rx_packet_paddr;
152 	uint16_t vdev_id;
153 	uint16_t rx_packet_leng;
154 };
155 #endif
156 
/**
 * struct htt_tx_credit_t - atomically tracked tx credit deltas
 * @bus_delta: delta in bus (HTC-level) tx credits -- NOTE(review):
 *             semantics inferred from the name; confirm in the credit
 *             update code
 * @target_delta: delta in target-reported tx credits -- same caveat
 */
struct htt_tx_credit_t {
	qdf_atomic_t bus_delta;
	qdf_atomic_t target_delta;
};
161 
#if defined(HELIUMPLUS)
/**
 * msdu_ext_frag_desc:
 * semantically, this is an array of 6 of 2-tuples of
 * a 48-bit physical address and a 16 bit len field
 * with the following layout:
 * 31             16 15               0
 * |        p t r - l o w 3 2         |
 * | len             | ptr - hi 16    |
 */
struct msdu_ext_frag_desc {
	union {
		/* whole-tuple view, for 64-bit loads/stores */
		uint64_t desc64;
		struct {
			uint32_t ptr_low;	/* bits 31:0 of the paddr */
			uint32_t ptr_hi:16,	/* bits 47:32 of the paddr */
				len:16;		/* fragment length */
		} frag32;
	} u;
};

/**
 * struct msdu_ext_desc_t - MSDU extension descriptor (HELIUMPLUS)
 * @tso_flags: TSO state downloaded with the descriptor
 * @frags: up to 6 (paddr, len) scatter-gather fragments; equivalent to
 *         the flat frag_ptrN/frag_lenN pairs listed below
 */
struct msdu_ext_desc_t {
	struct qdf_tso_flags_t tso_flags;
	struct msdu_ext_frag_desc frags[6];
/*
 *	u_int32_t frag_ptr0;
 *	u_int32_t frag_len0;
 *	u_int32_t frag_ptr1;
 *	u_int32_t frag_len1;
 *	u_int32_t frag_ptr2;
 *	u_int32_t frag_len2;
 *	u_int32_t frag_ptr3;
 *	u_int32_t frag_len3;
 *	u_int32_t frag_ptr4;
 *	u_int32_t frag_len4;
 *	u_int32_t frag_ptr5;
 *	u_int32_t frag_len5;
 */
};
#endif  /* defined(HELIUMPLUS) */
202 
203 /**
204  * struct mon_channel
205  * @ch_num: Monitor mode capture channel number
206  * @ch_freq: channel frequency.
207  */
208 struct mon_channel {
209 	uint32_t ch_num;
210 	uint32_t ch_freq;
211 };
212 
/**
 * struct htt_pdev_t - per-device state of the host HTT layer
 *
 * Holds the handles to the control/txrx pdev and HTC layers, the tx
 * descriptor pool, rx ring bookkeeping, IPA micro-controller offload
 * resources, tx credit tracking, and assorted debug counters.
 */
struct htt_pdev_t {
	struct cdp_cfg *ctrl_pdev;	/* control-plane pdev handle */
	ol_txrx_pdev_handle txrx_pdev;	/* data-path (txrx) pdev handle */
	HTC_HANDLE htc_pdev;		/* HTC layer handle */
	qdf_device_t osdev;		/* OS/DMA abstraction handle */

	HTC_ENDPOINT_ID htc_tx_endpoint;

#ifdef QCA_TX_HTT2_SUPPORT
	/* secondary tx endpoint and its max frame size (HTT2 service) */
	HTC_ENDPOINT_ID htc_tx_htt2_endpoint;
	uint16_t htc_tx_htt2_max_size;
#endif /* QCA_TX_HTT2_SUPPORT */

#ifdef ATH_11AC_TXCOMPACT
	HTT_TX_MUTEX_TYPE txnbufq_mutex;	/* protects txnbufq */
	qdf_nbuf_queue_t txnbufq;
	struct htt_htc_pkt_union *htt_htc_pkt_misclist;
#endif

	/* freelist of recycled htt_htc_pkt wrappers (see htt_htc_pkt_union) */
	struct htt_htc_pkt_union *htt_htc_pkt_freelist;
	/* host/target configuration flags */
	struct {
		int is_high_latency;
		int is_full_reorder_offload;
		int default_tx_comp_req;
		int ce_classify_enabled;
		uint8_t is_first_wakeup_packet;
		/*
		 * To track if credit reporting through
		 * HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND is enabled/disabled.
		 * In Genoa(QCN7605) credits are reported through
		 * HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND only.
		 */
		u8 credit_update_enabled;
		/* Explicitly request TX completions. */
		u8 request_tx_comp;
	} cfg;
	/* target version (major.minor) */
	struct {
		uint8_t major;
		uint8_t minor;
	} tgt_ver;
#if defined(HELIUMPLUS)
	/* wifi chip IP version; tested/set via HTT_WIFI_IP()/HTT_SET_WIFI_IP() */
	struct {
		u_int8_t major;
		u_int8_t minor;
	} wifi_ip_ver;
#endif /* defined(HELIUMPLUS) */
	struct {
		struct {
			/*
			 * Ring of network buffer objects -
			 * This ring is used exclusively by the host SW.
			 * This ring mirrors the dev_addrs_ring that is shared
			 * between the host SW and the MAC HW.
			 * The host SW uses this netbufs ring to locate the nw
			 * buffer objects whose data buffers the HW has filled.
			 */
			qdf_nbuf_t *netbufs_ring;
			/*
			 * Ring of buffer addresses -
			 * This ring holds the "physical" device address of the
			 * rx buffers the host SW provides for MAC HW to fill.
			 */
#if HTT_PADDR64
			uint64_t *paddrs_ring;
#else   /* ! HTT_PADDR64 */
			uint32_t *paddrs_ring;
#endif
			qdf_dma_mem_context(memctx);
		} buf;
		/*
		 * Base address of ring, as a "physical" device address rather
		 * than a CPU address.
		 */
		qdf_dma_addr_t base_paddr;
		int32_t  size;	/* how many elems in the ring (power of 2) */
		uint32_t size_mask;	/* size - 1, at least 16 bits long */

		int fill_level; /* how many rx buffers to keep in the ring */
		/* # of rx buffers (full+empty) in the ring */
		qdf_atomic_t fill_cnt;
		int pop_fail_cnt;   /* # of nebuf pop failures */

		/*
		 * target_idx -
		 * Without reorder offload:
		 * not used
		 * With reorder offload:
		 * points to the location in the rx ring from which rx buffers
		 * are available to copy into the MAC DMA ring
		 */
		struct {
			uint32_t *vaddr;
			qdf_dma_addr_t paddr;
			qdf_dma_mem_context(memctx);
		} target_idx;

		/*
		 * alloc_idx/host_idx -
		 * Without reorder offload:
		 * where HTT SW has deposited empty buffers
		 * This is allocated in consistent mem, so that the FW can read
		 * this variable, and program the HW's FW_IDX reg with the value
		 * of this shadow register
		 * With reorder offload:
		 * points to the end of the available free rx buffers
		 */
		struct {
			uint32_t *vaddr;
			qdf_dma_addr_t paddr;
			qdf_dma_mem_context(memctx);
		} alloc_idx;

		/*
		 * sw_rd_idx -
		 * where HTT SW has processed bufs filled by rx MAC DMA
		 */
		struct {
			unsigned int msdu_desc;
			unsigned int msdu_payld;
		} sw_rd_idx;

		/*
		 * refill_retry_timer - timer triggered when the ring is not
		 * refilled to the level expected
		 */
		qdf_timer_t refill_retry_timer;

		/*
		 * refill_ref_cnt - ref cnt for Rx buffer replenishment - this
		 * variable is used to guarantee that only one thread tries
		 * to replenish Rx ring.
		 */
		qdf_atomic_t   refill_ref_cnt;
		qdf_spinlock_t refill_lock;
		qdf_atomic_t   refill_debt;
#ifdef DEBUG_DMA_DONE
		uint32_t dbg_initial_msdu_payld;
		uint32_t dbg_mpdu_range;
		uint32_t dbg_mpdu_count;
		uint32_t dbg_ring_idx;
		uint32_t dbg_refill_cnt;
		uint32_t dbg_sync_success;
#endif
#ifdef HTT_RX_RESTORE
		int rx_reset;
		uint8_t htt_rx_restore;
#endif
		/* protects the rx hash table below */
		qdf_spinlock_t rx_hash_lock;
		/* hash of rx netbufs keyed by buffer paddr (htt_rx_hash_entry) */
		struct htt_rx_hash_bucket **hash_table;
		/*
		 * byte offset of the listnode field inside htt_rx_hash_entry -
		 * presumably used to recover the entry from a list node; confirm
		 */
		uint32_t listnode_offset;
		/* NOTE(review): looks like "rx buffers SMMU-mapped for IPA" - confirm */
		bool smmu_map;
	} rx_ring;

#ifndef CONFIG_HL_SUPPORT
	struct {
		qdf_atomic_t fill_cnt;          /* # of buffers in pool */
		qdf_atomic_t refill_low_mem;    /* if set refill the ring */
		qdf_nbuf_t *netbufs_ring;
		qdf_spinlock_t rx_buff_pool_lock;
	} rx_buff_pool;
#endif

#ifdef CONFIG_HL_SUPPORT
	int rx_desc_size_hl;
#endif
	long rx_fw_desc_offset;
	int rx_mpdu_range_offset_words;
	int rx_ind_msdu_byte_idx;

	/* pool of HTT tx descriptors */
	struct {
		int size;       /* of each HTT tx desc */
		uint16_t pool_elems;	/* total descriptors in the pool */
		uint16_t alloc_cnt;	/* descriptors currently allocated */
		struct qdf_mem_multi_page_t desc_pages;
		/* freelist head; layout managed by the tx desc code (not here) */
		uint32_t *freelist;
		qdf_dma_mem_context(memctx);
	} tx_descs;
#if defined(HELIUMPLUS)
	/* pool of fragment / MSDU-extension descriptors (see msdu_ext_desc_t) */
	struct {
		int size; /* of each Fragment/MSDU-Ext descriptor */
		int pool_elems;
		struct qdf_mem_multi_page_t desc_pages;
		qdf_dma_mem_context(memctx);
	} frag_descs;
#endif /* defined(HELIUMPLUS) */

	int download_len;
	/* second-stage tx send completion callback */
	void (*tx_send_complete_part2)(void *pdev, A_STATUS status,
				       qdf_nbuf_t msdu, uint16_t msdu_id);

	HTT_TX_MUTEX_TYPE htt_tx_mutex;
	HTT_TX_MUTEX_TYPE credit_mutex;

	struct {
		int htc_err_cnt;
	} stats;
#ifdef CONFIG_HL_SUPPORT
	int cur_seq_num_hl;
#endif
	struct targetdef_s *targetdef;
	struct ce_reg_def *target_ce_def;

	/* IPA micro-controller offload resources (tx/rx) */
	struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc;
	struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc;
	int is_ipa_uc_enabled;

	struct htt_tx_credit_t htt_tx_credit;

#ifdef DEBUG_RX_RING_BUFFER
	struct rx_buf_debug *rx_buff_list;
	qdf_spinlock_t       rx_buff_list_lock;
	int rx_buff_index;
	int rx_buff_posted_cum;
	int rx_buff_recvd_cum;
	int rx_buff_recvd_err;
#endif
	/*
	 * Counters below are being invoked from functions defined outside of
	 * DEBUG_RX_RING_BUFFER
	 */
	int rx_buff_debt_invoked;
	int rx_buff_fill_n_invoked;
	int refill_retry_timer_starts;
	int refill_retry_timer_calls;
	int refill_retry_timer_doubles;

	/* callback function for packetdump */
	tp_rx_pkt_dump_cb rx_pkt_dump_cb;

	struct mon_channel mon_ch_info;

	/* Flag to indicate whether new htt format is supported */
	bool new_htt_format_enabled;
};
447 
/* Extract the HTC tx endpoint ID from an opaque HTT pdev handle */
#define HTT_EPID_GET(_htt_pdev_hdl)  \
	(((struct htt_pdev_t *)(_htt_pdev_hdl))->htc_tx_endpoint)
450 
451 #if defined(HELIUMPLUS)
/* True if the pdev's wifi IP version is exactly major x, minor y */
#define HTT_WIFI_IP(pdev, x, y) (((pdev)->wifi_ip_ver.major == (x)) &&	\
				 ((pdev)->wifi_ip_ver.minor == (y)))

/*
 * Record the wifi IP version (major.minor) in the pdev.
 *
 * Bug fix: the previous implementation chained the two assignments
 * with &&, so whenever the major version evaluated to 0 the
 * short-circuit skipped the minor assignment entirely.  The comma
 * operator guarantees both fields are always written while keeping
 * the macro usable as an expression.
 */
#define HTT_SET_WIFI_IP(pdev, x, y) (((pdev)->wifi_ip_ver.major = (x)), \
				     ((pdev)->wifi_ip_ver.minor = (y)))
457 #endif /* defined(HELIUMPLUS) */
458 
459 #endif /* _HTT_TYPES__H_ */
460