/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
 *
 * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
 *
 ******************************************************************************/
#ifndef _RTW_XMIT_H_
#define _RTW_XMIT_H_

#include <linux/completion.h>

#define MAX_XMITBUF_SZ	(20480)	/* 20k */

#define NR_XMITBUFF	(16)

#define XMITBUF_ALIGN_SZ 512

/* xmit extension buff definition */
#define MAX_XMIT_EXTBUF_SZ	(1536)
#define NR_XMIT_EXTBUFF	(32)

#define MAX_CMDBUF_SZ	(5120)	/* previously 4096 */

#define MAX_NUMBLKS		(1)

#define XMIT_VO_QUEUE (0)
#define XMIT_VI_QUEUE (1)
#define XMIT_BE_QUEUE (2)
#define XMIT_BK_QUEUE (3)

#define VO_QUEUE_INX		0
#define VI_QUEUE_INX		1
#define BE_QUEUE_INX		2
#define BK_QUEUE_INX		3
#define BCN_QUEUE_INX		4
#define MGT_QUEUE_INX		5
#define HIGH_QUEUE_INX		6

#define HW_QUEUE_ENTRY	8

#define WEP_IV(pattrib_iv, dot11txpn, keyidx)\
do {\
	pattrib_iv[0] = dot11txpn._byte_.TSC0;\
	pattrib_iv[1] = dot11txpn._byte_.TSC1;\
	pattrib_iv[2] = dot11txpn._byte_.TSC2;\
	pattrib_iv[3] = ((keyidx & 0x3)<<6);\
	dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0 : (dot11txpn.val + 1);\
} while (0)


#define TKIP_IV(pattrib_iv, dot11txpn, keyidx)\
do {\
	pattrib_iv[0] = dot11txpn._byte_.TSC1;\
	pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f;\
	pattrib_iv[2] = dot11txpn._byte_.TSC0;\
	pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
	pattrib_iv[4] = dot11txpn._byte_.TSC2;\
	pattrib_iv[5] = dot11txpn._byte_.TSC3;\
	pattrib_iv[6] = dot11txpn._byte_.TSC4;\
	pattrib_iv[7] = dot11txpn._byte_.TSC5;\
	dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val + 1);\
} while (0)

#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
do {\
	pattrib_iv[0] = dot11txpn._byte_.TSC0;\
	pattrib_iv[1] = dot11txpn._byte_.TSC1;\
	pattrib_iv[2] = 0;\
	pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
	pattrib_iv[4] = dot11txpn._byte_.TSC2;\
	pattrib_iv[5] = dot11txpn._byte_.TSC3;\
	pattrib_iv[6] = dot11txpn._byte_.TSC4;\
	pattrib_iv[7] = dot11txpn._byte_.TSC5;\
	dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val + 1);\
} while (0)
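
/*
 * Worked byte layout (illustrative only, not used by the driver): with
 * keyidx == 1 and a packet number of 0x000000000001, AES_IV() fills the
 * 8-byte IV as
 *
 *	iv[0] = TSC0 = 0x01, iv[1] = TSC1 = 0x00, iv[2] = 0x00,
 *	iv[3] = BIT(5) | (1 << 6) = 0x60 (ExtIV set, key id 1),
 *	iv[4..7] = TSC2..TSC5 = 0x00,
 *
 * i.e. 01 00 00 60 00 00 00 00, and then advances dot11txpn.val by one,
 * wrapping back to 0 after 0xffffffffffff. The TSC0..TSC5 names refer to the
 * _byte_ view of the per-key packet-number union used by these macros.
 */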


#define HWXMIT_ENTRY	4

/* For Buffer Descriptor ring architecture */
#define TXDESC_SIZE 40

#define TXDESC_OFFSET TXDESC_SIZE

#define TXDESC_40_BYTES

struct tx_desc {
	__le32 txdw0;
	__le32 txdw1;
	__le32 txdw2;
	__le32 txdw3;
	__le32 txdw4;
	__le32 txdw5;
	__le32 txdw6;
	__le32 txdw7;

#if defined(TXDESC_40_BYTES) || defined(TXDESC_64_BYTES)
	__le32 txdw8;
	__le32 txdw9;
#endif /* TXDESC_40_BYTES || TXDESC_64_BYTES */

#ifdef TXDESC_64_BYTES
	__le32 txdw10;
	__le32 txdw11;

	/* 2008/05/15 MH: Because of the PCIE HW memory R/W 4K limit and our current
	 * descriptor size of 40 bytes, using more than 102 descriptors (103 * 40 > 4096)
	 * makes the HW hit a memory R/W CRC error, after which all DMA fetches fail.
	 * We must either decrease the descriptor count or enlarge the descriptor size
	 * to 64 bytes.
	 */
	__le32 txdw12;
	__le32 txdw13;
	__le32 txdw14;
	__le32 txdw15;
#endif
};

union txdesc {
	struct tx_desc txdesc;
	unsigned int value[TXDESC_SIZE>>2];
};
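
/*
 * Size check (illustrative): with TXDESC_SIZE == 40 the value[] view above is
 * TXDESC_SIZE >> 2 == 10 dwords, matching txdw0..txdw9 when TXDESC_40_BYTES is
 * defined. The 4K note inside struct tx_desc works out to 4096 / 40 = 102.4, so
 * at most 102 such descriptors fit below the PCIE 4K R/W boundary
 * (103 * 40 = 4120 > 4096).
 */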

struct hw_xmit {
	/* spinlock_t xmit_lock; */
	/* struct list_head	pending; */
	struct __queue *sta_queue;
	/* struct hw_txqueue *phwtxqueue; */
	/* signed int	txcmdcnt; */
	int	accnt;
};

/* reduce size */
struct pkt_attrib {
	u8 type;
	u8 subtype;
	u8 bswenc;
	u8 dhcp_pkt;
	u16 ether_type;
	u16 seqnum;
	u16 pkt_hdrlen;	/* the original 802.3 pkt header len */
	u16 hdrlen;	/* the WLAN header len */
	u32 pktlen;	/* the original 802.3 pkt raw_data len (excluding the ether_hdr) */
	u32 last_txcmdsz;
	u8 nr_frags;
	u8 encrypt;	/* 0: no encryption; non-zero: the encryption algorithm */
	u8 iv_len;
	u8 icv_len;
	u8 iv[18];
	u8 icv[16];
	u8 priority;
	u8 ack_policy;
	u8 mac_id;
	u8 vcs_mode;	/* virtual carrier sense method */
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	u8 ta[ETH_ALEN];
	u8 ra[ETH_ALEN];
	u8 key_idx;
	u8 qos_en;
	u8 ht_en;
	u8 raid;	/* rate adaptive id */
	u8 bwmode;
	u8 ch_offset;	/* PRIME_CHNL_OFFSET */
	u8 sgi;		/* short GI */
	u8 ampdu_en;	/* tx ampdu enable */
	u8 ampdu_spacing; /* ampdu_min_spacing for peer sta's rx */
	u8 mdata;	/* more data bit */
	u8 pctrl;	/* per packet txdesc control enable */
	u8 triggered;	/* for ap mode handling of Power Saving sta */
	u8 qsel;
	u8 order;	/* order bit */
	u8 eosp;
	u8 rate;
	u8 intel_proxim;
	u8 retry_ctrl;
	u8 mbssid;
	u8 ldpc;
	u8 stbc;
	struct sta_info *psta;

	u8 rtsen;
	u8 cts2self;
	union Keytype	dot11tkiptxmickey;
	/* union Keytype	dot11tkiprxmickey; */
	union Keytype	dot118021x_UncstKey;

	u8 icmp_pkt;

};

#define WLANHDR_OFFSET	64

#define NULL_FRAMETAG		(0x0)
#define DATA_FRAMETAG		0x01
#define MGNT_FRAMETAG		0x03

enum {
	XMITBUF_DATA = 0,
	XMITBUF_MGNT = 1,
	XMITBUF_CMD = 2,
};

struct submit_ctx {
	unsigned long submit_time;
	u32 timeout_ms; /* 0: wait forever, >0: wait up to timeout_ms ms */
	int status; /* status for operation */
	struct completion done;
};

enum {
	RTW_SCTX_SUBMITTED = -1,
	RTW_SCTX_DONE_SUCCESS = 0,
	RTW_SCTX_DONE_UNKNOWN,
	RTW_SCTX_DONE_TIMEOUT,
	RTW_SCTX_DONE_BUF_ALLOC,
	RTW_SCTX_DONE_BUF_FREE,
	RTW_SCTX_DONE_WRITE_PORT_ERR,
	RTW_SCTX_DONE_TX_DESC_NA,
	RTW_SCTX_DONE_TX_DENY,
	RTW_SCTX_DONE_CCX_PKT_FAIL,
	RTW_SCTX_DONE_DRV_STOP,
	RTW_SCTX_DONE_DEV_REMOVE,
	RTW_SCTX_DONE_CMD_ERROR,
};


void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
int rtw_sctx_wait(struct submit_ctx *sctx);
void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
void rtw_sctx_done(struct submit_ctx **sctx);
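
/*
 * Usage sketch (illustrative only; the exact call sites are assumptions, not
 * taken from this header): a submitter initializes a submit_ctx, attaches it
 * to the buffer it hands off (e.g. the sctx pointer in struct xmit_buf below),
 * and blocks on it; the completion path then reports one of the RTW_SCTX_*
 * codes above.
 *
 *	struct submit_ctx sctx;
 *
 *	rtw_sctx_init(&sctx, 2000);	// wait up to ~2000 ms
 *	pxmitbuf->sctx = &sctx;		// hand the context to the completer
 *	// ... submit pxmitbuf for transmission ...
 *	rtw_sctx_wait(&sctx);		// blocks on sctx.done
 *	// sctx.status now holds one of the RTW_SCTX_* codes
 *
 *	// completion side, once the buffer is finished:
 *	rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_SUCCESS);
 */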

struct xmit_buf {
	struct list_head	list;

	struct adapter *padapter;

	u8 *pallocated_buf;

	u8 *pbuf;

	void *priv_data;

	u16 buf_tag; /* 0: normal xmitbuf, 1: extension xmitbuf, 2: cmd xmitbuf */
	u16 flags;
	u32 alloc_sz;

	u32 len;

	struct submit_ctx *sctx;

	u8 *phead;
	u8 *pdata;
	u8 *ptail;
	u8 *pend;
	u32 ff_hwaddr;
	u8 pg_num;
	u8 agg_num;

#if defined(DBG_XMIT_BUF) || defined(DBG_XMIT_BUF_EXT)
	u8 no;
#endif

};


struct xmit_frame {
	struct list_head	list;

	struct pkt_attrib attrib;

	struct sk_buff *pkt;

	int	frame_tag;

	struct adapter *padapter;

	u8 *buf_addr;

	struct xmit_buf *pxmitbuf;

	u8 pg_num;
	u8 agg_num;

	u8 ack_report;

	u8 *alloc_addr; /* the actual address this xmitframe was allocated at */
	u8 ext_tag; /* 0: data, 1: mgmt */

};

struct tx_servq {
	struct list_head	tx_pending;
	struct __queue	sta_pending;
	int qcnt;
};


struct sta_xmit_priv {
	spinlock_t	lock;
	signed int	option;
	signed int	apsd_setting;	/* When bit mask is on, the associated edca queue supports APSD. */


	/* struct tx_servq blk_q[MAX_NUMBLKS]; */
	struct tx_servq	be_q;			/* priority == 0, 3 */
	struct tx_servq	bk_q;			/* priority == 1, 2 */
	struct tx_servq	vi_q;			/* priority == 4, 5 */
	struct tx_servq	vo_q;			/* priority == 6, 7 */
	struct list_head	legacy_dz;
	struct list_head	apsd;

	u16 txseq_tid[16];

	/* uint	sta_tx_bytes; */
	/* u64	sta_tx_pkts; */
	/* uint	sta_tx_fail; */


};


struct hw_txqueue {
	volatile signed int	head;
	volatile signed int	tail;
	volatile signed int	free_sz;	/* in units of 64 bytes */
	volatile signed int	free_cmdsz;
	volatile signed int	txsz[8];
	uint	ff_hwaddr;
	uint	cmd_hwaddr;
	signed int	ac_tag;
};

enum cmdbuf_type {
	CMDBUF_BEACON = 0x00,
	CMDBUF_RSVD,
	CMDBUF_MAX
};

struct xmit_priv {

	spinlock_t	lock;

	struct completion xmit_comp;
	struct completion terminate_xmitthread_comp;

	/* struct __queue	blk_strms[MAX_NUMBLKS]; */
	struct __queue	be_pending;
	struct __queue	bk_pending;
	struct __queue	vi_pending;
	struct __queue	vo_pending;
	struct __queue	bm_pending;

	/* struct __queue	legacy_dz_queue; */
	/* struct __queue	apsd_queue; */

	u8 *pallocated_frame_buf;
	u8 *pxmit_frame_buf;
	uint free_xmitframe_cnt;
	struct __queue	free_xmit_queue;

	/* uint mapping_addr; */
	/* uint pkt_sz; */

	u8 *xframe_ext_alloc_addr;
	u8 *xframe_ext;
	uint free_xframe_ext_cnt;
	struct __queue free_xframe_ext_queue;

	/* struct	hw_txqueue	be_txqueue; */
	/* struct	hw_txqueue	bk_txqueue; */
	/* struct	hw_txqueue	vi_txqueue; */
	/* struct	hw_txqueue	vo_txqueue; */
	/* struct	hw_txqueue	bmc_txqueue; */

	uint	frag_len;

	struct adapter	*adapter;

	u8 vcs_setting;
	u8 vcs;
	u8 vcs_type;
	/* u16  rts_thresh; */

	u64	tx_bytes;
	u64	tx_pkts;
	u64	tx_drop;
	u64	last_tx_pkts;

	struct hw_xmit *hwxmits;
	u8 hwxmit_entry;

	u8 wmm_para_seq[4];	/* ordering of wmm ac parameters from large to small; its values are 0->vo, 1->vi, 2->be, 3->bk. */

	void *SdioXmitThread;
	struct completion SdioXmitStart;
	struct completion SdioXmitTerminate;

	struct __queue free_xmitbuf_queue;
	struct __queue pending_xmitbuf_queue;
	u8 *pallocated_xmitbuf;
	u8 *pxmitbuf;
	uint free_xmitbuf_cnt;

	struct __queue free_xmit_extbuf_queue;
	u8 *pallocated_xmit_extbuf;
	u8 *pxmit_extbuf;
	uint free_xmit_extbuf_cnt;

	struct xmit_buf	pcmd_xmitbuf[CMDBUF_MAX];

	u16 nqos_ssn;

	int	ack_tx;
	struct mutex ack_tx_mutex;
	struct submit_ctx ack_tx_ops;
	u8 seq_no;
	spinlock_t lock_sctx;
};

extern struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
		enum cmdbuf_type buf_type);
#define rtw_alloc_cmdxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_RSVD)
#define rtw_alloc_bcnxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_BEACON)

extern struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv);
extern s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);

extern struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
extern s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);

void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz);
extern void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len);
extern s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib);
extern s32 rtw_put_snap(u8 *data, u16 h_proto);

extern struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv);
struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv);
struct xmit_frame *rtw_alloc_xmitframe_once(struct xmit_priv *pxmitpriv);
extern s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe);
extern void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue);
struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *psta, signed int up, u8 *ac);
extern s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe);

extern s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe);
extern u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib);
#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue(&f->attrib)
extern s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe);
extern s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe);
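
/*
 * Data-path sketch (illustrative ordering only; the rtw_xmit() implementation
 * is the authority, and _SUCCESS is assumed from the driver's common return
 * convention rather than declared here): a frame is taken from the free queue,
 * its pkt_attrib is filled, the payload is coalesced into the attached
 * xmit_buf, and the frame is then enqueued or released.
 *
 *	struct xmit_frame *pxframe = rtw_alloc_xmitframe(pxmitpriv);
 *
 *	if (!pxframe)
 *		return -1;			// no free xmitframe
 *	// ... fill pxframe->attrib for this skb ...
 *	if (rtw_xmitframe_coalesce(padapter, pkt, pxframe) != _SUCCESS)
 *		rtw_free_xmitframe(pxmitpriv, pxframe);
 *	else
 *		rtw_xmitframe_enqueue(padapter, pxframe);
 */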
s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv);


s32 rtw_txframes_pending(struct adapter *padapter);
void rtw_init_hwxmits(struct hw_xmit *phwxmit, signed int entry);


s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);


s32 rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);


s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
bool xmitframe_hiq_filter(struct xmit_frame *xmitframe);

signed int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe);
void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta);
void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta);
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta);

u8 query_ra_short_GI(struct sta_info *psta);

u8 qos_acm(u8 acm_mask, u8 priority);

void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
void enqueue_pending_xmitbuf_to_head(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
struct xmit_buf *dequeue_pending_xmitbuf(struct xmit_priv *pxmitpriv);
struct xmit_buf *dequeue_pending_xmitbuf_under_survey(struct xmit_priv *pxmitpriv);
signed int	check_pending_xmitbuf(struct xmit_priv *pxmitpriv);
int	rtw_xmit_thread(void *context);

u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe);

int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms);
void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status);

/* include after declaring struct xmit_buf, in order to avoid warning */
#include <xmit_osdep.h>

#endif	/* _RTW_XMIT_H_ */