/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#ifndef __BNAD_H__
#define __BNAD_H__

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/ipv6.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>

#include <asm/checksum.h>
#include <net/ip6_checksum.h>

#include <net/ip.h>
#include <net/tcp.h>

#include "bna.h"

#define BNAD_TXQ_DEPTH		2048
#define BNAD_RXQ_DEPTH		2048

#define BNAD_MAX_TX		1
#define BNAD_MAX_TXQ_PER_TX	8	/* 8 priority queues */
#define BNAD_TXQ_NUM		1

#define BNAD_MAX_RX		1
#define BNAD_MAX_RXP_PER_RX	16
#define BNAD_MAX_RXQ_PER_RXP	2

/*
 * Control structure pointed to by ccb->ctrl, which
 * determines the NAPI / LRO behavior of the CCB.
 * There is a 1:1 correspondence between ccb & ctrl.
 */
struct bnad_rx_ctrl {
	struct bna_ccb *ccb;
	struct bnad *bnad;
	unsigned long  flags;
	struct napi_struct	napi;
	u64		rx_intr_ctr;
	u64		rx_poll_ctr;
	u64		rx_schedule;
	u64		rx_keep_poll;
	u64		rx_complete;
};
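
/*
 * Illustrative sketch only (not the driver's actual poll handler): because
 * the napi_struct is embedded in bnad_rx_ctrl, a NAPI poll callback can
 * recover the per-CCB control structure with container_of(), e.g.:
 *
 *	static int example_rx_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct bnad_rx_ctrl *rx_ctrl =
 *			container_of(napi, struct bnad_rx_ctrl, napi);
 *		struct bna_ccb *ccb = rx_ctrl->ccb;
 *
 *		// ... service completions on ccb, up to budget ...
 *		return 0;
 *	}
 */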

#define BNAD_RXMODE_PROMISC_DEFAULT	BNA_RXMODE_PROMISC

/*
 * GLOBAL #defines (CONSTANTS)
 */
#define BNAD_NAME			"bna"
#define BNAD_NAME_LEN			64

#define BNAD_MAILBOX_MSIX_INDEX		0
#define BNAD_MAILBOX_MSIX_VECTORS	1
#define BNAD_INTX_TX_IB_BITMASK		0x1
#define BNAD_INTX_RX_IB_BITMASK		0x2

#define BNAD_STATS_TIMER_FREQ		1000	/* in msecs */
#define BNAD_DIM_TIMER_FREQ		1000	/* in msecs */

#define BNAD_IOCETH_TIMEOUT		10000

#define BNAD_MIN_Q_DEPTH		512
#define BNAD_MAX_RXQ_DEPTH		16384
#define BNAD_MAX_TXQ_DEPTH		2048

#define BNAD_JUMBO_MTU			9000

#define BNAD_NETIF_WAKE_THRESHOLD	8

#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
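
/*
 * Note: a shift of 3 divides by 8, so the refill threshold works out to
 * one eighth of the relevant queue count (e.g. 2048 >> 3 = 256).
 */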

/* Bit positions for tcb->flags */
#define BNAD_TXQ_FREE_SENT		0
#define BNAD_TXQ_TX_STARTED		1

/* Bit positions for rcb->flags */
#define BNAD_RXQ_STARTED		0
#define BNAD_RXQ_POST_OK		1

/* Resource limits */
#define BNAD_NUM_TXQ			(bnad->num_tx * bnad->num_txq_per_tx)
#define BNAD_NUM_RXP			(bnad->num_rx * bnad->num_rxp_per_rx)
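
/*
 * Note: BNAD_NUM_TXQ / BNAD_NUM_RXP expand to expressions that reference a
 * variable named 'bnad' in the enclosing scope. Sketch only (assumed usage):
 *
 *	bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
 *			 BNAD_MAILBOX_MSIX_VECTORS;
 */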

#define BNAD_FRAME_SIZE(_mtu) \
	(ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
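
/*
 * Worked example: for the standard 1500-byte MTU this evaluates to
 * 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 + 4 (ETH_FCS_LEN) = 1522 bytes;
 * for BNAD_JUMBO_MTU (9000) it evaluates to 9022 bytes.
 */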

/*
 * DATA STRUCTURES
 */

/* enums */
enum bnad_intr_source {
	BNAD_INTR_TX		= 1,
	BNAD_INTR_RX		= 2
};

enum bnad_link_state {
	BNAD_LS_DOWN		= 0,
	BNAD_LS_UP		= 1
};

struct bnad_iocmd_comp {
	struct bnad		*bnad;
	struct completion	comp;
	int			comp_status;
};

struct bnad_completion {
	struct completion	ioc_comp;
	struct completion	ucast_comp;
	struct completion	mcast_comp;
	struct completion	tx_comp;
	struct completion	rx_comp;
	struct completion	stats_comp;
	struct completion	enet_comp;
	struct completion	mtu_comp;

	u8			ioc_comp_status;
	u8			ucast_comp_status;
	u8			mcast_comp_status;
	u8			tx_comp_status;
	u8			rx_comp_status;
	u8			stats_comp_status;
	u8			port_comp_status;
	u8			mtu_comp_status;
};

/* Tx Rx Control Stats */
struct bnad_drv_stats {
	u64		netif_queue_stop;
	u64		netif_queue_wakeup;
	u64		netif_queue_stopped;
	u64		tso4;
	u64		tso6;
	u64		tso_err;
	u64		tcpcsum_offload;
	u64		udpcsum_offload;
	u64		csum_help;
	u64		tx_skb_too_short;
	u64		tx_skb_stopping;
	u64		tx_skb_max_vectors;
	u64		tx_skb_mss_too_long;
	u64		tx_skb_tso_too_short;
	u64		tx_skb_tso_prepare;
	u64		tx_skb_non_tso_too_long;
	u64		tx_skb_tcp_hdr;
	u64		tx_skb_udp_hdr;
	u64		tx_skb_csum_err;
	u64		tx_skb_headlen_too_long;
	u64		tx_skb_headlen_zero;
	u64		tx_skb_frag_zero;
	u64		tx_skb_len_mismatch;
	u64		tx_skb_map_failed;

	u64		hw_stats_updates;
	u64		netif_rx_dropped;

	u64		link_toggle;
	u64		cee_toggle;

	u64		rxp_info_alloc_failed;
	u64		mbox_intr_disabled;
	u64		mbox_intr_enabled;
	u64		tx_unmap_q_alloc_failed;
	u64		rx_unmap_q_alloc_failed;

	u64		rxbuf_alloc_failed;
	u64		rxbuf_map_failed;
};

/* Complete driver stats */
struct bnad_stats {
	struct bnad_drv_stats drv_stats;
	struct bna_stats *bna_stats;
};

/* Tx / Rx Resources */
struct bnad_tx_res_info {
	struct bna_res_info res_info[BNA_TX_RES_T_MAX];
};

struct bnad_rx_res_info {
	struct bna_res_info res_info[BNA_RX_RES_T_MAX];
};

struct bnad_tx_info {
	struct bna_tx *tx; /* 1:1 between tx_info & tx */
	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
	u32 tx_id;
	struct delayed_work tx_cleanup_work;
} ____cacheline_aligned;

struct bnad_rx_info {
	struct bna_rx *rx; /* 1:1 between rx_info & rx */

	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
	u32 rx_id;
	struct work_struct rx_cleanup_work;
} ____cacheline_aligned;

struct bnad_tx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct bnad_tx_unmap {
	struct sk_buff		*skb;
	u32			nvecs;
	struct bnad_tx_vector	vectors[BFI_TX_MAX_VECTORS_PER_WI];
};

struct bnad_rx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	u32			len;
};

struct bnad_rx_unmap {
	struct page		*page;
	struct sk_buff		*skb;
	struct bnad_rx_vector	vector;
	u32			page_offset;
};

enum bnad_rxbuf_type {
	BNAD_RXBUF_NONE		= 0,
	BNAD_RXBUF_SK_BUFF	= 1,
	BNAD_RXBUF_PAGE		= 2,
	BNAD_RXBUF_MULTI_BUFF	= 3
};

#define BNAD_RXBUF_IS_SK_BUFF(_type)	((_type) == BNAD_RXBUF_SK_BUFF)
#define BNAD_RXBUF_IS_MULTI_BUFF(_type)	((_type) == BNAD_RXBUF_MULTI_BUFF)

struct bnad_rx_unmap_q {
	int			reuse_pi;
	int			alloc_order;
	u32			map_size;
	enum bnad_rxbuf_type	type;
	struct bnad_rx_unmap	unmap[] ____cacheline_aligned;
};
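
/*
 * Illustrative allocation sketch (an assumption, not necessarily the
 * driver's exact code): the trailing flexible 'unmap' array is sized at
 * allocation time, one entry per RxQ element, e.g.:
 *
 *	struct bnad_rx_unmap_q *unmap_q;
 *
 *	unmap_q = kzalloc(struct_size(unmap_q, unmap, rxq_depth), GFP_KERNEL);
 *	if (!unmap_q)
 *		return -ENOMEM;
 */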

#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
	((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)

/* Bit mask values for bnad->cfg_flags */
#define	BNAD_CF_DIM_ENABLED		0x01	/* DIM */
#define	BNAD_CF_PROMISC			0x02
#define BNAD_CF_ALLMULTI		0x04
#define	BNAD_CF_DEFAULT			0x08
#define	BNAD_CF_MSIX			0x10	/* If in MSIx mode */
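
/*
 * Usage sketch (illustrative): cfg_flags is a plain bit mask tested with
 * bitwise operators, e.g.:
 *
 *	if (bnad->cfg_flags & BNAD_CF_MSIX)
 *		// request one MSI-X vector per Tx/Rx queue plus the mailbox
 */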

/* Defines for run_flags bit-mask */
/* Set, tested & cleared using xxx_bit() functions */
/* Values indicate bit positions */
#define BNAD_RF_CEE_RUNNING		0
#define BNAD_RF_MTU_SET			1
#define BNAD_RF_MBOX_IRQ_DISABLED	2
#define BNAD_RF_NETDEV_REGISTERED	3
#define BNAD_RF_DIM_TIMER_RUNNING	4
#define BNAD_RF_STATS_TIMER_RUNNING	5
#define BNAD_RF_TX_PRIO_SET		6
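
/*
 * Usage sketch (illustrative): run_flags holds bit positions manipulated
 * with the atomic bit helpers, e.g.:
 *
 *	set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags);
 *	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
 *		del_timer_sync(&bnad->stats_timer);
 */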

struct bnad {
	struct net_device	*netdev;
	u32			id;

	/* Data path */
	struct bnad_tx_info tx_info[BNAD_MAX_TX];
	struct bnad_rx_info rx_info[BNAD_MAX_RX];

	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/*
	 * These queue numbers are global only because they are used to
	 * calculate the number of MSI-X vectors. The actual number of
	 * queues is per Tx/Rx object.
	 */
	u32		num_tx;
	u32		num_rx;
	u32		num_txq_per_tx;
	u32		num_rxp_per_rx;

	u32		txq_depth;
	u32		rxq_depth;

	u8			tx_coalescing_timeo;
	u8			rx_coalescing_timeo;

	struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned;
	struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned;

	void __iomem		*bar0;	/* BAR0 address */

	struct bna bna;

	u32		cfg_flags;
	unsigned long		run_flags;

	struct pci_dev		*pcidev;
	u64		mmio_start;
	u64		mmio_len;

	u32		msix_num;
	struct msix_entry	*msix_table;

	struct mutex		conf_mutex;
	spinlock_t		bna_lock ____cacheline_aligned;

	/* Timers */
	struct timer_list	ioc_timer;
	struct timer_list	dim_timer;
	struct timer_list	stats_timer;

	/* Control path resources, memory & irq */
	struct bna_res_info res_info[BNA_RES_T_MAX];
	struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
	struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX];
	struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX];

	struct bnad_completion bnad_completions;

	/* Burnt-in MAC address */
	u8			perm_addr[ETH_ALEN];

	struct workqueue_struct *work_q;

	/* Statistics */
	struct bnad_stats stats;

	struct bnad_diag *diag;

	char			adapter_name[BNAD_NAME_LEN];
	char			port_name[BNAD_NAME_LEN];
	char			mbox_irq_name[BNAD_NAME_LEN];
	char			wq_name[BNAD_NAME_LEN];

	/* debugfs specific data */
	char	*regdata;
	u32	reglen;
	struct dentry *bnad_dentry_files[5];
	struct dentry *port_debugfs_root;
};

struct bnad_drvinfo {
	struct bfa_ioc_attr  ioc_attr;
	struct bfa_cee_attr  cee_attr;
	struct bfa_flash_attr flash_attr;
	u32	cee_status;
	u32	flash_status;
};

/*
 * EXTERN VARIABLES
 */
extern const struct firmware *bfi_fw;

/*
 * EXTERN PROTOTYPES
 */
u32 *cna_get_firmware_buf(struct pci_dev *pdev);
/* Netdev entry point prototypes */
void bnad_set_rx_mode(struct net_device *netdev);
struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
int bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr);
int bnad_enable_default_bcast(struct bnad *bnad);
void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
void bnad_set_ethtool_ops(struct net_device *netdev);
void bnad_cb_completion(void *arg, enum bfa_status status);

/* Configuration & setup */
void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
void bnad_rx_coalescing_timeo_set(struct bnad *bnad);

int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);

/* Timer start/stop protos */
void bnad_dim_timer_start(struct bnad *bnad);

/* Statistics */
void bnad_netdev_qstats_fill(struct bnad *bnad,
			     struct rtnl_link_stats64 *stats);
void bnad_netdev_hwstats_fill(struct bnad *bnad,
			      struct rtnl_link_stats64 *stats);

/* Debugfs */
void bnad_debugfs_init(struct bnad *bnad);
void bnad_debugfs_uninit(struct bnad *bnad);

/* MACROS */
/* To set & get the stats counters */
#define BNAD_UPDATE_CTR(_bnad, _ctr)				\
				(((_bnad)->stats.drv_stats._ctr)++)

#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
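
/*
 * Usage sketch (illustrative): the counters are plain u64 fields in
 * bnad->stats.drv_stats, bumped from the data path and read back for
 * ethtool reporting, e.g.:
 *
 *	BNAD_UPDATE_CTR(bnad, tso4);
 *	u64 tso4_done = BNAD_GET_CTR(bnad, tso4);
 */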

#define bnad_enable_rx_irq_unsafe(_ccb)			\
{							\
	if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
		bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
			(_ccb)->rx_coalescing_timeo);		\
		bna_ib_ack((_ccb)->i_dbell, 0);			\
	}							\
}
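
/*
 * Usage sketch (illustrative): the "_unsafe" suffix indicates the macro
 * performs no locking of its own; the caller is assumed to provide the
 * required serialization (typically bnad->bna_lock), e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	bnad_enable_rx_irq_unsafe(ccb);
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 */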

#endif /* __BNAD_H__ */