1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Linux network driver for QLogic BR-series Converged Network Adapter.
4   */
5  /*
6   * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
7   * Copyright (c) 2014-2015 QLogic Corporation
8   * All rights reserved
9   * www.qlogic.com
10   */
11  #include <linux/bitops.h>
12  #include <linux/netdevice.h>
13  #include <linux/skbuff.h>
14  #include <linux/etherdevice.h>
15  #include <linux/in.h>
16  #include <linux/ethtool.h>
17  #include <linux/if_vlan.h>
18  #include <linux/if_ether.h>
19  #include <linux/ip.h>
20  #include <linux/prefetch.h>
21  #include <linux/module.h>
22  
23  #include "bnad.h"
24  #include "bna.h"
25  #include "cna.h"
26  
27  static DEFINE_MUTEX(bnad_fwimg_mutex);
28  
29  /*
30   * Module params
31   */
32  static uint bnad_msix_disable;
33  module_param(bnad_msix_disable, uint, 0444);
34  MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
35  
36  static uint bnad_ioc_auto_recover = 1;
37  module_param(bnad_ioc_auto_recover, uint, 0444);
38  MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
39  
40  static uint bna_debugfs_enable = 1;
41  module_param(bna_debugfs_enable, uint, 0644);
42  MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
43  		 " Range[false:0|true:1]");
44  
45  /*
46   * Global variables
47   */
48  static u32 bnad_rxqs_per_cq = 2;
49  static atomic_t bna_id;
50  static const u8 bnad_bcast_addr[] __aligned(2) =
51  	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
52  
53  /*
54   * Local MACROS
55   */
56  #define BNAD_GET_MBOX_IRQ(_bnad)				\
57  	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
58  	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
59  	 ((_bnad)->pcidev->irq))
60  
61  #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
62  do {								\
63  	(_res_info)->res_type = BNA_RES_T_MEM;			\
64  	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
65  	(_res_info)->res_u.mem_info.num = (_num);		\
66  	(_res_info)->res_u.mem_info.len = (_size);		\
67  } while (0)
68  
69  /*
70   * Reinitialize completions in CQ, once Rx is taken down
71   */
72  static void
73  bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
74  {
75  	struct bna_cq_entry *cmpl;
76  	int i;
77  
78  	for (i = 0; i < ccb->q_depth; i++) {
79  		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
80  		cmpl->valid = 0;
81  	}
82  }
83  
84  /* Tx Datapath functions */
85  
86  
87  /* Caller should ensure that the entry at unmap_q[index] is valid */
88  static u32
89  bnad_tx_buff_unmap(struct bnad *bnad,
90  			      struct bnad_tx_unmap *unmap_q,
91  			      u32 q_depth, u32 index)
92  {
93  	struct bnad_tx_unmap *unmap;
94  	struct sk_buff *skb;
95  	int vector, nvecs;
96  
97  	unmap = &unmap_q[index];
98  	nvecs = unmap->nvecs;
99  
100  	skb = unmap->skb;
101  	unmap->skb = NULL;
102  	unmap->nvecs = 0;
103  	dma_unmap_single(&bnad->pcidev->dev,
104  		dma_unmap_addr(&unmap->vectors[0], dma_addr),
105  		skb_headlen(skb), DMA_TO_DEVICE);
106  	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
107  	nvecs--;
108  
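	/* The remaining vectors are page fragments; a frame may span
	 * multiple unmap entries, each holding up to
	 * BFI_TX_MAX_VECTORS_PER_WI vectors.
	 */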
109  	vector = 0;
110  	while (nvecs) {
111  		vector++;
112  		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
113  			vector = 0;
114  			BNA_QE_INDX_INC(index, q_depth);
115  			unmap = &unmap_q[index];
116  		}
117  
118  		dma_unmap_page(&bnad->pcidev->dev,
119  			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
120  			dma_unmap_len(&unmap->vectors[vector], dma_len),
121  			DMA_TO_DEVICE);
122  		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
123  		nvecs--;
124  	}
125  
126  	BNA_QE_INDX_INC(index, q_depth);
127  
128  	return index;
129  }
130  
131  /*
132   * Frees all pending Tx Bufs
133   * At this point no activity is expected on the Q,
134   * so DMA unmap & freeing is fine.
135   */
136  static void
137  bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
138  {
139  	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
140  	struct sk_buff *skb;
141  	int i;
142  
143  	for (i = 0; i < tcb->q_depth; i++) {
144  		skb = unmap_q[i].skb;
145  		if (!skb)
146  			continue;
147  		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
148  
149  		dev_kfree_skb_any(skb);
150  	}
151  }
152  
153  /*
154   * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
155   * Can be called in a) Interrupt context
156   *		    b) Sending context
157   */
158  static u32
159  bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
160  {
161  	u32 sent_packets = 0, sent_bytes = 0;
162  	u32 wis, unmap_wis, hw_cons, cons, q_depth;
163  	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
164  	struct bnad_tx_unmap *unmap;
165  	struct sk_buff *skb;
166  
167  	/* Just return if TX is stopped */
168  	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
169  		return 0;
170  
171  	hw_cons = *(tcb->hw_consumer_index);
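	/* Make sure hw_consumer_index is read before the Tx queue entries
	 * it covers are inspected below.
	 */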
172  	rmb();
173  	cons = tcb->consumer_index;
174  	q_depth = tcb->q_depth;
175  
176  	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
177  	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
178  
179  	while (wis) {
180  		unmap = &unmap_q[cons];
181  
182  		skb = unmap->skb;
183  
184  		sent_packets++;
185  		sent_bytes += skb->len;
186  
187  		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
188  		wis -= unmap_wis;
189  
190  		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
191  		dev_kfree_skb_any(skb);
192  	}
193  
194  	/* Update consumer pointers. */
195  	tcb->consumer_index = hw_cons;
196  
197  	tcb->txq->tx_packets += sent_packets;
198  	tcb->txq->tx_bytes += sent_bytes;
199  
200  	return sent_packets;
201  }
202  
203  static u32
204  bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
205  {
206  	struct net_device *netdev = bnad->netdev;
207  	u32 sent = 0;
208  
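	/* BNAD_TXQ_FREE_SENT serializes completion processing with the Tx
	 * cleanup path; if it is already set, another context is freeing
	 * this queue.
	 */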
209  	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
210  		return 0;
211  
212  	sent = bnad_txcmpl_process(bnad, tcb);
213  	if (sent) {
214  		if (netif_queue_stopped(netdev) &&
215  		    netif_carrier_ok(netdev) &&
216  		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
217  				    BNAD_NETIF_WAKE_THRESHOLD) {
218  			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
219  				netif_wake_queue(netdev);
220  				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
221  			}
222  		}
223  	}
224  
225  	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
226  		bna_ib_ack(tcb->i_dbell, sent);
227  
228  	smp_mb__before_atomic();
229  	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
230  
231  	return sent;
232  }
233  
234  /* MSIX Tx Completion Handler */
235  static irqreturn_t
236  bnad_msix_tx(int irq, void *data)
237  {
238  	struct bna_tcb *tcb = (struct bna_tcb *)data;
239  	struct bnad *bnad = tcb->bnad;
240  
241  	bnad_tx_complete(bnad, tcb);
242  
243  	return IRQ_HANDLED;
244  }
245  
246  static inline void
247  bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
248  {
249  	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
250  
251  	unmap_q->reuse_pi = -1;
252  	unmap_q->alloc_order = -1;
253  	unmap_q->map_size = 0;
254  	unmap_q->type = BNAD_RXBUF_NONE;
255  }
256  
257  /* Default is page-based allocation. Multi-buffer support - TBD */
258  static int
259  bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
260  {
261  	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
262  	int order;
263  
264  	bnad_rxq_alloc_uninit(bnad, rcb);
265  
266  	order = get_order(rcb->rxq->buffer_size);
267  
268  	unmap_q->type = BNAD_RXBUF_PAGE;
269  
270  	if (bna_is_small_rxq(rcb->id)) {
271  		unmap_q->alloc_order = 0;
272  		unmap_q->map_size = rcb->rxq->buffer_size;
273  	} else {
274  		if (rcb->rxq->multi_buffer) {
275  			unmap_q->alloc_order = 0;
276  			unmap_q->map_size = rcb->rxq->buffer_size;
277  			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
278  		} else {
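			/* Single-buffer queue: carve fixed-size chunks out of
			 * the page allocation; buffers up to 2048 bytes share
			 * a page, larger buffers use the whole allocation.
			 */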
279  			unmap_q->alloc_order = order;
280  			unmap_q->map_size =
281  				(rcb->rxq->buffer_size > 2048) ?
282  				PAGE_SIZE << order : 2048;
283  		}
284  	}
285  
286  	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
287  
288  	return 0;
289  }
290  
291  static inline void
292  bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
293  {
294  	if (!unmap->page)
295  		return;
296  
297  	dma_unmap_page(&bnad->pcidev->dev,
298  			dma_unmap_addr(&unmap->vector, dma_addr),
299  			unmap->vector.len, DMA_FROM_DEVICE);
300  	put_page(unmap->page);
301  	unmap->page = NULL;
302  	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
303  	unmap->vector.len = 0;
304  }
305  
306  static inline void
307  bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
308  {
309  	if (!unmap->skb)
310  		return;
311  
312  	dma_unmap_single(&bnad->pcidev->dev,
313  			dma_unmap_addr(&unmap->vector, dma_addr),
314  			unmap->vector.len, DMA_FROM_DEVICE);
315  	dev_kfree_skb_any(unmap->skb);
316  	unmap->skb = NULL;
317  	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
318  	unmap->vector.len = 0;
319  }
320  
321  static void
322  bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
323  {
324  	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
325  	int i;
326  
327  	for (i = 0; i < rcb->q_depth; i++) {
328  		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
329  
330  		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
331  			bnad_rxq_cleanup_skb(bnad, unmap);
332  		else
333  			bnad_rxq_cleanup_page(bnad, unmap);
334  	}
335  	bnad_rxq_alloc_uninit(bnad, rcb);
336  }
337  
338  static u32
339  bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
340  {
341  	u32 alloced, prod, q_depth;
342  	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
343  	struct bnad_rx_unmap *unmap, *prev;
344  	struct bna_rxq_entry *rxent;
345  	struct page *page;
346  	u32 page_offset, alloc_size;
347  	dma_addr_t dma_addr;
348  
349  	prod = rcb->producer_index;
350  	q_depth = rcb->q_depth;
351  
352  	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
353  	alloced = 0;
354  
355  	while (nalloc--) {
356  		unmap = &unmap_q->unmap[prod];
357  
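		/* reuse_pi indexes a previous entry whose page still has
		 * unused room; carve the next buffer from it instead of
		 * allocating a fresh page.
		 */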
358  		if (unmap_q->reuse_pi < 0) {
359  			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
360  					unmap_q->alloc_order);
361  			page_offset = 0;
362  		} else {
363  			prev = &unmap_q->unmap[unmap_q->reuse_pi];
364  			page = prev->page;
365  			page_offset = prev->page_offset + unmap_q->map_size;
366  			get_page(page);
367  		}
368  
369  		if (unlikely(!page)) {
370  			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
371  			rcb->rxq->rxbuf_alloc_failed++;
372  			goto finishing;
373  		}
374  
375  		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
376  					unmap_q->map_size, DMA_FROM_DEVICE);
377  		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
378  			put_page(page);
379  			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
380  			rcb->rxq->rxbuf_map_failed++;
381  			goto finishing;
382  		}
383  
384  		unmap->page = page;
385  		unmap->page_offset = page_offset;
386  		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
387  		unmap->vector.len = unmap_q->map_size;
388  		page_offset += unmap_q->map_size;
389  
390  		if (page_offset < alloc_size)
391  			unmap_q->reuse_pi = prod;
392  		else
393  			unmap_q->reuse_pi = -1;
394  
395  		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
396  		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
397  		BNA_QE_INDX_INC(prod, q_depth);
398  		alloced++;
399  	}
400  
401  finishing:
402  	if (likely(alloced)) {
403  		rcb->producer_index = prod;
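		/* Publish the buffer and producer index updates before
		 * ringing the doorbell.
		 */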
404  		smp_mb();
405  		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
406  			bna_rxq_prod_indx_doorbell(rcb);
407  	}
408  
409  	return alloced;
410  }
411  
412  static u32
413  bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
414  {
415  	u32 alloced, prod, q_depth, buff_sz;
416  	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
417  	struct bnad_rx_unmap *unmap;
418  	struct bna_rxq_entry *rxent;
419  	struct sk_buff *skb;
420  	dma_addr_t dma_addr;
421  
422  	buff_sz = rcb->rxq->buffer_size;
423  	prod = rcb->producer_index;
424  	q_depth = rcb->q_depth;
425  
426  	alloced = 0;
427  	while (nalloc--) {
428  		unmap = &unmap_q->unmap[prod];
429  
430  		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
431  
432  		if (unlikely(!skb)) {
433  			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
434  			rcb->rxq->rxbuf_alloc_failed++;
435  			goto finishing;
436  		}
437  
438  		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
439  					  buff_sz, DMA_FROM_DEVICE);
440  		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
441  			dev_kfree_skb_any(skb);
442  			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
443  			rcb->rxq->rxbuf_map_failed++;
444  			goto finishing;
445  		}
446  
447  		unmap->skb = skb;
448  		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
449  		unmap->vector.len = buff_sz;
450  
451  		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
452  		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
453  		BNA_QE_INDX_INC(prod, q_depth);
454  		alloced++;
455  	}
456  
457  finishing:
458  	if (likely(alloced)) {
459  		rcb->producer_index = prod;
460  		smp_mb();
461  		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
462  			bna_rxq_prod_indx_doorbell(rcb);
463  	}
464  
465  	return alloced;
466  }
467  
468  static inline void
469  bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
470  {
471  	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
472  	u32 to_alloc;
473  
474  	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
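	/* Batch the refill: do nothing until at least
	 * 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT entries are free.
	 */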
475  	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
476  		return;
477  
478  	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
479  		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
480  	else
481  		bnad_rxq_refill_page(bnad, rcb, to_alloc);
482  }
483  
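/* Completion-flag combinations for which the hardware L3/L4 checksum
 * validation is trusted (see bnad_cq_process()).
 */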
484  #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
485  					BNA_CQ_EF_IPV6 | \
486  					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
487  					BNA_CQ_EF_L4_CKSUM_OK)
488  
489  #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
490  				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
491  #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
492  				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
493  #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
494  				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
495  #define flags_udp6 (BNA_CQ_EF_IPV6 | \
496  				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
497  
498  static void
499  bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
500  		    u32 sop_ci, u32 nvecs)
501  {
502  	struct bnad_rx_unmap_q *unmap_q;
503  	struct bnad_rx_unmap *unmap;
504  	u32 ci, vec;
505  
506  	unmap_q = rcb->unmap_q;
507  	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
508  		unmap = &unmap_q->unmap[ci];
509  		BNA_QE_INDX_INC(ci, rcb->q_depth);
510  
511  		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
512  			bnad_rxq_cleanup_skb(bnad, unmap);
513  		else
514  			bnad_rxq_cleanup_page(bnad, unmap);
515  	}
516  }
517  
518  static void
519  bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
520  {
521  	struct bna_rcb *rcb;
522  	struct bnad *bnad;
523  	struct bnad_rx_unmap_q *unmap_q;
524  	struct bna_cq_entry *cq, *cmpl;
525  	u32 ci, pi, totlen = 0;
526  
527  	cq = ccb->sw_q;
528  	pi = ccb->producer_index;
529  	cmpl = &cq[pi];
530  
531  	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
532  	unmap_q = rcb->unmap_q;
533  	bnad = rcb->bnad;
534  	ci = rcb->consumer_index;
535  
536  	/* prefetch header */
537  	prefetch(page_address(unmap_q->unmap[ci].page) +
538  		 unmap_q->unmap[ci].page_offset);
539  
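	/* Each completion entry describes one received buffer; attach each
	 * buffer to the skb as a page fragment.
	 */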
540  	while (nvecs--) {
541  		struct bnad_rx_unmap *unmap;
542  		u32 len;
543  
544  		unmap = &unmap_q->unmap[ci];
545  		BNA_QE_INDX_INC(ci, rcb->q_depth);
546  
547  		dma_unmap_page(&bnad->pcidev->dev,
548  			       dma_unmap_addr(&unmap->vector, dma_addr),
549  			       unmap->vector.len, DMA_FROM_DEVICE);
550  
551  		len = ntohs(cmpl->length);
552  		skb->truesize += unmap->vector.len;
553  		totlen += len;
554  
555  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
556  				   unmap->page, unmap->page_offset, len);
557  
558  		unmap->page = NULL;
559  		unmap->vector.len = 0;
560  
561  		BNA_QE_INDX_INC(pi, ccb->q_depth);
562  		cmpl = &cq[pi];
563  	}
564  
565  	skb->len += totlen;
566  	skb->data_len += totlen;
567  }
568  
569  static inline void
570  bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
571  		  struct bnad_rx_unmap *unmap, u32 len)
572  {
573  	prefetch(skb->data);
574  
575  	dma_unmap_single(&bnad->pcidev->dev,
576  			dma_unmap_addr(&unmap->vector, dma_addr),
577  			unmap->vector.len, DMA_FROM_DEVICE);
578  
579  	skb_put(skb, len);
580  	skb->protocol = eth_type_trans(skb, bnad->netdev);
581  
582  	unmap->skb = NULL;
583  	unmap->vector.len = 0;
584  }
585  
586  static u32
587  bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
588  {
589  	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
590  	struct bna_rcb *rcb = NULL;
591  	struct bnad_rx_unmap_q *unmap_q;
592  	struct bnad_rx_unmap *unmap = NULL;
593  	struct sk_buff *skb = NULL;
594  	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
595  	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
596  	u32 packets = 0, len = 0, totlen = 0;
597  	u32 pi, vec, sop_ci = 0, nvecs = 0;
598  	u32 flags, masked_flags;
599  
600  	prefetch(bnad->netdev);
601  
602  	cq = ccb->sw_q;
603  
604  	while (packets < budget) {
605  		cmpl = &cq[ccb->producer_index];
606  		if (!cmpl->valid)
607  			break;
608  		/* The 'valid' field is set by the adapter, only after writing
609  		 * the other fields of completion entry. Hence, do not load
610  		 * other fields of completion entry *before* the 'valid' is
611  		 * loaded. Adding the rmb() here prevents the compiler and/or
612  		 * CPU from reordering the reads which would potentially result
613  		 * in reading stale values in completion entry.
614  		 */
615  		rmb();
616  
617  		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
618  
619  		if (bna_is_small_rxq(cmpl->rxq_id))
620  			rcb = ccb->rcb[1];
621  		else
622  			rcb = ccb->rcb[0];
623  
624  		unmap_q = rcb->unmap_q;
625  
626  		/* start of packet ci */
627  		sop_ci = rcb->consumer_index;
628  
629  		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
630  			unmap = &unmap_q->unmap[sop_ci];
631  			skb = unmap->skb;
632  		} else {
633  			skb = napi_get_frags(&rx_ctrl->napi);
634  			if (unlikely(!skb))
635  				break;
636  		}
637  		prefetch(skb);
638  
639  		flags = ntohl(cmpl->flags);
640  		len = ntohs(cmpl->length);
641  		totlen = len;
642  		nvecs = 1;
643  
644  		/* Gather all the completions for this frame. If one is not
645  		 * valid yet, busy-waiting doesn't help much; just break out.
646  		 */
647  		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
648  		    (flags & BNA_CQ_EF_EOP) == 0) {
649  			pi = ccb->producer_index;
650  			do {
651  				BNA_QE_INDX_INC(pi, ccb->q_depth);
652  				next_cmpl = &cq[pi];
653  
654  				if (!next_cmpl->valid)
655  					break;
656  				/* The 'valid' field is set by the adapter, only
657  				 * after writing the other fields of completion
658  				 * entry. Hence, do not load other fields of
659  				 * completion entry *before* the 'valid' is
660  				 * loaded. Adding the rmb() here prevents the
661  				 * compiler and/or CPU from reordering the reads
662  				 * which would potentially result in reading
663  				 * stale values in completion entry.
664  				 */
665  				rmb();
666  
667  				len = ntohs(next_cmpl->length);
668  				flags = ntohl(next_cmpl->flags);
669  
670  				nvecs++;
671  				totlen += len;
672  			} while ((flags & BNA_CQ_EF_EOP) == 0);
673  
674  			if (!next_cmpl->valid)
675  				break;
676  		}
677  		packets++;
678  
679  		/* TODO: BNA_CQ_EF_LOCAL ? */
680  		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
681  						BNA_CQ_EF_FCS_ERROR |
682  						BNA_CQ_EF_TOO_LONG))) {
683  			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
684  			rcb->rxq->rx_packets_with_error++;
685  
686  			goto next;
687  		}
688  
689  		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
690  			bnad_cq_setup_skb(bnad, skb, unmap, len);
691  		else
692  			bnad_cq_setup_skb_frags(ccb, skb, nvecs);
693  
694  		rcb->rxq->rx_packets++;
695  		rcb->rxq->rx_bytes += totlen;
696  		ccb->bytes_per_intr += totlen;
697  
698  		masked_flags = flags & flags_cksum_prot_mask;
699  
700  		if (likely
701  		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
702  		     ((masked_flags == flags_tcp4) ||
703  		      (masked_flags == flags_udp4) ||
704  		      (masked_flags == flags_tcp6) ||
705  		      (masked_flags == flags_udp6))))
706  			skb->ip_summed = CHECKSUM_UNNECESSARY;
707  		else
708  			skb_checksum_none_assert(skb);
709  
710  		if ((flags & BNA_CQ_EF_VLAN) &&
711  		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
712  			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
713  
714  		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
715  			netif_receive_skb(skb);
716  		else
717  			napi_gro_frags(&rx_ctrl->napi);
718  
719  next:
720  		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
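		/* Invalidate the CQ entries consumed for this frame and move
		 * the CQ index past them.
		 */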
721  		for (vec = 0; vec < nvecs; vec++) {
722  			cmpl = &cq[ccb->producer_index];
723  			cmpl->valid = 0;
724  			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
725  		}
726  	}
727  
728  	napi_gro_flush(&rx_ctrl->napi, false);
729  	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
730  		bna_ib_ack_disable_irq(ccb->i_dbell, packets);
731  
732  	bnad_rxq_post(bnad, ccb->rcb[0]);
733  	if (ccb->rcb[1])
734  		bnad_rxq_post(bnad, ccb->rcb[1]);
735  
736  	return packets;
737  }
738  
739  static void
740  bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
741  {
742  	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
743  	struct napi_struct *napi = &rx_ctrl->napi;
744  
745  	if (likely(napi_schedule_prep(napi))) {
746  		__napi_schedule(napi);
747  		rx_ctrl->rx_schedule++;
748  	}
749  }
750  
751  /* MSIX Rx Path Handler */
752  static irqreturn_t
753  bnad_msix_rx(int irq, void *data)
754  {
755  	struct bna_ccb *ccb = (struct bna_ccb *)data;
756  
757  	if (ccb) {
758  		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
759  		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
760  	}
761  
762  	return IRQ_HANDLED;
763  }
764  
765  /* Interrupt handlers */
766  
767  /* Mbox Interrupt Handlers */
768  static irqreturn_t
769  bnad_msix_mbox_handler(int irq, void *data)
770  {
771  	u32 intr_status;
772  	unsigned long flags;
773  	struct bnad *bnad = (struct bnad *)data;
774  
775  	spin_lock_irqsave(&bnad->bna_lock, flags);
776  	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
777  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
778  		return IRQ_HANDLED;
779  	}
780  
781  	bna_intr_status_get(&bnad->bna, intr_status);
782  
783  	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
784  		bna_mbox_handler(&bnad->bna, intr_status);
785  
786  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
787  
788  	return IRQ_HANDLED;
789  }
790  
791  static irqreturn_t
792  bnad_isr(int irq, void *data)
793  {
794  	int i, j;
795  	u32 intr_status;
796  	unsigned long flags;
797  	struct bnad *bnad = (struct bnad *)data;
798  	struct bnad_rx_info *rx_info;
799  	struct bnad_rx_ctrl *rx_ctrl;
800  	struct bna_tcb *tcb = NULL;
801  
802  	spin_lock_irqsave(&bnad->bna_lock, flags);
803  	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
804  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
805  		return IRQ_NONE;
806  	}
807  
808  	bna_intr_status_get(&bnad->bna, intr_status);
809  
810  	if (unlikely(!intr_status)) {
811  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
812  		return IRQ_NONE;
813  	}
814  
815  	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
816  		bna_mbox_handler(&bnad->bna, intr_status);
817  
818  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
819  
820  	if (!BNA_IS_INTX_DATA_INTR(intr_status))
821  		return IRQ_HANDLED;
822  
823  	/* Process data interrupts */
824  	/* Tx processing */
825  	for (i = 0; i < bnad->num_tx; i++) {
826  		for (j = 0; j < bnad->num_txq_per_tx; j++) {
827  			tcb = bnad->tx_info[i].tcb[j];
828  			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
829  				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
830  		}
831  	}
832  	/* Rx processing */
833  	for (i = 0; i < bnad->num_rx; i++) {
834  		rx_info = &bnad->rx_info[i];
835  		if (!rx_info->rx)
836  			continue;
837  		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
838  			rx_ctrl = &rx_info->rx_ctrl[j];
839  			if (rx_ctrl->ccb)
840  				bnad_netif_rx_schedule_poll(bnad,
841  							    rx_ctrl->ccb);
842  		}
843  	}
844  	return IRQ_HANDLED;
845  }
846  
847  /*
848   * Called in interrupt / callback context
849   * with bna_lock held, so cfg_flags access is OK
850   */
851  static void
852  bnad_enable_mbox_irq(struct bnad *bnad)
853  {
854  	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
855  
856  	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
857  }
858  
859  /*
860   * Called with bnad->bna_lock held because of
861   * bnad->cfg_flags access.
862   */
863  static void
864  bnad_disable_mbox_irq(struct bnad *bnad)
865  {
866  	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
867  
868  	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
869  }
870  
871  static void
872  bnad_set_netdev_perm_addr(struct bnad *bnad)
873  {
874  	struct net_device *netdev = bnad->netdev;
875  
876  	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
877  	if (is_zero_ether_addr(netdev->dev_addr))
878  		eth_hw_addr_set(netdev, bnad->perm_addr);
879  }
880  
881  /* Control Path Handlers */
882  
883  /* Callbacks */
884  void
885  bnad_cb_mbox_intr_enable(struct bnad *bnad)
886  {
887  	bnad_enable_mbox_irq(bnad);
888  }
889  
890  void
891  bnad_cb_mbox_intr_disable(struct bnad *bnad)
892  {
893  	bnad_disable_mbox_irq(bnad);
894  }
895  
896  void
897  bnad_cb_ioceth_ready(struct bnad *bnad)
898  {
899  	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
900  	complete(&bnad->bnad_completions.ioc_comp);
901  }
902  
903  void
904  bnad_cb_ioceth_failed(struct bnad *bnad)
905  {
906  	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
907  	complete(&bnad->bnad_completions.ioc_comp);
908  }
909  
910  void
911  bnad_cb_ioceth_disabled(struct bnad *bnad)
912  {
913  	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
914  	complete(&bnad->bnad_completions.ioc_comp);
915  }
916  
917  static void
918  bnad_cb_enet_disabled(void *arg)
919  {
920  	struct bnad *bnad = (struct bnad *)arg;
921  
922  	netif_carrier_off(bnad->netdev);
923  	complete(&bnad->bnad_completions.enet_comp);
924  }
925  
926  void
927  bnad_cb_ethport_link_status(struct bnad *bnad,
928  			enum bna_link_status link_status)
929  {
930  	bool link_up = false;
931  
932  	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
933  
934  	if (link_status == BNA_CEE_UP) {
935  		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
936  			BNAD_UPDATE_CTR(bnad, cee_toggle);
937  		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
938  	} else {
939  		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
940  			BNAD_UPDATE_CTR(bnad, cee_toggle);
941  		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
942  	}
943  
944  	if (link_up) {
945  		if (!netif_carrier_ok(bnad->netdev)) {
946  			uint tx_id, tcb_id;
947  			netdev_info(bnad->netdev, "link up\n");
948  			netif_carrier_on(bnad->netdev);
949  			BNAD_UPDATE_CTR(bnad, link_toggle);
950  			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
951  				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
952  				      tcb_id++) {
953  					struct bna_tcb *tcb =
954  					bnad->tx_info[tx_id].tcb[tcb_id];
955  					u32 txq_id;
956  					if (!tcb)
957  						continue;
958  
959  					txq_id = tcb->id;
960  
961  					if (test_bit(BNAD_TXQ_TX_STARTED,
962  						     &tcb->flags)) {
963  						/*
964  						 * Force an immediate
965  						 * Transmit Schedule */
966  						netif_wake_subqueue(
967  								bnad->netdev,
968  								txq_id);
969  						BNAD_UPDATE_CTR(bnad,
970  							netif_queue_wakeup);
971  					} else {
972  						netif_stop_subqueue(
973  								bnad->netdev,
974  								txq_id);
975  						BNAD_UPDATE_CTR(bnad,
976  							netif_queue_stop);
977  					}
978  				}
979  			}
980  		}
981  	} else {
982  		if (netif_carrier_ok(bnad->netdev)) {
983  			netdev_info(bnad->netdev, "link down\n");
984  			netif_carrier_off(bnad->netdev);
985  			BNAD_UPDATE_CTR(bnad, link_toggle);
986  		}
987  	}
988  }
989  
990  static void
991  bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
992  {
993  	struct bnad *bnad = (struct bnad *)arg;
994  
995  	complete(&bnad->bnad_completions.tx_comp);
996  }
997  
998  static void
999  bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1000  {
1001  	struct bnad_tx_info *tx_info =
1002  			(struct bnad_tx_info *)tcb->txq->tx->priv;
1003  
1004  	tcb->priv = tcb;
1005  	tx_info->tcb[tcb->id] = tcb;
1006  }
1007  
1008  static void
1009  bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1010  {
1011  	struct bnad_tx_info *tx_info =
1012  			(struct bnad_tx_info *)tcb->txq->tx->priv;
1013  
1014  	tx_info->tcb[tcb->id] = NULL;
1015  	tcb->priv = NULL;
1016  }
1017  
1018  static void
1019  bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1020  {
1021  	struct bnad_rx_info *rx_info =
1022  			(struct bnad_rx_info *)ccb->cq->rx->priv;
1023  
1024  	rx_info->rx_ctrl[ccb->id].ccb = ccb;
1025  	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1026  }
1027  
1028  static void
1029  bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1030  {
1031  	struct bnad_rx_info *rx_info =
1032  			(struct bnad_rx_info *)ccb->cq->rx->priv;
1033  
1034  	rx_info->rx_ctrl[ccb->id].ccb = NULL;
1035  }
1036  
1037  static void
1038  bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1039  {
1040  	struct bnad_tx_info *tx_info = tx->priv;
1041  	struct bna_tcb *tcb;
1042  	u32 txq_id;
1043  	int i;
1044  
1045  	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1046  		tcb = tx_info->tcb[i];
1047  		if (!tcb)
1048  			continue;
1049  		txq_id = tcb->id;
1050  		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1051  		netif_stop_subqueue(bnad->netdev, txq_id);
1052  	}
1053  }
1054  
1055  static void
1056  bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1057  {
1058  	struct bnad_tx_info *tx_info = tx->priv;
1059  	struct bna_tcb *tcb;
1060  	u32 txq_id;
1061  	int i;
1062  
1063  	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1064  		tcb = tx_info->tcb[i];
1065  		if (!tcb)
1066  			continue;
1067  		txq_id = tcb->id;
1068  
1069  		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1070  		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1071  		BUG_ON(*(tcb->hw_consumer_index) != 0);
1072  
1073  		if (netif_carrier_ok(bnad->netdev)) {
1074  			netif_wake_subqueue(bnad->netdev, txq_id);
1075  			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1076  		}
1077  	}
1078  
1079  	/*
1080  	 * Workaround: the first ioceth enable can fail and leave us
1081  	 * with a zero MAC address. In that case, try to get the MAC
1082  	 * address again here.
1083  	 */
1084  	if (is_zero_ether_addr(bnad->perm_addr)) {
1085  		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1086  		bnad_set_netdev_perm_addr(bnad);
1087  	}
1088  }
1089  
1090  /*
1091   * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1092   */
1093  static void
1094  bnad_tx_cleanup(struct work_struct *work)
1095  {
1096  	struct bnad_tx_info *tx_info =
1097  		container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
1098  	struct bnad *bnad = NULL;
1099  	struct bna_tcb *tcb;
1100  	unsigned long flags;
1101  	u32 i, pending = 0;
1102  
1103  	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1104  		tcb = tx_info->tcb[i];
1105  		if (!tcb)
1106  			continue;
1107  
1108  		bnad = tcb->bnad;
1109  
1110  		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1111  			pending++;
1112  			continue;
1113  		}
1114  
1115  		bnad_txq_cleanup(bnad, tcb);
1116  
1117  		smp_mb__before_atomic();
1118  		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1119  	}
1120  
1121  	if (pending) {
1122  		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1123  			msecs_to_jiffies(1));
1124  		return;
1125  	}
1126  
1127  	spin_lock_irqsave(&bnad->bna_lock, flags);
1128  	bna_tx_cleanup_complete(tx_info->tx);
1129  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1130  }
1131  
1132  static void
1133  bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1134  {
1135  	struct bnad_tx_info *tx_info = tx->priv;
1136  	struct bna_tcb *tcb;
1137  	int i;
1138  
1139  	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1140  		tcb = tx_info->tcb[i];
1141  		if (!tcb)
1142  			continue;
1143  	}
1144  
1145  	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1146  }
1147  
1148  static void
1149  bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1150  {
1151  	struct bnad_rx_info *rx_info = rx->priv;
1152  	struct bna_ccb *ccb;
1153  	struct bnad_rx_ctrl *rx_ctrl;
1154  	int i;
1155  
1156  	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1157  		rx_ctrl = &rx_info->rx_ctrl[i];
1158  		ccb = rx_ctrl->ccb;
1159  		if (!ccb)
1160  			continue;
1161  
1162  		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1163  
1164  		if (ccb->rcb[1])
1165  			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1166  	}
1167  }
1168  
1169  /*
1170   * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1171   */
1172  static void
1173  bnad_rx_cleanup(struct work_struct *work)
1174  {
1175  	struct bnad_rx_info *rx_info =
1176  		container_of(work, struct bnad_rx_info, rx_cleanup_work);
1177  	struct bnad_rx_ctrl *rx_ctrl;
1178  	struct bnad *bnad = NULL;
1179  	unsigned long flags;
1180  	u32 i;
1181  
1182  	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1183  		rx_ctrl = &rx_info->rx_ctrl[i];
1184  
1185  		if (!rx_ctrl->ccb)
1186  			continue;
1187  
1188  		bnad = rx_ctrl->ccb->bnad;
1189  
1190  		/*
1191  		 * Wait till the poll handler has exited
1192  		 * and nothing can be scheduled anymore
1193  		 */
1194  		napi_disable(&rx_ctrl->napi);
1195  
1196  		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1197  		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1198  		if (rx_ctrl->ccb->rcb[1])
1199  			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1200  	}
1201  
1202  	spin_lock_irqsave(&bnad->bna_lock, flags);
1203  	bna_rx_cleanup_complete(rx_info->rx);
1204  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1205  }
1206  
1207  static void
1208  bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1209  {
1210  	struct bnad_rx_info *rx_info = rx->priv;
1211  	struct bna_ccb *ccb;
1212  	struct bnad_rx_ctrl *rx_ctrl;
1213  	int i;
1214  
1215  	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1216  		rx_ctrl = &rx_info->rx_ctrl[i];
1217  		ccb = rx_ctrl->ccb;
1218  		if (!ccb)
1219  			continue;
1220  
1221  		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1222  
1223  		if (ccb->rcb[1])
1224  			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1225  	}
1226  
1227  	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1228  }
1229  
1230  static void
1231  bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1232  {
1233  	struct bnad_rx_info *rx_info = rx->priv;
1234  	struct bna_ccb *ccb;
1235  	struct bna_rcb *rcb;
1236  	struct bnad_rx_ctrl *rx_ctrl;
1237  	int i, j;
1238  
1239  	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1240  		rx_ctrl = &rx_info->rx_ctrl[i];
1241  		ccb = rx_ctrl->ccb;
1242  		if (!ccb)
1243  			continue;
1244  
1245  		napi_enable(&rx_ctrl->napi);
1246  
1247  		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1248  			rcb = ccb->rcb[j];
1249  			if (!rcb)
1250  				continue;
1251  
1252  			bnad_rxq_alloc_init(bnad, rcb);
1253  			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1254  			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1255  			bnad_rxq_post(bnad, rcb);
1256  		}
1257  	}
1258  }
1259  
1260  static void
1261  bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1262  {
1263  	struct bnad *bnad = (struct bnad *)arg;
1264  
1265  	complete(&bnad->bnad_completions.rx_comp);
1266  }
1267  
1268  static void
1269  bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1270  {
1271  	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1272  	complete(&bnad->bnad_completions.mcast_comp);
1273  }
1274  
1275  void
1276  bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1277  		       struct bna_stats *stats)
1278  {
1279  	if (status == BNA_CB_SUCCESS)
1280  		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1281  
1282  	if (!netif_running(bnad->netdev) ||
1283  		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1284  		return;
1285  
1286  	mod_timer(&bnad->stats_timer,
1287  		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1288  }
1289  
1290  static void
1291  bnad_cb_enet_mtu_set(struct bnad *bnad)
1292  {
1293  	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1294  	complete(&bnad->bnad_completions.mtu_comp);
1295  }
1296  
1297  void
1298  bnad_cb_completion(void *arg, enum bfa_status status)
1299  {
1300  	struct bnad_iocmd_comp *iocmd_comp =
1301  			(struct bnad_iocmd_comp *)arg;
1302  
1303  	iocmd_comp->comp_status = (u32) status;
1304  	complete(&iocmd_comp->comp);
1305  }
1306  
1307  /* Resource allocation, free functions */
1308  
1309  static void
1310  bnad_mem_free(struct bnad *bnad,
1311  	      struct bna_mem_info *mem_info)
1312  {
1313  	int i;
1314  	dma_addr_t dma_pa;
1315  
1316  	if (mem_info->mdl == NULL)
1317  		return;
1318  
1319  	for (i = 0; i < mem_info->num; i++) {
1320  		if (mem_info->mdl[i].kva != NULL) {
1321  			if (mem_info->mem_type == BNA_MEM_T_DMA) {
1322  				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1323  						dma_pa);
1324  				dma_free_coherent(&bnad->pcidev->dev,
1325  						  mem_info->mdl[i].len,
1326  						  mem_info->mdl[i].kva, dma_pa);
1327  			} else
1328  				kfree(mem_info->mdl[i].kva);
1329  		}
1330  	}
1331  	kfree(mem_info->mdl);
1332  	mem_info->mdl = NULL;
1333  }
1334  
1335  static int
1336  bnad_mem_alloc(struct bnad *bnad,
1337  	       struct bna_mem_info *mem_info)
1338  {
1339  	int i;
1340  	dma_addr_t dma_pa;
1341  
1342  	if ((mem_info->num == 0) || (mem_info->len == 0)) {
1343  		mem_info->mdl = NULL;
1344  		return 0;
1345  	}
1346  
1347  	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1348  				GFP_KERNEL);
1349  	if (mem_info->mdl == NULL)
1350  		return -ENOMEM;
1351  
1352  	if (mem_info->mem_type == BNA_MEM_T_DMA) {
1353  		for (i = 0; i < mem_info->num; i++) {
1354  			mem_info->mdl[i].len = mem_info->len;
1355  			mem_info->mdl[i].kva =
1356  				dma_alloc_coherent(&bnad->pcidev->dev,
1357  						   mem_info->len, &dma_pa,
1358  						   GFP_KERNEL);
1359  			if (mem_info->mdl[i].kva == NULL)
1360  				goto err_return;
1361  
1362  			BNA_SET_DMA_ADDR(dma_pa,
1363  					 &(mem_info->mdl[i].dma));
1364  		}
1365  	} else {
1366  		for (i = 0; i < mem_info->num; i++) {
1367  			mem_info->mdl[i].len = mem_info->len;
1368  			mem_info->mdl[i].kva = kzalloc(mem_info->len,
1369  							GFP_KERNEL);
1370  			if (mem_info->mdl[i].kva == NULL)
1371  				goto err_return;
1372  		}
1373  	}
1374  
1375  	return 0;
1376  
1377  err_return:
1378  	bnad_mem_free(bnad, mem_info);
1379  	return -ENOMEM;
1380  }
1381  
1382  /* Free IRQ for Mailbox */
1383  static void
1384  bnad_mbox_irq_free(struct bnad *bnad)
1385  {
1386  	int irq;
1387  	unsigned long flags;
1388  
1389  	spin_lock_irqsave(&bnad->bna_lock, flags);
1390  	bnad_disable_mbox_irq(bnad);
1391  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1392  
1393  	irq = BNAD_GET_MBOX_IRQ(bnad);
1394  	free_irq(irq, bnad);
1395  }
1396  
1397  /*
1398   * Allocates IRQ for Mailbox, but keeps it disabled
1399   * This will be enabled once we get the mbox enable callback
1400   * from bna
1401   */
1402  static int
1403  bnad_mbox_irq_alloc(struct bnad *bnad)
1404  {
1405  	int		err = 0;
1406  	unsigned long	irq_flags, flags;
1407  	u32	irq;
1408  	irq_handler_t	irq_handler;
1409  
1410  	spin_lock_irqsave(&bnad->bna_lock, flags);
1411  	if (bnad->cfg_flags & BNAD_CF_MSIX) {
1412  		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1413  		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1414  		irq_flags = 0;
1415  	} else {
1416  		irq_handler = (irq_handler_t)bnad_isr;
1417  		irq = bnad->pcidev->irq;
1418  		irq_flags = IRQF_SHARED;
1419  	}
1420  
1421  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1422  	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1423  
1424  	/*
1425  	 * Set the Mbox IRQ disable flag, so that the IRQ handler
1426  	 * called from request_irq() for SHARED IRQs does not execute
1427  	 */
1428  	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1429  
1430  	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1431  
1432  	err = request_irq(irq, irq_handler, irq_flags,
1433  			  bnad->mbox_irq_name, bnad);
1434  
1435  	return err;
1436  }
1437  
1438  static void
1439  bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1440  {
1441  	kfree(intr_info->idl);
1442  	intr_info->idl = NULL;
1443  }
1444  
1445  /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1446  static int
1447  bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1448  		    u32 txrx_id, struct bna_intr_info *intr_info)
1449  {
1450  	int i, vector_start = 0;
1451  	u32 cfg_flags;
1452  	unsigned long flags;
1453  
1454  	spin_lock_irqsave(&bnad->bna_lock, flags);
1455  	cfg_flags = bnad->cfg_flags;
1456  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1457  
1458  	if (cfg_flags & BNAD_CF_MSIX) {
1459  		intr_info->intr_type = BNA_INTR_T_MSIX;
1460  		intr_info->idl = kcalloc(intr_info->num,
1461  					sizeof(struct bna_intr_descr),
1462  					GFP_KERNEL);
1463  		if (!intr_info->idl)
1464  			return -ENOMEM;
1465  
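		/* MSI-X vector layout: mailbox vector(s) first, then one
		 * vector per TxQ, then one vector per RxP.
		 */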
1466  		switch (src) {
1467  		case BNAD_INTR_TX:
1468  			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1469  			break;
1470  
1471  		case BNAD_INTR_RX:
1472  			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1473  					(bnad->num_tx * bnad->num_txq_per_tx) +
1474  					txrx_id;
1475  			break;
1476  
1477  		default:
1478  			BUG();
1479  		}
1480  
1481  		for (i = 0; i < intr_info->num; i++)
1482  			intr_info->idl[i].vector = vector_start + i;
1483  	} else {
1484  		intr_info->intr_type = BNA_INTR_T_INTX;
1485  		intr_info->num = 1;
1486  		intr_info->idl = kcalloc(intr_info->num,
1487  					sizeof(struct bna_intr_descr),
1488  					GFP_KERNEL);
1489  		if (!intr_info->idl)
1490  			return -ENOMEM;
1491  
1492  		switch (src) {
1493  		case BNAD_INTR_TX:
1494  			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1495  			break;
1496  
1497  		case BNAD_INTR_RX:
1498  			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1499  			break;
1500  		}
1501  	}
1502  	return 0;
1503  }
1504  
1505  /* NOTE: Should be called for MSIX only
1506   * Unregisters Tx MSIX vector(s) from the kernel
1507   */
1508  static void
1509  bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1510  			int num_txqs)
1511  {
1512  	int i;
1513  	int vector_num;
1514  
1515  	for (i = 0; i < num_txqs; i++) {
1516  		if (tx_info->tcb[i] == NULL)
1517  			continue;
1518  
1519  		vector_num = tx_info->tcb[i]->intr_vector;
1520  		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1521  	}
1522  }
1523  
1524  /* NOTE: Should be called for MSIX only
1525   * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1526   */
1527  static int
1528  bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1529  			u32 tx_id, int num_txqs)
1530  {
1531  	int i;
1532  	int err;
1533  	int vector_num;
1534  
1535  	for (i = 0; i < num_txqs; i++) {
1536  		vector_num = tx_info->tcb[i]->intr_vector;
1537  		snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d",
1538  			 bnad->netdev->name,
1539  			 tx_id + tx_info->tcb[i]->id);
1540  		err = request_irq(bnad->msix_table[vector_num].vector,
1541  				  (irq_handler_t)bnad_msix_tx, 0,
1542  				  tx_info->tcb[i]->name,
1543  				  tx_info->tcb[i]);
1544  		if (err)
1545  			goto err_return;
1546  	}
1547  
1548  	return 0;
1549  
1550  err_return:
1551  	if (i > 0)
1552  		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1553  	return -1;
1554  }
1555  
1556  /* NOTE: Should be called for MSIX only
1557   * Unregisters Rx MSIX vector(s) from the kernel
1558   */
1559  static void
1560  bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1561  			int num_rxps)
1562  {
1563  	int i;
1564  	int vector_num;
1565  
1566  	for (i = 0; i < num_rxps; i++) {
1567  		if (rx_info->rx_ctrl[i].ccb == NULL)
1568  			continue;
1569  
1570  		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1571  		free_irq(bnad->msix_table[vector_num].vector,
1572  			 rx_info->rx_ctrl[i].ccb);
1573  	}
1574  }
1575  
1576  /* NOTE: Should be called for MSIX only
1577   * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1578   */
1579  static int
1580  bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1581  			u32 rx_id, int num_rxps)
1582  {
1583  	int i;
1584  	int err;
1585  	int vector_num;
1586  
1587  	for (i = 0; i < num_rxps; i++) {
1588  		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1589  		snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE,
1590  			 "%s CQ %d", bnad->netdev->name,
1591  			 rx_id + rx_info->rx_ctrl[i].ccb->id);
1592  		err = request_irq(bnad->msix_table[vector_num].vector,
1593  				  (irq_handler_t)bnad_msix_rx, 0,
1594  				  rx_info->rx_ctrl[i].ccb->name,
1595  				  rx_info->rx_ctrl[i].ccb);
1596  		if (err)
1597  			goto err_return;
1598  	}
1599  
1600  	return 0;
1601  
1602  err_return:
1603  	if (i > 0)
1604  		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1605  	return -1;
1606  }
1607  
1608  /* Free Tx object Resources */
1609  static void
1610  bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1611  {
1612  	int i;
1613  
1614  	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1615  		if (res_info[i].res_type == BNA_RES_T_MEM)
1616  			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1617  		else if (res_info[i].res_type == BNA_RES_T_INTR)
1618  			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1619  	}
1620  }
1621  
1622  /* Allocates memory and interrupt resources for Tx object */
1623  static int
1624  bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1625  		  u32 tx_id)
1626  {
1627  	int i, err = 0;
1628  
1629  	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1630  		if (res_info[i].res_type == BNA_RES_T_MEM)
1631  			err = bnad_mem_alloc(bnad,
1632  					&res_info[i].res_u.mem_info);
1633  		else if (res_info[i].res_type == BNA_RES_T_INTR)
1634  			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1635  					&res_info[i].res_u.intr_info);
1636  		if (err)
1637  			goto err_return;
1638  	}
1639  	return 0;
1640  
1641  err_return:
1642  	bnad_tx_res_free(bnad, res_info);
1643  	return err;
1644  }
1645  
1646  /* Free Rx object Resources */
1647  static void
1648  bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1649  {
1650  	int i;
1651  
1652  	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1653  		if (res_info[i].res_type == BNA_RES_T_MEM)
1654  			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1655  		else if (res_info[i].res_type == BNA_RES_T_INTR)
1656  			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1657  	}
1658  }
1659  
1660  /* Allocates memory and interrupt resources for Rx object */
1661  static int
1662  bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1663  		  uint rx_id)
1664  {
1665  	int i, err = 0;
1666  
1667  	/* All memory needs to be allocated before setup_ccbs */
1668  	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1669  		if (res_info[i].res_type == BNA_RES_T_MEM)
1670  			err = bnad_mem_alloc(bnad,
1671  					&res_info[i].res_u.mem_info);
1672  		else if (res_info[i].res_type == BNA_RES_T_INTR)
1673  			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1674  					&res_info[i].res_u.intr_info);
1675  		if (err)
1676  			goto err_return;
1677  	}
1678  	return 0;
1679  
1680  err_return:
1681  	bnad_rx_res_free(bnad, res_info);
1682  	return err;
1683  }
1684  
1685  /* Timer callbacks */
1686  /* a) IOC timer */
1687  static void
1688  bnad_ioc_timeout(struct timer_list *t)
1689  {
1690  	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1691  	unsigned long flags;
1692  
1693  	spin_lock_irqsave(&bnad->bna_lock, flags);
1694  	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1695  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1696  }
1697  
1698  static void
1699  bnad_ioc_hb_check(struct timer_list *t)
1700  {
1701  	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1702  	unsigned long flags;
1703  
1704  	spin_lock_irqsave(&bnad->bna_lock, flags);
1705  	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1706  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1707  }
1708  
1709  static void
1710  bnad_iocpf_timeout(struct timer_list *t)
1711  {
1712  	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1713  	unsigned long flags;
1714  
1715  	spin_lock_irqsave(&bnad->bna_lock, flags);
1716  	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1717  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1718  }
1719  
1720  static void
1721  bnad_iocpf_sem_timeout(struct timer_list *t)
1722  {
1723  	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1724  	unsigned long flags;
1725  
1726  	spin_lock_irqsave(&bnad->bna_lock, flags);
1727  	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1728  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1729  }
1730  
1731  /*
1732   * All timer routines use bnad->bna_lock to protect against
1733   * the following race, which may occur in case of no locking:
1734   *	Time	CPU m	CPU n
1735   *	0       1 = test_bit
1736   *	1			clear_bit
1737   *	2			del_timer_sync
1738   *	3	mod_timer
1739   */
1740  
1741  /* b) Dynamic Interrupt Moderation Timer */
1742  static void
1743  bnad_dim_timeout(struct timer_list *t)
1744  {
1745  	struct bnad *bnad = from_timer(bnad, t, dim_timer);
1746  	struct bnad_rx_info *rx_info;
1747  	struct bnad_rx_ctrl *rx_ctrl;
1748  	int i, j;
1749  	unsigned long flags;
1750  
1751  	if (!netif_carrier_ok(bnad->netdev))
1752  		return;
1753  
1754  	spin_lock_irqsave(&bnad->bna_lock, flags);
1755  	for (i = 0; i < bnad->num_rx; i++) {
1756  		rx_info = &bnad->rx_info[i];
1757  		if (!rx_info->rx)
1758  			continue;
1759  		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1760  			rx_ctrl = &rx_info->rx_ctrl[j];
1761  			if (!rx_ctrl->ccb)
1762  				continue;
1763  			bna_rx_dim_update(rx_ctrl->ccb);
1764  		}
1765  	}
1766  
1767  	/* Check for BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate a race */
1768  	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1769  		mod_timer(&bnad->dim_timer,
1770  			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1771  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1772  }
1773  
1774  /* c)  Statistics Timer */
1775  static void
1776  bnad_stats_timeout(struct timer_list *t)
1777  {
1778  	struct bnad *bnad = from_timer(bnad, t, stats_timer);
1779  	unsigned long flags;
1780  
1781  	if (!netif_running(bnad->netdev) ||
1782  		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1783  		return;
1784  
1785  	spin_lock_irqsave(&bnad->bna_lock, flags);
1786  	bna_hw_stats_get(&bnad->bna);
1787  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1788  }
1789  
1790  /*
1791   * Set up timer for DIM
1792   * Called with bnad->bna_lock held
1793   */
1794  void
1795  bnad_dim_timer_start(struct bnad *bnad)
1796  {
1797  	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1798  	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1799  		timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
1800  		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1801  		mod_timer(&bnad->dim_timer,
1802  			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1803  	}
1804  }
1805  
1806  /*
1807   * Set up timer for statistics
1808   * Called with mutex_lock(&bnad->conf_mutex) held
1809   */
1810  static void
1811  bnad_stats_timer_start(struct bnad *bnad)
1812  {
1813  	unsigned long flags;
1814  
1815  	spin_lock_irqsave(&bnad->bna_lock, flags);
1816  	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1817  		timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
1818  		mod_timer(&bnad->stats_timer,
1819  			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1820  	}
1821  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1822  }
1823  
1824  /*
1825   * Stops the stats timer
1826   * Called with mutex_lock(&bnad->conf_mutex) held
1827   */
1828  static void
1829  bnad_stats_timer_stop(struct bnad *bnad)
1830  {
1831  	int to_del = 0;
1832  	unsigned long flags;
1833  
1834  	spin_lock_irqsave(&bnad->bna_lock, flags);
1835  	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1836  		to_del = 1;
1837  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1838  	if (to_del)
1839  		del_timer_sync(&bnad->stats_timer);
1840  }
1841  
1842  /* Utilities */
1843  
1844  static void
1845  bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1846  {
1847  	int i = 1; /* Index 0 has broadcast address */
1848  	struct netdev_hw_addr *mc_addr;
1849  
1850  	netdev_for_each_mc_addr(mc_addr, netdev) {
1851  		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1852  		i++;
1853  	}
1854  }
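
/*
 * Note: the list above is filled starting at index 1; callers are expected
 * to allocate netdev_mc_count() + 1 entries and to place the broadcast
 * address in slot 0 themselves (see bnad_set_rx_mcast_fltr()).
 */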
1855  
1856  static int
1857  bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1858  {
1859  	struct bnad_rx_ctrl *rx_ctrl =
1860  		container_of(napi, struct bnad_rx_ctrl, napi);
1861  	struct bnad *bnad = rx_ctrl->bnad;
1862  	int rcvd = 0;
1863  
1864  	rx_ctrl->rx_poll_ctr++;
1865  
1866  	if (!netif_carrier_ok(bnad->netdev))
1867  		goto poll_exit;
1868  
1869  	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1870  	if (rcvd >= budget)
1871  		return rcvd;
1872  
1873  poll_exit:
1874  	napi_complete_done(napi, rcvd);
1875  
1876  	rx_ctrl->rx_complete++;
1877  
1878  	if (rx_ctrl->ccb)
1879  		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1880  
1881  	return rcvd;
1882  }
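
/*
 * NAPI contract as used above: when the full budget is consumed the poll
 * routine returns without napi_complete_done(), so the core keeps this
 * instance scheduled and polls again.  Only when fewer than budget
 * completions were processed (or the carrier is down) is NAPI completed
 * and the Rx interrupt re-enabled via bnad_enable_rx_irq_unsafe().
 */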
1883  
1884  static void
1885  bnad_napi_add(struct bnad *bnad, u32 rx_id)
1886  {
1887  	struct bnad_rx_ctrl *rx_ctrl;
1888  	int i;
1889  
1890  	/* Initialize & enable NAPI */
1891  	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1892  		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1893  		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1894  			       bnad_napi_poll_rx);
1895  	}
1896  }
1897  
1898  static void
1899  bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1900  {
1901  	int i;
1902  
1903  	/* First disable and then clean up */
1904  	for (i = 0; i < bnad->num_rxp_per_rx; i++)
1905  		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1906  }
1907  
1908  /* Should be called with conf_lock held */
1909  void
1910  bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1911  {
1912  	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1913  	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1914  	unsigned long flags;
1915  
1916  	if (!tx_info->tx)
1917  		return;
1918  
1919  	init_completion(&bnad->bnad_completions.tx_comp);
1920  	spin_lock_irqsave(&bnad->bna_lock, flags);
1921  	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1922  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1923  	wait_for_completion(&bnad->bnad_completions.tx_comp);
1924  
1925  	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1926  		bnad_tx_msix_unregister(bnad, tx_info,
1927  			bnad->num_txq_per_tx);
1928  
1929  	spin_lock_irqsave(&bnad->bna_lock, flags);
1930  	bna_tx_destroy(tx_info->tx);
1931  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1932  
1933  	tx_info->tx = NULL;
1934  	tx_info->tx_id = 0;
1935  
1936  	bnad_tx_res_free(bnad, res_info);
1937  }
1938  
1939  /* Should be called with conf_lock held */
1940  int
1941  bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1942  {
1943  	int err;
1944  	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1945  	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1946  	struct bna_intr_info *intr_info =
1947  			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1948  	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1949  	static const struct bna_tx_event_cbfn tx_cbfn = {
1950  		.tcb_setup_cbfn = bnad_cb_tcb_setup,
1951  		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1952  		.tx_stall_cbfn = bnad_cb_tx_stall,
1953  		.tx_resume_cbfn = bnad_cb_tx_resume,
1954  		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1955  	};
1956  
1957  	struct bna_tx *tx;
1958  	unsigned long flags;
1959  
1960  	tx_info->tx_id = tx_id;
1961  
1962  	/* Initialize the Tx object configuration */
1963  	tx_config->num_txq = bnad->num_txq_per_tx;
1964  	tx_config->txq_depth = bnad->txq_depth;
1965  	tx_config->tx_type = BNA_TX_T_REGULAR;
1966  	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1967  
1968  	/* Get BNA's resource requirement for one tx object */
1969  	spin_lock_irqsave(&bnad->bna_lock, flags);
1970  	bna_tx_res_req(bnad->num_txq_per_tx,
1971  		bnad->txq_depth, res_info);
1972  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1973  
1974  	/* Fill Unmap Q memory requirements */
1975  	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1976  			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1977  			bnad->txq_depth));
1978  
1979  	/* Allocate resources */
1980  	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1981  	if (err)
1982  		return err;
1983  
1984  	/* Ask BNA to create one Tx object, supplying required resources */
1985  	spin_lock_irqsave(&bnad->bna_lock, flags);
1986  	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1987  			tx_info);
1988  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1989  	if (!tx) {
1990  		err = -ENOMEM;
1991  		goto err_return;
1992  	}
1993  	tx_info->tx = tx;
1994  
1995  	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
1996  
1997  	/* Register ISR for the Tx object */
1998  	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1999  		err = bnad_tx_msix_register(bnad, tx_info,
2000  			tx_id, bnad->num_txq_per_tx);
2001  		if (err)
2002  			goto cleanup_tx;
2003  	}
2004  
2005  	spin_lock_irqsave(&bnad->bna_lock, flags);
2006  	bna_tx_enable(tx);
2007  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2008  
2009  	return 0;
2010  
2011  cleanup_tx:
2012  	spin_lock_irqsave(&bnad->bna_lock, flags);
2013  	bna_tx_destroy(tx_info->tx);
2014  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2015  	tx_info->tx = NULL;
2016  	tx_info->tx_id = 0;
2017  err_return:
2018  	bnad_tx_res_free(bnad, res_info);
2019  	return err;
2020  }
2021  
2022  /* Setup the rx config for bna_rx_create */
2023  /* bnad decides the configuration */
2024  static void
2025  bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2026  {
2027  	memset(rx_config, 0, sizeof(*rx_config));
2028  	rx_config->rx_type = BNA_RX_T_REGULAR;
2029  	rx_config->num_paths = bnad->num_rxp_per_rx;
2030  	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2031  
2032  	if (bnad->num_rxp_per_rx > 1) {
2033  		rx_config->rss_status = BNA_STATUS_T_ENABLED;
2034  		rx_config->rss_config.hash_type =
2035  				(BFI_ENET_RSS_IPV6 |
2036  				 BFI_ENET_RSS_IPV6_TCP |
2037  				 BFI_ENET_RSS_IPV4 |
2038  				 BFI_ENET_RSS_IPV4_TCP);
2039  		rx_config->rss_config.hash_mask =
2040  				bnad->num_rxp_per_rx - 1;
2041  		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2042  			sizeof(rx_config->rss_config.toeplitz_hash_key));
2043  	} else {
2044  		rx_config->rss_status = BNA_STATUS_T_DISABLED;
2045  		memset(&rx_config->rss_config, 0,
2046  		       sizeof(rx_config->rss_config));
2047  	}
2048  
2049  	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2050  	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2051  
2052  	/* BNA_RXP_SINGLE - one data-buffer queue
2053  	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2054  	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2055  	 */
2056  	/* TODO: configurable param for queue type */
2057  	rx_config->rxp_type = BNA_RXP_SLR;
2058  
2059  	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2060  	    rx_config->frame_size > 4096) {
2061  		/* Though size_routing_enable is set in SLR,
2062  		 * small packets may still get routed to the same rxq.
2063  		 * Set buf_size to 2048 instead of PAGE_SIZE.
2064  		 */
2065  		rx_config->q0_buf_size = 2048;
2066  		/* this should be a multiple of 2 */
2067  		rx_config->q0_num_vecs = 4;
2068  		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2069  		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2070  	} else {
2071  		rx_config->q0_buf_size = rx_config->frame_size;
2072  		rx_config->q0_num_vecs = 1;
2073  		rx_config->q0_depth = bnad->rxq_depth;
2074  	}
2075  
2076  	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2077  	if (rx_config->rxp_type == BNA_RXP_SLR) {
2078  		rx_config->q1_depth = bnad->rxq_depth;
2079  		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2080  	}
2081  
2082  	rx_config->vlan_strip_status =
2083  		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2084  		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2085  }
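
/*
 * Sizing sketch for the multi-buffer branch above: on CAT2 ASICs with a
 * frame size above 4096 bytes, q0 switches to fixed 2048-byte buffers with
 * four vectors per unit of depth, so a single large frame may be scattered
 * across several q0 buffers rather than landing in one frame-sized buffer
 * as in the default branch.
 */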
2086  
2087  static void
2088  bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2089  {
2090  	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2091  	int i;
2092  
2093  	for (i = 0; i < bnad->num_rxp_per_rx; i++)
2094  		rx_info->rx_ctrl[i].bnad = bnad;
2095  }
2096  
2097  /* Called with mutex_lock(&bnad->conf_mutex) held */
2098  static u32
2099  bnad_reinit_rx(struct bnad *bnad)
2100  {
2101  	struct net_device *netdev = bnad->netdev;
2102  	u32 err = 0, current_err = 0;
2103  	u32 rx_id = 0, count = 0;
2104  	unsigned long flags;
2105  
2106  	/* destroy and create new rx objects */
2107  	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2108  		if (!bnad->rx_info[rx_id].rx)
2109  			continue;
2110  		bnad_destroy_rx(bnad, rx_id);
2111  	}
2112  
2113  	spin_lock_irqsave(&bnad->bna_lock, flags);
2114  	bna_enet_mtu_set(&bnad->bna.enet,
2115  			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2116  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2117  
2118  	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2119  		count++;
2120  		current_err = bnad_setup_rx(bnad, rx_id);
2121  		if (current_err && !err) {
2122  			err = current_err;
2123  			netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2124  		}
2125  	}
2126  
2127  	/* restore rx configuration */
2128  	if (bnad->rx_info[0].rx && !err) {
2129  		bnad_restore_vlans(bnad, 0);
2130  		bnad_enable_default_bcast(bnad);
2131  		spin_lock_irqsave(&bnad->bna_lock, flags);
2132  		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2133  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2134  		bnad_set_rx_mode(netdev);
2135  	}
2136  
2137  	return count;
2138  }
2139  
2140  /* Called with bnad_conf_lock() held */
2141  void
2142  bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2143  {
2144  	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2145  	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2146  	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2147  	unsigned long flags;
2148  	int to_del = 0;
2149  
2150  	if (!rx_info->rx)
2151  		return;
2152  
2153  	if (0 == rx_id) {
2154  		spin_lock_irqsave(&bnad->bna_lock, flags);
2155  		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2156  		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2157  			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2158  			to_del = 1;
2159  		}
2160  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2161  		if (to_del)
2162  			del_timer_sync(&bnad->dim_timer);
2163  	}
2164  
2165  	init_completion(&bnad->bnad_completions.rx_comp);
2166  	spin_lock_irqsave(&bnad->bna_lock, flags);
2167  	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2168  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2169  	wait_for_completion(&bnad->bnad_completions.rx_comp);
2170  
2171  	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2172  		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2173  
2174  	bnad_napi_delete(bnad, rx_id);
2175  
2176  	spin_lock_irqsave(&bnad->bna_lock, flags);
2177  	bna_rx_destroy(rx_info->rx);
2178  
2179  	rx_info->rx = NULL;
2180  	rx_info->rx_id = 0;
2181  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2182  
2183  	bnad_rx_res_free(bnad, res_info);
2184  }
2185  
2186  /* Called with mutex_lock(&bnad->conf_mutex) held */
2187  int
2188  bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2189  {
2190  	int err;
2191  	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2192  	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2193  	struct bna_intr_info *intr_info =
2194  			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2195  	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2196  	static const struct bna_rx_event_cbfn rx_cbfn = {
2197  		.rcb_setup_cbfn = NULL,
2198  		.rcb_destroy_cbfn = NULL,
2199  		.ccb_setup_cbfn = bnad_cb_ccb_setup,
2200  		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2201  		.rx_stall_cbfn = bnad_cb_rx_stall,
2202  		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2203  		.rx_post_cbfn = bnad_cb_rx_post,
2204  	};
2205  	struct bna_rx *rx;
2206  	unsigned long flags;
2207  
2208  	rx_info->rx_id = rx_id;
2209  
2210  	/* Initialize the Rx object configuration */
2211  	bnad_init_rx_config(bnad, rx_config);
2212  
2213  	/* Get BNA's resource requirement for one Rx object */
2214  	spin_lock_irqsave(&bnad->bna_lock, flags);
2215  	bna_rx_res_req(rx_config, res_info);
2216  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2217  
2218  	/* Fill Unmap Q memory requirements */
2219  	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2220  				 rx_config->num_paths,
2221  			(rx_config->q0_depth *
2222  			 sizeof(struct bnad_rx_unmap)) +
2223  			 sizeof(struct bnad_rx_unmap_q));
2224  
2225  	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2226  		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2227  					 rx_config->num_paths,
2228  				(rx_config->q1_depth *
2229  				 sizeof(struct bnad_rx_unmap) +
2230  				 sizeof(struct bnad_rx_unmap_q)));
2231  	}
2232  	/* Allocate resource */
2233  	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2234  	if (err)
2235  		return err;
2236  
2237  	bnad_rx_ctrl_init(bnad, rx_id);
2238  
2239  	/* Ask BNA to create one Rx object, supplying required resources */
2240  	spin_lock_irqsave(&bnad->bna_lock, flags);
2241  	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2242  			rx_info);
2243  	if (!rx) {
2244  		err = -ENOMEM;
2245  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2246  		goto err_return;
2247  	}
2248  	rx_info->rx = rx;
2249  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2250  
2251  	INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
2252  
2253  	/*
2254  	 * Init NAPI, so that state is set to NAPI_STATE_SCHED and
2255  	 * the IRQ handler cannot schedule NAPI at this point.
2256  	 */
2257  	bnad_napi_add(bnad, rx_id);
2258  
2259  	/* Register ISR for the Rx object */
2260  	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2261  		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2262  						rx_config->num_paths);
2263  		if (err)
2264  			goto err_return;
2265  	}
2266  
2267  	spin_lock_irqsave(&bnad->bna_lock, flags);
2268  	if (0 == rx_id) {
2269  		/* Set up Dynamic Interrupt Moderation Vector */
2270  		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2271  			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2272  
2273  		/* Enable VLAN filtering only on the default Rx */
2274  		bna_rx_vlanfilter_enable(rx);
2275  
2276  		/* Start the DIM timer */
2277  		bnad_dim_timer_start(bnad);
2278  	}
2279  
2280  	bna_rx_enable(rx);
2281  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2282  
2283  	return 0;
2284  
2285  err_return:
2286  	bnad_destroy_rx(bnad, rx_id);
2287  	return err;
2288  }
2289  
2290  /* Called with conf_lock & bnad->bna_lock held */
2291  void
2292  bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2293  {
2294  	struct bnad_tx_info *tx_info;
2295  
2296  	tx_info = &bnad->tx_info[0];
2297  	if (!tx_info->tx)
2298  		return;
2299  
2300  	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2301  }
2302  
2303  /* Called with conf_lock & bnad->bna_lock held */
2304  void
2305  bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2306  {
2307  	struct bnad_rx_info *rx_info;
2308  	int	i;
2309  
2310  	for (i = 0; i < bnad->num_rx; i++) {
2311  		rx_info = &bnad->rx_info[i];
2312  		if (!rx_info->rx)
2313  			continue;
2314  		bna_rx_coalescing_timeo_set(rx_info->rx,
2315  				bnad->rx_coalescing_timeo);
2316  	}
2317  }
2318  
2319  /*
2320   * Called with bnad->bna_lock held
2321   */
2322  int
2323  bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2324  {
2325  	int ret;
2326  
2327  	if (!is_valid_ether_addr(mac_addr))
2328  		return -EADDRNOTAVAIL;
2329  
2330  	/* If datapath is down, pretend everything went through */
2331  	if (!bnad->rx_info[0].rx)
2332  		return 0;
2333  
2334  	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2335  	if (ret != BNA_CB_SUCCESS)
2336  		return -EADDRNOTAVAIL;
2337  
2338  	return 0;
2339  }
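
/*
 * When the datapath is down the new address is only accepted here; it is
 * programmed into hardware later, when bnad_open() or bnad_reinit_rx()
 * calls bnad_mac_addr_set_locked() again with the Rx object present.
 */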
2340  
2341  /* Should be called with conf_lock held */
2342  int
2343  bnad_enable_default_bcast(struct bnad *bnad)
2344  {
2345  	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2346  	int ret;
2347  	unsigned long flags;
2348  
2349  	init_completion(&bnad->bnad_completions.mcast_comp);
2350  
2351  	spin_lock_irqsave(&bnad->bna_lock, flags);
2352  	ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2353  			       bnad_cb_rx_mcast_add);
2354  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2355  
2356  	if (ret == BNA_CB_SUCCESS)
2357  		wait_for_completion(&bnad->bnad_completions.mcast_comp);
2358  	else
2359  		return -ENODEV;
2360  
2361  	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2362  		return -ENODEV;
2363  
2364  	return 0;
2365  }
2366  
2367  /* Called with mutex_lock(&bnad->conf_mutex) held */
2368  void
2369  bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2370  {
2371  	u16 vid;
2372  	unsigned long flags;
2373  
2374  	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2375  		spin_lock_irqsave(&bnad->bna_lock, flags);
2376  		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2377  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2378  	}
2379  }
2380  
2381  /* Statistics utilities */
2382  void
2383  bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2384  {
2385  	int i, j;
2386  
2387  	for (i = 0; i < bnad->num_rx; i++) {
2388  		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2389  			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2390  				stats->rx_packets += bnad->rx_info[i].
2391  				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2392  				stats->rx_bytes += bnad->rx_info[i].
2393  					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2394  				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2395  					bnad->rx_info[i].rx_ctrl[j].ccb->
2396  					rcb[1]->rxq) {
2397  					stats->rx_packets +=
2398  						bnad->rx_info[i].rx_ctrl[j].
2399  						ccb->rcb[1]->rxq->rx_packets;
2400  					stats->rx_bytes +=
2401  						bnad->rx_info[i].rx_ctrl[j].
2402  						ccb->rcb[1]->rxq->rx_bytes;
2403  				}
2404  			}
2405  		}
2406  	}
2407  	for (i = 0; i < bnad->num_tx; i++) {
2408  		for (j = 0; j < bnad->num_txq_per_tx; j++) {
2409  			if (bnad->tx_info[i].tcb[j]) {
2410  				stats->tx_packets +=
2411  				bnad->tx_info[i].tcb[j]->txq->tx_packets;
2412  				stats->tx_bytes +=
2413  					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2414  			}
2415  		}
2416  	}
2417  }
2418  
2419  /*
2420   * Must be called with the bna_lock held.
2421   */
2422  void
2423  bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2424  {
2425  	struct bfi_enet_stats_mac *mac_stats;
2426  	u32 bmap;
2427  	int i;
2428  
2429  	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2430  	stats->rx_errors =
2431  		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2432  		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2433  		mac_stats->rx_undersize;
2434  	stats->tx_errors = mac_stats->tx_fcs_error +
2435  					mac_stats->tx_undersize;
2436  	stats->rx_dropped = mac_stats->rx_drop;
2437  	stats->tx_dropped = mac_stats->tx_drop;
2438  	stats->multicast = mac_stats->rx_multicast;
2439  	stats->collisions = mac_stats->tx_total_collision;
2440  
2441  	stats->rx_length_errors = mac_stats->rx_frame_length_error;
2442  
2443  	/* receive ring buffer overflow  ?? */
2444  
2445  	stats->rx_crc_errors = mac_stats->rx_fcs_error;
2446  	stats->rx_frame_errors = mac_stats->rx_alignment_error;
2447  	/* recv'r fifo overrun */
2448  	bmap = bna_rx_rid_mask(&bnad->bna);
2449  	for (i = 0; bmap; i++) {
2450  		if (bmap & 1) {
2451  			stats->rx_fifo_errors +=
2452  				bnad->stats.bna_stats->
2453  					hw_stats.rxf_stats[i].frame_drops;
2454  			break;
2455  		}
2456  		bmap >>= 1;
2457  	}
2458  }
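
/*
 * Note on the bitmap walk above: the loop breaks after the first set bit,
 * so rx_fifo_errors accounts only the frame_drops of the first active Rx
 * function.
 */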
2459  
2460  static void
2461  bnad_mbox_irq_sync(struct bnad *bnad)
2462  {
2463  	u32 irq;
2464  	unsigned long flags;
2465  
2466  	spin_lock_irqsave(&bnad->bna_lock, flags);
2467  	if (bnad->cfg_flags & BNAD_CF_MSIX)
2468  		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2469  	else
2470  		irq = bnad->pcidev->irq;
2471  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2472  
2473  	synchronize_irq(irq);
2474  }
2475  
2476  /* Utility used by bnad_start_xmit, for doing TSO */
2477  static int
2478  bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2479  {
2480  	int err;
2481  
2482  	err = skb_cow_head(skb, 0);
2483  	if (err < 0) {
2484  		BNAD_UPDATE_CTR(bnad, tso_err);
2485  		return err;
2486  	}
2487  
2488  	/*
2489  	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2490  	 * excluding the length field.
2491  	 */
2492  	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2493  		struct iphdr *iph = ip_hdr(skb);
2494  
2495  		/* Do we really need these? */
2496  		iph->tot_len = 0;
2497  		iph->check = 0;
2498  
2499  		tcp_hdr(skb)->check =
2500  			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2501  					   IPPROTO_TCP, 0);
2502  		BNAD_UPDATE_CTR(bnad, tso4);
2503  	} else {
2504  		tcp_v6_gso_csum_prep(skb);
2505  		BNAD_UPDATE_CTR(bnad, tso6);
2506  	}
2507  
2508  	return 0;
2509  }
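
/*
 * Seeding example for the IPv4 case above: the TCP checksum field is
 * preloaded with ~csum(pseudo-header(saddr, daddr, IPPROTO_TCP, len = 0)),
 * i.e. the pseudo-header sum without the length, and tot_len/iph->check
 * are zeroed, presumably so the hardware can fill in the per-segment
 * length and checksums when it performs LSO.
 */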
2510  
2511  /*
2512   * Initialize Q numbers depending on Rx Paths
2513   * Called with bnad->bna_lock held, because of cfg_flags
2514   * access.
2515   */
2516  static void
2517  bnad_q_num_init(struct bnad *bnad)
2518  {
2519  	int rxps;
2520  
2521  	rxps = min((uint)num_online_cpus(),
2522  			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2523  
2524  	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2525  		rxps = 1;	/* INTx */
2526  
2527  	bnad->num_rx = 1;
2528  	bnad->num_tx = 1;
2529  	bnad->num_rxp_per_rx = rxps;
2530  	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2531  }
2532  
2533  /*
2534   * Adjusts the Q numbers, given the number of MSI-X vectors.
2535   * Gives preference to RSS over Tx priority queues; in that case
2536   * just one Tx Q is used.
2537   * Called with bnad->bna_lock held because of cfg_flags access.
2538   */
2539  static void
2540  bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2541  {
2542  	bnad->num_txq_per_tx = 1;
2543  	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2544  	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2545  	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
2546  		bnad->num_rxp_per_rx = msix_vectors -
2547  			(bnad->num_tx * bnad->num_txq_per_tx) -
2548  			BNAD_MAILBOX_MSIX_VECTORS;
2549  	} else
2550  		bnad->num_rxp_per_rx = 1;
2551  }
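
/*
 * Worked example for the adjustment above (illustrative values only):
 * with msix_vectors = 8, one Tx object with one TxQ, bnad_rxqs_per_cq = 2
 * and, say, BNAD_MAILBOX_MSIX_VECTORS = 1, the condition 8 >= 1 + 2 + 1
 * holds and num_rxp_per_rx becomes 8 - 1 - 1 = 6.  With too few vectors
 * (or without MSI-X) a single Rx path is used instead.
 */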
2552  
2553  /* Enable / disable ioceth */
2554  static int
2555  bnad_ioceth_disable(struct bnad *bnad)
2556  {
2557  	unsigned long flags;
2558  	int err = 0;
2559  
2560  	spin_lock_irqsave(&bnad->bna_lock, flags);
2561  	init_completion(&bnad->bnad_completions.ioc_comp);
2562  	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2563  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2564  
2565  	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2566  		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2567  
2568  	err = bnad->bnad_completions.ioc_comp_status;
2569  	return err;
2570  }
2571  
2572  static int
2573  bnad_ioceth_enable(struct bnad *bnad)
2574  {
2575  	int err = 0;
2576  	unsigned long flags;
2577  
2578  	spin_lock_irqsave(&bnad->bna_lock, flags);
2579  	init_completion(&bnad->bnad_completions.ioc_comp);
2580  	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2581  	bna_ioceth_enable(&bnad->bna.ioceth);
2582  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2583  
2584  	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2585  		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2586  
2587  	err = bnad->bnad_completions.ioc_comp_status;
2588  
2589  	return err;
2590  }
2591  
2592  /* Free BNA resources */
2593  static void
2594  bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2595  		u32 res_val_max)
2596  {
2597  	int i;
2598  
2599  	for (i = 0; i < res_val_max; i++)
2600  		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2601  }
2602  
2603  /* Allocates memory and interrupt resources for BNA */
2604  static int
2605  bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2606  		u32 res_val_max)
2607  {
2608  	int i, err;
2609  
2610  	for (i = 0; i < res_val_max; i++) {
2611  		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2612  		if (err)
2613  			goto err_return;
2614  	}
2615  	return 0;
2616  
2617  err_return:
2618  	bnad_res_free(bnad, res_info, res_val_max);
2619  	return err;
2620  }
2621  
2622  /* Interrupt enable / disable */
2623  static void
2624  bnad_enable_msix(struct bnad *bnad)
2625  {
2626  	int i, ret;
2627  	unsigned long flags;
2628  
2629  	spin_lock_irqsave(&bnad->bna_lock, flags);
2630  	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2631  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2632  		return;
2633  	}
2634  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2635  
2636  	if (bnad->msix_table)
2637  		return;
2638  
2639  	bnad->msix_table =
2640  		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2641  
2642  	if (!bnad->msix_table)
2643  		goto intx_mode;
2644  
2645  	for (i = 0; i < bnad->msix_num; i++)
2646  		bnad->msix_table[i].entry = i;
2647  
2648  	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2649  				    1, bnad->msix_num);
2650  	if (ret < 0) {
2651  		goto intx_mode;
2652  	} else if (ret < bnad->msix_num) {
2653  		dev_warn(&bnad->pcidev->dev,
2654  			 "%d MSI-X vectors allocated < %d requested\n",
2655  			 ret, bnad->msix_num);
2656  
2657  		spin_lock_irqsave(&bnad->bna_lock, flags);
2658  		/* ret = #of vectors that we got */
2659  		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2660  			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2661  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2662  
2663  		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2664  			 BNAD_MAILBOX_MSIX_VECTORS;
2665  
2666  		if (bnad->msix_num > ret) {
2667  			pci_disable_msix(bnad->pcidev);
2668  			goto intx_mode;
2669  		}
2670  	}
2671  
2672  	pci_intx(bnad->pcidev, 0);
2673  
2674  	return;
2675  
2676  intx_mode:
2677  	dev_warn(&bnad->pcidev->dev,
2678  		 "MSI-X enable failed - operating in INTx mode\n");
2679  
2680  	kfree(bnad->msix_table);
2681  	bnad->msix_table = NULL;
2682  	bnad->msix_num = 0;
2683  	spin_lock_irqsave(&bnad->bna_lock, flags);
2684  	bnad->cfg_flags &= ~BNAD_CF_MSIX;
2685  	bnad_q_num_init(bnad);
2686  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2687  }
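
/*
 * Summary of the fallback above: pci_enable_msix_range() may grant fewer
 * vectors than requested.  The queue counts are then re-derived from the
 * granted count (minus the mailbox vectors) and msix_num is recomputed;
 * if even the reduced requirement cannot be met, MSI-X is released again
 * and the driver drops to INTx with the defaults from bnad_q_num_init().
 */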
2688  
2689  static void
2690  bnad_disable_msix(struct bnad *bnad)
2691  {
2692  	u32 cfg_flags;
2693  	unsigned long flags;
2694  
2695  	spin_lock_irqsave(&bnad->bna_lock, flags);
2696  	cfg_flags = bnad->cfg_flags;
2697  	if (bnad->cfg_flags & BNAD_CF_MSIX)
2698  		bnad->cfg_flags &= ~BNAD_CF_MSIX;
2699  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2700  
2701  	if (cfg_flags & BNAD_CF_MSIX) {
2702  		pci_disable_msix(bnad->pcidev);
2703  		kfree(bnad->msix_table);
2704  		bnad->msix_table = NULL;
2705  	}
2706  }
2707  
2708  /* Netdev entry points */
2709  static int
2710  bnad_open(struct net_device *netdev)
2711  {
2712  	int err;
2713  	struct bnad *bnad = netdev_priv(netdev);
2714  	struct bna_pause_config pause_config;
2715  	unsigned long flags;
2716  
2717  	mutex_lock(&bnad->conf_mutex);
2718  
2719  	/* Tx */
2720  	err = bnad_setup_tx(bnad, 0);
2721  	if (err)
2722  		goto err_return;
2723  
2724  	/* Rx */
2725  	err = bnad_setup_rx(bnad, 0);
2726  	if (err)
2727  		goto cleanup_tx;
2728  
2729  	/* Port */
2730  	pause_config.tx_pause = 0;
2731  	pause_config.rx_pause = 0;
2732  
2733  	spin_lock_irqsave(&bnad->bna_lock, flags);
2734  	bna_enet_mtu_set(&bnad->bna.enet,
2735  			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2736  	bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2737  	bna_enet_enable(&bnad->bna.enet);
2738  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2739  
2740  	/* Enable broadcast */
2741  	bnad_enable_default_bcast(bnad);
2742  
2743  	/* Restore VLANs, if any */
2744  	bnad_restore_vlans(bnad, 0);
2745  
2746  	/* Set the UCAST address */
2747  	spin_lock_irqsave(&bnad->bna_lock, flags);
2748  	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2749  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2750  
2751  	/* Start the stats timer */
2752  	bnad_stats_timer_start(bnad);
2753  
2754  	mutex_unlock(&bnad->conf_mutex);
2755  
2756  	return 0;
2757  
2758  cleanup_tx:
2759  	bnad_destroy_tx(bnad, 0);
2760  
2761  err_return:
2762  	mutex_unlock(&bnad->conf_mutex);
2763  	return err;
2764  }
2765  
2766  static int
2767  bnad_stop(struct net_device *netdev)
2768  {
2769  	struct bnad *bnad = netdev_priv(netdev);
2770  	unsigned long flags;
2771  
2772  	mutex_lock(&bnad->conf_mutex);
2773  
2774  	/* Stop the stats timer */
2775  	bnad_stats_timer_stop(bnad);
2776  
2777  	init_completion(&bnad->bnad_completions.enet_comp);
2778  
2779  	spin_lock_irqsave(&bnad->bna_lock, flags);
2780  	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2781  			bnad_cb_enet_disabled);
2782  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2783  
2784  	wait_for_completion(&bnad->bnad_completions.enet_comp);
2785  
2786  	bnad_destroy_tx(bnad, 0);
2787  	bnad_destroy_rx(bnad, 0);
2788  
2789  	/* Synchronize mailbox IRQ */
2790  	bnad_mbox_irq_sync(bnad);
2791  
2792  	mutex_unlock(&bnad->conf_mutex);
2793  
2794  	return 0;
2795  }
2796  
2797  /* TX */
2798  /* Returns 0 for success */
2799  static int
2800  bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2801  		    struct sk_buff *skb, struct bna_txq_entry *txqent)
2802  {
2803  	u16 flags = 0;
2804  	u32 gso_size;
2805  	u16 vlan_tag = 0;
2806  
2807  	if (skb_vlan_tag_present(skb)) {
2808  		vlan_tag = (u16)skb_vlan_tag_get(skb);
2809  		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2810  	}
2811  	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2812  		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2813  				| (vlan_tag & 0x1fff);
2814  		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2815  	}
2816  	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2817  
2818  	if (skb_is_gso(skb)) {
2819  		gso_size = skb_shinfo(skb)->gso_size;
2820  		if (unlikely(gso_size > bnad->netdev->mtu)) {
2821  			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2822  			return -EINVAL;
2823  		}
2824  		if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
2825  			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2826  			txqent->hdr.wi.lso_mss = 0;
2827  			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2828  		} else {
2829  			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2830  			txqent->hdr.wi.lso_mss = htons(gso_size);
2831  		}
2832  
2833  		if (bnad_tso_prepare(bnad, skb)) {
2834  			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2835  			return -EINVAL;
2836  		}
2837  
2838  		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2839  		txqent->hdr.wi.l4_hdr_size_n_offset =
2840  			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2841  			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2842  	} else  {
2843  		txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2844  		txqent->hdr.wi.lso_mss = 0;
2845  
2846  		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2847  			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2848  			return -EINVAL;
2849  		}
2850  
2851  		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2852  			__be16 net_proto = vlan_get_protocol(skb);
2853  			u8 proto = 0;
2854  
2855  			if (net_proto == htons(ETH_P_IP))
2856  				proto = ip_hdr(skb)->protocol;
2857  #ifdef NETIF_F_IPV6_CSUM
2858  			else if (net_proto == htons(ETH_P_IPV6)) {
2859  				/* nexthdr may not be TCP immediately. */
2860  				proto = ipv6_hdr(skb)->nexthdr;
2861  			}
2862  #endif
2863  			if (proto == IPPROTO_TCP) {
2864  				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2865  				txqent->hdr.wi.l4_hdr_size_n_offset =
2866  					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2867  					      (0, skb_transport_offset(skb)));
2868  
2869  				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2870  
2871  				if (unlikely(skb_headlen(skb) <
2872  					    skb_tcp_all_headers(skb))) {
2873  					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2874  					return -EINVAL;
2875  				}
2876  			} else if (proto == IPPROTO_UDP) {
2877  				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2878  				txqent->hdr.wi.l4_hdr_size_n_offset =
2879  					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2880  					      (0, skb_transport_offset(skb)));
2881  
2882  				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2883  				if (unlikely(skb_headlen(skb) <
2884  					    skb_transport_offset(skb) +
2885  				    sizeof(struct udphdr))) {
2886  					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2887  					return -EINVAL;
2888  				}
2889  			} else {
2890  
2891  				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2892  				return -EINVAL;
2893  			}
2894  		} else
2895  			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2896  	}
2897  
2898  	txqent->hdr.wi.flags = htons(flags);
2899  	txqent->hdr.wi.frame_length = htonl(skb->len);
2900  
2901  	return 0;
2902  }
2903  
2904  /*
2905   * bnad_start_xmit : Netdev entry point for Transmit
2906   *		     Called under lock held by net_device
2907   */
2908  static netdev_tx_t
2909  bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2910  {
2911  	struct bnad *bnad = netdev_priv(netdev);
2912  	u32 txq_id = 0;
2913  	struct bna_tcb *tcb = NULL;
2914  	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2915  	u32		prod, q_depth, vect_id;
2916  	u32		wis, vectors, len;
2917  	int		i;
2918  	dma_addr_t		dma_addr;
2919  	struct bna_txq_entry *txqent;
2920  
2921  	len = skb_headlen(skb);
2922  
2923  	/* Sanity checks for the skb */
2924  
2925  	if (unlikely(skb->len <= ETH_HLEN)) {
2926  		dev_kfree_skb_any(skb);
2927  		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2928  		return NETDEV_TX_OK;
2929  	}
2930  	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2931  		dev_kfree_skb_any(skb);
2932  		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2933  		return NETDEV_TX_OK;
2934  	}
2935  	if (unlikely(len == 0)) {
2936  		dev_kfree_skb_any(skb);
2937  		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2938  		return NETDEV_TX_OK;
2939  	}
2940  
2941  	tcb = bnad->tx_info[0].tcb[txq_id];
2942  
2943  	/*
2944  	 * Takes care of the Tx that is scheduled between clearing the flag
2945  	 * and the netif_tx_stop_all_queues() call.
2946  	 */
2947  	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2948  		dev_kfree_skb_any(skb);
2949  		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2950  		return NETDEV_TX_OK;
2951  	}
2952  
2953  	q_depth = tcb->q_depth;
2954  	prod = tcb->producer_index;
2955  	unmap_q = tcb->unmap_q;
2956  
2957  	vectors = 1 + skb_shinfo(skb)->nr_frags;
2958  	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
2959  
2960  	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2961  		dev_kfree_skb_any(skb);
2962  		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2963  		return NETDEV_TX_OK;
2964  	}
2965  
2966  	/* Check for available TxQ resources */
2967  	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2968  		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2969  		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2970  			u32 sent;
2971  			sent = bnad_txcmpl_process(bnad, tcb);
2972  			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2973  				bna_ib_ack(tcb->i_dbell, sent);
2974  			smp_mb__before_atomic();
2975  			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2976  		} else {
2977  			netif_stop_queue(netdev);
2978  			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2979  		}
2980  
2981  		smp_mb();
2982  		/*
2983  		 * Check again to deal with race condition between
2984  		 * netif_stop_queue here, and netif_wake_queue in
2985  		 * interrupt handler which is not inside netif tx lock.
2986  		 */
2987  		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2988  			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2989  			return NETDEV_TX_BUSY;
2990  		} else {
2991  			netif_wake_queue(netdev);
2992  			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2993  		}
2994  	}
2995  
2996  	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2997  	head_unmap = &unmap_q[prod];
2998  
2999  	/* Program the opcode, flags, frame_len, num_vectors in WI */
3000  	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3001  		dev_kfree_skb_any(skb);
3002  		return NETDEV_TX_OK;
3003  	}
3004  	txqent->hdr.wi.reserved = 0;
3005  	txqent->hdr.wi.num_vectors = vectors;
3006  
3007  	head_unmap->skb = skb;
3008  	head_unmap->nvecs = 0;
3009  
3010  	/* Program the vectors */
3011  	unmap = head_unmap;
3012  	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3013  				  len, DMA_TO_DEVICE);
3014  	if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3015  		dev_kfree_skb_any(skb);
3016  		BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3017  		return NETDEV_TX_OK;
3018  	}
3019  	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3020  	txqent->vector[0].length = htons(len);
3021  	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3022  	head_unmap->nvecs++;
3023  
3024  	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3025  		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3026  		u32		size = skb_frag_size(frag);
3027  
3028  		if (unlikely(size == 0)) {
3029  			/* Undo the changes starting at tcb->producer_index */
3030  			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3031  				tcb->producer_index);
3032  			dev_kfree_skb_any(skb);
3033  			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3034  			return NETDEV_TX_OK;
3035  		}
3036  
3037  		len += size;
3038  
3039  		vect_id++;
3040  		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3041  			vect_id = 0;
3042  			BNA_QE_INDX_INC(prod, q_depth);
3043  			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3044  			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3045  			unmap = &unmap_q[prod];
3046  		}
3047  
3048  		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3049  					    0, size, DMA_TO_DEVICE);
3050  		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3051  			/* Undo the changes starting at tcb->producer_index */
3052  			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3053  					   tcb->producer_index);
3054  			dev_kfree_skb_any(skb);
3055  			BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3056  			return NETDEV_TX_OK;
3057  		}
3058  
3059  		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3060  		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3061  		txqent->vector[vect_id].length = htons(size);
3062  		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3063  				   dma_addr);
3064  		head_unmap->nvecs++;
3065  	}
3066  
3067  	if (unlikely(len != skb->len)) {
3068  		/* Undo the changes starting at tcb->producer_index */
3069  		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3070  		dev_kfree_skb_any(skb);
3071  		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3072  		return NETDEV_TX_OK;
3073  	}
3074  
3075  	BNA_QE_INDX_INC(prod, q_depth);
3076  	tcb->producer_index = prod;
3077  
3078  	wmb();
3079  
3080  	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3081  		return NETDEV_TX_OK;
3082  
3083  	skb_tx_timestamp(skb);
3084  
3085  	bna_txq_prod_indx_doorbell(tcb);
3086  
3087  	return NETDEV_TX_OK;
3088  }
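
/*
 * Ordering note for the tail of bnad_start_xmit(): the wmb() makes the
 * work-item writes and the producer index update visible before the
 * doorbell in bna_txq_prod_indx_doorbell().  The BNAD_TXQ_TX_STARTED
 * re-check after the barrier skips the doorbell if the TxQ was stopped in
 * the meantime; the queued buffers are then expected to be reclaimed by
 * the Tx cleanup path rather than transmitted.
 */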
3089  
3090  /*
3091   * A spin lock is used to synchronize reading of the stats structures,
3092   * which are written by BNA under the same lock.
3093   */
3094  static void
3095  bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3096  {
3097  	struct bnad *bnad = netdev_priv(netdev);
3098  	unsigned long flags;
3099  
3100  	spin_lock_irqsave(&bnad->bna_lock, flags);
3101  
3102  	bnad_netdev_qstats_fill(bnad, stats);
3103  	bnad_netdev_hwstats_fill(bnad, stats);
3104  
3105  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3106  }
3107  
3108  static void
3109  bnad_set_rx_ucast_fltr(struct bnad *bnad)
3110  {
3111  	struct net_device *netdev = bnad->netdev;
3112  	int uc_count = netdev_uc_count(netdev);
3113  	enum bna_cb_status ret;
3114  	u8 *mac_list;
3115  	struct netdev_hw_addr *ha;
3116  	int entry;
3117  
3118  	if (netdev_uc_empty(bnad->netdev)) {
3119  		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3120  		return;
3121  	}
3122  
3123  	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3124  		goto mode_default;
3125  
3126  	mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
3127  	if (mac_list == NULL)
3128  		goto mode_default;
3129  
3130  	entry = 0;
3131  	netdev_for_each_uc_addr(ha, netdev) {
3132  		ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3133  		entry++;
3134  	}
3135  
3136  	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3137  	kfree(mac_list);
3138  
3139  	if (ret != BNA_CB_SUCCESS)
3140  		goto mode_default;
3141  
3142  	return;
3143  
3144  	/* ucast packets not in UCAM are routed to default function */
3145  mode_default:
3146  	bnad->cfg_flags |= BNAD_CF_DEFAULT;
3147  	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3148  }
3149  
3150  static void
3151  bnad_set_rx_mcast_fltr(struct bnad *bnad)
3152  {
3153  	struct net_device *netdev = bnad->netdev;
3154  	int mc_count = netdev_mc_count(netdev);
3155  	enum bna_cb_status ret;
3156  	u8 *mac_list;
3157  
3158  	if (netdev->flags & IFF_ALLMULTI)
3159  		goto mode_allmulti;
3160  
3161  	if (netdev_mc_empty(netdev))
3162  		return;
3163  
3164  	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3165  		goto mode_allmulti;
3166  
3167  	mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
3168  
3169  	if (mac_list == NULL)
3170  		goto mode_allmulti;
3171  
3172  	ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3173  
3174  	/* copy rest of the MCAST addresses */
3175  	bnad_netdev_mc_list_get(netdev, mac_list);
3176  	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3177  	kfree(mac_list);
3178  
3179  	if (ret != BNA_CB_SUCCESS)
3180  		goto mode_allmulti;
3181  
3182  	return;
3183  
3184  mode_allmulti:
3185  	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3186  	bna_rx_mcast_delall(bnad->rx_info[0].rx);
3187  }
3188  
3189  void
3190  bnad_set_rx_mode(struct net_device *netdev)
3191  {
3192  	struct bnad *bnad = netdev_priv(netdev);
3193  	enum bna_rxmode new_mode, mode_mask;
3194  	unsigned long flags;
3195  
3196  	spin_lock_irqsave(&bnad->bna_lock, flags);
3197  
3198  	if (bnad->rx_info[0].rx == NULL) {
3199  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3200  		return;
3201  	}
3202  
3203  	/* clear bnad flags to update it with new settings */
3204  	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3205  			BNAD_CF_ALLMULTI);
3206  
3207  	new_mode = 0;
3208  	if (netdev->flags & IFF_PROMISC) {
3209  		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3210  		bnad->cfg_flags |= BNAD_CF_PROMISC;
3211  	} else {
3212  		bnad_set_rx_mcast_fltr(bnad);
3213  
3214  		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3215  			new_mode |= BNA_RXMODE_ALLMULTI;
3216  
3217  		bnad_set_rx_ucast_fltr(bnad);
3218  
3219  		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3220  			new_mode |= BNA_RXMODE_DEFAULT;
3221  	}
3222  
3223  	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3224  			BNA_RXMODE_ALLMULTI;
3225  	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3226  
3227  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3228  }
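
/*
 * Precedence in bnad_set_rx_mode(): IFF_PROMISC wins outright and maps to
 * promiscuous plus default-function mode.  Otherwise the mcast/ucast
 * filter helpers decide, via BNAD_CF_ALLMULTI and BNAD_CF_DEFAULT, whether
 * the exact lists fit in hardware or whether the Rx falls back to
 * all-multicast and/or default-function mode; the result is applied in a
 * single bna_rx_mode_set() call.
 */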
3229  
3230  /*
3231   * bna_lock is used to sync writes to netdev->addr
3232   * conf_lock cannot be used since this call may be made
3233   * in a non-blocking context.
3234   */
3235  static int
3236  bnad_set_mac_address(struct net_device *netdev, void *addr)
3237  {
3238  	int err;
3239  	struct bnad *bnad = netdev_priv(netdev);
3240  	struct sockaddr *sa = (struct sockaddr *)addr;
3241  	unsigned long flags;
3242  
3243  	spin_lock_irqsave(&bnad->bna_lock, flags);
3244  
3245  	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3246  	if (!err)
3247  		eth_hw_addr_set(netdev, sa->sa_data);
3248  
3249  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3250  
3251  	return err;
3252  }
3253  
3254  static int
3255  bnad_mtu_set(struct bnad *bnad, int frame_size)
3256  {
3257  	unsigned long flags;
3258  
3259  	init_completion(&bnad->bnad_completions.mtu_comp);
3260  
3261  	spin_lock_irqsave(&bnad->bna_lock, flags);
3262  	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3263  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3264  
3265  	wait_for_completion(&bnad->bnad_completions.mtu_comp);
3266  
3267  	return bnad->bnad_completions.mtu_comp_status;
3268  }
3269  
3270  static int
3271  bnad_change_mtu(struct net_device *netdev, int new_mtu)
3272  {
3273  	int err, mtu;
3274  	struct bnad *bnad = netdev_priv(netdev);
3275  	u32 frame, new_frame;
3276  
3277  	mutex_lock(&bnad->conf_mutex);
3278  
3279  	mtu = netdev->mtu;
3280  	WRITE_ONCE(netdev->mtu, new_mtu);
3281  
3282  	frame = BNAD_FRAME_SIZE(mtu);
3283  	new_frame = BNAD_FRAME_SIZE(new_mtu);
3284  
3285  	/* check if multi-buffer needs to be enabled */
3286  	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3287  	    netif_running(bnad->netdev)) {
3288  		/* only when transition is over 4K */
3289  		if ((frame <= 4096 && new_frame > 4096) ||
3290  		    (frame > 4096 && new_frame <= 4096))
3291  			bnad_reinit_rx(bnad);
3292  	}
3293  
3294  	err = bnad_mtu_set(bnad, new_frame);
3295  	if (err)
3296  		err = -EBUSY;
3297  
3298  	mutex_unlock(&bnad->conf_mutex);
3299  	return err;
3300  }
3301  
3302  static int
3303  bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3304  {
3305  	struct bnad *bnad = netdev_priv(netdev);
3306  	unsigned long flags;
3307  
3308  	if (!bnad->rx_info[0].rx)
3309  		return 0;
3310  
3311  	mutex_lock(&bnad->conf_mutex);
3312  
3313  	spin_lock_irqsave(&bnad->bna_lock, flags);
3314  	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3315  	set_bit(vid, bnad->active_vlans);
3316  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3317  
3318  	mutex_unlock(&bnad->conf_mutex);
3319  
3320  	return 0;
3321  }
3322  
3323  static int
3324  bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3325  {
3326  	struct bnad *bnad = netdev_priv(netdev);
3327  	unsigned long flags;
3328  
3329  	if (!bnad->rx_info[0].rx)
3330  		return 0;
3331  
3332  	mutex_lock(&bnad->conf_mutex);
3333  
3334  	spin_lock_irqsave(&bnad->bna_lock, flags);
3335  	clear_bit(vid, bnad->active_vlans);
3336  	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3337  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3338  
3339  	mutex_unlock(&bnad->conf_mutex);
3340  
3341  	return 0;
3342  }
3343  
3344  static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3345  {
3346  	struct bnad *bnad = netdev_priv(dev);
3347  	netdev_features_t changed = features ^ dev->features;
3348  
3349  	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3350  		unsigned long flags;
3351  
3352  		spin_lock_irqsave(&bnad->bna_lock, flags);
3353  
3354  		if (features & NETIF_F_HW_VLAN_CTAG_RX)
3355  			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3356  		else
3357  			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3358  
3359  		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3360  	}
3361  
3362  	return 0;
3363  }
3364  
3365  #ifdef CONFIG_NET_POLL_CONTROLLER
3366  static void
3367  bnad_netpoll(struct net_device *netdev)
3368  {
3369  	struct bnad *bnad = netdev_priv(netdev);
3370  	struct bnad_rx_info *rx_info;
3371  	struct bnad_rx_ctrl *rx_ctrl;
3372  	u32 curr_mask;
3373  	int i, j;
3374  
3375  	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3376  		bna_intx_disable(&bnad->bna, curr_mask);
3377  		bnad_isr(bnad->pcidev->irq, netdev);
3378  		bna_intx_enable(&bnad->bna, curr_mask);
3379  	} else {
3380  		/*
3381  		 * Tx processing may happen in sending context, so no need
3382  		 * to explicitly process completions here
3383  		 */
3384  
3385  		/* Rx processing */
3386  		for (i = 0; i < bnad->num_rx; i++) {
3387  			rx_info = &bnad->rx_info[i];
3388  			if (!rx_info->rx)
3389  				continue;
3390  			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3391  				rx_ctrl = &rx_info->rx_ctrl[j];
3392  				if (rx_ctrl->ccb)
3393  					bnad_netif_rx_schedule_poll(bnad,
3394  							    rx_ctrl->ccb);
3395  			}
3396  		}
3397  	}
3398  }
3399  #endif
3400  
3401  static const struct net_device_ops bnad_netdev_ops = {
3402  	.ndo_open		= bnad_open,
3403  	.ndo_stop		= bnad_stop,
3404  	.ndo_start_xmit		= bnad_start_xmit,
3405  	.ndo_get_stats64	= bnad_get_stats64,
3406  	.ndo_set_rx_mode	= bnad_set_rx_mode,
3407  	.ndo_validate_addr      = eth_validate_addr,
3408  	.ndo_set_mac_address    = bnad_set_mac_address,
3409  	.ndo_change_mtu		= bnad_change_mtu,
3410  	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3411  	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3412  	.ndo_set_features	= bnad_set_features,
3413  #ifdef CONFIG_NET_POLL_CONTROLLER
3414  	.ndo_poll_controller    = bnad_netpoll
3415  #endif
3416  };
3417  
3418  static void
3419  bnad_netdev_init(struct bnad *bnad)
3420  {
3421  	struct net_device *netdev = bnad->netdev;
3422  
3423  	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3424  		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3425  		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3426  		NETIF_F_HW_VLAN_CTAG_RX;
3427  
3428  	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3429  		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3430  		NETIF_F_TSO | NETIF_F_TSO6;
3431  
3432  	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER |
3433  			    NETIF_F_HIGHDMA;
3434  
3435  	netdev->mem_start = bnad->mmio_start;
3436  	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3437  
3438  	/* MTU range: 46 - 9000 */
3439  	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
3440  	netdev->max_mtu = BNAD_JUMBO_MTU;
3441  
3442  	netdev->netdev_ops = &bnad_netdev_ops;
3443  	bnad_set_ethtool_ops(netdev);
3444  }
3445  
3446  /*
3447   * 1. Initialize the bnad structure
3448   * 2. Setup netdev pointer in pci_dev
3449   * 3. Initialize no. of TxQ & CQs & MSIX vectors
3450   * 4. Initialize work queue.
3451   */
3452  static int
3453  bnad_init(struct bnad *bnad,
3454  	  struct pci_dev *pdev, struct net_device *netdev)
3455  {
3456  	unsigned long flags;
3457  
3458  	SET_NETDEV_DEV(netdev, &pdev->dev);
3459  	pci_set_drvdata(pdev, netdev);
3460  
3461  	bnad->netdev = netdev;
3462  	bnad->pcidev = pdev;
3463  	bnad->mmio_start = pci_resource_start(pdev, 0);
3464  	bnad->mmio_len = pci_resource_len(pdev, 0);
3465  	bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3466  	if (!bnad->bar0) {
3467  		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3468  		return -ENOMEM;
3469  	}
3470  	dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3471  		 (unsigned long long) bnad->mmio_len);
3472  
3473  	spin_lock_irqsave(&bnad->bna_lock, flags);
3474  	if (!bnad_msix_disable)
3475  		bnad->cfg_flags = BNAD_CF_MSIX;
3476  
3477  	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3478  
3479  	bnad_q_num_init(bnad);
3480  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3481  
3482  	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3483  		(bnad->num_rx * bnad->num_rxp_per_rx) +
3484  			 BNAD_MAILBOX_MSIX_VECTORS;
3485  
3486  	bnad->txq_depth = BNAD_TXQ_DEPTH;
3487  	bnad->rxq_depth = BNAD_RXQ_DEPTH;
3488  
3489  	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3490  	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3491  
3492  	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3493  	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3494  	if (!bnad->work_q) {
3495  		iounmap(bnad->bar0);
3496  		return -ENOMEM;
3497  	}
3498  
3499  	return 0;
3500  }
3501  
3502  /*
3503   * Must be called after bnad_pci_uninit()
3504   * so that iounmap() and pci_set_drvdata(NULL)
3505   * happens only after PCI uninitialization.
3506   */
3507  static void
3508  bnad_uninit(struct bnad *bnad)
3509  {
3510  	if (bnad->work_q) {
3511  		destroy_workqueue(bnad->work_q);
3512  		bnad->work_q = NULL;
3513  	}
3514  
3515  	if (bnad->bar0)
3516  		iounmap(bnad->bar0);
3517  }
3518  
3519  /*
3520   * Initialize locks
3521   *	a) Per-ioceth mutex used for serializing configuration
3522   *	   changes from the OS interface
3523   *	b) spin lock used to protect the bna state machine
3524   */
3525  static void
3526  bnad_lock_init(struct bnad *bnad)
3527  {
3528  	spin_lock_init(&bnad->bna_lock);
3529  	mutex_init(&bnad->conf_mutex);
3530  }
3531  
3532  static void
3533  bnad_lock_uninit(struct bnad *bnad)
3534  {
3535  	mutex_destroy(&bnad->conf_mutex);
3536  }
3537  
3538  /* PCI Initialization */
3539  static int
3540  bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
3541  {
3542  	int err;
3543  
3544  	err = pci_enable_device(pdev);
3545  	if (err)
3546  		return err;
3547  	err = pci_request_regions(pdev, BNAD_NAME);
3548  	if (err)
3549  		goto disable_device;
3550  	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3551  	if (err)
3552  		goto release_regions;
3553  	pci_set_master(pdev);
3554  	return 0;
3555  
3556  release_regions:
3557  	pci_release_regions(pdev);
3558  disable_device:
3559  	pci_disable_device(pdev);
3560  
3561  	return err;
3562  }
3563  
3564  static void
3565  bnad_pci_uninit(struct pci_dev *pdev)
3566  {
3567  	pci_release_regions(pdev);
3568  	pci_disable_device(pdev);
3569  }
3570  
3571  static int
3572  bnad_pci_probe(struct pci_dev *pdev,
3573  		const struct pci_device_id *pcidev_id)
3574  {
3575  	int	err;
3576  	struct bnad *bnad;
3577  	struct bna *bna;
3578  	struct net_device *netdev;
3579  	struct bfa_pcidev pcidev_info;
3580  	unsigned long flags;
3581  
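	/*
	 * Make sure a firmware image is available before any per-adapter
	 * setup; bnad_fwimg_mutex serializes this against concurrent probes.
	 */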
3582  	mutex_lock(&bnad_fwimg_mutex);
3583  	if (!cna_get_firmware_buf(pdev)) {
3584  		mutex_unlock(&bnad_fwimg_mutex);
3585  		dev_err(&pdev->dev, "failed to load firmware image!\n");
3586  		return -ENODEV;
3587  	}
3588  	mutex_unlock(&bnad_fwimg_mutex);
3589  
3590  	/*
3591  	 * Allocate a net_device with sizeof(struct bnad) of private area;
3592  	 * bnad is then obtained via netdev_priv(netdev).
3593  	 */
3594  	netdev = alloc_etherdev(sizeof(struct bnad));
3595  	if (!netdev) {
3596  		err = -ENOMEM;
3597  		return err;
3598  	}
3599  	bnad = netdev_priv(netdev);
3600  	bnad_lock_init(bnad);
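	/* Zero-based adapter instance id from the global bna_id counter */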
3601  	bnad->id = atomic_inc_return(&bna_id) - 1;
3602  
3603  	mutex_lock(&bnad->conf_mutex);
3604  	/* PCI initialization */
3605  	err = bnad_pci_init(bnad, pdev);
3606  	if (err)
3607  		goto unlock_mutex;
3608  
3609  	/*
3610  	 * Initialize bnad structure
3611  	 * Setup relation between pci_dev & netdev
3612  	 */
3613  	err = bnad_init(bnad, pdev, netdev);
3614  	if (err)
3615  		goto pci_uninit;
3616  
3617  	/* Initialize netdev structure, set up ethtool ops */
3618  	bnad_netdev_init(bnad);
3619  
3620  	/* Set link to down state */
3621  	netif_carrier_off(netdev);
3622  
3623  	/* Set up the debugfs node for this bnad */
3624  	if (bna_debugfs_enable)
3625  		bnad_debugfs_init(bnad);
3626  
3627  	/* Get resource requirement from bna */
3628  	spin_lock_irqsave(&bnad->bna_lock, flags);
3629  	bna_res_req(&bnad->res_info[0]);
3630  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3631  
3632  	/* Allocate resources from bna */
3633  	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3634  	if (err)
3635  		goto drv_uninit;
3636  
3637  	bna = &bnad->bna;
3638  
3639  	/* Setup pcidev_info for bna_init() */
3640  	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3641  	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3642  	pcidev_info.device_id = bnad->pcidev->device;
3643  	pcidev_info.pci_bar_kva = bnad->bar0;
3644  
3645  	spin_lock_irqsave(&bnad->bna_lock, flags);
3646  	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3647  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3648  
3649  	bnad->stats.bna_stats = &bna->stats;
3650  
3651  	bnad_enable_msix(bnad);
3652  	err = bnad_mbox_irq_alloc(bnad);
3653  	if (err)
3654  		goto res_free;
3655  
3656  	/* Set up IOC timers (state machine, heartbeat, IOCPF, hw semaphore) */
3657  	timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3658  	timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3659  	timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3660  	timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3661  		    0);
3662  
3663  	/*
3664  	 * Start the chip.  If the enable callback reports an error, the
3665  	 * datapath cannot be brought up; probe still returns success via
3666  	 * probe_success, without registering a netdev.
3667  	 */
3668  	err = bnad_ioceth_enable(bnad);
3669  	if (err) {
3670  		dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3671  		goto probe_success;
3672  	}
3673  
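	/*
	 * Request the default number of TxQs and Rx paths plus one.  If the
	 * chip cannot provide that, scale the request down to what bna_attr()
	 * reports and retry; fail the probe with -EIO if the reduced request
	 * is still rejected.
	 */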
3674  	spin_lock_irqsave(&bnad->bna_lock, flags);
3675  	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3676  		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3677  		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3678  			bna_attr(bna)->num_rxp - 1);
3679  		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3680  			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3681  			err = -EIO;
3682  	}
3683  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3684  	if (err)
3685  		goto disable_ioceth;
3686  
3687  	spin_lock_irqsave(&bnad->bna_lock, flags);
3688  	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3689  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3690  
3691  	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3692  	if (err) {
3693  		err = -EIO;
3694  		goto disable_ioceth;
3695  	}
3696  
3697  	spin_lock_irqsave(&bnad->bna_lock, flags);
3698  	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3699  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3700  
3701  	/* Get the burnt-in mac */
3702  	spin_lock_irqsave(&bnad->bna_lock, flags);
3703  	bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3704  	bnad_set_netdev_perm_addr(bnad);
3705  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3706  
3707  	mutex_unlock(&bnad->conf_mutex);
3708  
3709  	/* Finally, register with the net_device layer */
3710  	err = register_netdev(netdev);
3711  	if (err) {
3712  		dev_err(&pdev->dev, "registering net device failed\n");
3713  		goto probe_uninit;
3714  	}
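	/* Record registration so bnad_pci_remove() knows to unregister */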
3715  	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3716  
3717  	return 0;
3718  
3719  probe_success:
3720  	mutex_unlock(&bnad->conf_mutex);
3721  	return 0;
3722  
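/*
 * Error unwind: the labels below run in reverse order of the setup steps
 * above, so a failure at any stage releases exactly what was acquired.
 */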
3723  probe_uninit:
3724  	mutex_lock(&bnad->conf_mutex);
3725  	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3726  disable_ioceth:
3727  	bnad_ioceth_disable(bnad);
3728  	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3729  	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3730  	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3731  	spin_lock_irqsave(&bnad->bna_lock, flags);
3732  	bna_uninit(bna);
3733  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3734  	bnad_mbox_irq_free(bnad);
3735  	bnad_disable_msix(bnad);
3736  res_free:
3737  	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3738  drv_uninit:
3739  	/* Remove the debugfs node for this bnad */
3740  	kfree(bnad->regdata);
3741  	bnad_debugfs_uninit(bnad);
3742  	bnad_uninit(bnad);
3743  pci_uninit:
3744  	bnad_pci_uninit(pdev);
3745  unlock_mutex:
3746  	mutex_unlock(&bnad->conf_mutex);
3747  	bnad_lock_uninit(bnad);
3748  	free_netdev(netdev);
3749  	return err;
3750  }
3751  
3752  static void
3753  bnad_pci_remove(struct pci_dev *pdev)
3754  {
3755  	struct net_device *netdev = pci_get_drvdata(pdev);
3756  	struct bnad *bnad;
3757  	struct bna *bna;
3758  	unsigned long flags;
3759  
3760  	if (!netdev)
3761  		return;
3762  
3763  	bnad = netdev_priv(netdev);
3764  	bna = &bnad->bna;
3765  
3766  	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3767  		unregister_netdev(netdev);
3768  
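	/*
	 * Teardown mirrors the probe error path: disable the IOC and its
	 * timers, uninitialize bna, release resources and IRQs, then release
	 * the PCI device and finally free the netdev.
	 */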
3769  	mutex_lock(&bnad->conf_mutex);
3770  	bnad_ioceth_disable(bnad);
3771  	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3772  	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3773  	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3774  	spin_lock_irqsave(&bnad->bna_lock, flags);
3775  	bna_uninit(bna);
3776  	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3777  
3778  	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3779  	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3780  	bnad_mbox_irq_free(bnad);
3781  	bnad_disable_msix(bnad);
3782  	bnad_pci_uninit(pdev);
3783  	mutex_unlock(&bnad->conf_mutex);
3784  	bnad_lock_uninit(bnad);
3785  	/* Remove the debugfs node for this bnad */
3786  	kfree(bnad->regdata);
3787  	bnad_debugfs_uninit(bnad);
3788  	bnad_uninit(bnad);
3789  	free_netdev(netdev);
3790  }
3791  
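/*
 * Match only the Ethernet-class functions (class/subclass via class_mask)
 * of the CT and CT2 converged adapters.
 */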
3792  static const struct pci_device_id bnad_pci_id_table[] = {
3793  	{
3794  		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3795  			PCI_DEVICE_ID_BROCADE_CT),
3796  		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3797  		.class_mask =  0xffff00
3798  	},
3799  	{
3800  		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3801  			BFA_PCI_DEVICE_ID_CT2),
3802  		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3803  		.class_mask =  0xffff00
3804  	},
3805  	{0,  },
3806  };
3807  
3808  MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3809  
3810  static struct pci_driver bnad_pci_driver = {
3811  	.name = BNAD_NAME,
3812  	.id_table = bnad_pci_id_table,
3813  	.probe = bnad_pci_probe,
3814  	.remove = bnad_pci_remove,
3815  };
3816  
3817  static int __init
3818  bnad_module_init(void)
3819  {
3820  	int err;
3821  
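	/* Propagate the IOC auto-recovery module parameter to the IOC layer */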
3822  	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3823  
3824  	err = pci_register_driver(&bnad_pci_driver);
3825  	if (err < 0) {
3826  		pr_err("bna: PCI driver registration failed err=%d\n", err);
3827  		return err;
3828  	}
3829  
3830  	return 0;
3831  }
3832  
3833  static void __exit
3834  bnad_module_exit(void)
3835  {
3836  	pci_unregister_driver(&bnad_pci_driver);
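	/* Release the firmware image cached by cna_get_firmware_buf() */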
3837  	release_firmware(bfi_fw);
3838  }
3839  
3840  module_init(bnad_module_init);
3841  module_exit(bnad_module_exit);
3842  
3843  MODULE_AUTHOR("Brocade");
3844  MODULE_LICENSE("GPL");
3845  MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3846  MODULE_FIRMWARE(CNA_FW_FILE_CT);
3847  MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3848