// SPDX-License-Identifier: GPL-2.0-only
/*
 * sonic.c
 *
 * (C) 2005 Finn Thain
 *
 * Converted to DMA API, added zero-copy buffer handling, and
 * (from the mac68k project) introduced dhd's support for 16-bit cards.
 *
 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * This driver is based on work from Andreas Busse, but most of
 * the code is rewritten.
 *
 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
 *
 *    Core code included by system sonic drivers
 *
 * And... partially rewritten again by David Huggins-Daines in order
 * to cope with screwed up Macintosh NICs that may or may not use
 * 16-bit DMA.
 *
 * (C) 1999 David Huggins-Daines <dhd@debian.org>
 *
 */

/*
 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
 * controller, and the files "8390.c" and "skeleton.c" in this directory.
 *
 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
 */

static unsigned int version_printed;

static int sonic_debug = -1;
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "debug message level");

static void sonic_msg_init(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

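	/* A negative sonic_debug selects the default mask (no debug
	 * messages); a value of n enables the first n netif message classes.
	 */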
	lp->msg_enable = netif_msg_init(sonic_debug, 0);

	if (version_printed++ == 0)
		netif_dbg(lp, drv, dev, "%s", version);
}

static int sonic_alloc_descriptors(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* Allocate a chunk of memory for the descriptors. Note that this
	 * must not cross a 64K boundary. It is smaller than one page which
	 * means that page alignment is a sufficient condition.
	 */
	lp->descriptors =
		dma_alloc_coherent(lp->device,
				   SIZEOF_SONIC_DESC *
				   SONIC_BUS_SCALE(lp->dma_bitmode),
				   &lp->descriptors_laddr, GFP_KERNEL);

	if (!lp->descriptors)
		return -ENOMEM;

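	/* Carve the coherent block into the CAM descriptor area (cda),
	 * transmit descriptors (tda), receive descriptors (rda) and the
	 * receive resource area (rra), keeping the kernel virtual and
	 * bus ("logical") addresses in step.
	 */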
	lp->cda = lp->descriptors;
	lp->tda = lp->cda + SIZEOF_SONIC_CDA *
			    SONIC_BUS_SCALE(lp->dma_bitmode);
	lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
			    SONIC_BUS_SCALE(lp->dma_bitmode);
	lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
			    SONIC_BUS_SCALE(lp->dma_bitmode);

	lp->cda_laddr = lp->descriptors_laddr;
	lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA *
					SONIC_BUS_SCALE(lp->dma_bitmode);
	lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
					SONIC_BUS_SCALE(lp->dma_bitmode);
	lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
					SONIC_BUS_SCALE(lp->dma_bitmode);

	return 0;
}

/*
 * Open/initialize the SONIC controller.
 *
 * This routine should set everything up anew at each open, even
 *  registers that "should" only need to be set once at boot, so that
 *  there is a non-reboot way to recover if something goes wrong.
 */
static int sonic_open(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

	spin_lock_init(&lp->lock);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
		if (skb == NULL) {
			while (i > 0) { /* free any that were allocated successfully */
				i--;
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		/* align IP header unless DMA requires otherwise */
		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
			skb_reserve(skb, 2);
		lp->rx_skb[i] = skb;
	}

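	/* Hand each receive buffer to the DMA API. skb_put() claims the
	 * full SONIC_RBSIZE payload that the chip may write into.
	 */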
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
						  SONIC_RBSIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->device, laddr)) {
			while (i > 0) { /* free any that were mapped successfully */
				i--;
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			}
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		lp->rx_laddr[i] = laddr;
	}

	/*
	 * Initialize the SONIC
	 */
	sonic_init(dev, true);

	netif_start_queue(dev);

	netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);

	return 0;
}

/* Wait for the SONIC to become idle. */
static void sonic_quiesce(struct net_device *dev, u16 mask, bool may_sleep)
{
	struct sonic_local * __maybe_unused lp = netdev_priv(dev);
	int i;
	u16 bits;

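	/* Poll until the requested command bits clear. Each iteration waits
	 * 20us (may_sleep == false) or 100-200us, so the deadline is on the
	 * order of tens to hundreds of milliseconds.
	 */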
	for (i = 0; i < 1000; ++i) {
		bits = SONIC_READ(SONIC_CMD) & mask;
		if (!bits)
			return;
		if (!may_sleep)
			udelay(20);
		else
			usleep_range(100, 200);
	}
	WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
}

/*
 * Close the SONIC device
 */
static int sonic_close(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifdown, dev, "%s\n", __func__);

	netif_stop_queue(dev);

	/*
	 * stop the SONIC, disable interrupts
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL, true);

	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* unmap and free skbs that haven't been transmitted */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* unmap and free the receive buffers */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if (lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->rx_skb[i]) {
			dev_kfree_skb(lp->rx_skb[i]);
			lp->rx_skb[i] = NULL;
		}
	}

	return 0;
}

static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;
	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts before releasing DMA buffers
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL, false);

	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	/* We could resend the original skbs. Easier to re-initialise. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}
	/* Try to restart the adaptor. */
	sonic_init(dev, false);
	lp->stats.tx_errors++;
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * transmit packet
 *
 * Appends a new TD during transmission, thus avoiding any TX interrupts
 * until we run out of TDs.
 * This routine interacts closely with the ISR in that it may,
 *   set tx_skb[i]
 *   reset the status flags of the new TD
 *   set and reset EOL flags
 *   stop the tx queue
 * The ISR interacts with this routine in various ways. It may,
 *   reset tx_skb[i]
 *   test the EOL and status flags of the TDs
 *   wake the tx queue
 * Concurrently with all of this, the SONIC is potentially writing to
 * the status flags of the TDs.
 */

static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry;
	unsigned long flags;

	netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	/*
	 * Map the packet data into the logical DMA address space
	 */

	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (dma_mapping_error(lp->device, laddr)) {
		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&lp->lock, flags);

	entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
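	/* Mark the new TD as the end of the list before clearing EOL on the
	 * previous one, so the list is never left without an end marker.
	 */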
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL &
		      sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK));

	netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

	lp->eol_tx = entry;

	entry = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[entry]) {
		/* The ring is full, the ISR has yet to process the next TD. */
		netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);
	int status;
	unsigned long flags;

	/* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
	 * with sonic_send_packet() so that the two functions can share state.
	 * Secondly, it makes sonic_interrupt() re-entrant, as that is required
	 * by macsonic which must use two IRQs with different priority levels.
	 */
	spin_lock_irqsave(&lp->lock, flags);

	status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	if (!status) {
		spin_unlock_irqrestore(&lp->lock, flags);

		return IRQ_NONE;
	}

	do {
		SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */

		if (status & SONIC_INT_PKTRX) {
			netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
			sonic_rx(dev);	/* got packet(s) */
		}

		if (status & SONIC_INT_TXDN) {
			int entry = lp->cur_tx;
			int td_status;
			int freed_some = 0;

			/* The state of a Transmit Descriptor may be inferred
			 * from { tx_skb[entry], td_status } as follows.
			 * { clear, clear } => the TD has never been used
			 * { set,   clear } => the TD was handed to SONIC
			 * { set,   set   } => the TD was handed back
			 * { clear, set   } => the TD is available for re-use
			 */

			netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

			while (lp->tx_skb[entry] != NULL) {
				if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
					break;

				if (td_status & SONIC_TCR_PTX) {
					lp->stats.tx_packets++;
					lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				} else {
					if (td_status & (SONIC_TCR_EXD |
					    SONIC_TCR_EXC | SONIC_TCR_BCM))
						lp->stats.tx_aborted_errors++;
					if (td_status &
					    (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
						lp->stats.tx_carrier_errors++;
					if (td_status & SONIC_TCR_OWC)
						lp->stats.tx_window_errors++;
					if (td_status & SONIC_TCR_FU)
						lp->stats.tx_fifo_errors++;
				}

				/* We must free the original skb */
				dev_consume_skb_irq(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;
				/* and unmap DMA buffer */
				dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
				lp->tx_laddr[entry] = (dma_addr_t)0;
				freed_some = 1;

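				/* The TD carrying EOL is the last one queued;
				 * anything beyond it has not been handed to
				 * the SONIC yet, so stop here.
				 */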
				if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
					entry = (entry + 1) & SONIC_TDS_MASK;
					break;
				}
				entry = (entry + 1) & SONIC_TDS_MASK;
			}

			if (freed_some || lp->tx_skb[entry] == NULL)
				netif_wake_queue(dev);  /* The ring is no longer full */
			lp->cur_tx = entry;
		}

		/*
		 * check error conditions
		 */
		if (status & SONIC_INT_RFO) {
			netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
				  __func__);
		}
		if (status & SONIC_INT_RDE) {
			netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
				  __func__);
		}
		if (status & SONIC_INT_RBAE) {
			netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
				  __func__);
		}

		/* counter overruns; all counters are 16bit wide */
		if (status & SONIC_INT_FAE)
			lp->stats.rx_frame_errors += 65536;
		if (status & SONIC_INT_CRC)
			lp->stats.rx_crc_errors += 65536;
		if (status & SONIC_INT_MP)
			lp->stats.rx_missed_errors += 65536;

		/* transmit error */
		if (status & SONIC_INT_TXER) {
			u16 tcr = SONIC_READ(SONIC_TCR);

			netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
				  __func__, tcr);

			if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
				   SONIC_TCR_FU | SONIC_TCR_BCM)) {
				/* Aborted transmission. Try again. */
				netif_stop_queue(dev);
				SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
			}
		}

		/* bus retry */
		if (status & SONIC_INT_BR) {
			printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
				dev->name);
			/* ... to help debug DMA problems causing endless interrupts. */
			/* Bounce the eth interface to turn on the interrupt again. */
			SONIC_WRITE(SONIC_IMR, 0);
		}

		status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	} while (status);

	spin_unlock_irqrestore(&lp->lock, flags);

	return IRQ_HANDLED;
}

/* Return the array index corresponding to a given Receive Buffer pointer. */
static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
			   unsigned int last)
{
	unsigned int i = last;

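	/* Buffers are normally recycled in ring order, so starting the search
	 * just after the previous match usually finds the entry immediately.
	 */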
	do {
		i = (i + 1) & SONIC_RRS_MASK;
		if (addr == lp->rx_laddr[i])
			return i;
	} while (i != last);

	return -ENOENT;
}

/* Allocate and map a new skb to be used as a receive buffer. */
static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
			   struct sk_buff **new_skb, dma_addr_t *new_addr)
{
	*new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
	if (!*new_skb)
		return false;

	if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
		skb_reserve(*new_skb, 2);

	*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
				   SONIC_RBSIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(lp->device, *new_addr)) {
		dev_kfree_skb(*new_skb);
		*new_skb = NULL;
		return false;
	}

	return true;
}

/* Place a new receive resource in the Receive Resource Area and update RWP. */
static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
			     dma_addr_t old_addr, dma_addr_t new_addr)
{
	unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
	unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
	u32 buf;

	/* The resources in the range [RRP, RWP) belong to the SONIC. This loop
	 * scans the other resources in the RRA, those in the range [RWP, RRP).
	 */
	do {
		buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
		      sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);

		if (buf == old_addr)
			break;

		entry = (entry + 1) & SONIC_RRS_MASK;
	} while (entry != end);

	WARN_ONCE(buf != old_addr, "failed to find resource!\n");

	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);

	entry = (entry + 1) & SONIC_RRS_MASK;

	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
}

/*
 * We have a good packet(s), pass it/them up the network stack.
 */
static void sonic_rx(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int entry = lp->cur_rx;
	int prev_entry = lp->eol_rx;
	bool rbe = false;

	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);

		/* If the RD has LPKT set, the chip has finished with the RB */
		if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
			struct sk_buff *new_skb;
			dma_addr_t new_laddr;
			u32 addr = (sonic_rda_get(dev, entry,
						  SONIC_RD_PKTPTR_H) << 16) |
				   sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
			int i = index_from_addr(lp, addr, entry);

			if (i < 0) {
				WARN_ONCE(1, "failed to find buffer!\n");
				break;
			}

			if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
				struct sk_buff *used_skb = lp->rx_skb[i];
				int pkt_len;

				/* Pass the used buffer up the stack */
				dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
						 DMA_FROM_DEVICE);

				pkt_len = sonic_rda_get(dev, entry,
							SONIC_RD_PKTLEN);
				skb_trim(used_skb, pkt_len);
				used_skb->protocol = eth_type_trans(used_skb,
								    dev);
				netif_rx(used_skb);
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pkt_len;

				lp->rx_skb[i] = new_skb;
				lp->rx_laddr[i] = new_laddr;
			} else {
				/* Failed to obtain a new buffer so re-use it */
				new_laddr = addr;
				lp->stats.rx_dropped++;
			}
			/* If RBE is already asserted when RWP advances then
			 * it's safe to clear RBE after processing this packet.
			 */
			rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
			sonic_update_rra(dev, lp, addr, new_laddr);
		}
		/*
		 * give back the descriptor
		 */
		sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);

		prev_entry = entry;
		entry = (entry + 1) & SONIC_RDS_MASK;
	}

	lp->cur_rx = entry;

	if (prev_entry != lp->eol_rx) {
		/* Advance the EOL flag to put descriptors back into service */
		sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
			      sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
			      sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
		lp->eol_rx = prev_entry;
	}

	if (rbe)
		SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
}


/*
 * Get the current statistics.
 * This may be called with the device open or closed.
 */
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* read the tally counters from the SONIC and reset them */
	lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return &lp->stats;
}


/*
 * Set or clear the multicast filter for this adaptor.
 */
static void sonic_multicast_list(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	unsigned int rcr;
	struct netdev_hw_addr *ha;
	unsigned char *addr;
	int i;

	rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
	rcr |= SONIC_RCR_BRD;	/* accept broadcast packets */

	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
		rcr |= SONIC_RCR_PRO;
	} else {
		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 15)) {
			rcr |= SONIC_RCR_AMC;
		} else {
			unsigned long flags;

			netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
				  netdev_mc_count(dev));
			sonic_set_cam_enable(dev, 1);  /* always enable our own address */
			i = 1;
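			/* CAM entry 0 holds the station address; entries 1..15
			 * take the multicast addresses as 16-bit words,
			 * low-order byte first.
			 */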
			netdev_for_each_mc_addr(ha, dev) {
				addr = ha->addr;
				sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
				sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
				sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
				sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
				i++;
			}
			SONIC_WRITE(SONIC_CDC, 16);
			SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);

			/* LCAM and TXP commands can't be used simultaneously */
			spin_lock_irqsave(&lp->lock, flags);
			sonic_quiesce(dev, SONIC_CR_TXP, false);
			SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
			sonic_quiesce(dev, SONIC_CR_LCAM, false);
			spin_unlock_irqrestore(&lp->lock, flags);
		}
	}

	netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);

	SONIC_WRITE(SONIC_RCR, rcr);
}


/*
 * Initialize the SONIC ethernet controller.
 */
static int sonic_init(struct net_device *dev, bool may_sleep)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* While in reset mode, clear CAM Enable register */
	SONIC_WRITE(SONIC_CE, 0);

	/*
	 * clear software reset flag, disable receiver, clear and
	 * enable interrupts, then completely initialize the SONIC
	 */
	SONIC_WRITE(SONIC_CMD, 0);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
	sonic_quiesce(dev, SONIC_CR_ALL, may_sleep);

	/*
	 * initialize the receive resource area
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
		u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
	}

	/* initialize all RRA registers */
	SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
	SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
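	/* The SONIC owns the resources in [RRP, RWP), i.e. all but one entry;
	 * sonic_update_rra() advances RWP as buffers are replenished.
	 */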
	SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
	SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

	/* load the resource pointers */
	netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
	sonic_quiesce(dev, SONIC_CR_RRRA, may_sleep);

	/*
	 * Initialize the receive descriptors so that they
	 * become a circular linked list, i.e. let the last
	 * descriptor point to the first again.
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RDS; i++) {
		sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
		sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
		sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, i, SONIC_RD_LINK,
			lp->rda_laddr +
			((i + 1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
	}
	/* fix last descriptor */
	sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
		(lp->rda_laddr & 0xffff) | SONIC_EOL);
	lp->eol_rx = SONIC_NUM_RDS - 1;
	lp->cur_rx = 0;
	SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
	SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);

	/*
	 * initialize transmit descriptors
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_TDS; i++) {
		sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
		sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
		sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
		sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
		sonic_tda_put(dev, i, SONIC_TD_LINK,
			(lp->tda_laddr & 0xffff) +
			(i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
		lp->tx_skb[i] = NULL;
	}
	/* fix last descriptor */
	sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
		(lp->tda_laddr & 0xffff));

	SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
	SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
	lp->cur_tx = 0;
	lp->eol_tx = SONIC_NUM_TDS - 1;

	/*
	 * put our own address to CAM desc[0]
	 */
	sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
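	/* Enable only CAM entry 0 (the station address) for now;
	 * sonic_multicast_list() enables further entries as needed.
	 */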
	sonic_set_cam_enable(dev, 1);

	for (i = 0; i < 16; i++)
		sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);

	/*
	 * initialize CAM registers
	 */
	SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
	SONIC_WRITE(SONIC_CDC, 16);

	/*
	 * load the CAM
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
	sonic_quiesce(dev, SONIC_CR_LCAM, may_sleep);

	/*
	 * enable receiver, disable loopback
	 * and enable all interrupts
	 */
	SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
	SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);

	netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
		  SONIC_READ(SONIC_CMD));

	return 0;
}

MODULE_LICENSE("GPL");