/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
 * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			ETH_ZLEN
#define B44_MAX_MTU			ETH_DATA_LEN

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

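/* TX ring bookkeeping: TX_RING_GAP is the number of descriptors kept
 * unused so the producer index never wraps onto the consumer;
 * TX_BUFFS_AVAIL is how many descriptors are free for new packets.
 */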
#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
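/* For example, "modprobe b44 b44_debug=0x7" would enable the DRV,
 * PROBE and LINK message classes (see the NETIF_MSG_* bits).
 */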


#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	{},
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

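/* When a descriptor ring had to be allocated with kzalloc() and a
 * streaming DMA mapping (the RX/TX "ring hack" case), its descriptors
 * must be explicitly synced between CPU and device around each update.
 */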
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");

		return -ENODEV;
	}
	return 0;
}

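/* Write one MAC address into the receive CAM slot @index and wait for
 * the CAM engine to finish.
 */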
static inline void __b44_cam_write(struct b44 *bp,
				   const unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
			       int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = bus->priv;
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
				 u16 val)
{
	struct b44 *bp = bus->priv;
	return __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

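/* Accumulate the chip's clear-on-read MIB counters into bp->hw_stats.
 * The TX and RX counter registers are contiguous and are assumed to
 * match the field order of the u64 counters in struct b44_hw_stats.
 */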
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u64 *val;

	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);

	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	u64_stats_update_end(&bp->hw_stats.syncp);
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
	} else {
		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}

static void b44_timer(struct timer_list *t)
{
	struct b44 *bp = from_timer(bp, t, timer);

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

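/* Reclaim TX descriptors the chip has completed: unmap and free each
 * skb, advance the consumer index, and wake the queue once enough
 * descriptors are free again.
 */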
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;
	unsigned bytes_compl = 0, pkts_compl = 0;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;

		dev_consume_skb_irq(skb);
	}

	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

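/* Reuse the buffer at @src_idx for ring slot @dest_idx without
 * reallocating: move the skb and its DMA mapping over and rebuild the
 * destination descriptor, fixing up the end-of-table bit as needed.
 */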
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}

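/* Receive up to @budget packets.  Frames larger than RX_COPY_THRESHOLD
 * keep their original buffer (a fresh one is allocated for the ring);
 * smaller frames are copied into a new skb and the buffer is recycled.
 */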
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = napi_alloc_skb(&bp->napi, len);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		b44_enable_ints(bp);
	}

	return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

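/* Queue one skb for transmission.  Buffers that would map above the
 * chip's 1 GB DMA limit are first copied into a GFP_DMA bounce buffer.
 */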
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					     DMA_TO_DEVICE);

		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						     len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_consume_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	netdev_sent_queue(dev, skb->len);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		WRITE_ONCE(dev->mtu, new_mtu);
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	WRITE_ONCE(dev->mtu, new_mtu);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to dma_alloc_coherent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
			rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to dma_alloc_coherent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
			tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable the PHY if we are doing a partial reset;
	 * we are probably going to power down.
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags |= B44_FLAG_EXTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC & PHY
	 * part of it. This has to be done _after_ we shut down the PHY */
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	else
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	eth_hw_addr_set(dev, addr->sa_data);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

	netdev_reset_queue(bp->dev);
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	timer_setup(&bp->timer, b44_timer, 0);
	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);

	b44_enable_ints(bp);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		phy_start(dev->phydev);

	netif_start_queue(dev);
out:
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

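/* Copy @bytes of a WOL pattern or mask into the chip's filter table at
 * @table_offset, one 32-bit word at a time.
 */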
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

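/* Build a Wake-on-LAN "magic packet" match pattern starting at @offset:
 * six 0xff sync bytes followed by repetitions of the MAC address, with
 * the matching bits set in the byte mask.  Returns the pattern length
 * minus one, the form the WKUP_LEN register expects.
 */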
static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
			     int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++) {
		pmask[len >> 3] |= BIT(len & 7);
		len++;
	}

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			pmask[len >> 3] |= BIT(len & 7);
			len++;
		}
	}
	return len - 1;
}

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{

	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern)
		return;

	/* Ipv4 magic packet pattern - pattern 0. */
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);

}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

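/* Arm Wake-on-LAN: B0 and later chips use the built-in magic packet
 * matcher (DEVCTRL_MPM); older chips fall back to the pattern filter
 * programmed by b44_setup_pseudo_magicp().
 */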
static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		phy_stop(dev->phydev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static void b44_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *nstat)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&hwstat->syncp);

		/* Convert HW stats into rtnl_link_stats64 stats. */
		nstat->rx_packets = hwstat->rx_pkts;
		nstat->tx_packets = hwstat->tx_pkts;
		nstat->rx_bytes   = hwstat->rx_octets;
		nstat->tx_bytes   = hwstat->tx_octets;
		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
				     hwstat->tx_oversize_pkts +
				     hwstat->tx_underruns +
				     hwstat->tx_excessive_cols +
				     hwstat->tx_late_cols);
		nstat->multicast  = hwstat->rx_multicast_pkts;
		nstat->collisions = hwstat->tx_total_cols;

		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
					   hwstat->rx_undersize);
		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
		nstat->rx_frame_errors  = hwstat->rx_align_errs;
		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
					   hwstat->rx_oversize_pkts +
					   hwstat->rx_missed_pkts +
					   hwstat->rx_crc_align_errs +
					   hwstat->rx_undersize +
					   hwstat->rx_crc_errs +
					   hwstat->rx_align_errs +
					   hwstat->rx_symbol_errs);

		nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
		/* Carrier lost counter seems to be broken for some devices */
		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
	} while (u64_stats_fetch_retry(&hwstat->syncp, start));
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i, num_ents;

	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i + 1;
}

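/* Program the receive filter: promiscuous mode where requested (or
 * where no CAM is present), otherwise load the CAM with the unicast
 * address plus up to B44_MCAST_TABLE_SIZE multicast entries and clear
 * the remaining CAM slots.
 */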
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE)
		r = b44_writephy(bp, MII_BMCR,
				 bmcr | BMCR_ANRESTART);
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 supported, advertising;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);
	supported |= (SUPPORTED_100baseT_Half |
		      SUPPORTED_100baseT_Full |
		      SUPPORTED_10baseT_Half |
		      SUPPORTED_10baseT_Full |
		      SUPPORTED_MII);

	advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port = 0;
	cmd->base.phy_address = bp->phy_addr;
	cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->base.autoneg == AUTONEG_ENABLE)
		advertising |= ADVERTISED_Autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	if (!netif_running(dev)) {
		cmd->base.speed = 0;
		cmd->base.duplex = 0xff;
	}

	return 0;
}
1857 
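/* Apply link settings. Gigabit modes are rejected outright; a forced link
 * must be 10/100 half/full. The result is stored in B44_FLAG_* bits and,
 * if the interface is up, pushed to the PHY via b44_setup_phy().
 */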
static int b44_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 speed;
	int ret;
	u32 advertising;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		spin_lock_irq(&bp->lock);
		if (netif_running(dev))
			b44_setup_phy(bp);

		ret = phy_ethtool_ksettings_set(dev->phydev, cmd);

		spin_unlock_irq(&bp->lock);

		return ret;
	}

	speed = cmd->base.speed;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* We do not support gigabit. */
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		if (advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((speed != SPEED_100 &&
		    speed != SPEED_10) ||
		   (cmd->base.duplex != DUPLEX_HALF &&
		    cmd->base.duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->base.duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}

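/* Changing ring sizes requires a full restart: halt the chip, rebuild the
 * rings with the new pending counts, then re-init the hardware.
 */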
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering,
			     struct kernel_ethtool_ringparam *kernel_ering,
			     struct netlink_ext_ack *extack)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (netif_running(dev)) {
		if (bp->flags & B44_FLAG_PAUSE_AUTO) {
			b44_halt(bp);
			b44_init_rings(bp);
			b44_init_hw(bp, B44_FULL_RESET);
		} else {
			__b44_set_flow_ctrl(bp, bp->flags);
		}
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

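/* Copy the hardware counters out for ethtool -S. The u64_stats
 * fetch_begin/fetch_retry pair makes the snapshot consistent against a
 * concurrent b44_stats_update(); note the sequential copy relies on
 * b44_gstrings and struct b44_hw_stats listing the counters in the same
 * order, starting at tx_good_octets.
 */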
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	u32 i;

	spin_lock_irq(&bp->lock);
	b44_stats_update(bp);
	spin_unlock_irq(&bp->lock);

	do {
		data_src = &hwstat->tx_good_octets;
		data_dst = data;
		start = u64_stats_fetch_begin(&hwstat->syncp);

		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
			*data_dst++ = *data_src++;

	} while (u64_stats_fetch_retry(&hwstat->syncp, start));
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
	return 0;
}

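/* ethtool entry points. As a rough map to ethtool(8) usage (illustrative
 * only; "eth0" stands in for whatever name the interface gets):
 *
 *   ethtool eth0                              -> b44_get_link_ksettings()
 *   ethtool -s eth0 speed 100 duplex full ... -> b44_set_link_ksettings()
 *   ethtool -g/-G eth0                        -> b44_{get,set}_ringparam()
 *   ethtool -a/-A eth0                        -> b44_{get,set}_pauseparam()
 *   ethtool -S eth0                           -> b44_get_ethtool_stats()
 *   ethtool -s eth0 wol g                     -> b44_set_wol()
 */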
static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_link_ksettings	= b44_get_link_ksettings,
	.set_link_ksettings	= b44_set_link_ksettings,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		err = phy_mii_ioctl(dev->phydev, ifr, cmd);
	} else {
		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
	}
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

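/* Pull per-device invariants out of the SSB SPROM: the MAC address and
 * PHY address come from the et0 or et1 slots depending on which core
 * instance this is. PHY addresses are masked to 5 bits to work around
 * buggy ROMs.
 */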
static int b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	eth_hw_addr_set(bp->dev, addr);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats64	= b44_get_stats64,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

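/* phylib link-change callback (external PHY only). Mirrors the PHY's
 * duplex setting into B44_FLAG_FULL_DUPLEX and the TX_CTRL_DUPLEX bit,
 * and logs the new link state when something actually changed.
 */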
static void b44_adjust_link(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool status_changed = false;

	BUG_ON(!phydev);

	if (bp->old_link != phydev->link) {
		status_changed = true;
		bp->old_link = phydev->link;
	}

	/* reflect duplex change */
	if (phydev->link) {
		if ((phydev->duplex == DUPLEX_HALF) &&
		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
		} else if ((phydev->duplex == DUPLEX_FULL) &&
			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		}
	}

	if (status_changed) {
		u32 val = br32(bp, B44_TX_CTRL);
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			val |= TX_CTRL_DUPLEX;
		else
			val &= ~TX_CTRL_DUPLEX;
		bw32(bp, B44_TX_CTRL, val);
		phy_print_status(phydev);
	}
}

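/* Register an MDIO bus for the external PHY and attach to it via phylib.
 * If no PHY responds at the SPROM-supplied address but the board flags
 * indicate a switch (ROBO/ADM), fall back to the "fixed-0" emulated PHY.
 * The supported mask is then trimmed to what this 10/100 MAC can do.
 */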
static int b44_register_phy_one(struct b44 *bp)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct mii_bus *mii_bus;
	struct ssb_device *sdev = bp->sdev;
	struct phy_device *phydev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	struct ssb_sprom *sprom = &sdev->bus->sprom;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus) {
		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	mii_bus->priv = bp;
	mii_bus->read = b44_mdio_read_phylib;
	mii_bus->write = b44_mdio_write_phylib;
	mii_bus->name = "b44_eth_mii";
	mii_bus->parent = sdev->dev;
	mii_bus->phy_mask = ~(1 << bp->phy_addr);
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);

	bp->mii_bus = mii_bus;

	err = mdiobus_register(mii_bus);
	if (err) {
		dev_err(sdev->dev, "failed to register MII bus\n");
		goto err_out_mdiobus;
	}

	if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {

		dev_info(sdev->dev,
			 "could not find PHY at %i, use fixed one\n",
			 bp->phy_addr);

		bp->phy_addr = 0;
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
			 bp->phy_addr);
	} else {
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
			 bp->phy_addr);
	}

	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		dev_err(sdev->dev, "could not attach PHY at %i\n",
			bp->phy_addr);
		err = PTR_ERR(phydev);
		goto err_out_mdiobus_unregister;
	}

	/* mask with MAC supported features */
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	bp->old_link = 0;
	bp->phy_addr = phydev->mdio.addr;

	phy_attached_info(phydev);

	return 0;

err_out_mdiobus_unregister:
	mdiobus_unregister(mii_bus);

err_out_mdiobus:
	mdiobus_free(mii_bus);

err_out:
	return err;
}

static void b44_unregister_phy_one(struct b44 *bp)
{
	struct net_device *dev = bp->dev;
	struct mii_bus *mii_bus = bp->mii_bus;

	phy_disconnect(dev->phydev);
	mdiobus_unregister(mii_bus);
	mdiobus_free(mii_bus);
}

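/* SSB probe. Rough order: allocate the netdev, power up the bus, set a
 * 30-bit DMA mask (an addressing limit of this core), read the SPROM
 * invariants, register the netdev, reset the chip and PHY, and finally
 * hook up phylib if an external PHY is present.
 */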
static int b44_init_one(struct ssb_device *sdev,
			const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);
	u64_stats_init(&bp->hw_stats.syncp);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->min_mtu = B44_MIN_MTU;
	dev->max_mtu = B44_MAX_MTU;
	dev->irq = sdev->irq;
	dev->ethtool_ops = &b44_ethtool_ops;

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

	err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
		err = -ENODEV;
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mdio_read_mii;
	bp->mii_if.mdio_write = b44_mdio_write_mii;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	err = b44_phy_reset(bp);
	if (err < 0) {
		dev_err(sdev->dev, "phy reset failed\n");
		goto err_out_unregister_netdev;
	}

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		err = b44_register_phy_one(bp);
		if (err) {
			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
			goto err_out_unregister_netdev;
		}
	}

	device_set_wakeup_capable(sdev->dev, true);
	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	netif_napi_del(&bp->napi);
	free_netdev(dev);

out:
	return err;
}

static void b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_unregister_phy_one(bp);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	netif_napi_del(&bp->napi);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

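/* SSB suspend: stop the timer, halt the MAC and free the rings under the
 * lock, release the IRQ, and, if WoL is enabled, re-init just enough of
 * the hardware (partial reset) to arm magic-packet wakeup.
 */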
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

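/* SSB resume: power the bus back up, rebuild the rings and re-init the
 * hardware before requesting the (shared) IRQ, since the handler may run
 * as soon as it is registered.
 */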
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

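/* Module init: size descriptor syncs to at least a full cacheline,
 * presumably because partial-cacheline DMA syncs are unsafe on some
 * platforms, then register the PCI host glue (if configured) and the
 * SSB driver.
 */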
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);