Lines Matching +full:100 +full:base +full:- +full:tx
3 Written 1999-2000 by Donald Becker.
19 [link no longer provides useful info -jgarzik]
27 /* The user-configurable values.
30 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
37 need a copy-align. */
45 100mbps_hd 100Mbps half duplex.
46 100mbps_fd 100Mbps full duplex.
50 3 100Mbps half duplex.
51 4 100Mbps full duplex.
61 Making the Tx ring too large decreases the effectiveness of channel
63 Tx error recovery.
66 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
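A minimal sketch of how the ring and queue limits above interact, assuming the TX_RING_SIZE value used elsewhere in the driver (the helper below is illustrative, not part of the source): cur_tx and dirty_tx are free-running counters, so their difference is the number of descriptors still outstanding regardless of wrap-around.

    /* Illustrative only; TX_RING_SIZE is assumed, tx_ring_has_room() is a
     * hypothetical helper mirroring the checks in start_tx()/intr_handler(). */
    #define TX_RING_SIZE	32
    #define TX_QUEUE_LEN	(TX_RING_SIZE - 1)	/* limit ring entries actually used */

    static inline int tx_ring_has_room(unsigned int cur_tx, unsigned int dirty_tx)
    {
    	/* Leave slack so the producer never catches the consumer. */
    	return (cur_tx - dirty_tx) < TX_QUEUE_LEN - 1;
    }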
96 #include <linux/dma-mapping.h>
109 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
110 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
120 II. Board-specific settings
126 This driver uses two statically allocated fixed-size descriptor lists
134 This driver uses a zero-copy receive and transmit scheme.
136 open() time and passes the skb->data field to the chip as receive data
143 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
144 using a full-sized skbuff for small frames vs. the copying costs of larger
155 frames are received into the skbuff at an offset of "+2", 16-byte aligning
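The "+2" offset mentioned above is the usual skb_reserve() trick: with the 14-byte Ethernet header, reserving 2 bytes puts the IP header on a 16-byte boundary. A minimal sketch, assuming the netdev_alloc_skb()/skb_reserve() calls the driver uses when it fills the Rx ring (the helper name is hypothetical):

    static struct sk_buff *alloc_aligned_rx_skb(struct net_device *dev, int buf_sz)
    {
    	struct sk_buff *skb = netdev_alloc_skb(dev, buf_sz + 2);

    	if (skb)
    		skb_reserve(skb, 2);	/* 14-byte MAC header + 2 => IP header 16-byte aligned */
    	return skb;
    }

Frames shorter than rx_copybreak are then copied out of this buffer into a minimally sized skbuff (see rx_poll() below), while larger frames are passed up in the original buffer.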
160 The driver runs as two independent, single-threaded flows of control. One
161 is the send-packet routine, which enforces single-threaded use by the
162 dev->tbusy flag. The other thread is the interrupt handler, which is single
165 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
166 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
168 the 'lp->tx_full' flag.
171 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
172 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
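In outline, the reaping described above looks like the loop below; this is a condensed sketch of what intr_handler() does further down the listing, with TX_DESC_DONE standing in for whatever completion bit the hardware actually sets:

    /* Sketch only: free completed Tx buffers, advance dirty_tx, and wake the
     * queue once enough room is available again. */
    for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
    	int entry = np->dirty_tx % TX_RING_SIZE;

    	if (!(le32_to_cpu(np->tx_ring[entry].status) & TX_DESC_DONE))
    		break;			/* chip still owns this descriptor */
    	dev_consume_skb_irq(np->tx_skbuff[entry]);
    	np->tx_skbuff[entry] = NULL;
    }
    if (netif_queue_stopped(dev) &&
        np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
    	netif_wake_queue(dev);		/* clears the 'Tx full' condition */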
182 http://www.scyld.com/expert/100mbps.html
189 /* Work-around for Kendin chip bugs. */
214 {"D-Link DFE-550TX FAST Ethernet Adapter"},
215 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
216 {"D-Link DFE-580TX 4 port Server Adapter"},
217 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
218 {"D-Link DL10050-based FAST Ethernet Adapter"},
224 /* This driver was written to use PCI memory space, however x86-oriented
228 Unlike software-only systems, device drivers interact with complex hardware.
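Whichever address space is used (see the PCI memory-vs-I/O note above), the driver reaches its registers through the generic iomap accessors, so the code reads the same either way; a rough sketch, with the BAR index assumed:

    void __iomem *ioaddr = pci_iomap(pdev, 0 /* BAR index assumed */, 0);

    if (!ioaddr)
    	return -ENOMEM;
    /* The same iowrite/ioread calls work whether the BAR is MMIO or port I/O. */
    iowrite8(100, ioaddr + RxDMAPollPeriod);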
337 /* The Rx and Tx buffer descriptors. */
338 /* Note that using only 32 bit fields simplifies conversion to big-endian
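The descriptor layout itself is elided from this listing; judging from the next_desc, status, and frag.addr/frag.length accesses below it is roughly the following (exact field order assumed):

    struct netdev_desc {
    	__le32 next_desc;	/* bus address of the next descriptor */
    	__le32 status;		/* ownership/completion bits plus frame status */
    	struct desc_frag { __le32 addr, length; } frag[1];	/* single buffer fragment */
    };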
390 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
394 unsigned int default_port:4; /* Last dev->if_port value. */
410 void __iomem *base; member
449 void __iomem *ioaddr = np->base + ASICCtrl; in sundance_reset()
457 if (--countdown == 0) { in sundance_reset()
458 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name); in sundance_reset()
461 udelay(100); in sundance_reset()
470 disable_irq(np->pci_dev->irq); in sundance_poll_controller()
471 intr_handler(np->pci_dev->irq, dev); in sundance_poll_controller()
472 enable_irq(np->pci_dev->irq); in sundance_poll_controller()
498 int chip_idx = ent->driver_data; in sundance_probe1()
514 return -EIO; in sundance_probe1()
517 irq = pdev->irq; in sundance_probe1()
521 return -ENOMEM; in sundance_probe1()
522 SET_NETDEV_DEV(dev, &pdev->dev); in sundance_probe1()
537 np->ndev = dev; in sundance_probe1()
538 np->base = ioaddr; in sundance_probe1()
539 np->pci_dev = pdev; in sundance_probe1()
540 np->chip_id = chip_idx; in sundance_probe1()
541 np->msg_enable = (1 << debug) - 1; in sundance_probe1()
542 spin_lock_init(&np->lock); in sundance_probe1()
543 spin_lock_init(&np->statlock); in sundance_probe1()
544 tasklet_setup(&np->rx_tasklet, rx_poll); in sundance_probe1()
545 tasklet_setup(&np->tx_tasklet, tx_poll); in sundance_probe1()
547 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, in sundance_probe1()
551 np->tx_ring = (struct netdev_desc *)ring_space; in sundance_probe1()
552 np->tx_ring_dma = ring_dma; in sundance_probe1()
554 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, in sundance_probe1()
558 np->rx_ring = (struct netdev_desc *)ring_space; in sundance_probe1()
559 np->rx_ring_dma = ring_dma; in sundance_probe1()
561 np->mii_if.dev = dev; in sundance_probe1()
562 np->mii_if.mdio_read = mdio_read; in sundance_probe1()
563 np->mii_if.mdio_write = mdio_write; in sundance_probe1()
564 np->mii_if.phy_id_mask = 0x1f; in sundance_probe1()
565 np->mii_if.reg_num_mask = 0x1f; in sundance_probe1()
567 /* The chip-specific entries in the device structure. */ in sundance_probe1()
568 dev->netdev_ops = &netdev_ops; in sundance_probe1()
 569 dev->ethtool_ops = &ethtool_ops; in sundance_probe1()
570 dev->watchdog_timeo = TX_TIMEOUT; in sundance_probe1()
572 /* MTU range: 68 - 8191 */ in sundance_probe1()
573 dev->min_mtu = ETH_MIN_MTU; in sundance_probe1()
574 dev->max_mtu = 8191; in sundance_probe1()
583 dev->name, pci_id_tbl[chip_idx].name, ioaddr, in sundance_probe1()
584 dev->dev_addr, irq); in sundance_probe1()
586 np->phys[0] = 1; /* Default setting */ in sundance_probe1()
587 np->mii_preamble_required++; in sundance_probe1()
593 if (sundance_pci_tbl[np->chip_id].device == 0x0200) { in sundance_probe1()
604 np->phys[phy_idx++] = phyx; in sundance_probe1()
605 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); in sundance_probe1()
607 np->mii_preamble_required++; in sundance_probe1()
610 dev->name, phyx, mii_status, np->mii_if.advertising); in sundance_probe1()
613 np->mii_preamble_required--; in sundance_probe1()
617 dev->name, ioread32(ioaddr + ASICCtrl)); in sundance_probe1()
621 np->mii_if.phy_id = np->phys[0]; in sundance_probe1()
624 np->an_enable = 1; in sundance_probe1()
627 np->an_enable = 0; in sundance_probe1()
628 if (strcmp (media[card_idx], "100mbps_fd") == 0 || in sundance_probe1()
630 np->speed = 100; in sundance_probe1()
631 np->mii_if.full_duplex = 1; in sundance_probe1()
632 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || in sundance_probe1()
634 np->speed = 100; in sundance_probe1()
635 np->mii_if.full_duplex = 0; in sundance_probe1()
638 np->speed = 10; in sundance_probe1()
639 np->mii_if.full_duplex = 1; in sundance_probe1()
642 np->speed = 10; in sundance_probe1()
643 np->mii_if.full_duplex = 0; in sundance_probe1()
645 np->an_enable = 1; in sundance_probe1()
649 np->flowctrl = 1; in sundance_probe1()
654 /* Default 100Mbps Full */ in sundance_probe1()
655 if (np->an_enable) { in sundance_probe1()
656 np->speed = 100; in sundance_probe1()
657 np->mii_if.full_duplex = 1; in sundance_probe1()
658 np->an_enable = 0; in sundance_probe1()
662 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET); in sundance_probe1()
665 if (np->flowctrl) in sundance_probe1()
666 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400); in sundance_probe1()
667 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); in sundance_probe1()
669 if (!np->an_enable) { in sundance_probe1()
671 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0; in sundance_probe1()
672 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0; in sundance_probe1()
673 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl); in sundance_probe1()
675 np->speed, np->mii_if.full_duplex ? "Full" : "Half"); in sundance_probe1()
693 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, in sundance_probe1()
694 np->rx_ring, np->rx_ring_dma); in sundance_probe1()
696 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, in sundance_probe1()
697 np->tx_ring, np->tx_ring_dma); in sundance_probe1()
704 return -ENODEV; in sundance_probe1()
710 return -EBUSY; in change_mtu()
711 WRITE_ONCE(dev->mtu, new_mtu); in change_mtu()
726 } while (--boguscnt > 0); in eeprom_read()
731 Read and write the MII registers using software-generated serial
 736 met by back-to-back 33 MHz PCI cycles. */
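A minimal sketch of the bit-banging idea, with placeholder MIICtrl bit values; the driver's real routines (mdio_sync(), mdio_read(), mdio_write()) follow below:

    /* Placeholder bit definitions for illustration only. */
    #define MDIO_ShiftClk	0x01
    #define MDIO_Data	0x02
    #define MDIO_EnbOutput	0x04

    /* Clock one bit out on the management interface: present the data bit,
     * then raise MDC; the dummy reads act as a delay/flush so the clock
     * stays well under the 2.5 MHz MDC limit. */
    static void mdio_out_bit(void __iomem *mdio_addr, int bit)
    {
    	int dataval = bit ? (MDIO_EnbOutput | MDIO_Data) : MDIO_EnbOutput;

    	iowrite8(dataval, mdio_addr);
    	ioread8(mdio_addr);
    	iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
    	ioread8(mdio_addr);
    }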
753 while (--bits >= 0) { in mdio_sync()
764 void __iomem *mdio_addr = np->base + MIICtrl; in mdio_read()
768 if (np->mii_preamble_required) in mdio_read()
772 for (i = 15; i >= 0; i--) { in mdio_read()
780 /* Read the two transition, 16 data, and wire-idle bits. */ in mdio_read()
781 for (i = 19; i > 0; i--) { in mdio_read()
794 void __iomem *mdio_addr = np->base + MIICtrl; in mdio_write()
798 if (np->mii_preamble_required) in mdio_write()
802 for (i = 31; i >= 0; i--) { in mdio_write()
811 for (i = 2; i > 0; i--) { in mdio_write()
826 phy_id = np->phys[0]; in mdio_wait_link()
833 } while (--wait > 0); in mdio_wait_link()
834 return -1; in mdio_wait_link()
840 void __iomem *ioaddr = np->base; in netdev_open()
841 const int irq = np->pci_dev->irq; in netdev_open()
847 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); in netdev_open()
852 printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq); in netdev_open()
856 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); in netdev_open()
857 /* The Tx list pointer is written as packets are queued. */ in netdev_open()
862 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize); in netdev_open()
864 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize); in netdev_open()
866 if (dev->mtu > 2047) in netdev_open()
871 if (dev->if_port == 0) in netdev_open()
872 dev->if_port = np->default_port; in netdev_open()
874 spin_lock_init(&np->mcastlock); in netdev_open()
880 iowrite8(100, ioaddr + RxDMAPollPeriod); in netdev_open()
882 /* Fix DFE-580TX packet drop issue */ in netdev_open()
883 if (np->pci_dev->revision >= 0x14) in netdev_open()
887 spin_lock_irqsave(&np->lock, flags); in netdev_open()
889 spin_unlock_irqrestore(&np->lock, flags); in netdev_open()
895 np->wol_enabled = 0; in netdev_open()
898 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x " in netdev_open()
900 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus), in netdev_open()
905 timer_setup(&np->timer, netdev_timer, 0); in netdev_open()
906 np->timer.expires = jiffies + 3*HZ; in netdev_open()
907 add_timer(&np->timer); in netdev_open()
918 void __iomem *ioaddr = np->base; in check_duplex()
919 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); in check_duplex()
920 int negotiated = mii_lpa & np->mii_if.advertising; in check_duplex()
924 if (!np->an_enable || mii_lpa == 0xffff) { in check_duplex()
925 if (np->mii_if.full_duplex) in check_duplex()
933 if (np->mii_if.full_duplex != duplex) { in check_duplex()
934 np->mii_if.full_duplex = duplex; in check_duplex()
936 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d " in check_duplex()
937 "negotiated capability %4.4x.\n", dev->name, in check_duplex()
938 duplex ? "full" : "half", np->phys[0], negotiated); in check_duplex()
946 struct net_device *dev = np->mii_if.dev; in netdev_timer()
947 void __iomem *ioaddr = np->base; in netdev_timer()
952 "Tx %x Rx %x.\n", in netdev_timer()
953 dev->name, ioread16(ioaddr + IntrEnable), in netdev_timer()
957 np->timer.expires = jiffies + next_tick; in netdev_timer()
958 add_timer(&np->timer); in netdev_timer()
964 void __iomem *ioaddr = np->base; in tx_timeout()
968 tasklet_disable_in_atomic(&np->tx_tasklet); in tx_timeout()
972 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus), in tx_timeout()
979 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), in tx_timeout()
980 le32_to_cpu(np->tx_ring[i].next_desc), in tx_timeout()
981 le32_to_cpu(np->tx_ring[i].status), in tx_timeout()
982 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff, in tx_timeout()
983 le32_to_cpu(np->tx_ring[i].frag.addr), in tx_timeout()
984 le32_to_cpu(np->tx_ring[i].frag.length)); in tx_timeout()
987 ioread32(np->base + TxListPtr), in tx_timeout()
990 np->cur_tx, np->cur_tx % TX_RING_SIZE, in tx_timeout()
991 np->dirty_tx, np->dirty_tx % TX_RING_SIZE); in tx_timeout()
992 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx); in tx_timeout()
993 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task); in tx_timeout()
995 spin_lock_irqsave(&np->lock, flag); in tx_timeout()
 997 /* Stop and restart the chip's Tx processes. */ in tx_timeout()
999 spin_unlock_irqrestore(&np->lock, flag); in tx_timeout()
1001 dev->if_port = 0; in tx_timeout()
1003 netif_trans_update(dev); /* prevent tx timeout */ in tx_timeout()
1004 dev->stats.tx_errors++; in tx_timeout()
1005 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in tx_timeout()
1009 tasklet_enable(&np->tx_tasklet); in tx_timeout()
1013 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1019 np->cur_rx = np->cur_tx = 0; in init_ring()
1020 np->dirty_rx = np->dirty_tx = 0; in init_ring()
1021 np->cur_task = 0; in init_ring()
1023 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16); in init_ring()
1027 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma + in init_ring()
1028 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); in init_ring()
1029 np->rx_ring[i].status = 0; in init_ring()
1030 np->rx_ring[i].frag.length = 0; in init_ring()
1031 np->rx_skbuff[i] = NULL; in init_ring()
1037 netdev_alloc_skb(dev, np->rx_buf_sz + 2); in init_ring()
1038 np->rx_skbuff[i] = skb; in init_ring()
1042 np->rx_ring[i].frag.addr = cpu_to_le32( in init_ring()
1043 dma_map_single(&np->pci_dev->dev, skb->data, in init_ring()
1044 np->rx_buf_sz, DMA_FROM_DEVICE)); in init_ring()
1045 if (dma_mapping_error(&np->pci_dev->dev, in init_ring()
1046 np->rx_ring[i].frag.addr)) { in init_ring()
1048 np->rx_skbuff[i] = NULL; in init_ring()
1051 np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag); in init_ring()
1053 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in init_ring()
1056 np->tx_skbuff[i] = NULL; in init_ring()
1057 np->tx_ring[i].status = 0; in init_ring()
1064 unsigned head = np->cur_task % TX_RING_SIZE; in tx_poll()
1066 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; in tx_poll()
1069 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { in tx_poll()
1070 int entry = np->cur_task % TX_RING_SIZE; in tx_poll()
1071 txdesc = &np->tx_ring[entry]; in tx_poll()
1072 if (np->last_tx) { in tx_poll()
1073 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma + in tx_poll()
1076 np->last_tx = txdesc; in tx_poll()
 1078 /* Mark the latest descriptor in the Tx ring */ in tx_poll()
1079 txdesc->status |= cpu_to_le32(DescIntrOnTx); in tx_poll()
1081 if (ioread32 (np->base + TxListPtr) == 0) in tx_poll()
1082 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), in tx_poll()
1083 np->base + TxListPtr); in tx_poll()
1093 /* Calculate the next Tx descriptor entry. */ in start_tx()
1094 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
1095 np->tx_skbuff[entry] = skb; in start_tx()
1096 txdesc = &np->tx_ring[entry]; in start_tx()
1098 txdesc->next_desc = 0; in start_tx()
1099 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); in start_tx()
1100 txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev, in start_tx()
1101 skb->data, skb->len, DMA_TO_DEVICE)); in start_tx()
1102 if (dma_mapping_error(&np->pci_dev->dev, in start_tx()
1103 txdesc->frag.addr)) in start_tx()
1105 txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag); in start_tx()
1108 np->cur_tx++; in start_tx()
1111 tasklet_schedule(&np->tx_tasklet); in start_tx()
1114 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && in start_tx()
1123 dev->name, np->cur_tx, entry); in start_tx()
1129 np->tx_skbuff[entry] = NULL; in start_tx()
1130 dev->stats.tx_dropped++; in start_tx()
 1134 /* Reset the hardware Tx path and free all Tx buffers */
1139 void __iomem *ioaddr = np->base; in reset_tx()
 1143 /* Reset the Tx logic; TxListPtr will be cleared */ in reset_tx()
 1147 /* Free all Tx skbuffs */ in reset_tx()
1149 np->tx_ring[i].next_desc = 0; in reset_tx()
1151 skb = np->tx_skbuff[i]; in reset_tx()
1153 dma_unmap_single(&np->pci_dev->dev, in reset_tx()
1154 le32_to_cpu(np->tx_ring[i].frag.addr), in reset_tx()
1155 skb->len, DMA_TO_DEVICE); in reset_tx()
1157 np->tx_skbuff[i] = NULL; in reset_tx()
1158 dev->stats.tx_dropped++; in reset_tx()
1161 np->cur_tx = np->dirty_tx = 0; in reset_tx()
1162 np->cur_task = 0; in reset_tx()
1164 np->last_tx = NULL; in reset_tx()
1171 /* The interrupt handler cleans up after the Tx thread,
1177 void __iomem *ioaddr = np->base; in intr_handler()
1190 dev->name, intr_status); in intr_handler()
1200 if (np->budget < 0) in intr_handler()
1201 np->budget = RX_BUDGET; in intr_handler()
1202 tasklet_schedule(&np->rx_tasklet); in intr_handler()
1206 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) { in intr_handler()
1210 dev->name, tx_status); in intr_handler()
1214 dev->name, tx_status); in intr_handler()
1215 dev->stats.tx_errors++; in intr_handler()
1217 dev->stats.tx_fifo_errors++; in intr_handler()
1219 dev->stats.collisions++; in intr_handler()
1221 dev->stats.tx_fifo_errors++; in intr_handler()
1223 dev->stats.tx_window_errors++; in intr_handler()
1227 ** DFE-580TX boards ! phdm@macqel.be. in intr_handler()
1230 /* Restart Tx FIFO and transmitter */ in intr_handler()
1232 /* No need to reset the Tx pointer here */ in intr_handler()
 1234 /* Restart the Tx; make sure the transmitter is enabled */ in intr_handler()
1241 } while (--i); in intr_handler()
1256 if (np->pci_dev->revision >= 0x14) { in intr_handler()
1257 spin_lock(&np->lock); in intr_handler()
1258 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in intr_handler()
1259 int entry = np->dirty_tx % TX_RING_SIZE; in intr_handler()
1263 np->tx_ring[entry].status) >> 2) & 0xff; in intr_handler()
1265 !(le32_to_cpu(np->tx_ring[entry].status) in intr_handler()
1271 skb = np->tx_skbuff[entry]; in intr_handler()
1273 dma_unmap_single(&np->pci_dev->dev, in intr_handler()
1274 le32_to_cpu(np->tx_ring[entry].frag.addr), in intr_handler()
1275 skb->len, DMA_TO_DEVICE); in intr_handler()
1276 dev_consume_skb_irq(np->tx_skbuff[entry]); in intr_handler()
1277 np->tx_skbuff[entry] = NULL; in intr_handler()
1278 np->tx_ring[entry].frag.addr = 0; in intr_handler()
1279 np->tx_ring[entry].frag.length = 0; in intr_handler()
1281 spin_unlock(&np->lock); in intr_handler()
1283 spin_lock(&np->lock); in intr_handler()
1284 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in intr_handler()
1285 int entry = np->dirty_tx % TX_RING_SIZE; in intr_handler()
1287 if (!(le32_to_cpu(np->tx_ring[entry].status) in intr_handler()
1290 skb = np->tx_skbuff[entry]; in intr_handler()
1292 dma_unmap_single(&np->pci_dev->dev, in intr_handler()
1293 le32_to_cpu(np->tx_ring[entry].frag.addr), in intr_handler()
1294 skb->len, DMA_TO_DEVICE); in intr_handler()
1295 dev_consume_skb_irq(np->tx_skbuff[entry]); in intr_handler()
1296 np->tx_skbuff[entry] = NULL; in intr_handler()
1297 np->tx_ring[entry].frag.addr = 0; in intr_handler()
1298 np->tx_ring[entry].frag.length = 0; in intr_handler()
1300 spin_unlock(&np->lock); in intr_handler()
1304 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in intr_handler()
1314 dev->name, ioread16(ioaddr + IntrStatus)); in intr_handler()
1321 struct net_device *dev = np->ndev; in rx_poll()
1322 int entry = np->cur_rx % RX_RING_SIZE; in rx_poll()
1323 int boguscnt = np->budget; in rx_poll()
1324 void __iomem *ioaddr = np->base; in rx_poll()
1329 struct netdev_desc *desc = &(np->rx_ring[entry]); in rx_poll()
1330 u32 frame_status = le32_to_cpu(desc->status); in rx_poll()
1333 if (--boguscnt < 0) { in rx_poll()
1347 dev->stats.rx_errors++; in rx_poll()
1349 dev->stats.rx_length_errors++; in rx_poll()
1351 dev->stats.rx_fifo_errors++; in rx_poll()
1353 dev->stats.rx_frame_errors++; in rx_poll()
1355 dev->stats.rx_crc_errors++; in rx_poll()
1359 dev->name, frame_status); in rx_poll()
1370 to a minimally-sized skbuff. */ in rx_poll()
1374 dma_sync_single_for_cpu(&np->pci_dev->dev, in rx_poll()
1375 le32_to_cpu(desc->frag.addr), in rx_poll()
1376 np->rx_buf_sz, DMA_FROM_DEVICE); in rx_poll()
1377 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); in rx_poll()
1378 dma_sync_single_for_device(&np->pci_dev->dev, in rx_poll()
1379 le32_to_cpu(desc->frag.addr), in rx_poll()
1380 np->rx_buf_sz, DMA_FROM_DEVICE); in rx_poll()
1383 dma_unmap_single(&np->pci_dev->dev, in rx_poll()
1384 le32_to_cpu(desc->frag.addr), in rx_poll()
1385 np->rx_buf_sz, DMA_FROM_DEVICE); in rx_poll()
1386 skb_put(skb = np->rx_skbuff[entry], pkt_len); in rx_poll()
1387 np->rx_skbuff[entry] = NULL; in rx_poll()
1389 skb->protocol = eth_type_trans(skb, dev); in rx_poll()
1390 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */ in rx_poll()
1396 np->cur_rx = entry; in rx_poll()
1398 np->budget -= received; in rx_poll()
1403 np->cur_rx = entry; in rx_poll()
1407 np->budget -= received; in rx_poll()
1408 if (np->budget <= 0) in rx_poll()
1409 np->budget = RX_BUDGET; in rx_poll()
1410 tasklet_schedule(&np->rx_tasklet); in rx_poll()
1419 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; in refill_rx()
1420 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) { in refill_rx()
1422 entry = np->dirty_rx % RX_RING_SIZE; in refill_rx()
1423 if (np->rx_skbuff[entry] == NULL) { in refill_rx()
1424 skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2); in refill_rx()
1425 np->rx_skbuff[entry] = skb; in refill_rx()
1429 np->rx_ring[entry].frag.addr = cpu_to_le32( in refill_rx()
1430 dma_map_single(&np->pci_dev->dev, skb->data, in refill_rx()
1431 np->rx_buf_sz, DMA_FROM_DEVICE)); in refill_rx()
1432 if (dma_mapping_error(&np->pci_dev->dev, in refill_rx()
1433 np->rx_ring[entry].frag.addr)) { in refill_rx()
1435 np->rx_skbuff[entry] = NULL; in refill_rx()
1440 np->rx_ring[entry].frag.length = in refill_rx()
1441 cpu_to_le32(np->rx_buf_sz | LastFrag); in refill_rx()
1442 np->rx_ring[entry].status = 0; in refill_rx()
1448 void __iomem *ioaddr = np->base; in netdev_error()
1454 printk(KERN_INFO "%s: Link up\n", dev->name); in netdev_error()
1455 if (np->an_enable) { in netdev_error()
1456 mii_advertise = mdio_read(dev, np->phys[0], in netdev_error()
1458 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); in netdev_error()
1461 dev->name); in netdev_error()
1463 np->speed = 100; in netdev_error()
1464 printk("100Mbps, full duplex\n"); in netdev_error()
1466 np->speed = 100; in netdev_error()
1467 printk("100Mbps, half duplex\n"); in netdev_error()
1469 np->speed = 10; in netdev_error()
1472 np->speed = 10; in netdev_error()
1478 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR); in netdev_error()
1479 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; in netdev_error()
1480 np->speed = speed; in netdev_error()
1482 dev->name, speed); in netdev_error()
1488 if (np->flowctrl && np->mii_if.full_duplex) { in netdev_error()
1496 printk(KERN_INFO "%s: Link down\n", dev->name); in netdev_error()
1505 dev->name, intr_status); in netdev_error()
1513 void __iomem *ioaddr = np->base; in get_stats()
1517 spin_lock_irqsave(&np->statlock, flags); in get_stats()
1519 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); in get_stats()
1520 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK); in get_stats()
1521 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK); in get_stats()
1522 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); in get_stats()
1525 np->xstats.tx_multiple_collisions += mult_coll; in get_stats()
1527 np->xstats.tx_single_collisions += single_coll; in get_stats()
1529 np->xstats.tx_late_collisions += late_coll; in get_stats()
1530 dev->stats.collisions += mult_coll in get_stats()
1534 np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer); in get_stats()
1535 np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer); in get_stats()
1536 np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort); in get_stats()
1537 np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx); in get_stats()
1538 np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx); in get_stats()
1539 np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx); in get_stats()
1540 np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx); in get_stats()
1542 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); in get_stats()
1543 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; in get_stats()
1544 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); in get_stats()
1545 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16; in get_stats()
1547 spin_unlock_irqrestore(&np->statlock, flags); in get_stats()
1549 return &dev->stats; in get_stats()
1555 void __iomem *ioaddr = np->base; in set_rx_mode()
1560 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ in set_rx_mode()
1564 (dev->flags & IFF_ALLMULTI)) { in set_rx_mode()
1575 crc = ether_crc_le(ETH_ALEN, ha->addr); in set_rx_mode()
1585 if (np->mii_if.full_duplex && np->flowctrl) in set_rx_mode()
1598 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8)); in __set_mac_addr()
1599 iowrite16(addr16, np->base + StationAddr); in __set_mac_addr()
1600 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8)); in __set_mac_addr()
1601 iowrite16(addr16, np->base + StationAddr+2); in __set_mac_addr()
1602 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8)); in __set_mac_addr()
1603 iowrite16(addr16, np->base + StationAddr+4); in __set_mac_addr()
1612 if (!is_valid_ether_addr(addr->sa_data)) in sundance_set_mac_addr()
1613 return -EADDRNOTAVAIL; in sundance_set_mac_addr()
1614 eth_hw_addr_set(dev, addr->sa_data); in sundance_set_mac_addr()
1638 return -EINVAL; in check_if_running()
1645 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in get_drvinfo()
1646 strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in get_drvinfo()
1653 spin_lock_irq(&np->lock); in get_link_ksettings()
1654 mii_ethtool_get_link_ksettings(&np->mii_if, cmd); in get_link_ksettings()
1655 spin_unlock_irq(&np->lock); in get_link_ksettings()
1664 spin_lock_irq(&np->lock); in set_link_ksettings()
1665 res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); in set_link_ksettings()
1666 spin_unlock_irq(&np->lock); in set_link_ksettings()
1673 return mii_nway_restart(&np->mii_if); in nway_reset()
1679 return mii_link_ok(&np->mii_if); in get_link()
1685 return np->msg_enable; in get_msglevel()
1691 np->msg_enable = val; in set_msglevel()
1707 return -EOPNOTSUPP; in get_sset_count()
1718 data[i++] = np->xstats.tx_multiple_collisions; in get_ethtool_stats()
1719 data[i++] = np->xstats.tx_single_collisions; in get_ethtool_stats()
1720 data[i++] = np->xstats.tx_late_collisions; in get_ethtool_stats()
1721 data[i++] = np->xstats.tx_deferred; in get_ethtool_stats()
1722 data[i++] = np->xstats.tx_deferred_excessive; in get_ethtool_stats()
1723 data[i++] = np->xstats.tx_aborted; in get_ethtool_stats()
1724 data[i++] = np->xstats.tx_bcasts; in get_ethtool_stats()
1725 data[i++] = np->xstats.rx_bcasts; in get_ethtool_stats()
1726 data[i++] = np->xstats.tx_mcasts; in get_ethtool_stats()
1727 data[i++] = np->xstats.rx_mcasts; in get_ethtool_stats()
1736 void __iomem *ioaddr = np->base; in sundance_get_wol()
1739 wol->wolopts = 0; in sundance_get_wol()
1741 wol->supported = (WAKE_PHY | WAKE_MAGIC); in sundance_get_wol()
1742 if (!np->wol_enabled) in sundance_get_wol()
1747 wol->wolopts |= WAKE_MAGIC; in sundance_get_wol()
1749 wol->wolopts |= WAKE_PHY; in sundance_get_wol()
1756 void __iomem *ioaddr = np->base; in sundance_set_wol()
1759 if (!device_can_wakeup(&np->pci_dev->dev)) in sundance_set_wol()
1760 return -EOPNOTSUPP; in sundance_set_wol()
1762 np->wol_enabled = !!(wol->wolopts); in sundance_set_wol()
1767 if (np->wol_enabled) { in sundance_set_wol()
1768 if (wol->wolopts & WAKE_MAGIC) in sundance_set_wol()
1770 if (wol->wolopts & WAKE_PHY) in sundance_set_wol()
1775 device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled); in sundance_set_wol()
1806 return -EINVAL; in netdev_ioctl()
1808 spin_lock_irq(&np->lock); in netdev_ioctl()
1809 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL); in netdev_ioctl()
1810 spin_unlock_irq(&np->lock); in netdev_ioctl()
1818 void __iomem *ioaddr = np->base; in netdev_close()
1823 tasklet_kill(&np->rx_tasklet); in netdev_close()
1824 tasklet_kill(&np->tx_tasklet); in netdev_close()
1825 np->cur_tx = 0; in netdev_close()
1826 np->dirty_tx = 0; in netdev_close()
1827 np->cur_task = 0; in netdev_close()
1828 np->last_tx = NULL; in netdev_close()
1833 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x " in netdev_close()
1835 dev->name, ioread8(ioaddr + TxStatus), in netdev_close()
1837 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", in netdev_close()
1838 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); in netdev_close()
 1844 /* Disable Rx and Tx DMA so resources can be released safely */ in netdev_close()
1847 /* Stop the chip's Tx and Rx processes. */ in netdev_close()
1850 for (i = 2000; i > 0; i--) { in netdev_close()
1859 for (i = 2000; i > 0; i--) { in netdev_close()
1867 printk(KERN_DEBUG " Tx ring at %8.8x:\n", in netdev_close()
1868 (int)(np->tx_ring_dma)); in netdev_close()
1871 i, np->tx_ring[i].status, np->tx_ring[i].frag.addr, in netdev_close()
1872 np->tx_ring[i].frag.length); in netdev_close()
1874 (int)(np->rx_ring_dma)); in netdev_close()
1877 i, np->rx_ring[i].status, np->rx_ring[i].frag.addr, in netdev_close()
1878 np->rx_ring[i].frag.length); in netdev_close()
1883 free_irq(np->pci_dev->irq, dev); in netdev_close()
1885 del_timer_sync(&np->timer); in netdev_close()
1889 np->rx_ring[i].status = 0; in netdev_close()
1890 skb = np->rx_skbuff[i]; in netdev_close()
1892 dma_unmap_single(&np->pci_dev->dev, in netdev_close()
1893 le32_to_cpu(np->rx_ring[i].frag.addr), in netdev_close()
1894 np->rx_buf_sz, DMA_FROM_DEVICE); in netdev_close()
1896 np->rx_skbuff[i] = NULL; in netdev_close()
1898 np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */ in netdev_close()
1901 np->tx_ring[i].next_desc = 0; in netdev_close()
1902 skb = np->tx_skbuff[i]; in netdev_close()
1904 dma_unmap_single(&np->pci_dev->dev, in netdev_close()
1905 le32_to_cpu(np->tx_ring[i].frag.addr), in netdev_close()
1906 skb->len, DMA_TO_DEVICE); in netdev_close()
1908 np->tx_skbuff[i] = NULL; in netdev_close()
1922 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, in sundance_remove1()
1923 np->rx_ring, np->rx_ring_dma); in sundance_remove1()
1924 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, in sundance_remove1()
1925 np->tx_ring, np->tx_ring_dma); in sundance_remove1()
1926 pci_iounmap(pdev, np->base); in sundance_remove1()
1936 void __iomem *ioaddr = np->base; in sundance_suspend()
1944 if (np->wol_enabled) { in sundance_suspend()
1949 device_set_wakeup_enable(dev_d, np->wol_enabled); in sundance_suspend()
1965 dev->name); in sundance_resume()