Lines matching +full:twisted +full:- +full:pair (Sun QuadEthernet "qe" driver, sunqe)

Only one line actually mentions twisted pair; the bare "-" query term also matches any line containing a hyphen or "->", which is why so much of the driver appears below. Matches are grouped by the function they occur in; each line is an individual match, not contiguous code.

// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-mapping.h>

in qec_global_reset():
    while (--tries) {
    return -1;
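
The "while (--tries)" construction recurs throughout this driver: poll a status register a bounded number of times and return -1 if the counter reaches zero. A minimal kernel-style sketch of the idiom, with a hypothetical read_status() and STATUS_DONE bit standing in for the real sbus accessors and register bits:

    /* Bounded-retry poll: pre-decrement means the body runs at most
     * tries - 1 times; falling out with tries == 0 signals a timeout.
     * read_status() and STATUS_DONE are hypothetical stand-ins. */
    static int wait_until_done(void)
    {
            int tries = 200;

            while (--tries) {
                    if (read_status() & STATUS_DONE)
                            return 0;       /* condition met */
                    udelay(20);             /* back off, then re-poll */
            }
            return -1;                      /* timed out */
    }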

in qe_stop():
    void __iomem *cregs = qep->qcregs;
    void __iomem *mregs = qep->mregs;
    while (--tries) {
    return -1;
    while (--tries) {
    return -1;

in qe_init_rings():
    struct qe_init_block *qb = qep->qe_block;
    struct sunqe_buffers *qbufs = qep->buffers;
    __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
    qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
    qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
    qb->qe_rxd[i].rx_flags =
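
These lines set up the receive descriptor ring: every descriptor is pointed at its fixed slot inside one contiguous DMA buffer area, then handed to the chip by setting the ownership bit in its flags. A hedged sketch of the pattern, reusing the driver's RXD_OWN/RXD_LENGTH names but with an illustrative descriptor layout:

    /* Sketch: point each RX descriptor at its buffer slot and mark
     * it chip-owned.  The struct layout is illustrative only. */
    struct rxd { u32 rx_addr; u32 rx_flags; };

    static void init_rx_ring(struct rxd *ring, u32 bufs_dvma,
                             u32 buf_size, int nr)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    ring[i].rx_addr  = bufs_dvma + i * buf_size;
                    ring[i].rx_flags = RXD_OWN | (buf_size & RXD_LENGTH);
            }
    }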

in qe_init():
    struct sunqec *qecp = qep->parent;
    void __iomem *cregs = qep->qcregs;
    void __iomem *mregs = qep->mregs;
    void __iomem *gregs = qecp->gregs;
    const unsigned char *e = &qep->dev->dev_addr[0];
    __u32 qblk_dvma = (__u32)qep->qblock_dvma;
    return -EAGAIN;
    tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
    /* Only usable interface on QuadEther is twisted pair. */
    while (--tries) {
    printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
    qe_set_multicast(qep->dev);
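
The GLOB_MSIZE read is the interesting line here: the QEC divides its local buffer RAM into equal slices, one per channel, so a channel's buffers start at channel * MSIZE. A sketch of the resulting pointer setup; the CREG_RXRBUFPTR/CREG_RXWBUFPTR names are assumptions used for illustration:

    /* Sketch: locate this channel's slice of QEC local memory and
     * aim both RX buffer pointers at its start. */
    u32 msize = sbus_readl(gregs + GLOB_MSIZE);
    u32 base  = qep->channel * msize;       /* this channel's slice */

    sbus_writel(base, cregs + CREG_RXRBUFPTR);
    sbus_writel(base, cregs + CREG_RXWBUFPTR);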

in qe_is_bolixed():
    struct net_device *dev = qep->dev;
    printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
    dev->stats.tx_errors++;
    printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
    dev->stats.tx_errors++;
    dev->stats.tx_carrier_errors++;
    printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
    dev->stats.tx_errors++;
    printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
    dev->stats.tx_errors++;
    dev->stats.collisions++;
    printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
    dev->stats.tx_errors++;
    printk(KERN_ERR "%s: Jabber error.\n", dev->name);
    printk(KERN_ERR "%s: Babble error.\n", dev->name);
    dev->stats.tx_errors += 256;
    dev->stats.collisions += 256;
    printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
    dev->stats.tx_errors++;
    dev->stats.tx_aborted_errors++;
    printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
    dev->stats.tx_errors++;
    printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
    dev->stats.tx_errors++;
    dev->stats.tx_aborted_errors++;
    printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
    dev->stats.tx_errors++;
    dev->stats.tx_aborted_errors++;
    dev->stats.rx_errors += 256;
    dev->stats.collisions += 256;
    dev->stats.rx_errors += 256;
    dev->stats.rx_over_errors += 256;
    dev->stats.rx_errors += 256;
    dev->stats.rx_missed_errors += 256;
    printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
    dev->stats.rx_errors++;
    dev->stats.rx_over_errors++;
    printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
    dev->stats.rx_errors++;
    dev->stats.collisions++;
    dev->stats.rx_errors += 256;
    dev->stats.rx_frame_errors += 256;
    dev->stats.rx_errors += 256;
    dev->stats.rx_crc_errors += 256;
    printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
    dev->stats.rx_errors++;
    dev->stats.rx_dropped++;
    dev->stats.rx_missed_errors++;
    printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
    dev->stats.rx_errors++;
    dev->stats.rx_length_errors++;
    printk(KERN_ERR "%s: Receive late error.\n", dev->name);
    dev->stats.rx_errors++;
    printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
    dev->stats.rx_errors++;
    dev->stats.rx_missed_errors++;
    printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
    dev->stats.rx_errors++;
    dev->stats.rx_missed_errors++;
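
Several branches above add 256 to a statistic in one step. The MACE keeps 8-bit hardware event counters and raises an overflow status bit each time one wraps, so the driver credits a whole wrap (256 events) per overflow instead of reading the counter. A short sketch; the status-bit name is illustrative:

    /* Sketch: an 8-bit collision counter overflowed, so account for
     * 256 events at once.  CREG_STAT_CCOFLOW is illustrative. */
    if (qe_status & CREG_STAT_CCOFLOW) {
            dev->stats.tx_errors  += 256;
            dev->stats.collisions += 256;
    }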

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */

in qe_rx():
    struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
    struct net_device *dev = qep->dev;
    struct sunqe_buffers *qbufs = qep->buffers;
    __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
    int elem = qep->rx_new;
    while (!((flags = this->rx_flags) & RXD_OWN)) {
    &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
    qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
    &rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
    int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */
    dev->stats.rx_errors++;
    dev->stats.rx_length_errors++;
    dev->stats.rx_dropped++;
    dev->stats.rx_dropped++;
    skb->protocol = eth_type_trans(skb, qep->dev);
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += len;
    end_rxd->rx_addr = this_qbuf_dvma;
    end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
    qep->rx_new = elem;
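
Both ring sizes are powers of two, so the modulo operations reduce to bit masks, and the (elem + RX_RING_SIZE) term selects the descriptor one full software ring ahead within the larger hardware ring as the slot to refill. A worked sketch, with sizes assumed for illustration only:

    /* Sketch: with RING_SIZE a power of two,
     * elem & (RING_SIZE - 1) == elem % RING_SIZE, minus the divide. */
    #define RING_SIZE    16    /* assumed */
    #define RING_MAXSIZE 256   /* assumed */

    unsigned int elem = 21;
    unsigned int buf  = elem & (RING_SIZE - 1);                  /*  5 */
    unsigned int next = (elem + RING_SIZE) & (RING_MAXSIZE - 1); /* 37 */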

in qec_interrupt():
    qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
    struct sunqe *qep = qecp->qes[channel];
    qe_status = sbus_readl(qep->qcregs + CREG_STAT);
    if (netif_queue_stopped(qep->dev) &&
    spin_lock(&qep->lock);
    netif_wake_queue(qep->dev);
    sbus_writel(1, qep->qcregs + CREG_TIMASK);
    spin_unlock(&qep->lock);
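
One shared interrupt fans out to the four channels: the handler reads the QEC's global status word, and any channel whose bits are set reads (and thereby acknowledges) its own per-channel status. A sketch of the dispatch loop; the four-bits-per-channel layout is an assumption:

    /* Sketch: walk the per-channel nibbles of the global status. */
    u32 qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
    int channel = 0;

    while (channel < 4 && qec_status) {
            if (qec_status & 0xf) {
                    struct sunqe *qep = qecp->qes[channel];
                    u32 qe_status = sbus_readl(qep->qcregs + CREG_STAT);
                    /* ... error handling, RX, TX wakeup ... */
            }
            qec_status >>= 4;
            channel++;
    }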

in qe_open():
    qep->mconfig = (MREGS_MCONFIG_TXENAB |

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */

in qe_tx_reclaim():
    struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
    int elem = qep->tx_old;
    while (elem != qep->tx_new) {
    qep->tx_old = elem;

in qe_tx_timeout():
    spin_lock_irq(&qep->lock);
    spin_unlock_irq(&qep->lock);
    printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);

in qe_start_xmit():
    struct sunqe_buffers *qbufs = qep->buffers;
    __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
    spin_lock_irq(&qep->lock);
    len = skb->len;
    entry = qep->tx_new;
    txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
    qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
    qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
    qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
    qep->qe_block->qe_txd[entry].tx_flags =
    qep->tx_new = NEXT_TX(entry);
    sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
    dev->stats.tx_packets++;
    dev->stats.tx_bytes += len;
    sbus_writel(0, qep->qcregs + CREG_TIMASK);
    spin_unlock_irq(&qep->lock);
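
Transmit is copy-based: each packet is copied into a preallocated per-slot DMA buffer, the descriptor is published with its ownership flag last, and a doorbell write (CREG_CTRL_TWAKEUP) tells the chip there is work. A hedged sketch under assumptions about the descriptor flag names:

    /* Sketch: copy-and-kick transmit.  TXD_OWN/TXD_SOP/TXD_EOP are
     * assumed flag names; the address and length must be in place
     * before ownership passes to the chip. */
    static void tx_one(struct sunqe *qep, struct sk_buff *skb)
    {
            int entry = qep->tx_new;
            u8 *txbuf = &qep->buffers->tx_buf[entry & (TX_RING_SIZE - 1)][0];
            u32 txbuf_dvma = (u32)qep->buffers_dvma +
                             qebuf_offset(tx_buf, entry & (TX_RING_SIZE - 1));

            skb_copy_from_linear_data(skb, txbuf, skb->len);

            qep->qe_block->qe_txd[entry].tx_addr  = txbuf_dvma;
            qep->qe_block->qe_txd[entry].tx_flags =
                    TXD_OWN | TXD_SOP | TXD_EOP | (skb->len & TXD_LENGTH);
            qep->tx_new = NEXT_TX(entry);

            sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
    }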

in qe_set_multicast():
    u8 new_mconfig = qep->mconfig;
    if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
    qep->mregs + MREGS_IACONFIG);
    while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
    sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
    sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
    } else if (dev->flags & IFF_PROMISC) {
    crc = ether_crc_le(6, ha->addr);
    qep->mregs + MREGS_IACONFIG);
    while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
    sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
    sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
     * the receiver.  So we must re-enable them here or else the MACE
    qep->mconfig = new_mconfig;
    sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);
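
The ether_crc_le() call feeds the classic 64-bit multicast hash filter: the top six bits of each address's CRC pick one of 64 filter bits, and the resulting table is spooled into MREGS_FILTER. A sketch of the table construction; treat the shift amount as an assumption for MACE-class hardware:

    /* Sketch: build a 64-bit multicast hash table. */
    u16 hash_table[4] = { 0 };
    struct netdev_hw_addr *ha;

    netdev_for_each_mc_addr(ha, dev) {
            u32 crc = ether_crc_le(6, ha->addr);

            crc >>= 26;                     /* keep top 6 bits: 0..63 */
            hash_table[crc >> 4] |= 1 << (crc & 0xf);
    }
    /* hash_table[] is then written to MREGS_FILTER a byte at a time */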

in qe_get_drvinfo():
    strscpy(info->driver, "sunqe", sizeof(info->driver));
    strscpy(info->version, "3.0", sizeof(info->version));
    op = qep->op;
    regs = of_get_property(op->dev.of_node, "reg", NULL);
    snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
             regs->which_io);

in qe_get_link():
    void __iomem *mregs = qep->mregs;
    spin_lock_irq(&qep->lock);
    spin_unlock_irq(&qep->lock);

in qec_init_once():
    u8 bsizes = qecp->qec_bursts;
    sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
    sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
    sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
    sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);
    sbus_writel((resource_size(&op->resource[1]) >> 2),
                qecp->gregs + GLOB_MSIZE);
    sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
                qecp->gregs + GLOB_TSIZE);
    sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
                qecp->gregs + GLOB_RSIZE);
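
The arithmetic here divides the controller's local RAM (resource[1]) by four for the per-channel share, then halves each share between transmit and receive. A worked example with an assumed RAM size:

    /* Worked example, sizes assumed: 256 KiB of QEC local RAM. */
    u32 local_ram = 256 * 1024;   /* resource_size(&op->resource[1]) */
    u32 msize = local_ram >> 2;   /* 65536: one channel's share */
    u32 tsize = msize >> 1;       /* 32768: TX half of the share */
    u32 rsize = msize >> 1;       /* 32768: RX half of the share */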

in qec_get_burst():
    bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
    bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
    bsizes = (DMA_BURST32 - 1);
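
Burst sizes come from the device tree: the device node's "burst-sizes" mask is intersected with the parent bus's, and an implausible result falls back to DMA_BURST32 - 1, i.e. the mask of all bursts below 32 bytes. A sketch; the exact sanity test is an assumption:

    /* Sketch: combine device and parent "burst-sizes" bitmasks. */
    u8 bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
    u8 bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

    if (bsizes_more != 0xff)
            bsizes &= bsizes_more;          /* both sides must agree */
    if (bsizes == 0xff || !(bsizes & DMA_BURST16) || !(bsizes & DMA_BURST32))
            bsizes = DMA_BURST32 - 1;       /* all bursts below 32 bytes */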

in get_qec():
    struct platform_device *op = to_platform_device(child->dev.parent);
    qecp->op = op;
    qecp->gregs = of_ioremap(&op->resource[0], 0,
    if (!qecp->gregs)
    ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
    if (qec_global_reset(qecp->gregs))
    qecp->qec_bursts = qec_get_burst(op->dev.of_node);
    if (request_irq(op->archdata.irqs[0], qec_interrupt,
    qecp->next_module = root_qec_dev;
    if (qecp->gregs)
        of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);

in qec_ether_init():
    return -ENOMEM;
    eth_hw_addr_set(dev, idprom->id_ethaddr);
    res = -ENODEV;
    i = of_getintprop_default(op->dev.of_node, "channel#", -1);
    if (i == -1)
    qe->channel = i;
    spin_lock_init(&qe->lock);
    qecp->qes[qe->channel] = qe;
    qe->dev = dev;
    qe->parent = qecp;
    qe->op = op;
    res = -ENOMEM;
    qe->qcregs = of_ioremap(&op->resource[0], 0,
    if (!qe->qcregs) {
    qe->mregs = of_ioremap(&op->resource[1], 0,
    if (!qe->mregs) {
    qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
                                      &qe->qblock_dvma, GFP_ATOMIC);
    qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
                                     &qe->buffers_dvma, GFP_ATOMIC);
    if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
        qe->buffers == NULL || qe->buffers_dvma == 0)
    SET_NETDEV_DEV(dev, &op->dev);
    dev->watchdog_timeo = 5*HZ;
    dev->irq = op->archdata.irqs[0];
    dev->dma = 0;
    dev->ethtool_ops = &qe_ethtool_ops;
    dev->netdev_ops = &qec_ops;
    printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
           dev->dev_addr);
    if (qe->qcregs)
        of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
    if (qe->mregs)
        of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
    if (qe->qe_block)
        dma_free_coherent(&op->dev, PAGE_SIZE,
                          qe->qe_block, qe->qblock_dvma);
    if (qe->buffers)
        dma_free_coherent(&op->dev,
                          sizeof(struct sunqe_buffers),
                          qe->buffers,
                          qe->buffers_dvma);
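
The probe path pairs every dma_alloc_coherent() with a dma_free_coherent() on the error exits, and notably also treats a zero DMA handle as failure. A minimal sketch of the pattern; GFP_KERNEL would also do in probe context, though the driver itself passes GFP_ATOMIC:

    /* Sketch: coherent allocation with a matching error path. */
    void *qe_block;
    dma_addr_t qblock_dvma;

    qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
                                  &qblock_dvma, GFP_KERNEL);
    if (!qe_block)
            return -ENOMEM;

    /* ... and on any later failure: */
    dma_free_coherent(&op->dev, PAGE_SIZE, qe_block, qblock_dvma);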

in qec_sbus_remove():
    struct net_device *net_dev = qp->dev;
    of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
    of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
    dma_free_coherent(&op->dev, PAGE_SIZE,
                      qp->qe_block, qp->qblock_dvma);
    dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
                      qp->buffers, qp->buffers_dvma);

in qec_exit():
    struct sunqec *next = root_qec_dev->next_module;
    struct platform_device *op = root_qec_dev->op;
    free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
    of_iounmap(&op->resource[0], root_qec_dev->gregs,