Lines Matching +full:irq +full:- +full:can +full:- +full:wake

1 // SPDX-License-Identifier: GPL-2.0-only
29 static int port_aaui = -1;
85 static irqreturn_t mace_interrupt(int irq, void *dev_id);
86 static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
87 static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
95 * If we can't get a skbuff when we need it, we use this area for DMA.
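Context for this comment: dummy_buf is a single, driver-wide scratch area that RX descriptors fall back to when no skbuff is available, so the DBDMA engine always has a valid DMA target and the frame is simply dropped. A minimal sketch of the idiom, assuming the RX_BUFLEN constant and the lazy allocation done in mace_probe():

    static unsigned char *dummy_buf;        /* shared fallback DMA area */

    /* in mace_probe(): allocate once, on first probe */
    if (dummy_buf == NULL) {
            dummy_buf = kmalloc(RX_BUFLEN + 2, GFP_KERNEL);
            if (dummy_buf == NULL)
                    return -ENOMEM;
    }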
115 int j, rev, rc = -EBUSY; in mace_probe()
118 printk(KERN_ERR "can't use MACE %pOF: need 3 addrs and 3 irqs\n", in mace_probe()
120 return -ENODEV; in mace_probe()
123 addr = of_get_property(mace, "mac-address", NULL); in mace_probe()
125 addr = of_get_property(mace, "local-mac-address", NULL); in mace_probe()
127 printk(KERN_ERR "Can't get mac-address for MACE %pOF\n", in mace_probe()
129 return -ENODEV; in mace_probe()
134 * lazy allocate the driver-wide dummy buffer. (Note that we in mace_probe()
140 return -ENOMEM; in mace_probe()
144 printk(KERN_ERR "MACE: can't request IO resources !\n"); in mace_probe()
145 return -EBUSY; in mace_probe()
150 rc = -ENOMEM; in mace_probe()
153 SET_NETDEV_DEV(dev, &mdev->ofdev.dev); in mace_probe()
156 mp->mdev = mdev; in mace_probe()
159 dev->base_addr = macio_resource_start(mdev, 0); in mace_probe()
160 mp->mace = ioremap(dev->base_addr, 0x1000); in mace_probe()
161 if (mp->mace == NULL) { in mace_probe()
162 printk(KERN_ERR "MACE: can't map IO resources !\n"); in mace_probe()
163 rc = -ENOMEM; in mace_probe()
166 dev->irq = macio_irq(mdev, 0); in mace_probe()
173 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | in mace_probe()
174 in_8(&mp->mace->chipid_lo); in mace_probe()
178 mp->maccc = ENXMT | ENRCV; in mace_probe()
180 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000); in mace_probe()
181 if (mp->tx_dma == NULL) { in mace_probe()
182 printk(KERN_ERR "MACE: can't map TX DMA resources !\n"); in mace_probe()
183 rc = -ENOMEM; in mace_probe()
186 mp->tx_dma_intr = macio_irq(mdev, 1); in mace_probe()
188 mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000); in mace_probe()
189 if (mp->rx_dma == NULL) { in mace_probe()
190 printk(KERN_ERR "MACE: can't map RX DMA resources !\n"); in mace_probe()
191 rc = -ENOMEM; in mace_probe()
194 mp->rx_dma_intr = macio_irq(mdev, 2); in mace_probe()
196 mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); in mace_probe()
197 mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; in mace_probe()
199 memset((char *) mp->tx_cmds, 0, in mace_probe()
201 timer_setup(&mp->tx_timeout, mace_tx_timeout, 0); in mace_probe()
202 spin_lock_init(&mp->lock); in mace_probe()
203 mp->timeout_active = 0; in mace_probe()
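Note on the tx_cmds/rx_cmds assignments above: both DBDMA command rings are carved out of the tail of the netdev private area, with one extra slot per ring reserved for the branch command that closes the ring. A hedged sketch of the alignment helper this relies on (the authoritative macro lives earlier in mace.c):

    /* round a pointer up to the 16-byte alignment the DBDMA engine
     * requires for struct dbdma_cmd */
    #define DBDMA_ALIGN(x)  (((unsigned long)(x) + sizeof(struct dbdma_cmd) - 1) \
                             & ~(sizeof(struct dbdma_cmd) - 1))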
206 mp->port_aaui = port_aaui; in mace_probe()
210 mp->port_aaui = 1; in mace_probe()
213 mp->port_aaui = 1; in mace_probe()
215 mp->port_aaui = 0; in mace_probe()
220 dev->netdev_ops = &mace_netdev_ops; in mace_probe()
227 rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev); in mace_probe()
229 printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq); in mace_probe()
232 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev); in mace_probe()
234 printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr); in mace_probe()
237 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev); in mace_probe()
239 printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr); in mace_probe()
250 dev->name, dev->dev_addr, in mace_probe()
251 mp->chipid >> 8, mp->chipid & 0xff); in mace_probe()
262 iounmap(mp->rx_dma); in mace_probe()
264 iounmap(mp->tx_dma); in mace_probe()
266 iounmap(mp->mace); in mace_probe()
288 free_irq(dev->irq, dev); in mace_remove()
289 free_irq(mp->tx_dma_intr, dev); in mace_remove()
290 free_irq(mp->rx_dma_intr, dev); in mace_remove()
292 iounmap(mp->rx_dma); in mace_remove()
293 iounmap(mp->tx_dma); in mace_remove()
294 iounmap(mp->mace); in mace_remove()
305 out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16); in dbdma_reset()
311 for (i = 200; i > 0; --i) in dbdma_reset()
312 if (le32_to_cpu(dma->control) & RUN) in dbdma_reset()
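The three lines above are the whole reset idiom. In a DBDMA control register the upper 16 bits are a write mask selecting which of the lower 16 bits to change, so writing (WAKE|FLUSH|PAUSE|RUN) << 16 with a zero data half clears all four bits at once. An annotated reconstruction of dbdma_reset(), under that reading:

    static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
    {
            int i;

            /* mask in the high half, data 0 in the low half:
             * clear RUN, PAUSE, FLUSH and WAKE together */
            out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);
            /* allow up to ~200us for the channel to drop RUN */
            for (i = 200; i > 0; --i)
                    if (le32_to_cpu(dma->control) & RUN)
                            udelay(1);
    }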
319 volatile struct mace __iomem *mb = mp->mace; in mace_reset()
322 /* soft-reset the chip */ in mace_reset()
324 while (--i) { in mace_reset()
325 out_8(&mb->biucc, SWRST); in mace_reset()
326 if (in_8(&mb->biucc) & SWRST) { in mace_reset()
337 out_8(&mb->imr, 0xff); /* disable all intrs for now */ in mace_reset()
338 i = in_8(&mb->ir); in mace_reset()
339 out_8(&mb->maccc, 0); /* turn off tx, rx */ in mace_reset()
341 out_8(&mb->biucc, XMTSP_64); in mace_reset()
342 out_8(&mb->utr, RTRD); in mace_reset()
343 out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST); in mace_reset()
344 out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */ in mace_reset()
345 out_8(&mb->rcvfc, 0); in mace_reset()
348 __mace_set_address(dev, dev->dev_addr); in mace_reset()
351 if (mp->chipid == BROKEN_ADDRCHG_REV) in mace_reset()
352 out_8(&mb->iac, LOGADDR); in mace_reset()
354 out_8(&mb->iac, ADDRCHG | LOGADDR); in mace_reset()
355 while ((in_8(&mb->iac) & ADDRCHG) != 0) in mace_reset()
359 out_8(&mb->ladrf, 0); in mace_reset()
362 if (mp->chipid != BROKEN_ADDRCHG_REV) in mace_reset()
363 out_8(&mb->iac, 0); in mace_reset()
365 if (mp->port_aaui) in mace_reset()
366 out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO); in mace_reset()
368 out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO); in mace_reset()
374 volatile struct mace __iomem *mb = mp->mace; in __mace_set_address()
380 if (mp->chipid == BROKEN_ADDRCHG_REV) in __mace_set_address()
381 out_8(&mb->iac, PHYADDR); in __mace_set_address()
383 out_8(&mb->iac, ADDRCHG | PHYADDR); in __mace_set_address()
384 while ((in_8(&mb->iac) & ADDRCHG) != 0) in __mace_set_address()
388 out_8(&mb->padr, macaddr[i] = p[i]); in __mace_set_address()
392 if (mp->chipid != BROKEN_ADDRCHG_REV) in __mace_set_address()
393 out_8(&mb->iac, 0); in __mace_set_address()
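The iac handshake above is the pattern used everywhere the chip's internal address registers are touched. An annotated restatement, assuming the register and bit names used in this file:

    if (mp->chipid == BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, PHYADDR);            /* buggy rev: select directly */
    else {
            out_8(&mb->iac, ADDRCHG | PHYADDR);  /* request an address change */
            while ((in_8(&mb->iac) & ADDRCHG) != 0)
                    ;                            /* chip clears ADDRCHG when ready */
    }
    for (i = 0; i < 6; ++i)                      /* 6-byte station address */
            out_8(&mb->padr, macaddr[i] = p[i]); /* padr auto-increments */
    if (mp->chipid != BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, 0);                  /* deselect on good revisions */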
399 volatile struct mace __iomem *mb = mp->mace; in mace_set_address()
402 spin_lock_irqsave(&mp->lock, flags); in mace_set_address()
407 out_8(&mb->maccc, mp->maccc); in mace_set_address()
409 spin_unlock_irqrestore(&mp->lock, flags); in mace_set_address()
419 if (mp->rx_bufs[i] != NULL) { in mace_clean_rings()
420 dev_kfree_skb(mp->rx_bufs[i]); in mace_clean_rings()
421 mp->rx_bufs[i] = NULL; in mace_clean_rings()
424 for (i = mp->tx_empty; i != mp->tx_fill; ) { in mace_clean_rings()
425 dev_kfree_skb(mp->tx_bufs[i]); in mace_clean_rings()
434 volatile struct mace __iomem *mb = mp->mace; in mace_open()
435 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_open()
436 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_open()
447 memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd)); in mace_open()
448 cp = mp->rx_cmds; in mace_open()
449 for (i = 0; i < N_RX_RING - 1; ++i) { in mace_open()
454 skb_reserve(skb, 2); /* so IP header lands on 4-byte bdry */ in mace_open()
455 data = skb->data; in mace_open()
457 mp->rx_bufs[i] = skb; in mace_open()
458 cp->req_count = cpu_to_le16(RX_BUFLEN); in mace_open()
459 cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS); in mace_open()
460 cp->phy_addr = cpu_to_le32(virt_to_bus(data)); in mace_open()
461 cp->xfer_status = 0; in mace_open()
464 mp->rx_bufs[i] = NULL; in mace_open()
465 cp->command = cpu_to_le16(DBDMA_STOP); in mace_open()
466 mp->rx_fill = i; in mace_open()
467 mp->rx_empty = 0; in mace_open()
471 cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); in mace_open()
472 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds)); in mace_open()
475 out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ in mace_open()
476 out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds)); in mace_open()
477 out_le32(&rd->control, (RUN << 16) | RUN); in mace_open()
480 cp = mp->tx_cmds + NCMDS_TX * N_TX_RING; in mace_open()
481 cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); in mace_open()
482 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds)); in mace_open()
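Both rings are closed the same way: the slot one past the last real descriptor is a branch-always NOP whose dependent address points back at the head, turning the linear command list into a ring the DBDMA engine can traverse indefinitely. Schematically (ring and nslots are placeholder names, not identifiers from this file):

    cp = ring + nslots;                               /* the extra slot */
    cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); /* unconditional branch */
    cp->cmd_dep = cpu_to_le32(virt_to_bus(ring));     /* target: first command */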
485 out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); in mace_open()
486 out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds)); in mace_open()
487 mp->tx_fill = 0; in mace_open()
488 mp->tx_empty = 0; in mace_open()
489 mp->tx_fullup = 0; in mace_open()
490 mp->tx_active = 0; in mace_open()
491 mp->tx_bad_runt = 0; in mace_open()
494 out_8(&mb->maccc, mp->maccc); in mace_open()
496 out_8(&mb->imr, RCVINT); in mace_open()
504 volatile struct mace __iomem *mb = mp->mace; in mace_close()
505 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_close()
506 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_close()
509 out_8(&mb->maccc, 0); in mace_close()
510 out_8(&mb->imr, 0xff); /* disable all intrs */ in mace_close()
513 rd->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ in mace_close()
514 td->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ in mace_close()
525 if (mp->timeout_active) in mace_set_timeout()
526 del_timer(&mp->tx_timeout); in mace_set_timeout()
527 mp->tx_timeout.expires = jiffies + TX_TIMEOUT; in mace_set_timeout()
528 add_timer(&mp->tx_timeout); in mace_set_timeout()
529 mp->timeout_active = 1; in mace_set_timeout()
535 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_xmit_start()
541 spin_lock_irqsave(&mp->lock, flags); in mace_xmit_start()
542 fill = mp->tx_fill; in mace_xmit_start()
546 if (next == mp->tx_empty) { in mace_xmit_start()
548 mp->tx_fullup = 1; in mace_xmit_start()
549 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
550 return NETDEV_TX_BUSY; /* can't take it at the moment */ in mace_xmit_start()
552 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
555 len = skb->len; in mace_xmit_start()
560 mp->tx_bufs[fill] = skb; in mace_xmit_start()
561 cp = mp->tx_cmds + NCMDS_TX * fill; in mace_xmit_start()
562 cp->req_count = cpu_to_le16(len); in mace_xmit_start()
563 cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data)); in mace_xmit_start()
565 np = mp->tx_cmds + NCMDS_TX * next; in mace_xmit_start()
566 out_le16(&np->command, DBDMA_STOP); in mace_xmit_start()
569 spin_lock_irqsave(&mp->lock, flags); in mace_xmit_start()
570 mp->tx_fill = next; in mace_xmit_start()
571 if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) { in mace_xmit_start()
572 out_le16(&cp->xfer_status, 0); in mace_xmit_start()
573 out_le16(&cp->command, OUTPUT_LAST); in mace_xmit_start()
574 out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); in mace_xmit_start()
575 ++mp->tx_active; in mace_xmit_start()
580 if (next == mp->tx_empty) in mace_xmit_start()
582 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
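The full-ring test above uses the classic one-slot-open convention; the index arithmetic that the matched lines elide looks like this (hedged reconstruction):

    fill = mp->tx_fill;             /* producer index */
    next = fill + 1;
    if (next >= N_TX_RING)
            next = 0;               /* wrap around the ring */
    /* "full" is when advancing the producer would collide with the
     * consumer; one slot always stays empty so full != empty */
    if (next == mp->tx_empty) {
            netif_stop_queue(dev);
            mp->tx_fullup = 1;
            spin_unlock_irqrestore(&mp->lock, flags);
            return NETDEV_TX_BUSY;
    }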
590 volatile struct mace __iomem *mb = mp->mace; in mace_set_multicast()
595 spin_lock_irqsave(&mp->lock, flags); in mace_set_multicast()
596 mp->maccc &= ~PROM; in mace_set_multicast()
597 if (dev->flags & IFF_PROMISC) { in mace_set_multicast()
598 mp->maccc |= PROM; in mace_set_multicast()
603 if (dev->flags & IFF_ALLMULTI) { in mace_set_multicast()
610 crc = ether_crc_le(6, ha->addr); in mace_set_multicast()
622 if (mp->chipid == BROKEN_ADDRCHG_REV) in mace_set_multicast()
623 out_8(&mb->iac, LOGADDR); in mace_set_multicast()
625 out_8(&mb->iac, ADDRCHG | LOGADDR); in mace_set_multicast()
626 while ((in_8(&mb->iac) & ADDRCHG) != 0) in mace_set_multicast()
630 out_8(&mb->ladrf, multicast_filter[i]); in mace_set_multicast()
631 if (mp->chipid != BROKEN_ADDRCHG_REV) in mace_set_multicast()
632 out_8(&mb->iac, 0); in mace_set_multicast()
635 out_8(&mb->maccc, mp->maccc); in mace_set_multicast()
636 spin_unlock_irqrestore(&mp->lock, flags); in mace_set_multicast()
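Between the PROM handling and the filter upload above, the driver builds an 8-byte logical-address filter: the top six bits of the little-endian CRC-32 of each multicast address select one of the 64 filter bits. A hedged reconstruction of that loop:

    unsigned char multicast_filter[8] = { 0 };
    struct netdev_hw_addr *ha;
    unsigned int crc, i;

    netdev_for_each_mc_addr(ha, dev) {
            crc = ether_crc_le(6, ha->addr);  /* CRC-32 of the 6-byte address */
            i = crc >> 26;                    /* top 6 bits: bit index 0..63 */
            multicast_filter[i >> 3] |= 1 << (i & 7);
    }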
641 volatile struct mace __iomem *mb = mp->mace; in mace_handle_misc_intrs()
645 dev->stats.rx_missed_errors += 256; in mace_handle_misc_intrs()
646 dev->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */ in mace_handle_misc_intrs()
648 dev->stats.rx_length_errors += 256; in mace_handle_misc_intrs()
649 dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */ in mace_handle_misc_intrs()
651 ++dev->stats.tx_heartbeat_errors; in mace_handle_misc_intrs()
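These statistics come from 8-bit hardware counters; the interrupt bit only reports that a counter wrapped, and reading the register both returns the residue and clears it. Hedged reconstruction of the accounting pattern:

    if (intr & MPCO)                 /* missed-packet counter overflowed */
            dev->stats.rx_missed_errors += 256;
    dev->stats.rx_missed_errors += in_8(&mb->mpc);   /* read also clears */
    if (intr & RNTPCO)               /* runt-packet counter overflowed */
            dev->stats.rx_length_errors += 256;
    dev->stats.rx_length_errors += in_8(&mb->rntpc); /* read also clears */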
660 static irqreturn_t mace_interrupt(int irq, void *dev_id) in mace_interrupt()
664 volatile struct mace __iomem *mb = mp->mace; in mace_interrupt()
665 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_interrupt()
672 spin_lock_irqsave(&mp->lock, flags); in mace_interrupt()
673 intr = in_8(&mb->ir); /* read interrupt register */ in mace_interrupt()
674 in_8(&mb->xmtrc); /* get retries */ in mace_interrupt()
677 i = mp->tx_empty; in mace_interrupt()
678 while (in_8(&mb->pr) & XMTSV) { in mace_interrupt()
679 del_timer(&mp->tx_timeout); in mace_interrupt()
680 mp->timeout_active = 0; in mace_interrupt()
686 intr = in_8(&mb->ir); in mace_interrupt()
689 if (mp->tx_bad_runt) { in mace_interrupt()
690 fs = in_8(&mb->xmtfs); in mace_interrupt()
691 mp->tx_bad_runt = 0; in mace_interrupt()
692 out_8(&mb->xmtfc, AUTO_PAD_XMIT); in mace_interrupt()
695 dstat = le32_to_cpu(td->status); in mace_interrupt()
697 out_le32(&td->control, RUN << 16); in mace_interrupt()
702 xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK; in mace_interrupt()
711 * help. So we disable auto-padding and FCS transmission in mace_interrupt()
715 out_8(&mb->xmtfc, DXMTFCS); in mace_interrupt()
717 fs = in_8(&mb->xmtfs); in mace_interrupt()
727 cp = mp->tx_cmds + NCMDS_TX * i; in mace_interrupt()
728 stat = le16_to_cpu(cp->xfer_status); in mace_interrupt()
735 x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK; in mace_interrupt()
737 /* there were two bytes with an end-of-packet indication */ in mace_interrupt()
738 mp->tx_bad_runt = 1; in mace_interrupt()
743 * didn't have an end-of-packet indication. in mace_interrupt()
747 out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT); in mace_interrupt()
748 out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU); in mace_interrupt()
750 out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT); in mace_interrupt()
751 out_8(&mb->xmtfc, AUTO_PAD_XMIT); in mace_interrupt()
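To make the workaround explicit: when a transmit is aborted mid-DMA, two bytes stay stuck in the chip and go out as soon as the frame status is read, so the driver first disables auto-padding and FCS generation (the DXMTFCS write above) to turn them into an ignorable runt, then clears and restarts the transmitter. The recovery sequence, restated with comments:

    out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);   /* transmitter off */
    out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU); /* flush the TX FIFO */
    udelay(1);                                      /* let it settle */
    out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);    /* transmitter back on */
    out_8(&mb->xmtfc, AUTO_PAD_XMIT);               /* restore auto-padding */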
755 if (i == mp->tx_fill) { in mace_interrupt()
762 ++dev->stats.tx_errors; in mace_interrupt()
764 ++dev->stats.tx_carrier_errors; in mace_interrupt()
766 ++dev->stats.tx_aborted_errors; in mace_interrupt()
768 dev->stats.tx_bytes += mp->tx_bufs[i]->len; in mace_interrupt()
769 ++dev->stats.tx_packets; in mace_interrupt()
771 dev_consume_skb_irq(mp->tx_bufs[i]); in mace_interrupt()
772 --mp->tx_active; in mace_interrupt()
781 if (i != mp->tx_empty) { in mace_interrupt()
782 mp->tx_fullup = 0; in mace_interrupt()
785 mp->tx_empty = i; in mace_interrupt()
786 i += mp->tx_active; in mace_interrupt()
788 i -= N_TX_RING; in mace_interrupt()
789 if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) { in mace_interrupt()
792 cp = mp->tx_cmds + NCMDS_TX * i; in mace_interrupt()
793 out_le16(&cp->xfer_status, 0); in mace_interrupt()
794 out_le16(&cp->command, OUTPUT_LAST); in mace_interrupt()
795 ++mp->tx_active; in mace_interrupt()
798 } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE); in mace_interrupt()
799 out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); in mace_interrupt()
802 spin_unlock_irqrestore(&mp->lock, flags); in mace_interrupt()
809 struct net_device *dev = macio_get_drvdata(mp->mdev); in mace_tx_timeout()
810 volatile struct mace __iomem *mb = mp->mace; in mace_tx_timeout()
811 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_tx_timeout()
812 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_tx_timeout()
817 spin_lock_irqsave(&mp->lock, flags); in mace_tx_timeout()
818 mp->timeout_active = 0; in mace_tx_timeout()
819 if (mp->tx_active == 0 && !mp->tx_bad_runt) in mace_tx_timeout()
823 mace_handle_misc_intrs(mp, in_8(&mb->ir), dev); in mace_tx_timeout()
825 cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; in mace_tx_timeout()
828 out_8(&mb->maccc, 0); in mace_tx_timeout()
829 printk(KERN_ERR "mace: transmit timeout - resetting\n"); in mace_tx_timeout()
834 cp = bus_to_virt(le32_to_cpu(rd->cmdptr)); in mace_tx_timeout()
836 out_le16(&cp->xfer_status, 0); in mace_tx_timeout()
837 out_le32(&rd->cmdptr, virt_to_bus(cp)); in mace_tx_timeout()
838 out_le32(&rd->control, (RUN << 16) | RUN); in mace_tx_timeout()
841 i = mp->tx_empty; in mace_tx_timeout()
842 mp->tx_active = 0; in mace_tx_timeout()
843 ++dev->stats.tx_errors; in mace_tx_timeout()
844 if (mp->tx_bad_runt) { in mace_tx_timeout()
845 mp->tx_bad_runt = 0; in mace_tx_timeout()
846 } else if (i != mp->tx_fill) { in mace_tx_timeout()
847 dev_kfree_skb_irq(mp->tx_bufs[i]); in mace_tx_timeout()
850 mp->tx_empty = i; in mace_tx_timeout()
852 mp->tx_fullup = 0; in mace_tx_timeout()
854 if (i != mp->tx_fill) { in mace_tx_timeout()
855 cp = mp->tx_cmds + NCMDS_TX * i; in mace_tx_timeout()
856 out_le16(&cp->xfer_status, 0); in mace_tx_timeout()
857 out_le16(&cp->command, OUTPUT_LAST); in mace_tx_timeout()
858 out_le32(&td->cmdptr, virt_to_bus(cp)); in mace_tx_timeout()
859 out_le32(&td->control, (RUN << 16) | RUN); in mace_tx_timeout()
860 ++mp->tx_active; in mace_tx_timeout()
865 out_8(&mb->imr, RCVINT); in mace_tx_timeout()
866 out_8(&mb->maccc, mp->maccc); in mace_tx_timeout()
869 spin_unlock_irqrestore(&mp->lock, flags); in mace_tx_timeout()
872 static irqreturn_t mace_txdma_intr(int irq, void *dev_id) in mace_txdma_intr()
877 static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) in mace_rxdma_intr()
881 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_rxdma_intr()
890 spin_lock_irqsave(&mp->lock, flags); in mace_rxdma_intr()
891 for (i = mp->rx_empty; i != mp->rx_fill; ) { in mace_rxdma_intr()
892 cp = mp->rx_cmds + i; in mace_rxdma_intr()
893 stat = le16_to_cpu(cp->xfer_status); in mace_rxdma_intr()
898 np = mp->rx_cmds + next; in mace_rxdma_intr()
899 if (next != mp->rx_fill && in mace_rxdma_intr()
900 (le16_to_cpu(np->xfer_status) & ACTIVE) != 0) { in mace_rxdma_intr()
906 nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count); in mace_rxdma_intr()
907 out_le16(&cp->command, DBDMA_STOP); in mace_rxdma_intr()
909 skb = mp->rx_bufs[i]; in mace_rxdma_intr()
911 ++dev->stats.rx_dropped; in mace_rxdma_intr()
913 data = skb->data; in mace_rxdma_intr()
914 frame_status = (data[nb-3] << 8) + data[nb-4]; in mace_rxdma_intr()
916 ++dev->stats.rx_errors; in mace_rxdma_intr()
918 ++dev->stats.rx_over_errors; in mace_rxdma_intr()
920 ++dev->stats.rx_frame_errors; in mace_rxdma_intr()
922 ++dev->stats.rx_crc_errors; in mace_rxdma_intr()
929 nb -= 4; in mace_rxdma_intr()
931 nb -= 8; in mace_rxdma_intr()
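The two decrements above exist because the chip's AUTO_STRIP_RCV feature strips the FCS only on 802.3-length frames: Ethernet II frames therefore arrive with 8 trailing octets (4 FCS + 4 receive status), 802.3 frames with 4 (status only). Hedged reconstruction of the surrounding test:

    if (*(unsigned short *)(data + 12) < 1536)  /* 802.3 length field */
            nb -= 4;                            /* strip status only */
    else                                        /* Ethernet II: FCS still there */
            nb -= 8;                            /* strip FCS + status */
    skb_put(skb, nb);                           /* final frame length */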
933 skb->protocol = eth_type_trans(skb, dev); in mace_rxdma_intr()
934 dev->stats.rx_bytes += skb->len; in mace_rxdma_intr()
936 mp->rx_bufs[i] = NULL; in mace_rxdma_intr()
937 ++dev->stats.rx_packets; in mace_rxdma_intr()
940 ++dev->stats.rx_errors; in mace_rxdma_intr()
941 ++dev->stats.rx_length_errors; in mace_rxdma_intr()
948 mp->rx_empty = i; in mace_rxdma_intr()
950 i = mp->rx_fill; in mace_rxdma_intr()
955 if (next == mp->rx_empty) in mace_rxdma_intr()
957 cp = mp->rx_cmds + i; in mace_rxdma_intr()
958 skb = mp->rx_bufs[i]; in mace_rxdma_intr()
963 mp->rx_bufs[i] = skb; in mace_rxdma_intr()
966 cp->req_count = cpu_to_le16(RX_BUFLEN); in mace_rxdma_intr()
967 data = skb? skb->data: dummy_buf; in mace_rxdma_intr()
968 cp->phy_addr = cpu_to_le32(virt_to_bus(data)); in mace_rxdma_intr()
969 out_le16(&cp->xfer_status, 0); in mace_rxdma_intr()
970 out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS); in mace_rxdma_intr()
972 if ((le32_to_cpu(rd->status) & ACTIVE) != 0) { in mace_rxdma_intr()
973 out_le32(&rd->control, (PAUSE << 16) | PAUSE); in mace_rxdma_intr()
974 while ((in_le32(&rd->status) & ACTIVE) != 0) in mace_rxdma_intr()
980 if (i != mp->rx_fill) { in mace_rxdma_intr()
981 out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE)); in mace_rxdma_intr()
982 mp->rx_fill = i; in mace_rxdma_intr()
984 spin_unlock_irqrestore(&mp->lock, flags); in mace_rxdma_intr()
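The PAUSE dance near the end of mace_rxdma_intr() is there because the DBDMA engine prefetches descriptors: before rewritten slots are handed back, an ACTIVE channel is paused and polled until quiescent, and only after every recycled descriptor is stamped INPUT_LAST is it restarted with RUN|WAKE. The quiesce step in isolation:

    if ((le32_to_cpu(rd->status) & ACTIVE) != 0) {
            out_le32(&rd->control, (PAUSE << 16) | PAUSE); /* request pause */
            while ((in_le32(&rd->status) & ACTIVE) != 0)
                    ;                                      /* wait for idle */
    }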
1025 MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");