Lines matching "npe-handle" in drivers/net/ethernet/xscale/ixp4xx_eth.c (search query: +full:npe +full:- +full:handle)

1 // SPDX-License-Identifier: GPL-2.0-only
10 * NPE            0 (NPE-A)   1 (NPE-B)   2 (NPE-C)
13 * RX-free queue  26          27          28
14 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
17 * bits 0 -> 1 - NPE ID (RX and TX-done)
18 * bits 0 -> 2 - priority (TX, per 802.1D)
19 * bits 3 -> 4 - port ID (user-set?)
20 * bits 5 -> 31 - physical descriptor address
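
A queue entry is thus a descriptor's physical address with metadata packed into its low five bits, which is why the driver masks entries with ~0x1F before using them as addresses (see queue_get_desc() and eth_txdone_irq() below). A minimal decoding sketch under that layout; the helper names are illustrative, not from the driver:

    /* Decode a queue entry per the bit layout above. Descriptors are
     * at least 32-byte aligned, so the address survives the packing. */
    static inline u32 qentry_desc_phys(u32 entry)
    {
            return entry & ~0x1F;           /* bits 5 -> 31 */
    }

    static inline unsigned int qentry_npe_id(u32 entry)
    {
            return entry & 3;               /* bits 0 -> 1 (RX and TX-done) */
    }

    static inline unsigned int qentry_port_id(u32 entry)
    {
            return (entry >> 3) & 3;        /* bits 3 -> 4 */
    }
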
24 #include <linux/dma-mapping.h>
39 #include <linux/soc/ixp4xx/npe.h>
75 #define MAX_MRU (14320 - VLAN_ETH_HLEN)
81 #define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
91 #define PORT2CHANNEL(p) NPE_ID(p->id)
119 #define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */
128 /* NPE message codes */
164 /* Information about built-in Ethernet MAC interfaces */
166 u8 rxq; /* configurable, currently 0 - 31 only */
169 u8 npe; /* NPE instance used by this interface */ member
196 struct npe *npe; member
210 /* NPE message structure */
259 #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
261 #define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
263 #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
265 #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
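
These macros describe one contiguous DMA-coherent table: RX_DESCS RX descriptors followed by TX_DESCS TX descriptors. Going from a physical address handed back by a queue to a table index is plain arithmetic, exactly what queue_get_desc() and eth_txdone_irq() do below; a sketch assuming that layout:

    /* Reverse mapping implied by the macros above:
     * table = [RX_DESCS rx descs][TX_DESCS tx descs], contiguous. */
    static int rx_index_from_phys(struct port *port, u32 phys)
    {
            return (phys - port->desc_tab_phys) / sizeof(struct desc);
    }

    static int tx_index_from_phys(struct port *port, u32 phys)
    {
            /* TX descriptors start right after the RX block */
            return (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
    }
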
286 u8 *data = skb->data; in ixp_ptp_match()
296 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid)) in ixp_ptp_match()
317 if (!port->hwts_rx_en) in ixp_rx_timestamp()
322 regs = port->timesync_regs; in ixp_rx_timestamp()
324 val = __raw_readl(&regs->channel[ch].ch_event); in ixp_rx_timestamp()
329 lo = __raw_readl(&regs->channel[ch].src_uuid_lo); in ixp_rx_timestamp()
330 hi = __raw_readl(&regs->channel[ch].src_uuid_hi); in ixp_rx_timestamp()
338 lo = __raw_readl(&regs->channel[ch].rx_snap_lo); in ixp_rx_timestamp()
339 hi = __raw_readl(&regs->channel[ch].rx_snap_hi); in ixp_rx_timestamp()
346 shhwtstamps->hwtstamp = ns_to_ktime(ns); in ixp_rx_timestamp()
348 __raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event); in ixp_rx_timestamp()
360 if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en)) in ixp_tx_timestamp()
361 shtx->tx_flags |= SKBTX_IN_PROGRESS; in ixp_tx_timestamp()
367 regs = port->timesync_regs; in ixp_tx_timestamp()
374 val = __raw_readl(&regs->channel[ch].ch_event); in ixp_tx_timestamp()
380 shtx->tx_flags &= ~SKBTX_IN_PROGRESS; in ixp_tx_timestamp()
384 lo = __raw_readl(&regs->channel[ch].tx_snap_lo); in ixp_tx_timestamp()
385 hi = __raw_readl(&regs->channel[ch].tx_snap_hi); in ixp_tx_timestamp()
394 __raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event); in ixp_tx_timestamp()
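
Both timestamp paths use the same IXP46x snapshot handshake: check the channel event register for the RX/TX_SNAPSHOT_LOCKED bit, read the 64-bit snapshot from the lo/hi register pair, convert to nanoseconds, then write the locked bit back to clear it and re-arm the unit. A condensed sketch; the register and struct names follow the fragments above, and the tick-to-nanosecond scaling done by the full driver is omitted:

    static u64 read_snapshot_ns(struct ixp46x_ts_regs *regs, int ch, int tx)
    {
            u32 lo, hi;

            /* caller has already seen *_SNAPSHOT_LOCKED in ch_event */
            lo = __raw_readl(tx ? &regs->channel[ch].tx_snap_lo
                                : &regs->channel[ch].rx_snap_lo);
            hi = __raw_readl(tx ? &regs->channel[ch].tx_snap_hi
                                : &regs->channel[ch].rx_snap_hi);

            /* writing the locked bit back clears it and re-arms */
            __raw_writel(tx ? TX_SNAPSHOT_LOCKED : RX_SNAPSHOT_LOCKED,
                         &regs->channel[ch].ch_event);

            return ((u64)hi << 32) | lo;    /* raw ticks, not yet scaled */
    }
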
405 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) in hwtstamp_set()
406 return -EFAULT; in hwtstamp_set()
408 ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); in hwtstamp_set()
413 regs = port->timesync_regs; in hwtstamp_set()
416 return -ERANGE; in hwtstamp_set()
420 port->hwts_rx_en = 0; in hwtstamp_set()
423 port->hwts_rx_en = PTP_SLAVE_MODE; in hwtstamp_set()
424 __raw_writel(0, &regs->channel[ch].ch_control); in hwtstamp_set()
427 port->hwts_rx_en = PTP_MASTER_MODE; in hwtstamp_set()
428 __raw_writel(MASTER_MODE, &regs->channel[ch].ch_control); in hwtstamp_set()
431 return -ERANGE; in hwtstamp_set()
434 port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; in hwtstamp_set()
438 &regs->channel[ch].ch_event); in hwtstamp_set()
440 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; in hwtstamp_set()
449 cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; in hwtstamp_get()
451 switch (port->hwts_rx_en) { in hwtstamp_get()
463 return -ERANGE; in hwtstamp_get()
466 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; in hwtstamp_get()
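
From userspace this pair is driven by the standard SIOCSHWTSTAMP ioctl: fill a struct hwtstamp_config, point ifr_data at it, and the driver writes the accepted configuration back. A minimal sketch (the interface name is an example; the filter choice matches the PTP v1 over L4 modes this driver handles):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/net_tstamp.h>

    int enable_hw_timestamps(int sock)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            /* 0 on success; the driver may adjust cfg and echo it back */
            return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }
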
474 if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) { in ixp4xx_mdio_cmd()
475 printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name); in ixp4xx_mdio_cmd()
476 return -1; in ixp4xx_mdio_cmd()
480 __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]); in ixp4xx_mdio_cmd()
481 __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]); in ixp4xx_mdio_cmd()
484 &mdio_regs->mdio_command[2]); in ixp4xx_mdio_cmd()
486 &mdio_regs->mdio_command[3]); in ixp4xx_mdio_cmd()
489 (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) { in ixp4xx_mdio_cmd()
495 printk(KERN_ERR "%s #%i: MII write failed\n", bus->name, in ixp4xx_mdio_cmd()
497 return -1; in ixp4xx_mdio_cmd()
501 printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name, in ixp4xx_mdio_cmd()
508 if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) { in ixp4xx_mdio_cmd()
510 printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name, in ixp4xx_mdio_cmd()
516 return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) | in ixp4xx_mdio_cmd()
517 ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8); in ixp4xx_mdio_cmd()
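
The MDIO block exposes the 16-bit MII datum one byte per 32-bit register: command[0]/[1] take the low/high bytes of a write, status[0]/[1] return the low/high bytes of a read, and bit 0x80 of slot 3 doubles as the go/busy flag (command) and the read-failed flag (status). Distilled from the lines above:

    static void mdio_put_u16(u32 __iomem *regs, u16 val)
    {
            __raw_writel(val & 0xFF, &regs[0]);     /* low byte */
            __raw_writel(val >> 8, &regs[1]);       /* high byte */
    }

    static int mdio_get_u16(u32 __iomem *regs)
    {
            if (__raw_readl(&regs[3]) & 0x80)       /* read failed */
                    return -1;
            return (__raw_readl(&regs[0]) & 0xFF) |
                   ((__raw_readl(&regs[1]) & 0xFF) << 8);
    }
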
529 printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name, in ixp4xx_mdio_read()
545 printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n", in ixp4xx_mdio_write()
546 bus->name, phy_id, location, val, ret); in ixp4xx_mdio_write()
556 return -ENOMEM; in ixp4xx_mdio_register()
559 __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control); in ixp4xx_mdio_register()
560 mdio_bus->name = "IXP4xx MII Bus"; in ixp4xx_mdio_register()
561 mdio_bus->read = &ixp4xx_mdio_read; in ixp4xx_mdio_register()
562 mdio_bus->write = &ixp4xx_mdio_write; in ixp4xx_mdio_register()
563 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0"); in ixp4xx_mdio_register()
581 struct phy_device *phydev = dev->phydev; in ixp4xx_adjust_link()
583 if (!phydev->link) { in ixp4xx_adjust_link()
584 if (port->speed) { in ixp4xx_adjust_link()
585 port->speed = 0; in ixp4xx_adjust_link()
586 printk(KERN_INFO "%s: link down\n", dev->name); in ixp4xx_adjust_link()
591 if (port->speed == phydev->speed && port->duplex == phydev->duplex) in ixp4xx_adjust_link()
594 port->speed = phydev->speed; in ixp4xx_adjust_link()
595 port->duplex = phydev->duplex; in ixp4xx_adjust_link()
597 if (port->duplex) in ixp4xx_adjust_link()
599 &port->regs->tx_control[0]); in ixp4xx_adjust_link()
602 &port->regs->tx_control[0]); in ixp4xx_adjust_link()
605 dev->name, port->speed, port->duplex ? "full" : "half"); in ixp4xx_adjust_link()
633 phys, desc->next, desc->buf_len, desc->pkt_len, in debug_desc()
634 desc->data, desc->dest_id, desc->src_id, desc->flags, in debug_desc()
635 desc->qos, desc->padlen, desc->vlan_tci, in debug_desc()
636 desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2, in debug_desc()
637 desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5, in debug_desc()
638 desc->src_mac_0, desc->src_mac_1, desc->src_mac_2, in debug_desc()
639 desc->src_mac_3, desc->src_mac_4, desc->src_mac_5); in debug_desc()
650 return -1; in queue_get_desc()
652 phys &= ~0x1F; /* mask out non-address bits */ in queue_get_desc()
655 n_desc = (phys - tab_phys) / sizeof(struct desc); in queue_get_desc()
676 dma_unmap_single(&port->netdev->dev, desc->data, in dma_unmap_tx()
677 desc->buf_len, DMA_TO_DEVICE); in dma_unmap_tx()
679 dma_unmap_single(&port->netdev->dev, desc->data & ~3, in dma_unmap_tx()
680 ALIGN((desc->data & 3) + desc->buf_len, 4), in dma_unmap_tx()
692 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); in eth_rx_irq()
694 qmgr_disable_irq(port->plat->rxq); in eth_rx_irq()
695 napi_schedule(&port->napi); in eth_rx_irq()
701 struct net_device *dev = port->netdev; in eth_poll()
702 unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id); in eth_poll()
742 phys = dma_map_single(&dev->dev, skb->data, in eth_poll()
744 if (dma_mapping_error(&dev->dev, phys)) { in eth_poll()
751 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4)); in eth_poll()
755 dev->stats.rx_dropped++; in eth_poll()
756 /* put the desc back on RX-ready queue */ in eth_poll()
757 desc->buf_len = MAX_MRU; in eth_poll()
758 desc->pkt_len = 0; in eth_poll()
766 skb = port->rx_buff_tab[n]; in eth_poll()
767 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, in eth_poll()
770 dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN, in eth_poll()
772 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], in eth_poll()
773 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); in eth_poll()
776 skb_put(skb, desc->pkt_len); in eth_poll()
778 debug_pkt(dev, "eth_poll", skb->data, skb->len); in eth_poll()
781 skb->protocol = eth_type_trans(skb, dev); in eth_poll()
782 dev->stats.rx_packets++; in eth_poll()
783 dev->stats.rx_bytes += skb->len; in eth_poll()
786 /* put the new buffer on RX-free queue */ in eth_poll()
788 port->rx_buff_tab[n] = temp; in eth_poll()
789 desc->data = phys + NET_IP_ALIGN; in eth_poll()
791 desc->buf_len = MAX_MRU; in eth_poll()
792 desc->pkt_len = 0; in eth_poll()
821 phys &= ~0x1F; /* mask out non-address bits */ in eth_txdone_irq()
822 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc); in eth_txdone_irq()
827 if (port->tx_buff_tab[n_desc]) { /* not the draining packet */ in eth_txdone_irq()
828 port->netdev->stats.tx_packets++; in eth_txdone_irq()
829 port->netdev->stats.tx_bytes += desc->pkt_len; in eth_txdone_irq()
834 port->netdev->name, port->tx_buff_tab[n_desc]); in eth_txdone_irq()
836 free_buffer_irq(port->tx_buff_tab[n_desc]); in eth_txdone_irq()
837 port->tx_buff_tab[n_desc] = NULL; in eth_txdone_irq()
840 start = qmgr_stat_below_low_watermark(port->plat->txreadyq); in eth_txdone_irq()
841 queue_put_desc(port->plat->txreadyq, phys, desc); in eth_txdone_irq()
842 if (start) { /* TX-ready queue was empty */ in eth_txdone_irq()
845 port->netdev->name); in eth_txdone_irq()
847 netif_wake_queue(port->netdev); in eth_txdone_irq()
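
The low-watermark test is one half of a flow-control handshake on the TX-ready queue: the completion path samples the watermark before returning a descriptor and wakes the netif queue only when its refill made TX-ready non-empty again. The matching stop is not in these fragments; presumably eth_xmit() stops the queue when it drains TX-ready. A sketch of both halves under that assumption:

    /* xmit side (assumed): stop once the last ready descriptor is taken */
    static void tx_maybe_stop(struct net_device *dev, unsigned int txreadyq)
    {
            if (qmgr_stat_below_low_watermark(txreadyq))
                    netif_stop_queue(dev);
    }

    /* completion side, as in eth_txdone_irq() above */
    static void tx_done_requeue(struct port *port, u32 phys, struct desc *desc)
    {
            int was_empty = qmgr_stat_below_low_watermark(port->plat->txreadyq);

            queue_put_desc(port->plat->txreadyq, phys, desc);
            if (was_empty)  /* TX-ready queue was empty */
                    netif_wake_queue(port->netdev);
    }
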
855 unsigned int txreadyq = port->plat->txreadyq; in eth_xmit()
865 if (unlikely(skb->len > MAX_MRU)) { in eth_xmit()
867 dev->stats.tx_errors++; in eth_xmit()
871 debug_pkt(dev, "eth_xmit", skb->data, skb->len); in eth_xmit()
873 len = skb->len; in eth_xmit()
877 mem = skb->data; in eth_xmit()
879 offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */ in eth_xmit()
883 dev->stats.tx_dropped++; in eth_xmit()
886 memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4); in eth_xmit()
889 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); in eth_xmit()
890 if (dma_mapping_error(&dev->dev, phys)) { in eth_xmit()
895 dev->stats.tx_dropped++; in eth_xmit()
904 port->tx_buff_tab[n] = skb; in eth_xmit()
906 port->tx_buff_tab[n] = mem; in eth_xmit()
908 desc->data = phys + offset; in eth_xmit()
909 desc->buf_len = desc->pkt_len = len; in eth_xmit()
911 /* NPE firmware pads short frames with zeros internally */ in eth_xmit()
913 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); in eth_xmit()
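
On big-endian ARM the frame is bounced through a word-aligned scratch buffer so memcpy_swab32() can byte-swap whole 32-bit words: offset preserves the packet's position within its first word, bytes rounds the copy up to whole words, and the descriptor stores phys + offset so the NPE still sees the frame at the right byte. A worked example of that arithmetic:

    /* Suppose skb->data = 0x1006 and skb->len = 60 (values illustrative):
     *   offset = 0x1006 & 3        = 2
     *   bytes  = ALIGN(2 + 60, 4)  = 64
     *   source = 0x1006 & ~3       = 0x1004 (word-aligned base)
     * memcpy_swab32() then moves 64 / 4 = 16 whole words, and
     * desc->data = phys + 2 points the NPE at the frame itself. */
    static int tx_bounce_sketch(u32 *mem, struct sk_buff *skb)
    {
            int offset = (uintptr_t)skb->data & 3;  /* keep 32-bit alignment */
            int bytes = ALIGN(offset + skb->len, 4);

            memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
            return offset;  /* added to the DMA address in desc->data */
    }
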
952 if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) { in eth_set_mcast_list()
954 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); in eth_set_mcast_list()
955 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); in eth_set_mcast_list()
958 &port->regs->rx_control[0]); in eth_set_mcast_list()
962 if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) { in eth_set_mcast_list()
964 &port->regs->rx_control[0]); in eth_set_mcast_list()
973 addr = ha->addr; /* first MAC address */ in eth_set_mcast_list()
975 diffs[i] |= addr[i] ^ ha->addr[i]; in eth_set_mcast_list()
979 __raw_writel(addr[i], &port->regs->mcast_addr[i]); in eth_set_mcast_list()
980 __raw_writel(~diffs[i], &port->regs->mcast_mask[i]); in eth_set_mcast_list()
984 &port->regs->rx_control[0]); in eth_set_mcast_list()
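
The MAC has a single address/mask filter, so the loop collapses the whole multicast list into its common bits: diffs accumulates every bit on which any listed address disagrees with the first, and ~diffs becomes the mask, so the filter accepts the (possibly proper) superset of addresses that agree on all remaining bits. A standalone illustration:

    /* For 01:00:5e:00:00:01 and 01:00:5e:00:00:05:
     *   diffs = 00:00:00:00:00:04, mask = ~diffs = ff:ff:ff:ff:ff:fb
     * i.e. match the first address everywhere except bit 2 of the
     * last octet. */
    static void build_mcast_filter(const u8 (*list)[6], int count,
                                   u8 addr[6], u8 mask[6])
    {
            u8 diffs[6] = { 0 };
            int i, j;

            memcpy(addr, list[0], 6);       /* first MAC address */
            for (j = 1; j < count; j++)
                    for (i = 0; i < 6; i++)
                            diffs[i] |= addr[i] ^ list[j][i];
            for (i = 0; i < 6; i++)
                    mask[i] = ~diffs[i];
    }
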
991 return -EINVAL; in eth_ioctl()
1000 return phy_mii_ioctl(dev->phydev, req, cmd); in eth_ioctl()
1010 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in ixp4xx_get_drvinfo()
1011 snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u", in ixp4xx_get_drvinfo()
1012 port->firmware[0], port->firmware[1], in ixp4xx_get_drvinfo()
1013 port->firmware[2], port->firmware[3]); in ixp4xx_get_drvinfo()
1014 strscpy(info->bus_info, "internal", sizeof(info->bus_info)); in ixp4xx_get_drvinfo()
1022 if (port->phc_index < 0) in ixp4xx_get_ts_info()
1023 ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); in ixp4xx_get_ts_info()
1025 info->phc_index = port->phc_index; in ixp4xx_get_ts_info()
1027 if (info->phc_index < 0) { in ixp4xx_get_ts_info()
1028 info->so_timestamping = in ixp4xx_get_ts_info()
1032 info->so_timestamping = in ixp4xx_get_ts_info()
1036 info->tx_types = in ixp4xx_get_ts_info()
1039 info->rx_filters = in ixp4xx_get_ts_info()
1060 err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0, in request_queues()
1061 "%s:RX-free", port->netdev->name); in request_queues()
1065 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0, in request_queues()
1066 "%s:RX", port->netdev->name); in request_queues()
1070 err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0, in request_queues()
1071 "%s:TX", port->netdev->name); in request_queues()
1075 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, in request_queues()
1076 "%s:TX-ready", port->netdev->name); in request_queues()
1080 /* TX-done queue handles skbs sent out by the NPEs */ in request_queues()
1083 "%s:TX-done", DRV_NAME); in request_queues()
1090 qmgr_release_queue(port->plat->txreadyq); in request_queues()
1092 qmgr_release_queue(TX_QUEUE(port->id)); in request_queues()
1094 qmgr_release_queue(port->plat->rxq); in request_queues()
1096 qmgr_release_queue(RXFREE_QUEUE(port->id)); in request_queues()
1098 port->netdev->name); in request_queues()
1104 qmgr_release_queue(RXFREE_QUEUE(port->id)); in release_queues()
1105 qmgr_release_queue(port->plat->rxq); in release_queues()
1106 qmgr_release_queue(TX_QUEUE(port->id)); in release_queues()
1107 qmgr_release_queue(port->plat->txreadyq); in release_queues()
1118 dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, in init_queues()
1121 return -ENOMEM; in init_queues()
1124 port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys); in init_queues()
1125 if (!port->desc_tab) in init_queues()
1126 return -ENOMEM; in init_queues()
1127 memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ in init_queues()
1128 memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); in init_queues()
1136 if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE))) in init_queues()
1137 return -ENOMEM; in init_queues()
1138 data = buff->data; in init_queues()
1141 return -ENOMEM; in init_queues()
1144 desc->buf_len = MAX_MRU; in init_queues()
1145 desc->data = dma_map_single(&port->netdev->dev, data, in init_queues()
1147 if (dma_mapping_error(&port->netdev->dev, desc->data)) { in init_queues()
1149 return -EIO; in init_queues()
1151 desc->data += NET_IP_ALIGN; in init_queues()
1152 port->rx_buff_tab[i] = buff; in init_queues()
1162 if (port->desc_tab) { in destroy_queues()
1165 buffer_t *buff = port->rx_buff_tab[i]; in destroy_queues()
1167 dma_unmap_single(&port->netdev->dev, in destroy_queues()
1168 desc->data - NET_IP_ALIGN, in destroy_queues()
1175 buffer_t *buff = port->tx_buff_tab[i]; in destroy_queues()
1181 dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys); in destroy_queues()
1182 port->desc_tab = NULL; in destroy_queues()
1194 struct npe *npe = port->npe; in ixp4xx_do_change_mtu() local
1204 msg.eth_id = port->id; in ixp4xx_do_change_mtu()
1213 if (npe_send_recv_message(npe, &msg, "ETH_SET_MAX_FRAME_LENGTH")) in ixp4xx_do_change_mtu()
1214 return -EIO; in ixp4xx_do_change_mtu()
1215 netdev_dbg(dev, "set MTU on NPE %s to %d bytes\n", in ixp4xx_do_change_mtu()
1216 npe_name(npe), new_mtu); in ixp4xx_do_change_mtu()
1226 * set the MTU from dev->mtu when opening the device. in ixp4xx_eth_change_mtu()
1228 if (dev->flags & IFF_UP) { in ixp4xx_eth_change_mtu()
1234 WRITE_ONCE(dev->mtu, new_mtu); in ixp4xx_eth_change_mtu()
1242 struct npe *npe = port->npe; in eth_open() local
1246 if (!npe_running(npe)) { in eth_open()
1247 err = npe_load_firmware(npe, npe_name(npe), &dev->dev); in eth_open()
1251 if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) { in eth_open()
1252 netdev_err(dev, "%s not responding\n", npe_name(npe)); in eth_open()
1253 return -EIO; in eth_open()
1255 port->firmware[0] = msg.byte4; in eth_open()
1256 port->firmware[1] = msg.byte5; in eth_open()
1257 port->firmware[2] = msg.byte6; in eth_open()
1258 port->firmware[3] = msg.byte7; in eth_open()
1263 msg.eth_id = port->id; in eth_open()
1264 msg.byte5 = port->plat->rxq | 0x80; in eth_open()
1265 msg.byte7 = port->plat->rxq << 4; in eth_open()
1268 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ")) in eth_open()
1269 return -EIO; in eth_open()
1273 msg.eth_id = PHYSICAL_ID(port->id); in eth_open()
1274 msg.byte2 = dev->dev_addr[0]; in eth_open()
1275 msg.byte3 = dev->dev_addr[1]; in eth_open()
1276 msg.byte4 = dev->dev_addr[2]; in eth_open()
1277 msg.byte5 = dev->dev_addr[3]; in eth_open()
1278 msg.byte6 = dev->dev_addr[4]; in eth_open()
1279 msg.byte7 = dev->dev_addr[5]; in eth_open()
1280 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC")) in eth_open()
1281 return -EIO; in eth_open()
1285 msg.eth_id = port->id; in eth_open()
1286 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE")) in eth_open()
1287 return -EIO; in eth_open()
1289 ixp4xx_do_change_mtu(dev, dev->mtu); in eth_open()
1300 port->speed = 0; /* force "link up" message */ in eth_open()
1301 phy_start(dev->phydev); in eth_open()
1304 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]); in eth_open()
1305 __raw_writel(0x08, &port->regs->random_seed); in eth_open()
1306 __raw_writel(0x12, &port->regs->partial_empty_threshold); in eth_open()
1307 __raw_writel(0x30, &port->regs->partial_full_threshold); in eth_open()
1308 __raw_writel(0x08, &port->regs->tx_start_bytes); in eth_open()
1309 __raw_writel(0x15, &port->regs->tx_deferral); in eth_open()
1310 __raw_writel(0x08, &port->regs->tx_2part_deferral[0]); in eth_open()
1311 __raw_writel(0x07, &port->regs->tx_2part_deferral[1]); in eth_open()
1312 __raw_writel(0x80, &port->regs->slot_time); in eth_open()
1313 __raw_writel(0x01, &port->regs->int_clock_threshold); in eth_open()
1317 queue_put_desc(port->plat->txreadyq, in eth_open()
1321 queue_put_desc(RXFREE_QUEUE(port->id), in eth_open()
1324 __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]); in eth_open()
1325 __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]); in eth_open()
1326 __raw_writel(0, &port->regs->rx_control[1]); in eth_open()
1327 __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]); in eth_open()
1329 napi_enable(&port->napi); in eth_open()
1333 qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, in eth_open()
1342 napi_schedule(&port->napi); in eth_open()
1353 ports_open--; in eth_close()
1354 qmgr_disable_irq(port->plat->rxq); in eth_close()
1355 napi_disable(&port->napi); in eth_close()
1358 while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0) in eth_close()
1359 buffs--; in eth_close()
1363 msg.eth_id = port->id; in eth_close()
1365 if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK")) in eth_close()
1370 while (queue_get_desc(port->plat->rxq, port, 0) >= 0) in eth_close()
1371 buffs--; in eth_close()
1374 if (qmgr_stat_empty(TX_QUEUE(port->id))) { in eth_close()
1378 int n = queue_get_desc(port->plat->txreadyq, port, 1); in eth_close()
1382 desc->buf_len = desc->pkt_len = 1; in eth_close()
1384 queue_put_desc(TX_QUEUE(port->id), phys, desc); in eth_close()
1391 " left in NPE\n", buffs); in eth_close()
1398 while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0) in eth_close()
1399 buffs--; /* cancel TX */ in eth_close()
1403 while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0) in eth_close()
1404 buffs--; in eth_close()
1411 "left in NPE\n", buffs); in eth_close()
1418 if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK")) in eth_close()
1421 phy_stop(dev->phydev); in eth_close()
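
Closing has to reclaim buffers still held inside the NPE, and there is no command to hand them back, so the driver flushes them through the datapath itself: drain what the CPU can reach, put the MAC into loopback (ETH_ENABLE_LOOPBACK), and push dummy one-byte frames into TX so anything stuck in the NPE is looped back onto RX and can be popped off; the "x buffs left in NPE" warnings fire if the count never returns to zero. A control-flow outline of the RX half (a sketch, not a drop-in implementation):

    static void drain_npe_sketch(struct port *port)
    {
            int buffs = RX_DESCS;   /* allocated RX buffers */

            /* 1. pop everything already visible on the RX-free queue */
            while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
                    buffs--;

            /* 2. with loopback enabled, dummy 1-byte TX frames flush
             *    buffers held inside the NPE back onto the RX queue */
            while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
                    buffs--;

            /* 3. poll for up to MAX_CLOSE_WAIT; if buffs != 0, warn
             *    "x buffs left in NPE", then disable loopback
             *    (ETH_DISABLE_LOOPBACK) and repeat for TX/TX-ready */
    }
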
1443 struct device_node *np = dev->of_node; in ixp4xx_of_get_platdata()
1455 ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0, in ixp4xx_of_get_platdata()
1458 dev_err(dev, "no NPE engine specified\n"); in ixp4xx_of_get_platdata()
1461 /* NPE ID 0x00, 0x10, 0x20... */ in ixp4xx_of_get_platdata()
1462 plat->npe = (npe_spec.args[0] << 4); in ixp4xx_of_get_platdata()
1467 plat->has_mdio = true; in ixp4xx_of_get_platdata()
1473 ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0, in ixp4xx_of_get_platdata()
1479 plat->rxq = queue_spec.args[0]; in ixp4xx_of_get_platdata()
1482 ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0, in ixp4xx_of_get_platdata()
1488 plat->txreadyq = queue_spec.args[0]; in ixp4xx_of_get_platdata()
1493 memcpy(plat->hwaddr, mac, ETH_ALEN); in ixp4xx_of_get_platdata()
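
The single phandle argument selects the NPE instance (0 = NPE-A, 1 = NPE-B, 2 = NPE-C), and shifting it left by four puts it in the upper nibble of the port ID, which NPE_ID()/PHYSICAL_ID() presumably peel apart again elsewhere (PORT2CHANNEL() above is just NPE_ID(p->id)). A sketch of that encoding; the macro bodies here are assumptions consistent with the shift, not copied from the driver:

    #define NPE_ID(port_id)         ((port_id) >> 4)    /* upper nibble */
    #define PHYSICAL_ID(port_id)    ((port_id) & 0xF)   /* lower nibble */

    /* e.g. intel,npe-handle = <&npe 1> (NPE-B):
     *   plat->npe = 1 << 4 = 0x10,
     *   so NPE_ID(0x10) = 1 and PHYSICAL_ID(0x10) = 0 */
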
1502 struct device *dev = &pdev->dev; in ixp4xx_eth_probe()
1503 struct device_node *np = dev->of_node; in ixp4xx_eth_probe()
1511 return -ENODEV; in ixp4xx_eth_probe()
1514 return -ENOMEM; in ixp4xx_eth_probe()
1518 port->netdev = ndev; in ixp4xx_eth_probe()
1519 port->id = plat->npe; in ixp4xx_eth_probe()
1520 port->phc_index = -1; in ixp4xx_eth_probe()
1523 port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); in ixp4xx_eth_probe()
1524 if (IS_ERR(port->regs)) in ixp4xx_eth_probe()
1525 return PTR_ERR(port->regs); in ixp4xx_eth_probe()
1528 if (plat->has_mdio) { in ixp4xx_eth_probe()
1529 err = ixp4xx_mdio_register(port->regs); in ixp4xx_eth_probe()
1539 return -EPROBE_DEFER; in ixp4xx_eth_probe()
1541 ndev->netdev_ops = &ixp4xx_netdev_ops; in ixp4xx_eth_probe()
1542 ndev->ethtool_ops = &ixp4xx_ethtool_ops; in ixp4xx_eth_probe()
1543 ndev->tx_queue_len = 100; in ixp4xx_eth_probe()
1545 ndev->dev.dma_mask = dev->dma_mask; in ixp4xx_eth_probe()
1546 ndev->dev.coherent_dma_mask = dev->coherent_dma_mask; in ixp4xx_eth_probe()
1548 ndev->min_mtu = ETH_MIN_MTU; in ixp4xx_eth_probe()
1549 ndev->max_mtu = MAX_MRU; in ixp4xx_eth_probe()
1551 netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT); in ixp4xx_eth_probe()
1553 if (!(port->npe = npe_request(NPE_ID(port->id)))) in ixp4xx_eth_probe()
1554 return -EIO; in ixp4xx_eth_probe()
1556 port->plat = plat; in ixp4xx_eth_probe()
1557 npe_port_tab[NPE_ID(port->id)] = port; in ixp4xx_eth_probe()
1558 if (is_valid_ether_addr(plat->hwaddr)) in ixp4xx_eth_probe()
1559 eth_hw_addr_set(ndev, plat->hwaddr); in ixp4xx_eth_probe()
1566 &port->regs->core_control); in ixp4xx_eth_probe()
1568 __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control); in ixp4xx_eth_probe()
1573 err = -ENODEV; in ixp4xx_eth_probe()
1578 phydev->irq = PHY_POLL; in ixp4xx_eth_probe()
1583 netdev_info(ndev, "%s: MII PHY %s on %s\n", ndev->name, phydev_name(phydev), in ixp4xx_eth_probe()
1584 npe_name(port->npe)); in ixp4xx_eth_probe()
1591 npe_port_tab[NPE_ID(port->id)] = NULL; in ixp4xx_eth_probe()
1592 npe_release(port->npe); in ixp4xx_eth_probe()
1599 struct phy_device *phydev = ndev->phydev; in ixp4xx_eth_remove()
1605 npe_port_tab[NPE_ID(port->id)] = NULL; in ixp4xx_eth_remove()
1606 npe_release(port->npe); in ixp4xx_eth_remove()
1611 .compatible = "intel,ixp4xx-ethernet",