Lines Matching +full:sierra +full:- +full:phy

1 // SPDX-License-Identifier: GPL-2.0-or-later
10 * Copyright (C) 2003 PMC-Sierra, Inc.,
13 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
15 * Copyright (C) 2004-2006 MontaVista Software, Inc.
21 * Copyright (C) 2007-2008 Marvell Semiconductor
30 #include <linux/dma-mapping.h>
44 #include <linux/phy.h>
71 * Main per-port registers. These live at offset 0x0400 for
142 * Misc per-port registers.
173 #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
180 ((addr >= txq->tso_hdrs_dma) && \
181 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
267 * Per-port MBUS window access register value.
272 * Hardware-specific parameters.
288 /* per-port *****************************************************************/
411 * Hardware-specific parameters.
421 return readl(mp->shared->base + offset); in rdl()
426 return readl(mp->base + offset); in rdlp()
431 writel(data, mp->shared->base + offset); in wrl()
436 writel(data, mp->base + offset); in wrlp()
443 return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); in rxq_to_mp()
448 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); in txq_to_mp()
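
Aside: the rdl/rdlp/wrl/wrlp helpers above read and write 32-bit registers through either the shared controller base or the per-port base, and the per-port window sits at offset 0x0400 plus (port_number << 10) from the shared base (see mv643xx_eth_probe further down in this listing). A minimal userspace sketch of that address arithmetic, for illustration only and not part of the driver:

    /* Illustration only: models the per-port register window offset used by
     * rdlp()/wrlp(); constants mirror the probe code later in this listing. */
    #include <stdio.h>

    static unsigned long port_reg_base(unsigned long shared_base, unsigned int port)
    {
        return shared_base + 0x0400 + ((unsigned long)port << 10);
    }

    int main(void)
    {
        unsigned int port;

        for (port = 0; port < 3; port++)
            printf("port %u register window starts at 0x%lx\n",
                   port, port_reg_base(0, port));
        return 0;
    }
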
454 wrlp(mp, RXQ_COMMAND, 1 << rxq->index); in rxq_enable()
460 u8 mask = 1 << rxq->index; in rxq_disable()
472 addr = (u32)txq->tx_desc_dma; in txq_reset_hw_ptr()
473 addr += txq->tx_curr_desc * sizeof(struct tx_desc); in txq_reset_hw_ptr()
474 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); in txq_reset_hw_ptr()
480 wrlp(mp, TXQ_COMMAND, 1 << txq->index); in txq_enable()
486 u8 mask = 1 << txq->index; in txq_disable()
496 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_maybe_wake()
500 if (txq->tx_desc_count <= txq->tx_wake_threshold) in txq_maybe_wake()
509 struct net_device_stats *stats = &mp->dev->stats; in rxq_process()
513 while (rx < budget && rxq->rx_desc_count) { in rxq_process()
519 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; in rxq_process()
521 cmd_sts = rx_desc->cmd_sts; in rxq_process()
526 skb = rxq->rx_skb[rxq->rx_curr_desc]; in rxq_process()
527 rxq->rx_skb[rxq->rx_curr_desc] = NULL; in rxq_process()
529 rxq->rx_curr_desc++; in rxq_process()
530 if (rxq->rx_curr_desc == rxq->rx_ring_size) in rxq_process()
531 rxq->rx_curr_desc = 0; in rxq_process()
533 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, in rxq_process()
534 rx_desc->buf_size, DMA_FROM_DEVICE); in rxq_process()
535 rxq->rx_desc_count--; in rxq_process()
538 mp->work_rx_refill |= 1 << rxq->index; in rxq_process()
540 byte_cnt = rx_desc->byte_cnt; in rxq_process()
550 stats->rx_packets++; in rxq_process()
551 stats->rx_bytes += byte_cnt - 2; in rxq_process()
563 * The -4 is for the CRC in the trailer of the in rxq_process()
566 skb_put(skb, byte_cnt - 2 - 4); in rxq_process()
569 skb->ip_summed = CHECKSUM_UNNECESSARY; in rxq_process()
570 skb->protocol = eth_type_trans(skb, mp->dev); in rxq_process()
572 napi_gro_receive(&mp->napi, skb); in rxq_process()
577 stats->rx_dropped++; in rxq_process()
582 netdev_err(mp->dev, in rxq_process()
587 stats->rx_errors++; in rxq_process()
593 mp->work_rx &= ~(1 << rxq->index); in rxq_process()
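
Aside: the receive path above, like the transmit paths below, advances its descriptor indices with a wrap-to-zero idiom: increment, then reset to zero once the ring size is reached. A standalone sketch of that idiom with an arbitrary ring size:

    /* Illustration only: the circular-index advance used for rx_curr_desc,
     * rx_used_desc, tx_curr_desc and tx_used_desc throughout this driver. */
    #include <stdio.h>

    #define RING_SIZE 8     /* stand-in for rxq->rx_ring_size */

    int main(void)
    {
        unsigned int curr = 0;
        int i;

        for (i = 0; i < 2 * RING_SIZE; i++) {
            printf("using descriptor %u\n", curr);
            curr++;
            if (curr == RING_SIZE)  /* same wrap test as above */
                curr = 0;
        }
        return 0;
    }
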
604 while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { in rxq_refill()
610 skb = netdev_alloc_skb(mp->dev, mp->skb_size); in rxq_refill()
613 mp->oom = 1; in rxq_refill()
621 rxq->rx_desc_count++; in rxq_refill()
623 rx = rxq->rx_used_desc++; in rxq_refill()
624 if (rxq->rx_used_desc == rxq->rx_ring_size) in rxq_refill()
625 rxq->rx_used_desc = 0; in rxq_refill()
627 rx_desc = rxq->rx_desc_area + rx; in rxq_refill()
629 size = skb_end_pointer(skb) - skb->data; in rxq_refill()
630 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, in rxq_refill()
631 skb->data, size, in rxq_refill()
633 rx_desc->buf_size = size; in rxq_refill()
634 rxq->rx_skb[rx] = skb; in rxq_refill()
636 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; in rxq_refill()
642 * IP header ends up 16-byte aligned. in rxq_refill()
648 mp->work_rx_refill &= ~(1 << rxq->index); in rxq_refill()
660 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { in has_tiny_unaligned_frags()
661 const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; in has_tiny_unaligned_frags()
676 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_tx_csum()
680 BUG_ON(skb->protocol != htons(ETH_P_IP) && in skb_tx_csum()
681 skb->protocol != htons(ETH_P_8021Q)); in skb_tx_csum()
683 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; in skb_tx_csum()
684 tag_bytes = hdr_len - ETH_HLEN; in skb_tx_csum()
686 if (length - hdr_len > mp->shared->tx_csum_limit || in skb_tx_csum()
701 ip_hdr(skb)->ihl << TX_IHL_SHIFT; in skb_tx_csum()
706 switch (ip_hdr(skb)->protocol) { in skb_tx_csum()
735 tx_index = txq->tx_curr_desc++; in txq_put_data_tso()
736 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_put_data_tso()
737 txq->tx_curr_desc = 0; in txq_put_data_tso()
738 desc = &txq->tx_desc_area[tx_index]; in txq_put_data_tso()
739 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; in txq_put_data_tso()
741 desc->l4i_chk = 0; in txq_put_data_tso()
742 desc->byte_cnt = length; in txq_put_data_tso()
746 memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, in txq_put_data_tso()
748 desc->buf_ptr = txq->tso_hdrs_dma in txq_put_data_tso()
752 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; in txq_put_data_tso()
753 desc->buf_ptr = dma_map_single(dev->dev.parent, data, in txq_put_data_tso()
755 if (unlikely(dma_mapping_error(dev->dev.parent, in txq_put_data_tso()
756 desc->buf_ptr))) { in txq_put_data_tso()
758 return -ENOMEM; in txq_put_data_tso()
770 desc->cmd_sts = cmd_sts; in txq_put_data_tso()
787 tx_index = txq->tx_curr_desc; in txq_put_hdr_tso()
788 desc = &txq->tx_desc_area[tx_index]; in txq_put_hdr_tso()
797 desc->l4i_chk = 0; in txq_put_hdr_tso()
799 desc->byte_cnt = hdr_len; in txq_put_hdr_tso()
800 desc->buf_ptr = txq->tso_hdrs_dma + in txq_put_hdr_tso()
801 txq->tx_curr_desc * TSO_HEADER_SIZE; in txq_put_hdr_tso()
811 desc->cmd_sts = cmd_sts; in txq_put_hdr_tso()
813 txq->tx_curr_desc++; in txq_put_hdr_tso()
814 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_put_hdr_tso()
815 txq->tx_curr_desc = 0; in txq_put_hdr_tso()
829 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { in txq_submit_tso()
831 return -EBUSY; in txq_submit_tso()
834 first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; in txq_submit_tso()
839 total_len = skb->len - hdr_len; in txq_submit_tso()
844 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); in txq_submit_tso()
845 total_len -= data_left; in txq_submit_tso()
849 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; in txq_submit_tso()
864 data_left -= size; in txq_submit_tso()
869 __skb_queue_tail(&txq->tx_skb, skb); in txq_submit_tso()
874 first_tx_desc->cmd_sts = first_cmd_sts; in txq_submit_tso()
877 mp->work_tx_end &= ~(1 << txq->index); in txq_submit_tso()
882 txq->tx_desc_count += desc_count; in txq_submit_tso()
886 * be DMA-unmapped. in txq_submit_tso()
894 int nr_frags = skb_shinfo(skb)->nr_frags; in txq_submit_frag_skb()
902 this_frag = &skb_shinfo(skb)->frags[frag]; in txq_submit_frag_skb()
903 tx_index = txq->tx_curr_desc++; in txq_submit_frag_skb()
904 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_submit_frag_skb()
905 txq->tx_curr_desc = 0; in txq_submit_frag_skb()
906 desc = &txq->tx_desc_area[tx_index]; in txq_submit_frag_skb()
907 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; in txq_submit_frag_skb()
913 if (frag == nr_frags - 1) { in txq_submit_frag_skb()
914 desc->cmd_sts = BUFFER_OWNED_BY_DMA | in txq_submit_frag_skb()
918 desc->cmd_sts = BUFFER_OWNED_BY_DMA; in txq_submit_frag_skb()
921 desc->l4i_chk = 0; in txq_submit_frag_skb()
922 desc->byte_cnt = skb_frag_size(this_frag); in txq_submit_frag_skb()
923 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, in txq_submit_frag_skb()
924 this_frag, 0, desc->byte_cnt, in txq_submit_frag_skb()
933 int nr_frags = skb_shinfo(skb)->nr_frags; in txq_submit_skb()
943 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { in txq_submit_skb()
946 return -EBUSY; in txq_submit_skb()
949 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len); in txq_submit_skb()
954 tx_index = txq->tx_curr_desc++; in txq_submit_skb()
955 if (txq->tx_curr_desc == txq->tx_ring_size) in txq_submit_skb()
956 txq->tx_curr_desc = 0; in txq_submit_skb()
957 desc = &txq->tx_desc_area[tx_index]; in txq_submit_skb()
958 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; in txq_submit_skb()
965 length = skb->len; in txq_submit_skb()
968 desc->l4i_chk = l4i_chk; in txq_submit_skb()
969 desc->byte_cnt = length; in txq_submit_skb()
970 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, in txq_submit_skb()
973 __skb_queue_tail(&txq->tx_skb, skb); in txq_submit_skb()
979 desc->cmd_sts = cmd_sts; in txq_submit_skb()
982 mp->work_tx_end &= ~(1 << txq->index); in txq_submit_skb()
988 txq->tx_desc_count += nr_frags + 1; in txq_submit_skb()
1001 txq = mp->txq + queue; in mv643xx_eth_xmit()
1010 length = skb->len; in mv643xx_eth_xmit()
1017 txq->tx_bytes += length; in mv643xx_eth_xmit()
1018 txq->tx_packets++; in mv643xx_eth_xmit()
1020 if (txq->tx_desc_count >= txq->tx_stop_threshold) in mv643xx_eth_xmit()
1023 txq->tx_dropped++; in mv643xx_eth_xmit()
1035 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_kick()
1041 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) in txq_kick()
1044 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); in txq_kick()
1045 expected_ptr = (u32)txq->tx_desc_dma + in txq_kick()
1046 txq->tx_curr_desc * sizeof(struct tx_desc); in txq_kick()
1054 mp->work_tx_end &= ~(1 << txq->index); in txq_kick()
1060 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_reclaim()
1066 while (reclaimed < budget && txq->tx_desc_count > 0) { in txq_reclaim()
1072 tx_index = txq->tx_used_desc; in txq_reclaim()
1073 desc = &txq->tx_desc_area[tx_index]; in txq_reclaim()
1074 desc_dma_map = txq->tx_desc_mapping[tx_index]; in txq_reclaim()
1076 cmd_sts = desc->cmd_sts; in txq_reclaim()
1081 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; in txq_reclaim()
1084 txq->tx_used_desc = tx_index + 1; in txq_reclaim()
1085 if (txq->tx_used_desc == txq->tx_ring_size) in txq_reclaim()
1086 txq->tx_used_desc = 0; in txq_reclaim()
1089 txq->tx_desc_count--; in txq_reclaim()
1091 if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { in txq_reclaim()
1094 dma_unmap_page(mp->dev->dev.parent, in txq_reclaim()
1095 desc->buf_ptr, in txq_reclaim()
1096 desc->byte_cnt, in txq_reclaim()
1099 dma_unmap_single(mp->dev->dev.parent, in txq_reclaim()
1100 desc->buf_ptr, in txq_reclaim()
1101 desc->byte_cnt, in txq_reclaim()
1106 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); in txq_reclaim()
1113 netdev_info(mp->dev, "tx error\n"); in txq_reclaim()
1114 mp->dev->stats.tx_errors++; in txq_reclaim()
1122 mp->work_tx &= ~(1 << txq->index); in txq_reclaim()
1139 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); in tx_set_rate()
1143 mtu = (mp->dev->mtu + 255) >> 8; in tx_set_rate()
1151 switch (mp->shared->tx_bw_control) { in tx_set_rate()
1171 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); in txq_set_rate()
1179 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); in txq_set_rate()
1180 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); in txq_set_rate()
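
Aside: tx_set_rate() and txq_set_rate() above derive token-bucket register values with integer arithmetic scaled against the controller clock, and express the MTU in 256-byte units via (mtu + 255) >> 8. A standalone rerun of that arithmetic with invented numbers (the 133 MHz clock matches the fallback used in the probe code later in this listing):

    /* Illustration only: fixed-point rate/MTU math from tx_set_rate() above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long rate = 100000000UL;   /* example rate */
        unsigned long t_clk = 133000000UL;  /* 133 MHz fallback clock */
        unsigned int dev_mtu = 1500;
        unsigned long token_rate, mtu_field;

        token_rate = ((rate / 1000) * 64) / (t_clk / 1000);
        mtu_field = (dev_mtu + 255) >> 8;   /* MTU rounded up to 256-byte units */

        printf("token_rate = %lu, mtu field = %lu\n", token_rate, mtu_field);
        return 0;
    }
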
1193 switch (mp->shared->tx_bw_control) { in txq_set_fixed_prio_mode()
1204 val |= 1 << txq->index; in txq_set_fixed_prio_mode()
1220 if (dev->phydev->autoneg == AUTONEG_ENABLE) { in mv643xx_eth_adjust_link()
1228 if (dev->phydev->speed == SPEED_1000) { in mv643xx_eth_adjust_link()
1237 if (dev->phydev->speed == SPEED_100) in mv643xx_eth_adjust_link()
1242 if (dev->phydev->duplex == DUPLEX_FULL) in mv643xx_eth_adjust_link()
1255 struct net_device_stats *stats = &dev->stats; in mv643xx_eth_get_stats()
1261 for (i = 0; i < mp->txq_count; i++) { in mv643xx_eth_get_stats()
1262 struct tx_queue *txq = mp->txq + i; in mv643xx_eth_get_stats()
1264 tx_packets += txq->tx_packets; in mv643xx_eth_get_stats()
1265 tx_bytes += txq->tx_bytes; in mv643xx_eth_get_stats()
1266 tx_dropped += txq->tx_dropped; in mv643xx_eth_get_stats()
1269 stats->tx_packets = tx_packets; in mv643xx_eth_get_stats()
1270 stats->tx_bytes = tx_bytes; in mv643xx_eth_get_stats()
1271 stats->tx_dropped = tx_dropped; in mv643xx_eth_get_stats()
1278 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); in mib_read()
1295 struct mib_counters *p = &mp->mib_counters; in mib_counters_update()
1297 spin_lock_bh(&mp->mib_counters_lock); in mib_counters_update()
1298 p->good_octets_received += mib_read(mp, 0x00); in mib_counters_update()
1299 p->bad_octets_received += mib_read(mp, 0x08); in mib_counters_update()
1300 p->internal_mac_transmit_err += mib_read(mp, 0x0c); in mib_counters_update()
1301 p->good_frames_received += mib_read(mp, 0x10); in mib_counters_update()
1302 p->bad_frames_received += mib_read(mp, 0x14); in mib_counters_update()
1303 p->broadcast_frames_received += mib_read(mp, 0x18); in mib_counters_update()
1304 p->multicast_frames_received += mib_read(mp, 0x1c); in mib_counters_update()
1305 p->frames_64_octets += mib_read(mp, 0x20); in mib_counters_update()
1306 p->frames_65_to_127_octets += mib_read(mp, 0x24); in mib_counters_update()
1307 p->frames_128_to_255_octets += mib_read(mp, 0x28); in mib_counters_update()
1308 p->frames_256_to_511_octets += mib_read(mp, 0x2c); in mib_counters_update()
1309 p->frames_512_to_1023_octets += mib_read(mp, 0x30); in mib_counters_update()
1310 p->frames_1024_to_max_octets += mib_read(mp, 0x34); in mib_counters_update()
1311 p->good_octets_sent += mib_read(mp, 0x38); in mib_counters_update()
1312 p->good_frames_sent += mib_read(mp, 0x40); in mib_counters_update()
1313 p->excessive_collision += mib_read(mp, 0x44); in mib_counters_update()
1314 p->multicast_frames_sent += mib_read(mp, 0x48); in mib_counters_update()
1315 p->broadcast_frames_sent += mib_read(mp, 0x4c); in mib_counters_update()
1316 p->unrec_mac_control_received += mib_read(mp, 0x50); in mib_counters_update()
1317 p->fc_sent += mib_read(mp, 0x54); in mib_counters_update()
1318 p->good_fc_received += mib_read(mp, 0x58); in mib_counters_update()
1319 p->bad_fc_received += mib_read(mp, 0x5c); in mib_counters_update()
1320 p->undersize_received += mib_read(mp, 0x60); in mib_counters_update()
1321 p->fragments_received += mib_read(mp, 0x64); in mib_counters_update()
1322 p->oversize_received += mib_read(mp, 0x68); in mib_counters_update()
1323 p->jabber_received += mib_read(mp, 0x6c); in mib_counters_update()
1324 p->mac_receive_error += mib_read(mp, 0x70); in mib_counters_update()
1325 p->bad_crc_event += mib_read(mp, 0x74); in mib_counters_update()
1326 p->collision += mib_read(mp, 0x78); in mib_counters_update()
1327 p->late_collision += mib_read(mp, 0x7c); in mib_counters_update()
1329 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); in mib_counters_update()
1330 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); in mib_counters_update()
1331 spin_unlock_bh(&mp->mib_counters_lock); in mib_counters_update()
1338 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); in mib_counters_timer_wrapper()
1351 * In the ->set*() methods, we round the computed register value
1359 if (mp->shared->extended_rx_coal_limit) in get_rx_coal()
1365 temp += mp->t_clk / 2; in get_rx_coal()
1366 do_div(temp, mp->t_clk); in get_rx_coal()
1376 temp = (u64)usec * mp->t_clk; in set_rx_coal()
1381 if (mp->shared->extended_rx_coal_limit) { in set_rx_coal()
1402 temp += mp->t_clk / 2; in get_tx_coal()
1403 do_div(temp, mp->t_clk); in get_tx_coal()
1412 temp = (u64)usec * mp->t_clk; in set_tx_coal()
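
Aside: the coalescing helpers above convert between microseconds and t_clk ticks and round rather than truncate; the getters add half the divisor (t_clk / 2) before the do_div(). A standalone demo of that round-to-nearest idiom:

    /* Illustration only: the add-half-then-divide rounding used by
     * get_rx_coal()/get_tx_coal() above. */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t div_round_nearest(uint64_t x, uint64_t d)
    {
        return (x + d / 2) / d;     /* temp += t_clk / 2; do_div(temp, t_clk) */
    }

    int main(void)
    {
        uint64_t t_clk = 133000000;  /* Hz, example value */
        uint64_t cycles = 199500000; /* arbitrary count: exactly 1.5 * t_clk */

        printf("truncated: %llu, rounded: %llu\n",
               (unsigned long long)(cycles / t_clk),
               (unsigned long long)div_round_nearest(cycles, t_clk));
        return 0;
    }
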
1433 offsetof(struct net_device, stats.m), -1 }
1437 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
1486 struct net_device *dev = mp->dev; in mv643xx_eth_get_link_ksettings_phy()
1488 phy_ethtool_ksettings_get(dev->phydev, cmd); in mv643xx_eth_get_link_ksettings_phy()
1494 cmd->link_modes.supported); in mv643xx_eth_get_link_ksettings_phy()
1496 cmd->link_modes.advertising); in mv643xx_eth_get_link_ksettings_phy()
1514 cmd->base.speed = SPEED_10; in mv643xx_eth_get_link_ksettings_phyless()
1517 cmd->base.speed = SPEED_100; in mv643xx_eth_get_link_ksettings_phyless()
1520 cmd->base.speed = SPEED_1000; in mv643xx_eth_get_link_ksettings_phyless()
1523 cmd->base.speed = -1; in mv643xx_eth_get_link_ksettings_phyless()
1526 cmd->base.duplex = (port_status & FULL_DUPLEX) ? in mv643xx_eth_get_link_ksettings_phyless()
1528 cmd->base.port = PORT_MII; in mv643xx_eth_get_link_ksettings_phyless()
1529 cmd->base.phy_address = 0; in mv643xx_eth_get_link_ksettings_phyless()
1530 cmd->base.autoneg = AUTONEG_DISABLE; in mv643xx_eth_get_link_ksettings_phyless()
1532 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, in mv643xx_eth_get_link_ksettings_phyless()
1534 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, in mv643xx_eth_get_link_ksettings_phyless()
1543 wol->supported = 0; in mv643xx_eth_get_wol()
1544 wol->wolopts = 0; in mv643xx_eth_get_wol()
1545 if (dev->phydev) in mv643xx_eth_get_wol()
1546 phy_ethtool_get_wol(dev->phydev, wol); in mv643xx_eth_get_wol()
1554 if (!dev->phydev) in mv643xx_eth_set_wol()
1555 return -EOPNOTSUPP; in mv643xx_eth_set_wol()
1557 err = phy_ethtool_set_wol(dev->phydev, wol); in mv643xx_eth_set_wol()
1558 /* Given that mv643xx_eth works without the marvell-specific PHY driver, in mv643xx_eth_set_wol()
1561 if (err == -EOPNOTSUPP) in mv643xx_eth_set_wol()
1562 netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n"); in mv643xx_eth_set_wol()
1572 if (dev->phydev) in mv643xx_eth_get_link_ksettings()
1586 if (!dev->phydev) in mv643xx_eth_set_link_ksettings()
1587 return -EINVAL; in mv643xx_eth_set_link_ksettings()
1598 ret = phy_ethtool_ksettings_set(dev->phydev, &c); in mv643xx_eth_set_link_ksettings()
1607 strscpy(drvinfo->driver, mv643xx_eth_driver_name, in mv643xx_eth_get_drvinfo()
1608 sizeof(drvinfo->driver)); in mv643xx_eth_get_drvinfo()
1609 strscpy(drvinfo->version, mv643xx_eth_driver_version, in mv643xx_eth_get_drvinfo()
1610 sizeof(drvinfo->version)); in mv643xx_eth_get_drvinfo()
1611 strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); in mv643xx_eth_get_drvinfo()
1612 strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); in mv643xx_eth_get_drvinfo()
1622 ec->rx_coalesce_usecs = get_rx_coal(mp); in mv643xx_eth_get_coalesce()
1623 ec->tx_coalesce_usecs = get_tx_coal(mp); in mv643xx_eth_get_coalesce()
1635 set_rx_coal(mp, ec->rx_coalesce_usecs); in mv643xx_eth_set_coalesce()
1636 set_tx_coal(mp, ec->tx_coalesce_usecs); in mv643xx_eth_set_coalesce()
1648 er->rx_max_pending = 4096; in mv643xx_eth_get_ringparam()
1649 er->tx_max_pending = 4096; in mv643xx_eth_get_ringparam()
1651 er->rx_pending = mp->rx_ring_size; in mv643xx_eth_get_ringparam()
1652 er->tx_pending = mp->tx_ring_size; in mv643xx_eth_get_ringparam()
1662 if (er->rx_mini_pending || er->rx_jumbo_pending) in mv643xx_eth_set_ringparam()
1663 return -EINVAL; in mv643xx_eth_set_ringparam()
1665 mp->rx_ring_size = min(er->rx_pending, 4096U); in mv643xx_eth_set_ringparam()
1666 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending, in mv643xx_eth_set_ringparam()
1668 if (mp->tx_ring_size != er->tx_pending) in mv643xx_eth_set_ringparam()
1670 mp->tx_ring_size, er->tx_pending); in mv643xx_eth_set_ringparam()
1676 "fatal error on re-opening device after ring param change\n"); in mv643xx_eth_set_ringparam()
1677 return -ENOMEM; in mv643xx_eth_set_ringparam()
1726 if (stat->netdev_off >= 0) in mv643xx_eth_get_ethtool_stats()
1727 p = ((void *)mp->dev) + stat->netdev_off; in mv643xx_eth_get_ethtool_stats()
1729 p = ((void *)mp) + stat->mp_off; in mv643xx_eth_get_ethtool_stats()
1731 data[i] = (stat->sizeof_stat == 8) ? in mv643xx_eth_get_ethtool_stats()
1741 return -EOPNOTSUPP; in mv643xx_eth_get_sset_count()
1790 if (dev->flags & IFF_PROMISC) in uc_addr_filter_mask()
1793 nibbles = 1 << (dev->dev_addr[5] & 0x0f); in uc_addr_filter_mask()
1795 if (memcmp(dev->dev_addr, ha->addr, 5)) in uc_addr_filter_mask()
1797 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) in uc_addr_filter_mask()
1800 nibbles |= 1 << (ha->addr[5] & 0x0f); in uc_addr_filter_mask()
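
Aside: uc_addr_filter_mask() above accepts extra unicast addresses that match the primary address in the first five bytes and in the high nibble of the last byte, folding each low nibble into a 16-bit mask. A standalone sketch with made-up MAC addresses (the helper name and the addresses are invented for the demo):

    /* Illustration only: low-nibble unicast filter mask, as built above. */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static uint32_t nibble_mask(const uint8_t dev_addr[6], uint8_t (*uc)[6], int n)
    {
        uint32_t nibbles = 1u << (dev_addr[5] & 0x0f);
        int i;

        for (i = 0; i < n; i++) {
            if (memcmp(dev_addr, uc[i], 5))
                return 0;   /* address not filterable this way */
            if ((dev_addr[5] ^ uc[i][5]) & 0xf0)
                return 0;
            nibbles |= 1u << (uc[i][5] & 0x0f);
        }
        return nibbles;
    }

    int main(void)
    {
        uint8_t dev_addr[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x10 };
        uint8_t uc[1][6]    = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x13 } };

        printf("nibble mask: 0x%04x\n", (unsigned int)nibble_mask(dev_addr, uc, 1));
        return 0;
    }
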
1813 uc_addr_set(mp, dev->dev_addr); in mv643xx_eth_program_unicast_filter()
1824 int off = UNICAST_TABLE(mp->port_num) + i; in mv643xx_eth_program_unicast_filter()
1853 for (j = 7; j >= 0; j--) { in addr_crc()
1870 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) in mv643xx_eth_program_multicast_filter()
1880 u8 *a = ha->addr; in mv643xx_eth_program_multicast_filter()
1896 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), in mv643xx_eth_program_multicast_filter()
1898 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), in mv643xx_eth_program_multicast_filter()
1907 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), in mv643xx_eth_program_multicast_filter()
1909 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), in mv643xx_eth_program_multicast_filter()
1924 if (!is_valid_ether_addr(sa->sa_data)) in mv643xx_eth_set_mac_address()
1925 return -EADDRNOTAVAIL; in mv643xx_eth_set_mac_address()
1927 eth_hw_addr_set(dev, sa->sa_data); in mv643xx_eth_set_mac_address()
1940 struct rx_queue *rxq = mp->rxq + index; in rxq_init()
1945 rxq->index = index; in rxq_init()
1947 rxq->rx_ring_size = mp->rx_ring_size; in rxq_init()
1949 rxq->rx_desc_count = 0; in rxq_init()
1950 rxq->rx_curr_desc = 0; in rxq_init()
1951 rxq->rx_used_desc = 0; in rxq_init()
1953 size = rxq->rx_ring_size * sizeof(struct rx_desc); in rxq_init()
1955 if (index == 0 && size <= mp->rx_desc_sram_size) { in rxq_init()
1956 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, in rxq_init()
1957 mp->rx_desc_sram_size); in rxq_init()
1958 rxq->rx_desc_dma = mp->rx_desc_sram_addr; in rxq_init()
1960 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, in rxq_init()
1961 size, &rxq->rx_desc_dma, in rxq_init()
1965 if (rxq->rx_desc_area == NULL) { in rxq_init()
1966 netdev_err(mp->dev, in rxq_init()
1970 memset(rxq->rx_desc_area, 0, size); in rxq_init()
1972 rxq->rx_desc_area_size = size; in rxq_init()
1973 rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb), in rxq_init()
1975 if (rxq->rx_skb == NULL) in rxq_init()
1978 rx_desc = rxq->rx_desc_area; in rxq_init()
1979 for (i = 0; i < rxq->rx_ring_size; i++) { in rxq_init()
1983 if (nexti == rxq->rx_ring_size) in rxq_init()
1986 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + in rxq_init()
1994 if (index == 0 && size <= mp->rx_desc_sram_size) in rxq_init()
1995 iounmap(rxq->rx_desc_area); in rxq_init()
1997 dma_free_coherent(mp->dev->dev.parent, size, in rxq_init()
1998 rxq->rx_desc_area, in rxq_init()
1999 rxq->rx_desc_dma); in rxq_init()
2002 return -ENOMEM; in rxq_init()
2012 for (i = 0; i < rxq->rx_ring_size; i++) { in rxq_deinit()
2013 if (rxq->rx_skb[i]) { in rxq_deinit()
2014 dev_consume_skb_any(rxq->rx_skb[i]); in rxq_deinit()
2015 rxq->rx_desc_count--; in rxq_deinit()
2019 if (rxq->rx_desc_count) { in rxq_deinit()
2020 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", in rxq_deinit()
2021 rxq->rx_desc_count); in rxq_deinit()
2024 if (rxq->index == 0 && in rxq_deinit()
2025 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) in rxq_deinit()
2026 iounmap(rxq->rx_desc_area); in rxq_deinit()
2028 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, in rxq_deinit()
2029 rxq->rx_desc_area, rxq->rx_desc_dma); in rxq_deinit()
2031 kfree(rxq->rx_skb); in rxq_deinit()
2036 struct tx_queue *txq = mp->txq + index; in txq_init()
2042 txq->index = index; in txq_init()
2044 txq->tx_ring_size = mp->tx_ring_size; in txq_init()
2050 txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS; in txq_init()
2051 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; in txq_init()
2053 txq->tx_desc_count = 0; in txq_init()
2054 txq->tx_curr_desc = 0; in txq_init()
2055 txq->tx_used_desc = 0; in txq_init()
2057 size = txq->tx_ring_size * sizeof(struct tx_desc); in txq_init()
2059 if (index == 0 && size <= mp->tx_desc_sram_size) { in txq_init()
2060 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, in txq_init()
2061 mp->tx_desc_sram_size); in txq_init()
2062 txq->tx_desc_dma = mp->tx_desc_sram_addr; in txq_init()
2064 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, in txq_init()
2065 size, &txq->tx_desc_dma, in txq_init()
2069 if (txq->tx_desc_area == NULL) { in txq_init()
2070 netdev_err(mp->dev, in txq_init()
2072 return -ENOMEM; in txq_init()
2074 memset(txq->tx_desc_area, 0, size); in txq_init()
2076 txq->tx_desc_area_size = size; in txq_init()
2078 tx_desc = txq->tx_desc_area; in txq_init()
2079 for (i = 0; i < txq->tx_ring_size; i++) { in txq_init()
2084 if (nexti == txq->tx_ring_size) in txq_init()
2087 txd->cmd_sts = 0; in txq_init()
2088 txd->next_desc_ptr = txq->tx_desc_dma + in txq_init()
2092 txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), in txq_init()
2094 if (!txq->tx_desc_mapping) { in txq_init()
2095 ret = -ENOMEM; in txq_init()
2100 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, in txq_init()
2101 txq->tx_ring_size * TSO_HEADER_SIZE, in txq_init()
2102 &txq->tso_hdrs_dma, GFP_KERNEL); in txq_init()
2103 if (txq->tso_hdrs == NULL) { in txq_init()
2104 ret = -ENOMEM; in txq_init()
2107 skb_queue_head_init(&txq->tx_skb); in txq_init()
2112 kfree(txq->tx_desc_mapping); in txq_init()
2114 if (index == 0 && size <= mp->tx_desc_sram_size) in txq_init()
2115 iounmap(txq->tx_desc_area); in txq_init()
2117 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, in txq_init()
2118 txq->tx_desc_area, txq->tx_desc_dma); in txq_init()
2127 txq_reclaim(txq, txq->tx_ring_size, 1); in txq_deinit()
2129 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); in txq_deinit()
2131 if (txq->index == 0 && in txq_deinit()
2132 txq->tx_desc_area_size <= mp->tx_desc_sram_size) in txq_deinit()
2133 iounmap(txq->tx_desc_area); in txq_deinit()
2135 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, in txq_deinit()
2136 txq->tx_desc_area, txq->tx_desc_dma); in txq_deinit()
2137 kfree(txq->tx_desc_mapping); in txq_deinit()
2139 if (txq->tso_hdrs) in txq_deinit()
2140 dma_free_coherent(mp->dev->dev.parent, in txq_deinit()
2141 txq->tx_ring_size * TSO_HEADER_SIZE, in txq_deinit()
2142 txq->tso_hdrs, txq->tso_hdrs_dma); in txq_deinit()
2152 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; in mv643xx_eth_collect_events()
2164 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & in mv643xx_eth_collect_events()
2166 mp->work_rx |= (int_cause & INT_RX) >> 2; in mv643xx_eth_collect_events()
2173 mp->work_link = 1; in mv643xx_eth_collect_events()
2174 mp->work_tx |= int_cause_ext & INT_EXT_TX; in mv643xx_eth_collect_events()
2189 napi_schedule(&mp->napi); in mv643xx_eth_irq()
2196 struct net_device *dev = mp->dev; in handle_link_event()
2211 for (i = 0; i < mp->txq_count; i++) { in handle_link_event()
2212 struct tx_queue *txq = mp->txq + i; in handle_link_event()
2214 txq_reclaim(txq, txq->tx_ring_size, 1); in handle_link_event()
2232 speed = -1; in handle_link_event()
2252 if (unlikely(mp->oom)) { in mv643xx_eth_poll()
2253 mp->oom = 0; in mv643xx_eth_poll()
2254 del_timer(&mp->rx_oom); in mv643xx_eth_poll()
2263 if (mp->work_link) { in mv643xx_eth_poll()
2264 mp->work_link = 0; in mv643xx_eth_poll()
2270 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; in mv643xx_eth_poll()
2271 if (likely(!mp->oom)) in mv643xx_eth_poll()
2272 queue_mask |= mp->work_rx_refill; in mv643xx_eth_poll()
2280 queue = fls(queue_mask) - 1; in mv643xx_eth_poll()
2283 work_tbd = budget - work_done; in mv643xx_eth_poll()
2287 if (mp->work_tx_end & queue_mask) { in mv643xx_eth_poll()
2288 txq_kick(mp->txq + queue); in mv643xx_eth_poll()
2289 } else if (mp->work_tx & queue_mask) { in mv643xx_eth_poll()
2290 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); in mv643xx_eth_poll()
2291 txq_maybe_wake(mp->txq + queue); in mv643xx_eth_poll()
2292 } else if (mp->work_rx & queue_mask) { in mv643xx_eth_poll()
2293 work_done += rxq_process(mp->rxq + queue, work_tbd); in mv643xx_eth_poll()
2294 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { in mv643xx_eth_poll()
2295 work_done += rxq_refill(mp->rxq + queue, work_tbd); in mv643xx_eth_poll()
2302 if (mp->oom) in mv643xx_eth_poll()
2303 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); in mv643xx_eth_poll()
2305 wrlp(mp, INT_MASK, mp->int_mask); in mv643xx_eth_poll()
2315 napi_schedule(&mp->napi); in oom_timer_wrapper()
2320 struct net_device *dev = mp->dev; in port_start()
2325 * Perform PHY reset, if there is a PHY. in port_start()
2327 if (dev->phydev) { in port_start()
2331 phy_init_hw(dev->phydev); in port_start()
2334 phy_start(dev->phydev); in port_start()
2346 if (!dev->phydev) in port_start()
2354 for (i = 0; i < mp->txq_count; i++) { in port_start()
2355 struct tx_queue *txq = mp->txq + i; in port_start()
2364 * frames to RX queue #0, and include the pseudo-header when in port_start()
2367 mv643xx_eth_set_features(mp->dev, mp->dev->features); in port_start()
2377 mv643xx_eth_program_unicast_filter(mp->dev); in port_start()
2382 for (i = 0; i < mp->rxq_count; i++) { in port_start()
2383 struct rx_queue *rxq = mp->rxq + i; in port_start()
2386 addr = (u32)rxq->rx_desc_dma; in port_start()
2387 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); in port_start()
2402 * 4 bytes for the trailing FCS -- 36 bytes total. in mv643xx_eth_recalc_skb_size()
2404 skb_size = mp->dev->mtu + 36; in mv643xx_eth_recalc_skb_size()
2411 mp->skb_size = (skb_size + 7) & ~7; in mv643xx_eth_recalc_skb_size()
2415 * netdev_alloc_skb() will cause skb->data to be misaligned in mv643xx_eth_recalc_skb_size()
2417 * some extra space to allow re-aligning the data area. in mv643xx_eth_recalc_skb_size()
2419 mp->skb_size += SKB_DMA_REALIGN; in mv643xx_eth_recalc_skb_size()
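
Aside: mv643xx_eth_recalc_skb_size() above sizes receive buffers as MTU plus 36 bytes of link-level overhead (including the trailing FCS), rounds up to an 8-byte multiple, and adds SKB_DMA_REALIGN slack so skb->data can be re-aligned after netdev_alloc_skb(). A standalone rerun of that arithmetic; the PAGE_SIZE, NET_SKB_PAD and SMP_CACHE_BYTES values below are assumptions, since the real ones come from the kernel configuration:

    /* Illustration only: receive buffer sizing from
     * mv643xx_eth_recalc_skb_size(), with assumed platform constants. */
    #include <stdio.h>

    #define PAGE_SIZE       4096
    #define NET_SKB_PAD     32
    #define SMP_CACHE_BYTES 64
    #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

    int main(void)
    {
        unsigned int mtu = 1500;
        unsigned int skb_size;

        skb_size = mtu + 36;            /* link-level overhead, incl. FCS */
        skb_size = (skb_size + 7) & ~7; /* round up to an 8-byte multiple */
        skb_size += SKB_DMA_REALIGN;    /* slack for re-aligning skb->data */

        printf("skb_size for MTU %u: %u bytes\n", mtu, skb_size);
        return 0;
    }
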
2432 err = request_irq(dev->irq, mv643xx_eth_irq, in mv643xx_eth_open()
2433 IRQF_SHARED, dev->name, dev); in mv643xx_eth_open()
2436 return -EAGAIN; in mv643xx_eth_open()
2441 napi_enable(&mp->napi); in mv643xx_eth_open()
2443 mp->int_mask = INT_EXT; in mv643xx_eth_open()
2445 for (i = 0; i < mp->rxq_count; i++) { in mv643xx_eth_open()
2448 while (--i >= 0) in mv643xx_eth_open()
2449 rxq_deinit(mp->rxq + i); in mv643xx_eth_open()
2453 rxq_refill(mp->rxq + i, INT_MAX); in mv643xx_eth_open()
2454 mp->int_mask |= INT_RX_0 << i; in mv643xx_eth_open()
2457 if (mp->oom) { in mv643xx_eth_open()
2458 mp->rx_oom.expires = jiffies + (HZ / 10); in mv643xx_eth_open()
2459 add_timer(&mp->rx_oom); in mv643xx_eth_open()
2462 for (i = 0; i < mp->txq_count; i++) { in mv643xx_eth_open()
2465 while (--i >= 0) in mv643xx_eth_open()
2466 txq_deinit(mp->txq + i); in mv643xx_eth_open()
2469 mp->int_mask |= INT_TX_END_0 << i; in mv643xx_eth_open()
2472 add_timer(&mp->mib_counters_timer); in mv643xx_eth_open()
2476 wrlp(mp, INT_MASK, mp->int_mask); in mv643xx_eth_open()
2482 for (i = 0; i < mp->rxq_count; i++) in mv643xx_eth_open()
2483 rxq_deinit(mp->rxq + i); in mv643xx_eth_open()
2485 napi_disable(&mp->napi); in mv643xx_eth_open()
2486 free_irq(dev->irq, dev); in mv643xx_eth_open()
2496 for (i = 0; i < mp->rxq_count; i++) in port_reset()
2497 rxq_disable(mp->rxq + i); in port_reset()
2498 for (i = 0; i < mp->txq_count; i++) in port_reset()
2499 txq_disable(mp->txq + i); in port_reset()
2526 napi_disable(&mp->napi); in mv643xx_eth_stop()
2528 del_timer_sync(&mp->rx_oom); in mv643xx_eth_stop()
2531 if (dev->phydev) in mv643xx_eth_stop()
2532 phy_stop(dev->phydev); in mv643xx_eth_stop()
2533 free_irq(dev->irq, dev); in mv643xx_eth_stop()
2538 del_timer_sync(&mp->mib_counters_timer); in mv643xx_eth_stop()
2540 for (i = 0; i < mp->rxq_count; i++) in mv643xx_eth_stop()
2541 rxq_deinit(mp->rxq + i); in mv643xx_eth_stop()
2542 for (i = 0; i < mp->txq_count; i++) in mv643xx_eth_stop()
2543 txq_deinit(mp->txq + i); in mv643xx_eth_stop()
2552 if (!dev->phydev) in mv643xx_eth_ioctl()
2553 return -ENOTSUPP; in mv643xx_eth_ioctl()
2555 ret = phy_mii_ioctl(dev->phydev, ifr, cmd); in mv643xx_eth_ioctl()
2565 WRITE_ONCE(dev->mtu, new_mtu); in mv643xx_eth_change_mtu()
2573 * Stop and then re-open the interface. This will allocate RX in mv643xx_eth_change_mtu()
2581 "fatal error on re-opening device after MTU change\n"); in mv643xx_eth_change_mtu()
2592 if (netif_running(mp->dev)) { in tx_timeout_task()
2593 netif_tx_stop_all_queues(mp->dev); in tx_timeout_task()
2596 netif_tx_wake_all_queues(mp->dev); in tx_timeout_task()
2606 schedule_work(&mp->tx_timeout_task); in mv643xx_eth_tx_timeout()
2617 mv643xx_eth_irq(dev->irq, dev); in mv643xx_eth_netpoll()
2619 wrlp(mp, INT_MASK, mp->int_mask); in mv643xx_eth_netpoll()
2629 void __iomem *base = msp->base; in mv643xx_eth_conf_mbus_windows()
2644 for (i = 0; i < dram->num_cs; i++) { in mv643xx_eth_conf_mbus_windows()
2645 const struct mbus_dram_window *cs = dram->cs + i; in mv643xx_eth_conf_mbus_windows()
2647 writel((cs->base & 0xffff0000) | in mv643xx_eth_conf_mbus_windows()
2648 (cs->mbus_attr << 8) | in mv643xx_eth_conf_mbus_windows()
2649 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); in mv643xx_eth_conf_mbus_windows()
2650 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); in mv643xx_eth_conf_mbus_windows()
2657 msp->win_protect = win_protect; in mv643xx_eth_conf_mbus_windows()
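
Aside: mv643xx_eth_conf_mbus_windows() above packs each DRAM chip-select window into a base register (base address high bits, MBUS attribute, target id) and a size register (size minus one, high bits only). A standalone rerun of that bit packing with invented window parameters:

    /* Illustration only: window register encoding as written above. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t base = 0x20000000, size = 0x10000000;  /* example window */
        uint32_t attr = 0x0e, target = 0x0;             /* example attr/target */
        uint32_t base_reg, size_reg;

        base_reg = (base & 0xffff0000) | (attr << 8) | target;
        size_reg = (size - 1) & 0xffff0000;

        printf("WINDOW_BASE = 0x%08x, WINDOW_SIZE = 0x%08x\n",
               (unsigned int)base_reg, (unsigned int)size_reg);
        return 0;
    }
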
2663 * Check whether we have a 14-bit coal limit field in bits in infer_hw_params()
2664 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the in infer_hw_params()
2667 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); in infer_hw_params()
2668 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) in infer_hw_params()
2669 msp->extended_rx_coal_limit = 1; in infer_hw_params()
2671 msp->extended_rx_coal_limit = 0; in infer_hw_params()
2678 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); in infer_hw_params()
2679 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { in infer_hw_params()
2680 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; in infer_hw_params()
2682 writel(7, msp->base + 0x0400 + TX_BW_RATE); in infer_hw_params()
2683 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) in infer_hw_params()
2684 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; in infer_hw_params()
2686 msp->tx_bw_control = TX_BW_CONTROL_ABSENT; in infer_hw_params()
2692 { .compatible = "marvell,orion-eth", },
2693 { .compatible = "marvell,kirkwood-eth", },
2733 dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp); in mv643xx_eth_shared_of_add_port()
2734 return -EINVAL; in mv643xx_eth_shared_of_add_port()
2738 dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp); in mv643xx_eth_shared_of_add_port()
2739 return -EINVAL; in mv643xx_eth_shared_of_add_port()
2743 dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp); in mv643xx_eth_shared_of_add_port()
2744 return -EINVAL; in mv643xx_eth_shared_of_add_port()
2751 dev_err(&pdev->dev, "too many ports registered\n"); in mv643xx_eth_shared_of_add_port()
2752 return -EINVAL; in mv643xx_eth_shared_of_add_port()
2756 if (ret == -EPROBE_DEFER) in mv643xx_eth_shared_of_add_port()
2759 mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); in mv643xx_eth_shared_of_add_port()
2760 mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); in mv643xx_eth_shared_of_add_port()
2761 mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size); in mv643xx_eth_shared_of_add_port()
2762 mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size); in mv643xx_eth_shared_of_add_port()
2763 mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr); in mv643xx_eth_shared_of_add_port()
2764 mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size); in mv643xx_eth_shared_of_add_port()
2768 ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0); in mv643xx_eth_shared_of_add_port()
2777 return -ENOMEM; in mv643xx_eth_shared_of_add_port()
2778 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); in mv643xx_eth_shared_of_add_port()
2779 ppdev->dev.of_node = pnp; in mv643xx_eth_shared_of_add_port()
2805 struct device_node *np = pdev->dev.of_node; in mv643xx_eth_shared_of_probe()
2812 pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL); in mv643xx_eth_shared_of_probe()
2814 return -ENOMEM; in mv643xx_eth_shared_of_probe()
2815 pdev->dev.platform_data = pd; in mv643xx_eth_shared_of_probe()
2817 mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit); in mv643xx_eth_shared_of_probe()
2850 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", in mv643xx_eth_shared_probe()
2855 return -EINVAL; in mv643xx_eth_shared_probe()
2857 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); in mv643xx_eth_shared_probe()
2859 return -ENOMEM; in mv643xx_eth_shared_probe()
2862 msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); in mv643xx_eth_shared_probe()
2863 if (msp->base == NULL) in mv643xx_eth_shared_probe()
2864 return -ENOMEM; in mv643xx_eth_shared_probe()
2866 msp->clk = devm_clk_get(&pdev->dev, NULL); in mv643xx_eth_shared_probe()
2867 if (!IS_ERR(msp->clk)) in mv643xx_eth_shared_probe()
2868 clk_prepare_enable(msp->clk); in mv643xx_eth_shared_probe()
2871 * (Re-)program MBUS remapping windows if we are asked to. in mv643xx_eth_shared_probe()
2880 pd = dev_get_platdata(&pdev->dev); in mv643xx_eth_shared_probe()
2882 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? in mv643xx_eth_shared_probe()
2883 pd->tx_csum_limit : 9 * 1024; in mv643xx_eth_shared_probe()
2889 if (!IS_ERR(msp->clk)) in mv643xx_eth_shared_probe()
2890 clk_disable_unprepare(msp->clk); in mv643xx_eth_shared_probe()
2899 if (!IS_ERR(msp->clk)) in mv643xx_eth_shared_remove()
2900 clk_disable_unprepare(msp->clk); in mv643xx_eth_shared_remove()
2914 int addr_shift = 5 * mp->port_num; in phy_addr_set()
2929 return (data >> (5 * mp->port_num)) & 0x1f; in phy_addr_get()
2935 struct net_device *dev = mp->dev; in set_params()
2938 if (is_valid_ether_addr(pd->mac_addr)) { in set_params()
2939 eth_hw_addr_set(dev, pd->mac_addr); in set_params()
2947 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; in set_params()
2948 if (pd->rx_queue_size) in set_params()
2949 mp->rx_ring_size = pd->rx_queue_size; in set_params()
2950 mp->rx_desc_sram_addr = pd->rx_sram_addr; in set_params()
2951 mp->rx_desc_sram_size = pd->rx_sram_size; in set_params()
2953 mp->rxq_count = pd->rx_queue_count ? : 1; in set_params()
2956 if (pd->tx_queue_size) in set_params()
2957 tx_ring_size = pd->tx_queue_size; in set_params()
2959 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size, in set_params()
2961 if (mp->tx_ring_size != tx_ring_size) in set_params()
2963 mp->tx_ring_size, tx_ring_size); in set_params()
2965 mp->tx_desc_sram_addr = pd->tx_sram_addr; in set_params()
2966 mp->tx_desc_sram_size = pd->tx_sram_size; in set_params()
2968 mp->txq_count = pd->tx_queue_count ? : 1; in set_params()
2973 struct device *dev = mp->dev->dev.parent; in get_phy_mode()
2977 if (dev->of_node) in get_phy_mode()
2978 err = of_get_phy_mode(dev->of_node, &iface); in get_phy_mode()
2983 if (!dev->of_node || err) in get_phy_mode()
3005 /* Attempt to connect to the PHY using orion-mdio */ in phy_scan()
3006 phydev = ERR_PTR(-ENODEV); in phy_scan()
3011 "orion-mdio-mii", addr); in phy_scan()
3013 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, in phy_scan()
3026 struct net_device *dev = mp->dev; in phy_init()
3027 struct phy_device *phy = dev->phydev; in phy_init()
3030 phy->autoneg = AUTONEG_ENABLE; in phy_init()
3031 phy->speed = 0; in phy_init()
3032 phy->duplex = 0; in phy_init()
3033 linkmode_copy(phy->advertising, phy->supported); in phy_init()
3035 phy->advertising); in phy_init()
3037 phy->autoneg = AUTONEG_DISABLE; in phy_init()
3038 linkmode_zero(phy->advertising); in phy_init()
3039 phy->speed = speed; in phy_init()
3040 phy->duplex = duplex; in phy_init()
3042 phy_start_aneg(phy); in phy_init()
3047 struct net_device *dev = mp->dev; in init_pscr()
3057 if (!dev->phydev) { in init_pscr()
3100 pd = dev_get_platdata(&pdev->dev); in mv643xx_eth_probe()
3102 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); in mv643xx_eth_probe()
3103 return -ENODEV; in mv643xx_eth_probe()
3106 if (pd->shared == NULL) { in mv643xx_eth_probe()
3107 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); in mv643xx_eth_probe()
3108 return -ENODEV; in mv643xx_eth_probe()
3113 return -ENOMEM; in mv643xx_eth_probe()
3115 SET_NETDEV_DEV(dev, &pdev->dev); in mv643xx_eth_probe()
3119 mp->shared = platform_get_drvdata(pd->shared); in mv643xx_eth_probe()
3120 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); in mv643xx_eth_probe()
3121 mp->port_num = pd->port_number; in mv643xx_eth_probe()
3123 mp->dev = dev; in mv643xx_eth_probe()
3125 if (of_device_is_compatible(pdev->dev.of_node, in mv643xx_eth_probe()
3126 "marvell,kirkwood-eth-port")) { in mv643xx_eth_probe()
3144 switch (pd->interface) { in mv643xx_eth_probe()
3169 mp->t_clk = 133000000; in mv643xx_eth_probe()
3170 mp->clk = devm_clk_get(&pdev->dev, NULL); in mv643xx_eth_probe()
3171 if (!IS_ERR(mp->clk)) { in mv643xx_eth_probe()
3172 clk_prepare_enable(mp->clk); in mv643xx_eth_probe()
3173 mp->t_clk = clk_get_rate(mp->clk); in mv643xx_eth_probe()
3174 } else if (!IS_ERR(mp->shared->clk)) { in mv643xx_eth_probe()
3175 mp->t_clk = clk_get_rate(mp->shared->clk); in mv643xx_eth_probe()
3179 netif_set_real_num_tx_queues(dev, mp->txq_count); in mv643xx_eth_probe()
3180 netif_set_real_num_rx_queues(dev, mp->rxq_count); in mv643xx_eth_probe()
3183 if (pd->phy_node) { in mv643xx_eth_probe()
3184 phydev = of_phy_connect(mp->dev, pd->phy_node, in mv643xx_eth_probe()
3188 err = -ENODEV; in mv643xx_eth_probe()
3190 phy_addr_set(mp, phydev->mdio.addr); in mv643xx_eth_probe()
3191 } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { in mv643xx_eth_probe()
3192 phydev = phy_scan(mp, pd->phy_addr); in mv643xx_eth_probe()
3197 phy_init(mp, pd->speed, pd->duplex); in mv643xx_eth_probe()
3199 if (err == -ENODEV) { in mv643xx_eth_probe()
3200 err = -EPROBE_DEFER; in mv643xx_eth_probe()
3206 dev->ethtool_ops = &mv643xx_eth_ethtool_ops; in mv643xx_eth_probe()
3208 init_pscr(mp, pd->speed, pd->duplex); in mv643xx_eth_probe()
3213 timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0); in mv643xx_eth_probe()
3214 mp->mib_counters_timer.expires = jiffies + 30 * HZ; in mv643xx_eth_probe()
3216 spin_lock_init(&mp->mib_counters_lock); in mv643xx_eth_probe()
3218 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); in mv643xx_eth_probe()
3220 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll); in mv643xx_eth_probe()
3222 timer_setup(&mp->rx_oom, oom_timer_wrapper, 0); in mv643xx_eth_probe()
3230 dev->irq = irq; in mv643xx_eth_probe()
3232 dev->netdev_ops = &mv643xx_eth_netdev_ops; in mv643xx_eth_probe()
3234 dev->watchdog_timeo = 2 * HZ; in mv643xx_eth_probe()
3235 dev->base_addr = 0; in mv643xx_eth_probe()
3237 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; in mv643xx_eth_probe()
3238 dev->vlan_features = dev->features; in mv643xx_eth_probe()
3240 dev->features |= NETIF_F_RXCSUM; in mv643xx_eth_probe()
3241 dev->hw_features = dev->features; in mv643xx_eth_probe()
3243 dev->priv_flags |= IFF_UNICAST_FLT; in mv643xx_eth_probe()
3246 /* MTU range: 64 - 9500 */ in mv643xx_eth_probe()
3247 dev->min_mtu = 64; in mv643xx_eth_probe()
3248 dev->max_mtu = 9500; in mv643xx_eth_probe()
3250 if (mp->shared->win_protect) in mv643xx_eth_probe()
3251 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); in mv643xx_eth_probe()
3265 mp->port_num, dev->dev_addr); in mv643xx_eth_probe()
3267 if (mp->tx_desc_sram_size > 0) in mv643xx_eth_probe()
3273 if (!IS_ERR(mp->clk)) in mv643xx_eth_probe()
3274 clk_disable_unprepare(mp->clk); in mv643xx_eth_probe()
3283 struct net_device *dev = mp->dev; in mv643xx_eth_remove()
3285 unregister_netdev(mp->dev); in mv643xx_eth_remove()
3286 if (dev->phydev) in mv643xx_eth_remove()
3287 phy_disconnect(dev->phydev); in mv643xx_eth_remove()
3288 cancel_work_sync(&mp->tx_timeout_task); in mv643xx_eth_remove()
3290 if (!IS_ERR(mp->clk)) in mv643xx_eth_remove()
3291 clk_disable_unprepare(mp->clk); in mv643xx_eth_remove()
3293 free_netdev(mp->dev); in mv643xx_eth_remove()
3304 if (netif_running(mp->dev)) in mv643xx_eth_shutdown()