Lines Matching +full:mac +full:- +full:clk +full:- +full:rx
1 // SPDX-License-Identifier: GPL-2.0-or-later
14 #include <linux/clk.h>
16 #include <linux/dma-mapping.h>
38 #define DRIVER_NAME "pxa168-eth"
64 #define SMI_BUSY (1 << 28) /* 1 - transaction in progress */
65 #define SMI_R_VALID (1 << 27) /* 1 - read data valid */
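For orientation: these status flags sit alongside the opcode and address fields of the SMI command register. A hedged sketch of a read transaction (the phy/register field positions and the SMI_OP_R opcode bit are assumptions modeled on Marvell's similar mv643xx SMI block, not quoted from this file):

	/* Compose a read: data[15:0], reg[20:16], phy[25:21],
	 * opcode in bit 26; then wait for the result to latch. */
	wrl(pep, SMI, (phy_addr << 21) | (regnum << 16) | SMI_OP_R);
	while (!((val = rdl(pep, SMI)) & SMI_R_VALID))
		;	/* the real driver bounds this poll and sleeps */
	return val & 0xffff;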
72 /* RX & TX descriptor command */
75 /* RX descriptor status */
158 #define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
173 #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
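SKB_DMA_REALIGN is the slack needed to push netdev_alloc_skb()'s data pointer forward to the next cache-line boundary before DMA mapping. A minimal standalone sketch of the arithmetic, with assumed (arch-dependent) values for the three kernel constants:

	#include <stdio.h>

	#define PAGE_SIZE_EX       4096u	/* assumed */
	#define NET_SKB_PAD_EX     32u		/* assumed */
	#define SMP_CACHE_BYTES_EX 64u		/* assumed */

	#define SKB_DMA_REALIGN_EX \
		((PAGE_SIZE_EX - NET_SKB_PAD_EX) % SMP_CACHE_BYTES_EX)

	int main(void)
	{
		/* 32 bytes of slack: enough to move skb->data up to
		 * the next 64-byte cache-line boundary. */
		printf("realign slack = %u bytes\n", SKB_DMA_REALIGN_EX);
		return 0;
	}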
199 int rx_resource_err; /* Rx ring resource error flag */
201 /* Next available and first returning Rx resource */
228 /* Size of Rx Ring per queue */
230 /* Number of rx descriptors in use */
234 * Used in case RX Ring is empty, which can occur when
241 struct clk *clk; member
275 return readl_relaxed(pep->base + offset); in rdl()
280 writel_relaxed(data, pep->base + offset); in wrl()
294 && delay-- > 0) { in abort_dma()
297 } while (max_retries-- > 0 && delay <= 0); in abort_dma()
300 netdev_err(pep->dev, "%s : DMA Stuck\n", __func__); in abort_dma()
310 while (pep->rx_desc_count < pep->rx_ring_size) { in rxq_refill()
313 skb = netdev_alloc_skb(dev, pep->skb_size); in rxq_refill()
318 pep->rx_desc_count++; in rxq_refill()
319 /* Get 'used' Rx descriptor */ in rxq_refill()
320 used_rx_desc = pep->rx_used_desc_q; in rxq_refill()
321 p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; in rxq_refill()
322 size = skb_end_pointer(skb) - skb->data; in rxq_refill()
323 p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev, in rxq_refill()
324 skb->data, in rxq_refill()
327 p_used_rx_desc->buf_size = size; in rxq_refill()
328 pep->rx_skb[used_rx_desc] = skb; in rxq_refill()
332 p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; in rxq_refill()
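Ordering matters here: buf_ptr and buf_size must be visible to the device before the ownership bit flips. A sketch of the handoff pattern (barrier placement is an assumption modeled on Marvell's other ring drivers):

	desc->buf_ptr  = dma_addr;	/* fill descriptor fields    */
	desc->buf_size = size;
	dma_wmb();			/* publish them to the HW... */
	desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; /* ...then hand off */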
336 pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size; in rxq_refill()
338 /* Any Rx return cancels the Rx resource error status */ in rxq_refill()
339 pep->rx_resource_err = 0; in rxq_refill()
345 * If the RX ring has run out of skbs, set a timer to retry allocating in rxq_refill()
348 if (pep->rx_desc_count == 0) { in rxq_refill()
349 pep->timeout.expires = jiffies + (HZ / 10); in rxq_refill()
350 add_timer(&pep->timeout); in rxq_refill()
357 napi_schedule(&pep->napi); in rxq_refill_timer_wrapper()
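The refill timer's handler only reschedules NAPI; the refill itself runs from the poll loop. A plausible reconstruction of the wrapper around the line above (the from_timer() usage and the 'timeout' field name are inferred from the timer_setup()/add_timer() calls elsewhere in this listing):

	static void rxq_refill_timer_wrapper(struct timer_list *t)
	{
		struct pxa168_eth_private *pep = from_timer(pep, t, timeout);

		napi_schedule(&pep->napi);	/* refill runs in poll */
	}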
385 * ----------------------------------------------------------------------------
388 * mac_addr_orig - MAC address.
401 /* Make a copy of MAC address since we are going to perform bit in hash_function()
420 * ----------------------------------------------------------------------------
423 * pep - ETHERNET port private data.
424 * mac_addr - MAC address.
425 * skip - if 1, skip this address. Used in case of deleting an entry which is a
429 * rd - 0 Discard packet upon match.
430 * - 1 Receive packet upon match.
434 * -ENOSPC if table full
464 * entries at the index obtained by hashing the specified MAC address in add_del_hash_entry()
466 start = pep->htpr; in add_del_hash_entry()
469 if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) { in add_del_hash_entry()
473 if (((le32_to_cpu(entry->lo) & 0xfffffff8) == in add_del_hash_entry()
475 (le32_to_cpu(entry->hi) == new_high)) { in add_del_hash_entry()
485 if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) && in add_del_hash_entry()
486 (le32_to_cpu(entry->hi) != new_high) && del) in add_del_hash_entry()
491 netdev_info(pep->dev, in add_del_hash_entry()
495 return -ENOSPC; in add_del_hash_entry()
504 entry->hi = 0; in add_del_hash_entry()
505 entry->lo = 0; in add_del_hash_entry()
507 entry->hi = cpu_to_le32(new_high); in add_del_hash_entry()
508 entry->lo = cpu_to_le32(new_low); in add_del_hash_entry()
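The hi/lo words written here form one complete hash-table entry. Judging from the call sites later in this listing (set_rx_mode, set_mac_address), the wrapper plausibly does a delete-then-add; the argument order and the HASH_ADD/HASH_DELETE opcodes are assumptions:

	static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
						  unsigned char *oldmac,
						  unsigned char *mac)
	{
		if (oldmac)	/* replacing: drop the previous entry */
			add_del_hash_entry(pep, oldmac, 1, 0, HASH_DELETE);
		add_del_hash_entry(pep, mac, 1, 0, HASH_ADD);
	}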
515 * ----------------------------------------------------------------------------
516 * Create an address table entry from MAC address info
538 * location of the hash table is identified by a 32-bit pointer stored in init_hash_table()
548 if (!pep->htpr) { in init_hash_table()
549 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, in init_hash_table()
551 &pep->htpr_dma, GFP_KERNEL); in init_hash_table()
552 if (!pep->htpr) in init_hash_table()
553 return -ENOMEM; in init_hash_table()
555 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); in init_hash_table()
557 wrl(pep, HTPR, pep->htpr_dma); in init_hash_table()
568 if (dev->flags & IFF_PROMISC) in pxa168_eth_set_rx_mode()
575 * Remove the old list of MAC addresses and add dev->addr in pxa168_eth_set_rx_mode()
578 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); in pxa168_eth_set_rx_mode()
579 update_hash_table_mac_address(pep, NULL, dev->dev_addr); in pxa168_eth_set_rx_mode()
582 update_hash_table_mac_address(pep, NULL, ha->addr); in pxa168_eth_set_rx_mode()
607 if (!is_valid_ether_addr(sa->sa_data)) in pxa168_eth_set_mac_address()
608 return -EADDRNOTAVAIL; in pxa168_eth_set_mac_address()
609 memcpy(oldMac, dev->dev_addr, ETH_ALEN); in pxa168_eth_set_mac_address()
610 eth_hw_addr_set(dev, sa->sa_data); in pxa168_eth_set_mac_address()
612 mac_h = dev->dev_addr[0] << 24; in pxa168_eth_set_mac_address()
613 mac_h |= dev->dev_addr[1] << 16; in pxa168_eth_set_mac_address()
614 mac_h |= dev->dev_addr[2] << 8; in pxa168_eth_set_mac_address()
615 mac_h |= dev->dev_addr[3]; in pxa168_eth_set_mac_address()
616 mac_l = dev->dev_addr[4] << 8; in pxa168_eth_set_mac_address()
617 mac_l |= dev->dev_addr[5]; in pxa168_eth_set_mac_address()
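Worked example of the packing above (standalone, illustration only): 00:11:22:33:44:55 becomes mac_h = 0x00112233 and mac_l = 0x4455, matching the split the MAC address registers expect:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t a[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint32_t mac_h = (uint32_t)a[0] << 24 | (uint32_t)a[1] << 16 |
				 (uint32_t)a[2] << 8  | a[3];
		uint32_t mac_l = (uint32_t)a[4] << 8 | a[5];

		printf("mac_h=0x%08x mac_l=0x%04x\n", mac_h, mac_l);
		return 0;
	}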
622 update_hash_table_mac_address(pep, oldMac, dev->dev_addr); in pxa168_eth_set_mac_address()
633 phy_start(dev->phydev); in eth_port_start()
636 tx_curr_desc = pep->tx_curr_desc_q; in eth_port_start()
638 (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc))); in eth_port_start()
640 /* Assignment of Rx CRDP of given queue */ in eth_port_start()
641 rx_curr_desc = pep->rx_curr_desc_q; in eth_port_start()
643 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); in eth_port_start()
646 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); in eth_port_start()
658 /* Start RX DMA engine */ in eth_port_start()
675 /* Stop RX DMA */ in eth_port_reset()
689 phy_stop(dev->phydev); in eth_port_reset()
693 * txq_reclaim - Free the tx desc data for completed descriptors
694 * If force is non-zero, frees uncompleted descriptors as well
709 pep->work_todo &= ~WORK_TX_DONE; in txq_reclaim()
710 while (pep->tx_desc_count > 0) { in txq_reclaim()
711 tx_index = pep->tx_used_desc_q; in txq_reclaim()
712 desc = &pep->p_tx_desc_area[tx_index]; in txq_reclaim()
713 cmd_sts = desc->cmd_sts; in txq_reclaim()
718 released = -1; in txq_reclaim()
722 pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size; in txq_reclaim()
723 pep->tx_desc_count--; in txq_reclaim()
724 addr = desc->buf_ptr; in txq_reclaim()
725 count = desc->byte_cnt; in txq_reclaim()
726 skb = pep->tx_skb[tx_index]; in txq_reclaim()
728 pep->tx_skb[tx_index] = NULL; in txq_reclaim()
733 dev->stats.tx_errors++; in txq_reclaim()
735 dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE); in txq_reclaim()
749 netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count); in pxa168_eth_tx_timeout()
751 schedule_work(&pep->tx_timeout_task); in pxa168_eth_tx_timeout()
759 struct net_device *dev = pep->dev; in pxa168_eth_tx_timeout_task()
767 struct net_device_stats *stats = &dev->stats; in rxq_process()
771 while (budget-- > 0) { in rxq_process()
776 /* Do not process Rx ring in case of Rx ring resource error */ in rxq_process()
777 if (pep->rx_resource_err) in rxq_process()
779 rx_curr_desc = pep->rx_curr_desc_q; in rxq_process()
780 rx_used_desc = pep->rx_used_desc_q; in rxq_process()
781 rx_desc = &pep->p_rx_desc_area[rx_curr_desc]; in rxq_process()
782 cmd_sts = rx_desc->cmd_sts; in rxq_process()
786 skb = pep->rx_skb[rx_curr_desc]; in rxq_process()
787 pep->rx_skb[rx_curr_desc] = NULL; in rxq_process()
789 rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size; in rxq_process()
790 pep->rx_curr_desc_q = rx_next_curr_desc; in rxq_process()
792 /* Rx descriptors exhausted. */ in rxq_process()
793 /* Set the Rx ring resource error flag */ in rxq_process()
795 pep->rx_resource_err = 1; in rxq_process()
796 pep->rx_desc_count--; in rxq_process()
797 dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr, in rxq_process()
798 rx_desc->buf_size, in rxq_process()
805 stats->rx_packets++; in rxq_process()
806 stats->rx_bytes += rx_desc->byte_cnt; in rxq_process()
815 stats->rx_dropped++; in rxq_process()
820 "Rx pkt on multiple desc\n"); in rxq_process()
823 stats->rx_errors++; in rxq_process()
827 * The -4 is for the CRC in the trailer of the in rxq_process()
830 skb_put(skb, rx_desc->byte_cnt - 4); in rxq_process()
831 skb->protocol = eth_type_trans(skb, dev); in rxq_process()
835 /* Fill RX ring with skb's */ in rxq_process()
852 pep->work_todo |= WORK_TX_DONE; in pxa168_eth_collect_events()
869 napi_schedule(&pep->napi); in pxa168_eth_int_handler()
881 * 4 bytes for the trailing FCS -- 36 bytes total. in pxa168_eth_recalc_skb_size()
883 skb_size = pep->dev->mtu + 36; in pxa168_eth_recalc_skb_size()
890 pep->skb_size = (skb_size + 7) & ~7; in pxa168_eth_recalc_skb_size()
894 * netdev_alloc_skb() will cause skb->data to be misaligned in pxa168_eth_recalc_skb_size()
896 * some extra space to allow re-aligning the data area. in pxa168_eth_recalc_skb_size()
898 pep->skb_size += SKB_DMA_REALIGN; in pxa168_eth_recalc_skb_size()
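Putting the sizing rule together (a worked example using the figures from the comment above; the helper is illustrative, not part of the driver):

	#include <stdio.h>

	#define ETH_HLEN    14	/* Ethernet header */
	#define ETH_FCS_LEN 4	/* trailing CRC */

	static unsigned int rx_buf_size(unsigned int mtu)
	{
		/* 2 bytes hw padding + header + 16 bytes of VLAN
		 * tags + FCS = mtu + 36, rounded up to 8 bytes. */
		unsigned int size = mtu + 2 + ETH_HLEN + 16 + ETH_FCS_LEN;

		return (size + 7) & ~7u;
	}

	int main(void)
	{
		/* Prints 1536 for the default MTU of 1500; the
		 * SKB_DMA_REALIGN slack is then added on top. */
		printf("%u\n", rx_buf_size(1500));
		return 0;
	}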
907 if (pep->skb_size <= 1518) in set_port_config_ext()
909 else if (pep->skb_size <= 1536) in set_port_config_ext()
911 else if (pep->skb_size <= 2048) in set_port_config_ext()
924 PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ in set_port_config_ext()
932 struct phy_device *phy = dev->phydev; in pxa168_eth_adjust_link()
939 if (phy->interface == PHY_INTERFACE_MODE_RMII) in pxa168_eth_adjust_link()
941 if (phy->speed == SPEED_100) in pxa168_eth_adjust_link()
943 if (phy->duplex) in pxa168_eth_adjust_link()
945 if (!phy->pause) in pxa168_eth_adjust_link()
965 if (dev->phydev) in pxa168_init_phy()
968 phy = mdiobus_scan_c22(pep->smi_bus, pep->phy_addr); in pxa168_init_phy()
973 pep->phy_intf); in pxa168_init_phy()
977 cmd.base.phy_address = pep->phy_addr; in pxa168_init_phy()
978 cmd.base.speed = pep->phy_speed; in pxa168_init_phy()
979 cmd.base.duplex = pep->phy_duplex; in pxa168_init_phy()
1008 SDCR_RIFB | /* Rx interrupt on frame */ in pxa168_init_hw()
1024 int rx_desc_num = pep->rx_ring_size; in rxq_init()
1026 /* Allocate RX skb rings */ in rxq_init()
1027 pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL); in rxq_init()
1028 if (!pep->rx_skb) in rxq_init()
1029 return -ENOMEM; in rxq_init()
1031 /* Allocate RX ring */ in rxq_init()
1032 pep->rx_desc_count = 0; in rxq_init()
1033 size = pep->rx_ring_size * sizeof(struct rx_desc); in rxq_init()
1034 pep->rx_desc_area_size = size; in rxq_init()
1035 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, in rxq_init()
1036 &pep->rx_desc_dma, in rxq_init()
1038 if (!pep->p_rx_desc_area) in rxq_init()
1041 /* initialize the next_desc_ptr links in the Rx descriptors ring */ in rxq_init()
1042 p_rx_desc = pep->p_rx_desc_area; in rxq_init()
1044 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + in rxq_init()
1047 /* Save Rx desc pointer to driver struct. */ in rxq_init()
1048 pep->rx_curr_desc_q = 0; in rxq_init()
1049 pep->rx_used_desc_q = 0; in rxq_init()
1050 pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc); in rxq_init()
1053 kfree(pep->rx_skb); in rxq_init()
1054 return -ENOMEM; in rxq_init()
1062 /* Free preallocated skb's on RX rings */ in rxq_deinit()
1063 for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) { in rxq_deinit()
1064 if (pep->rx_skb[curr]) { in rxq_deinit()
1065 dev_kfree_skb(pep->rx_skb[curr]); in rxq_deinit()
1066 pep->rx_desc_count--; in rxq_deinit()
1069 if (pep->rx_desc_count) in rxq_deinit()
1070 netdev_err(dev, "Error freeing Rx ring: %d skbs still in use\n", in rxq_deinit()
1071 pep->rx_desc_count); in rxq_deinit()
1072 /* Free RX ring */ in rxq_deinit()
1073 if (pep->p_rx_desc_area) in rxq_deinit()
1074 dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size, in rxq_deinit()
1075 pep->p_rx_desc_area, pep->rx_desc_dma); in rxq_deinit()
1076 kfree(pep->rx_skb); in rxq_deinit()
1084 int tx_desc_num = pep->tx_ring_size; in txq_init()
1086 pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL); in txq_init()
1087 if (!pep->tx_skb) in txq_init()
1088 return -ENOMEM; in txq_init()
1091 pep->tx_desc_count = 0; in txq_init()
1092 size = pep->tx_ring_size * sizeof(struct tx_desc); in txq_init()
1093 pep->tx_desc_area_size = size; in txq_init()
1094 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, in txq_init()
1095 &pep->tx_desc_dma, in txq_init()
1097 if (!pep->p_tx_desc_area) in txq_init()
1100 p_tx_desc = pep->p_tx_desc_area; in txq_init()
1102 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + in txq_init()
1105 pep->tx_curr_desc_q = 0; in txq_init()
1106 pep->tx_used_desc_q = 0; in txq_init()
1107 pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc); in txq_init()
1110 kfree(pep->tx_skb); in txq_init()
1111 return -ENOMEM; in txq_init()
1120 BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q); in txq_deinit()
1122 if (pep->p_tx_desc_area) in txq_deinit()
1123 dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size, in txq_deinit()
1124 pep->p_tx_desc_area, pep->tx_desc_dma); in txq_deinit()
1125 kfree(pep->tx_skb); in txq_deinit()
1137 err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev); in pxa168_eth_open()
1139 dev_err(&dev->dev, "can't assign irq\n"); in pxa168_eth_open()
1140 return -EAGAIN; in pxa168_eth_open()
1142 pep->rx_resource_err = 0; in pxa168_eth_open()
1149 pep->rx_used_desc_q = 0; in pxa168_eth_open()
1150 pep->rx_curr_desc_q = 0; in pxa168_eth_open()
1152 /* Fill RX ring with skb's */ in pxa168_eth_open()
1154 pep->rx_used_desc_q = 0; in pxa168_eth_open()
1155 pep->rx_curr_desc_q = 0; in pxa168_eth_open()
1157 napi_enable(&pep->napi); in pxa168_eth_open()
1163 free_irq(dev->irq, dev); in pxa168_eth_open()
1177 napi_disable(&pep->napi); in pxa168_eth_stop()
1178 del_timer_sync(&pep->timeout); in pxa168_eth_stop()
1180 free_irq(dev->irq, dev); in pxa168_eth_stop()
1191 WRITE_ONCE(dev->mtu, mtu); in pxa168_eth_change_mtu()
1198 * Stop and then re-open the interface. This will allocate RX in pxa168_eth_change_mtu()
1205 dev_err(&dev->dev, in pxa168_eth_change_mtu()
1206 "fatal error on re-opening device after MTU change\n"); in pxa168_eth_change_mtu()
1216 tx_desc_curr = pep->tx_curr_desc_q; in eth_alloc_tx_desc_index()
1217 pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size; in eth_alloc_tx_desc_index()
1218 BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q); in eth_alloc_tx_desc_index()
1219 pep->tx_desc_count++; in eth_alloc_tx_desc_index()
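Both rings use the same wrap arithmetic: a producer index chases a consumer index modulo the ring size. A tiny standalone model of the invariant checked by the BUG_ON above (all names hypothetical):

	#include <assert.h>
	#include <stdio.h>

	#define RING_SIZE 8

	int main(void)
	{
		unsigned int curr = 6, used = 2, count = 4;

		unsigned int taken = curr;	/* claim one slot */
		curr = (curr + 1) % RING_SIZE;	/* wrap at the end */
		count++;
		assert(curr != used);	/* catching up = ring overrun */

		printf("took %u, next %u, in use %u\n", taken, curr, count);
		return 0;
	}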
1228 struct net_device *dev = pep->dev; in pxa168_rx_poll()
1238 && pep->tx_ring_size - pep->tx_desc_count > 1) { in pxa168_rx_poll()
1254 struct net_device_stats *stats = &dev->stats; in pxa168_eth_start_xmit()
1260 desc = &pep->p_tx_desc_area[tx_index]; in pxa168_eth_start_xmit()
1261 length = skb->len; in pxa168_eth_start_xmit()
1262 pep->tx_skb[tx_index] = skb; in pxa168_eth_start_xmit()
1263 desc->byte_cnt = length; in pxa168_eth_start_xmit()
1264 desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length, in pxa168_eth_start_xmit()
1270 desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC | in pxa168_eth_start_xmit()
1275 stats->tx_bytes += length; in pxa168_eth_start_xmit()
1276 stats->tx_packets++; in pxa168_eth_start_xmit()
1278 if (pep->tx_ring_size - pep->tx_desc_count <= 1) { in pxa168_eth_start_xmit()
1293 return -ETIMEDOUT; in smi_wait_ready()
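The -ETIMEDOUT above comes from a bounded busy-wait on SMI_BUSY. A plausible shape of smi_wait_ready(), consistent with this single line (the PHY_WAIT_ITERATIONS bound and the 10 ms sleep are assumptions):

	static int smi_wait_ready(struct pxa168_eth_private *pep)
	{
		int i;

		/* Poll until the previous SMI transaction finishes. */
		for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
			if (i == PHY_WAIT_ITERATIONS)
				return -ETIMEDOUT;
			msleep(10);
		}
		return 0;
	}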
1302 struct pxa168_eth_private *pep = bus->priv; in pxa168_smi_read()
1307 netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); in pxa168_smi_read()
1308 return -ETIMEDOUT; in pxa168_smi_read()
1314 netdev_warn(pep->dev, in pxa168_smi_read()
1316 return -ENODEV; in pxa168_smi_read()
1327 struct pxa168_eth_private *pep = bus->priv; in pxa168_smi_write()
1330 netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); in pxa168_smi_write()
1331 return -ETIMEDOUT; in pxa168_smi_write()
1338 netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); in pxa168_smi_write()
1339 return -ETIMEDOUT; in pxa168_smi_write()
1348 disable_irq(dev->irq); in pxa168_eth_netpoll()
1349 pxa168_eth_int_handler(dev->irq, dev); in pxa168_eth_netpoll()
1350 enable_irq(dev->irq); in pxa168_eth_netpoll()
1357 strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); in pxa168_get_drvinfo()
1358 strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); in pxa168_get_drvinfo()
1359 strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); in pxa168_get_drvinfo()
1360 strscpy(info->bus_info, "N/A", sizeof(info->bus_info)); in pxa168_get_drvinfo()
1391 struct clk *clk; in pxa168_eth_probe() local
1397 clk = devm_clk_get(&pdev->dev, NULL); in pxa168_eth_probe()
1398 if (IS_ERR(clk)) { in pxa168_eth_probe()
1399 dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n"); in pxa168_eth_probe()
1400 return -ENODEV; in pxa168_eth_probe()
1402 clk_prepare_enable(clk); in pxa168_eth_probe()
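Because the clock is devm-acquired but manually enabled, every later error path has to unwind with clk_disable_unprepare(), as the probe error path below does. On kernels that provide it, the same effect fits in one call (a sketch, assuming the API is available; not the author's code):

	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");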
1406 err = -ENOMEM; in pxa168_eth_probe()
1412 pep->dev = dev; in pxa168_eth_probe()
1413 pep->clk = clk; in pxa168_eth_probe()
1415 pep->base = devm_platform_ioremap_resource(pdev, 0); in pxa168_eth_probe()
1416 if (IS_ERR(pep->base)) { in pxa168_eth_probe()
1417 err = PTR_ERR(pep->base); in pxa168_eth_probe()
1422 if (err == -EPROBE_DEFER) in pxa168_eth_probe()
1424 BUG_ON(dev->irq < 0); in pxa168_eth_probe()
1425 dev->irq = err; in pxa168_eth_probe()
1426 dev->netdev_ops = &pxa168_eth_netdev_ops; in pxa168_eth_probe()
1427 dev->watchdog_timeo = 2 * HZ; in pxa168_eth_probe()
1428 dev->base_addr = 0; in pxa168_eth_probe()
1429 dev->ethtool_ops = &pxa168_ethtool_ops; in pxa168_eth_probe()
1431 /* MTU range: 68 - 9500 */ in pxa168_eth_probe()
1432 dev->min_mtu = ETH_MIN_MTU; in pxa168_eth_probe()
1433 dev->max_mtu = 9500; in pxa168_eth_probe()
1435 INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); in pxa168_eth_probe()
1437 err = of_get_ethdev_address(pdev->dev.of_node, dev); in pxa168_eth_probe()
1441 /* try reading the mac address, if set by the bootloader */ in pxa168_eth_probe()
1446 dev_info(&pdev->dev, "Using random mac address\n"); in pxa168_eth_probe()
1451 pep->rx_ring_size = NUM_RX_DESCS; in pxa168_eth_probe()
1452 pep->tx_ring_size = NUM_TX_DESCS; in pxa168_eth_probe()
1454 pep->pd = dev_get_platdata(&pdev->dev); in pxa168_eth_probe()
1455 if (pep->pd) { in pxa168_eth_probe()
1456 if (pep->pd->rx_queue_size) in pxa168_eth_probe()
1457 pep->rx_ring_size = pep->pd->rx_queue_size; in pxa168_eth_probe()
1459 if (pep->pd->tx_queue_size) in pxa168_eth_probe()
1460 pep->tx_ring_size = pep->pd->tx_queue_size; in pxa168_eth_probe()
1462 pep->port_num = pep->pd->port_number; in pxa168_eth_probe()
1463 pep->phy_addr = pep->pd->phy_addr; in pxa168_eth_probe()
1464 pep->phy_speed = pep->pd->speed; in pxa168_eth_probe()
1465 pep->phy_duplex = pep->pd->duplex; in pxa168_eth_probe()
1466 pep->phy_intf = pep->pd->intf; in pxa168_eth_probe()
1468 if (pep->pd->init) in pxa168_eth_probe()
1469 pep->pd->init(); in pxa168_eth_probe()
1470 } else if (pdev->dev.of_node) { in pxa168_eth_probe()
1471 of_property_read_u32(pdev->dev.of_node, "port-id", in pxa168_eth_probe()
1472 &pep->port_num); in pxa168_eth_probe()
1474 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); in pxa168_eth_probe()
1476 dev_err(&pdev->dev, "missing phy-handle\n"); in pxa168_eth_probe()
1477 err = -EINVAL; in pxa168_eth_probe()
1480 of_property_read_u32(np, "reg", &pep->phy_addr); in pxa168_eth_probe()
1482 err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf); in pxa168_eth_probe()
1483 if (err && err != -ENODEV) in pxa168_eth_probe()
1488 BUG_ON(pep->port_num > 2); in pxa168_eth_probe()
1489 netif_napi_add_weight(dev, &pep->napi, pxa168_rx_poll, in pxa168_eth_probe()
1490 pep->rx_ring_size); in pxa168_eth_probe()
1492 memset(&pep->timeout, 0, sizeof(struct timer_list)); in pxa168_eth_probe()
1493 timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0); in pxa168_eth_probe()
1495 pep->smi_bus = mdiobus_alloc(); in pxa168_eth_probe()
1496 if (!pep->smi_bus) { in pxa168_eth_probe()
1497 err = -ENOMEM; in pxa168_eth_probe()
1500 pep->smi_bus->priv = pep; in pxa168_eth_probe()
1501 pep->smi_bus->name = "pxa168_eth smi"; in pxa168_eth_probe()
1502 pep->smi_bus->read = pxa168_smi_read; in pxa168_eth_probe()
1503 pep->smi_bus->write = pxa168_smi_write; in pxa168_eth_probe()
1504 snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d", in pxa168_eth_probe()
1505 pdev->name, pdev->id); in pxa168_eth_probe()
1506 pep->smi_bus->parent = &pdev->dev; in pxa168_eth_probe()
1507 pep->smi_bus->phy_mask = 0xffffffff; in pxa168_eth_probe()
1508 err = mdiobus_register(pep->smi_bus); in pxa168_eth_probe()
1512 pep->pdev = pdev; in pxa168_eth_probe()
1513 SET_NETDEV_DEV(dev, &pdev->dev); in pxa168_eth_probe()
1521 mdiobus_unregister(pep->smi_bus); in pxa168_eth_probe()
1523 mdiobus_free(pep->smi_bus); in pxa168_eth_probe()
1527 clk_disable_unprepare(clk); in pxa168_eth_probe()
1536 cancel_work_sync(&pep->tx_timeout_task); in pxa168_eth_remove()
1537 if (pep->htpr) { in pxa168_eth_remove()
1538 dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, in pxa168_eth_remove()
1539 pep->htpr, pep->htpr_dma); in pxa168_eth_remove()
1540 pep->htpr = NULL; in pxa168_eth_remove()
1542 if (dev->phydev) in pxa168_eth_remove()
1543 phy_disconnect(dev->phydev); in pxa168_eth_remove()
1545 clk_disable_unprepare(pep->clk); in pxa168_eth_remove()
1546 mdiobus_unregister(pep->smi_bus); in pxa168_eth_remove()
1547 mdiobus_free(pep->smi_bus); in pxa168_eth_remove()
1561 return -ENOSYS; in pxa168_eth_resume()
1566 return -ENOSYS; in pxa168_eth_suspend()
1575 { .compatible = "marvell,pxa168-eth" },