Lines Matching +full:free +full:- +full:flowing

1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
14 * atl1e_pci_tbl - PCI Device ID Table
76 * atl1e_irq_enable - Enable default interrupt generation settings
81 if (likely(atomic_dec_and_test(&adapter->irq_sem))) { in atl1e_irq_enable()
82 AT_WRITE_REG(&adapter->hw, REG_ISR, 0); in atl1e_irq_enable()
83 AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); in atl1e_irq_enable()
84 AT_WRITE_FLUSH(&adapter->hw); in atl1e_irq_enable()
89 * atl1e_irq_disable - Mask off interrupt generation on the NIC
94 atomic_inc(&adapter->irq_sem); in atl1e_irq_disable()
95 AT_WRITE_REG(&adapter->hw, REG_IMR, 0); in atl1e_irq_disable()
96 AT_WRITE_FLUSH(&adapter->hw); in atl1e_irq_disable()
97 synchronize_irq(adapter->pdev->irq); in atl1e_irq_disable()
101 * atl1e_irq_reset - reset interrupt configuration on the NIC in atl1e_irq_reset()
106 atomic_set(&adapter->irq_sem, 0); in atl1e_irq_reset()
107 AT_WRITE_REG(&adapter->hw, REG_ISR, 0); in atl1e_irq_reset()
108 AT_WRITE_REG(&adapter->hw, REG_IMR, 0); in atl1e_irq_reset()
109 AT_WRITE_FLUSH(&adapter->hw); in atl1e_irq_reset()
113 * atl1e_phy_config - Timer Call-back
120 struct atl1e_hw *hw = &adapter->hw; in atl1e_phy_config()
123 spin_lock_irqsave(&adapter->mdio_lock, flags); in atl1e_phy_config()
125 spin_unlock_irqrestore(&adapter->mdio_lock, flags); in atl1e_phy_config()
130 while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) in atl1e_reinit_locked()
134 clear_bit(__AT_RESETTING, &adapter->flags); in atl1e_reinit_locked()
147 struct atl1e_hw *hw = &adapter->hw; in atl1e_check_link()
148 struct net_device *netdev = adapter->netdev; in atl1e_check_link()
163 adapter->link_speed = SPEED_0; in atl1e_check_link()
174 if (adapter->link_speed != speed || in atl1e_check_link()
175 adapter->link_duplex != duplex) { in atl1e_check_link()
176 adapter->link_speed = speed; in atl1e_check_link()
177 adapter->link_duplex = duplex; in atl1e_check_link()
181 adapter->link_speed, in atl1e_check_link()
182 adapter->link_duplex == FULL_DUPLEX ? in atl1e_check_link()
187 /* Link down -> Up */ in atl1e_check_link()
196 * atl1e_link_chg_task - deal with link change event Out of interrupt context
205 spin_lock_irqsave(&adapter->mdio_lock, flags); in atl1e_link_chg_task()
207 spin_unlock_irqrestore(&adapter->mdio_lock, flags); in atl1e_link_chg_task()
212 struct net_device *netdev = adapter->netdev; in atl1e_link_chg_event()
216 spin_lock(&adapter->mdio_lock); in atl1e_link_chg_event()
217 atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); in atl1e_link_chg_event()
218 atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); in atl1e_link_chg_event()
219 spin_unlock(&adapter->mdio_lock); in atl1e_link_chg_event()
226 adapter->link_speed = SPEED_0; in atl1e_link_chg_event()
230 schedule_work(&adapter->link_chg_task); in atl1e_link_chg_event()
235 del_timer_sync(&adapter->phy_config_timer); in atl1e_del_timer()
240 cancel_work_sync(&adapter->reset_task); in atl1e_cancel_work()
241 cancel_work_sync(&adapter->link_chg_task); in atl1e_cancel_work()
245 * atl1e_tx_timeout - Respond to a Tx Hang
254 schedule_work(&adapter->reset_task); in atl1e_tx_timeout()
258 * atl1e_set_multi - Multicast and Promiscuous mode set
264 * promiscuous mode, and all-multi behavior.
269 struct atl1e_hw *hw = &adapter->hw; in atl1e_set_multi()
277 if (netdev->flags & IFF_PROMISC) { in atl1e_set_multi()
279 } else if (netdev->flags & IFF_ALLMULTI) { in atl1e_set_multi()
294 hash_value = atl1e_hash_mc_addr(hw, ha->addr); in atl1e_set_multi()
317 netdev_dbg(adapter->netdev, "%s\n", __func__); in atl1e_rx_mode()
320 mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); in atl1e_rx_mode()
322 AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); in atl1e_rx_mode()
344 netdev_dbg(adapter->netdev, "%s\n", __func__); in atl1e_vlan_mode()
347 mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); in atl1e_vlan_mode()
349 AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); in atl1e_vlan_mode()
355 netdev_dbg(adapter->netdev, "%s\n", __func__); in atl1e_restore_vlan()
356 atl1e_vlan_mode(adapter->netdev, adapter->netdev->features); in atl1e_restore_vlan()
360 * atl1e_set_mac_addr - Change the Ethernet Address of the NIC
371 if (!is_valid_ether_addr(addr->sa_data)) in atl1e_set_mac_addr()
372 return -EADDRNOTAVAIL; in atl1e_set_mac_addr()
375 return -EBUSY; in atl1e_set_mac_addr()
377 eth_hw_addr_set(netdev, addr->sa_data); in atl1e_set_mac_addr()
378 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); in atl1e_set_mac_addr()
380 atl1e_hw_set_mac_addr(&adapter->hw); in atl1e_set_mac_addr()
403 netdev_features_t changed = netdev->features ^ features; in atl1e_set_features()
416 * atl1e_change_mtu - Change the Maximum Transfer Unit
429 while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) in atl1e_change_mtu()
431 WRITE_ONCE(netdev->mtu, new_mtu); in atl1e_change_mtu()
432 adapter->hw.max_frame_size = new_mtu; in atl1e_change_mtu()
433 adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3; in atl1e_change_mtu()
436 clear_bit(__AT_RESETTING, &adapter->flags); in atl1e_change_mtu()
449 atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); in atl1e_mdio_read()
458 if (atl1e_write_phy_reg(&adapter->hw, in atl1e_mdio_write()
472 return -EINVAL; in atl1e_mii_ioctl()
474 spin_lock_irqsave(&adapter->mdio_lock, flags); in atl1e_mii_ioctl()
477 data->phy_id = 0; in atl1e_mii_ioctl()
481 if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, in atl1e_mii_ioctl()
482 &data->val_out)) { in atl1e_mii_ioctl()
483 retval = -EIO; in atl1e_mii_ioctl()
489 if (data->reg_num & ~(0x1F)) { in atl1e_mii_ioctl()
490 retval = -EFAULT; in atl1e_mii_ioctl()
494 netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n", in atl1e_mii_ioctl()
495 data->reg_num, data->val_in); in atl1e_mii_ioctl()
496 if (atl1e_write_phy_reg(&adapter->hw, in atl1e_mii_ioctl()
497 data->reg_num, data->val_in)) { in atl1e_mii_ioctl()
498 retval = -EIO; in atl1e_mii_ioctl()
504 retval = -EOPNOTSUPP; in atl1e_mii_ioctl()
508 spin_unlock_irqrestore(&adapter->mdio_lock, flags); in atl1e_mii_ioctl()
521 return -EOPNOTSUPP; in atl1e_ioctl()
544 * atl1e_alloc_queues - Allocate memory for all rings
554 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
563 struct atl1e_hw *hw = &adapter->hw; in atl1e_sw_init()
564 struct pci_dev *pdev = adapter->pdev; in atl1e_sw_init()
567 adapter->wol = 0; in atl1e_sw_init()
568 adapter->link_speed = SPEED_0; /* hardware init */ in atl1e_sw_init()
569 adapter->link_duplex = FULL_DUPLEX; in atl1e_sw_init()
570 adapter->num_rx_queues = 1; in atl1e_sw_init()
573 hw->vendor_id = pdev->vendor; in atl1e_sw_init()
574 hw->device_id = pdev->device; in atl1e_sw_init()
575 hw->subsystem_vendor_id = pdev->subsystem_vendor; in atl1e_sw_init()
576 hw->subsystem_id = pdev->subsystem_device; in atl1e_sw_init()
577 hw->revision_id = pdev->revision; in atl1e_sw_init()
579 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); in atl1e_sw_init()
583 if (hw->revision_id >= 0xF0) { in atl1e_sw_init()
584 hw->nic_type = athr_l2e_revB; in atl1e_sw_init()
587 hw->nic_type = athr_l1e; in atl1e_sw_init()
589 hw->nic_type = athr_l2e_revA; in atl1e_sw_init()
595 hw->emi_ca = true; in atl1e_sw_init()
597 hw->emi_ca = false; in atl1e_sw_init()
599 hw->phy_configured = false; in atl1e_sw_init()
600 hw->preamble_len = 7; in atl1e_sw_init()
601 hw->max_frame_size = adapter->netdev->mtu; in atl1e_sw_init()
602 hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN + in atl1e_sw_init()
605 hw->rrs_type = atl1e_rrs_disable; in atl1e_sw_init()
606 hw->indirect_tab = 0; in atl1e_sw_init()
607 hw->base_cpu = 0; in atl1e_sw_init()
611 hw->ict = 50000; /* 100ms */ in atl1e_sw_init()
612 hw->smb_timer = 200000; /* 200ms */ in atl1e_sw_init()
613 hw->tpd_burst = 5; in atl1e_sw_init()
614 hw->rrd_thresh = 1; in atl1e_sw_init()
615 hw->tpd_thresh = adapter->tx_ring.count / 2; in atl1e_sw_init()
616 hw->rx_count_down = 4; /* 2us resolution */ in atl1e_sw_init()
617 hw->tx_count_down = hw->imt * 4 / 3; in atl1e_sw_init()
618 hw->dmar_block = atl1e_dma_req_1024; in atl1e_sw_init()
619 hw->dmaw_block = atl1e_dma_req_1024; in atl1e_sw_init()
620 hw->dmar_dly_cnt = 15; in atl1e_sw_init()
621 hw->dmaw_dly_cnt = 4; in atl1e_sw_init()
624 netdev_err(adapter->netdev, "Unable to allocate memory for queues\n"); in atl1e_sw_init()
625 return -ENOMEM; in atl1e_sw_init()
628 atomic_set(&adapter->irq_sem, 1); in atl1e_sw_init()
629 spin_lock_init(&adapter->mdio_lock); in atl1e_sw_init()
631 set_bit(__AT_DOWN, &adapter->flags); in atl1e_sw_init()
637 * atl1e_clean_tx_ring - Free Tx-skb
642 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; in atl1e_clean_tx_ring()
644 struct pci_dev *pdev = adapter->pdev; in atl1e_clean_tx_ring()
647 if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL) in atl1e_clean_tx_ring()
650 ring_count = tx_ring->count; in atl1e_clean_tx_ring()
653 tx_buffer = &tx_ring->tx_buffer[index]; in atl1e_clean_tx_ring()
654 if (tx_buffer->dma) { in atl1e_clean_tx_ring()
655 if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) in atl1e_clean_tx_ring()
656 dma_unmap_single(&pdev->dev, tx_buffer->dma, in atl1e_clean_tx_ring()
657 tx_buffer->length, in atl1e_clean_tx_ring()
659 else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) in atl1e_clean_tx_ring()
660 dma_unmap_page(&pdev->dev, tx_buffer->dma, in atl1e_clean_tx_ring()
661 tx_buffer->length, in atl1e_clean_tx_ring()
663 tx_buffer->dma = 0; in atl1e_clean_tx_ring()
666 /* second free skb */ in atl1e_clean_tx_ring()
668 tx_buffer = &tx_ring->tx_buffer[index]; in atl1e_clean_tx_ring()
669 if (tx_buffer->skb) { in atl1e_clean_tx_ring()
670 dev_kfree_skb_any(tx_buffer->skb); in atl1e_clean_tx_ring()
671 tx_buffer->skb = NULL; in atl1e_clean_tx_ring()
674 /* Zero out Tx-buffers */ in atl1e_clean_tx_ring()
675 memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) * in atl1e_clean_tx_ring()
677 memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) * in atl1e_clean_tx_ring()
682 * atl1e_clean_rx_ring - Free rx-reservation skbs
688 &adapter->rx_ring; in atl1e_clean_rx_ring()
689 struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc; in atl1e_clean_rx_ring()
693 if (adapter->ring_vir_addr == NULL) in atl1e_clean_rx_ring()
696 for (i = 0; i < adapter->num_rx_queues; i++) { in atl1e_clean_rx_ring()
700 rx_ring->real_page_size); in atl1e_clean_rx_ring()
708 *ring_size = ((u32)(adapter->tx_ring.count * in atl1e_cal_ring_size()
711 + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE * in atl1e_cal_ring_size()
712 adapter->num_rx_queues + 31 in atl1e_cal_ring_size()
714 + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) * in atl1e_cal_ring_size()
723 rx_ring = &adapter->rx_ring; in atl1e_init_ring_resources()
725 rx_ring->real_page_size = adapter->rx_ring.page_size in atl1e_init_ring_resources()
726 + adapter->hw.max_frame_size in atl1e_init_ring_resources()
729 rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32); in atl1e_init_ring_resources()
730 atl1e_cal_ring_size(adapter, &adapter->ring_size); in atl1e_init_ring_resources()
732 adapter->ring_vir_addr = NULL; in atl1e_init_ring_resources()
733 adapter->rx_ring.desc = NULL; in atl1e_init_ring_resources()
734 rwlock_init(&adapter->tx_ring.tx_lock); in atl1e_init_ring_resources()
747 tx_ring = &adapter->tx_ring; in atl1e_init_ring_ptrs()
748 rx_ring = &adapter->rx_ring; in atl1e_init_ring_ptrs()
749 rx_page_desc = rx_ring->rx_page_desc; in atl1e_init_ring_ptrs()
751 tx_ring->next_to_use = 0; in atl1e_init_ring_ptrs()
752 atomic_set(&tx_ring->next_to_clean, 0); in atl1e_init_ring_ptrs()
754 for (i = 0; i < adapter->num_rx_queues; i++) { in atl1e_init_ring_ptrs()
765 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
768 * Free all transmit software resources
772 struct pci_dev *pdev = adapter->pdev; in atl1e_free_ring_resources()
777 if (adapter->ring_vir_addr) { in atl1e_free_ring_resources()
778 dma_free_coherent(&pdev->dev, adapter->ring_size, in atl1e_free_ring_resources()
779 adapter->ring_vir_addr, adapter->ring_dma); in atl1e_free_ring_resources()
780 adapter->ring_vir_addr = NULL; in atl1e_free_ring_resources()
783 if (adapter->tx_ring.tx_buffer) { in atl1e_free_ring_resources()
784 kfree(adapter->tx_ring.tx_buffer); in atl1e_free_ring_resources()
785 adapter->tx_ring.tx_buffer = NULL; in atl1e_free_ring_resources()
790 * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources
797 struct pci_dev *pdev = adapter->pdev; in atl1e_setup_ring_resources()
805 if (adapter->ring_vir_addr != NULL) in atl1e_setup_ring_resources()
808 tx_ring = &adapter->tx_ring; in atl1e_setup_ring_resources()
809 rx_ring = &adapter->rx_ring; in atl1e_setup_ring_resources()
813 size = adapter->ring_size; in atl1e_setup_ring_resources()
814 adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev, in atl1e_setup_ring_resources()
815 adapter->ring_size, in atl1e_setup_ring_resources()
816 &adapter->ring_dma, GFP_KERNEL); in atl1e_setup_ring_resources()
817 if (adapter->ring_vir_addr == NULL) { in atl1e_setup_ring_resources()
818 netdev_err(adapter->netdev, in atl1e_setup_ring_resources()
820 return -ENOMEM; in atl1e_setup_ring_resources()
823 rx_page_desc = rx_ring->rx_page_desc; in atl1e_setup_ring_resources()
826 tx_ring->dma = roundup(adapter->ring_dma, 8); in atl1e_setup_ring_resources()
827 offset = tx_ring->dma - adapter->ring_dma; in atl1e_setup_ring_resources()
828 tx_ring->desc = adapter->ring_vir_addr + offset; in atl1e_setup_ring_resources()
829 size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count); in atl1e_setup_ring_resources()
830 tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL); in atl1e_setup_ring_resources()
831 if (tx_ring->tx_buffer == NULL) { in atl1e_setup_ring_resources()
832 err = -ENOMEM; in atl1e_setup_ring_resources()
836 /* Init RXF-Pages */ in atl1e_setup_ring_resources()
837 offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count); in atl1e_setup_ring_resources()
840 for (i = 0; i < adapter->num_rx_queues; i++) { in atl1e_setup_ring_resources()
843 adapter->ring_dma + offset; in atl1e_setup_ring_resources()
845 adapter->ring_vir_addr + offset; in atl1e_setup_ring_resources()
846 offset += rx_ring->real_page_size; in atl1e_setup_ring_resources()
851 tx_ring->cmb_dma = adapter->ring_dma + offset; in atl1e_setup_ring_resources()
852 tx_ring->cmb = adapter->ring_vir_addr + offset; in atl1e_setup_ring_resources()
855 for (i = 0; i < adapter->num_rx_queues; i++) { in atl1e_setup_ring_resources()
858 adapter->ring_dma + offset; in atl1e_setup_ring_resources()
860 adapter->ring_vir_addr + offset; in atl1e_setup_ring_resources()
865 if (unlikely(offset > adapter->ring_size)) { in atl1e_setup_ring_resources()
866 netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", in atl1e_setup_ring_resources()
867 offset, adapter->ring_size); in atl1e_setup_ring_resources()
868 err = -1; in atl1e_setup_ring_resources()
874 kfree(tx_ring->tx_buffer); in atl1e_setup_ring_resources()
875 tx_ring->tx_buffer = NULL; in atl1e_setup_ring_resources()
877 if (adapter->ring_vir_addr != NULL) { in atl1e_setup_ring_resources()
878 dma_free_coherent(&pdev->dev, adapter->ring_size, in atl1e_setup_ring_resources()
879 adapter->ring_vir_addr, adapter->ring_dma); in atl1e_setup_ring_resources()
880 adapter->ring_vir_addr = NULL; in atl1e_setup_ring_resources()
888 struct atl1e_hw *hw = &adapter->hw; in atl1e_configure_des_ring()
889 struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; in atl1e_configure_des_ring()
890 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; in atl1e_configure_des_ring()
895 (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32)); in atl1e_configure_des_ring()
897 (u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK)); in atl1e_configure_des_ring()
898 AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count)); in atl1e_configure_des_ring()
900 (u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK)); in atl1e_configure_des_ring()
902 rx_page_desc = rx_ring->rx_page_desc; in atl1e_configure_des_ring()
906 (u32)((adapter->ring_dma & in atl1e_configure_des_ring()
924 AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size); in atl1e_configure_des_ring()
931 struct atl1e_hw *hw = &adapter->hw; in atl1e_configure_tx()
938 if (hw->nic_type != athr_l2e_revB) { in atl1e_configure_tx()
940 if (hw->max_frame_size <= 1500) { in atl1e_configure_tx()
941 jumbo_thresh = hw->max_frame_size + extra_size; in atl1e_configure_tx()
942 } else if (hw->max_frame_size < 6*1024) { in atl1e_configure_tx()
944 (hw->max_frame_size + extra_size) * 2 / 3; in atl1e_configure_tx()
946 jumbo_thresh = (hw->max_frame_size + extra_size) / 2; in atl1e_configure_tx()
956 hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block); in atl1e_configure_tx()
960 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); in atl1e_configure_tx()
962 if (hw->nic_type != athr_l2e_revB) in atl1e_configure_tx()
964 atl1e_pay_load_size[hw->dmar_block]); in atl1e_configure_tx()
967 (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK) in atl1e_configure_tx()
974 struct atl1e_hw *hw = &adapter->hw; in atl1e_configure_rx()
981 if (hw->nic_type != athr_l2e_revB) { in atl1e_configure_rx()
983 (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << in atl1e_configure_rx()
1000 AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); in atl1e_configure_rx()
1001 AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu); in atl1e_configure_rx()
1003 if (hw->rrs_type & atl1e_rrs_ipv4) in atl1e_configure_rx()
1006 if (hw->rrs_type & atl1e_rrs_ipv4_tcp) in atl1e_configure_rx()
1009 if (hw->rrs_type & atl1e_rrs_ipv6) in atl1e_configure_rx()
1012 if (hw->rrs_type & atl1e_rrs_ipv6_tcp) in atl1e_configure_rx()
1015 if (hw->rrs_type != atl1e_rrs_disable) in atl1e_configure_rx()
1027 struct atl1e_hw *hw = &adapter->hw; in atl1e_configure_dma()
1031 dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) in atl1e_configure_dma()
1033 dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) in atl1e_configure_dma()
1036 dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK) in atl1e_configure_dma()
1038 dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK) in atl1e_configure_dma()
1047 struct atl1e_hw *hw = &adapter->hw; in atl1e_setup_mac_ctrl()
1048 struct net_device *netdev = adapter->netdev; in atl1e_setup_mac_ctrl()
1054 if (FULL_DUPLEX == adapter->link_duplex) in atl1e_setup_mac_ctrl()
1057 value |= ((u32)((SPEED_1000 == adapter->link_speed) ? in atl1e_setup_mac_ctrl()
1063 value |= (((u32)adapter->hw.preamble_len & in atl1e_setup_mac_ctrl()
1066 __atl1e_vlan_mode(netdev->features, &value); in atl1e_setup_mac_ctrl()
1069 if (netdev->flags & IFF_PROMISC) in atl1e_setup_mac_ctrl()
1071 if (netdev->flags & IFF_ALLMULTI) in atl1e_setup_mac_ctrl()
1073 if (netdev->features & NETIF_F_RXALL) in atl1e_setup_mac_ctrl()
1079 * atl1e_configure - Configure Transmit&Receive Unit after Reset
1086 struct atl1e_hw *hw = &adapter->hw; in atl1e_configure()
1107 AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt); in atl1e_configure()
1108 AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt); in atl1e_configure()
1113 AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh); in atl1e_configure()
1114 AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh); in atl1e_configure()
1115 AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down); in atl1e_configure()
1116 AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down); in atl1e_configure()
1119 AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict); in atl1e_configure()
1122 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + in atl1e_configure()
1135 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer); in atl1e_configure()
1139 netdev_err(adapter->netdev, in atl1e_configure()
1141 return -1; in atl1e_configure()
1149 * atl1e_get_stats - Get System Network Statistics
1158 struct atl1e_hw_stats *hw_stats = &adapter->hw_stats; in atl1e_get_stats()
1159 struct net_device_stats *net_stats = &netdev->stats; in atl1e_get_stats()
1161 net_stats->rx_bytes = hw_stats->rx_byte_cnt; in atl1e_get_stats()
1162 net_stats->tx_bytes = hw_stats->tx_byte_cnt; in atl1e_get_stats()
1163 net_stats->multicast = hw_stats->rx_mcast; in atl1e_get_stats()
1164 net_stats->collisions = hw_stats->tx_1_col + in atl1e_get_stats()
1165 hw_stats->tx_2_col + in atl1e_get_stats()
1166 hw_stats->tx_late_col + in atl1e_get_stats()
1167 hw_stats->tx_abort_col; in atl1e_get_stats()
1169 net_stats->rx_errors = hw_stats->rx_frag + in atl1e_get_stats()
1170 hw_stats->rx_fcs_err + in atl1e_get_stats()
1171 hw_stats->rx_len_err + in atl1e_get_stats()
1172 hw_stats->rx_sz_ov + in atl1e_get_stats()
1173 hw_stats->rx_rrd_ov + in atl1e_get_stats()
1174 hw_stats->rx_align_err + in atl1e_get_stats()
1175 hw_stats->rx_rxf_ov; in atl1e_get_stats()
1177 net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; in atl1e_get_stats()
1178 net_stats->rx_length_errors = hw_stats->rx_len_err; in atl1e_get_stats()
1179 net_stats->rx_crc_errors = hw_stats->rx_fcs_err; in atl1e_get_stats()
1180 net_stats->rx_frame_errors = hw_stats->rx_align_err; in atl1e_get_stats()
1181 net_stats->rx_dropped = hw_stats->rx_rrd_ov; in atl1e_get_stats()
1183 net_stats->tx_errors = hw_stats->tx_late_col + in atl1e_get_stats()
1184 hw_stats->tx_abort_col + in atl1e_get_stats()
1185 hw_stats->tx_underrun + in atl1e_get_stats()
1186 hw_stats->tx_trunc; in atl1e_get_stats()
1188 net_stats->tx_fifo_errors = hw_stats->tx_underrun; in atl1e_get_stats()
1189 net_stats->tx_aborted_errors = hw_stats->tx_abort_col; in atl1e_get_stats()
1190 net_stats->tx_window_errors = hw_stats->tx_late_col; in atl1e_get_stats()
1192 net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; in atl1e_get_stats()
1193 net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; in atl1e_get_stats()
1205 stats_item = &adapter->hw_stats.rx_ok; in atl1e_update_hw_stats()
1207 *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); in atl1e_update_hw_stats()
1213 stats_item = &adapter->hw_stats.tx_ok; in atl1e_update_hw_stats()
1215 *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); in atl1e_update_hw_stats()
1225 spin_lock(&adapter->mdio_lock); in atl1e_clear_phy_int()
1226 atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data); in atl1e_clear_phy_int()
1227 spin_unlock(&adapter->mdio_lock); in atl1e_clear_phy_int()
1232 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; in atl1e_clean_tx_irq()
1234 u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX); in atl1e_clean_tx_irq()
1235 u16 next_to_clean = atomic_read(&tx_ring->next_to_clean); in atl1e_clean_tx_irq()
1238 tx_buffer = &tx_ring->tx_buffer[next_to_clean]; in atl1e_clean_tx_irq()
1239 if (tx_buffer->dma) { in atl1e_clean_tx_irq()
1240 if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) in atl1e_clean_tx_irq()
1241 dma_unmap_single(&adapter->pdev->dev, in atl1e_clean_tx_irq()
1242 tx_buffer->dma, in atl1e_clean_tx_irq()
1243 tx_buffer->length, in atl1e_clean_tx_irq()
1245 else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) in atl1e_clean_tx_irq()
1246 dma_unmap_page(&adapter->pdev->dev, in atl1e_clean_tx_irq()
1247 tx_buffer->dma, in atl1e_clean_tx_irq()
1248 tx_buffer->length, in atl1e_clean_tx_irq()
1250 tx_buffer->dma = 0; in atl1e_clean_tx_irq()
1253 if (tx_buffer->skb) { in atl1e_clean_tx_irq()
1254 dev_consume_skb_irq(tx_buffer->skb); in atl1e_clean_tx_irq()
1255 tx_buffer->skb = NULL; in atl1e_clean_tx_irq()
1258 if (++next_to_clean == tx_ring->count) in atl1e_clean_tx_irq()
1262 atomic_set(&tx_ring->next_to_clean, next_to_clean); in atl1e_clean_tx_irq()
1264 if (netif_queue_stopped(adapter->netdev) && in atl1e_clean_tx_irq()
1265 netif_carrier_ok(adapter->netdev)) { in atl1e_clean_tx_irq()
1266 netif_wake_queue(adapter->netdev); in atl1e_clean_tx_irq()
1273 * atl1e_intr - Interrupt Handler
1281 struct atl1e_hw *hw = &adapter->hw; in atl1e_intr()
1303 netdev_err(adapter->netdev, in atl1e_intr()
1305 if (netif_running(adapter->netdev)) { in atl1e_intr()
1308 schedule_work(&adapter->reset_task); in atl1e_intr()
1315 netdev_err(adapter->netdev, in atl1e_intr()
1319 schedule_work(&adapter->reset_task); in atl1e_intr()
1328 netdev->stats.tx_carrier_errors++; in atl1e_intr()
1346 &adapter->napi))) in atl1e_intr()
1347 __napi_schedule(&adapter->napi); in atl1e_intr()
1349 } while (--max_ints > 0); in atl1e_intr()
1350 /* re-enable Interrupt*/ in atl1e_intr()
1351 AT_WRITE_REG(&adapter->hw, REG_ISR, 0); in atl1e_intr()
1366 pkt_flags = prrs->pkt_flag; in atl1e_rx_checksum()
1367 err_flags = prrs->err_flag; in atl1e_rx_checksum()
1374 if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF)) in atl1e_rx_checksum()
1378 skb->ip_summed = CHECKSUM_UNNECESSARY; in atl1e_rx_checksum()
1391 (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; in atl1e_get_rx_page()
1400 struct net_device *netdev = adapter->netdev; in atl1e_clean_rx_irq()
1401 struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; in atl1e_clean_rx_irq()
1403 (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc; in atl1e_clean_rx_irq()
1409 write_offset = *(rx_page->write_offset_addr); in atl1e_clean_rx_irq()
1410 if (likely(rx_page->read_offset < write_offset)) { in atl1e_clean_rx_irq()
1416 prrs = (struct atl1e_recv_ret_status *) (rx_page->addr + in atl1e_clean_rx_irq()
1417 rx_page->read_offset); in atl1e_clean_rx_irq()
1419 if (prrs->seq_num != rx_page_desc[que].rx_nxseq) { in atl1e_clean_rx_irq()
1422 prrs->seq_num, in atl1e_clean_rx_irq()
1426 AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0, in atl1e_clean_rx_irq()
1427 (((u32)prrs->seq_num) << 16) | in atl1e_clean_rx_irq()
1434 if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) && in atl1e_clean_rx_irq()
1435 !(netdev->features & NETIF_F_RXALL)) { in atl1e_clean_rx_irq()
1436 if (prrs->err_flag & (RRS_ERR_BAD_CRC | in atl1e_clean_rx_irq()
1447 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & in atl1e_clean_rx_irq()
1449 if (likely(!(netdev->features & NETIF_F_RXFCS))) in atl1e_clean_rx_irq()
1450 packet_size -= 4; /* CRC */ in atl1e_clean_rx_irq()
1456 memcpy(skb->data, (u8 *)(prrs + 1), packet_size); in atl1e_clean_rx_irq()
1458 skb->protocol = eth_type_trans(skb, netdev); in atl1e_clean_rx_irq()
1461 if (prrs->pkt_flag & RRS_IS_VLAN_TAG) { in atl1e_clean_rx_irq()
1462 u16 vlan_tag = (prrs->vtag >> 4) | in atl1e_clean_rx_irq()
1463 ((prrs->vtag & 7) << 13) | in atl1e_clean_rx_irq()
1464 ((prrs->vtag & 8) << 9); in atl1e_clean_rx_irq()
1467 prrs->vtag); in atl1e_clean_rx_irq()
1470 napi_gro_receive(&adapter->napi, skb); in atl1e_clean_rx_irq()
1474 rx_page->read_offset += in atl1e_clean_rx_irq()
1475 (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & in atl1e_clean_rx_irq()
1480 if (rx_page->read_offset >= rx_ring->page_size) { in atl1e_clean_rx_irq()
1485 rx_page->read_offset = in atl1e_clean_rx_irq()
1486 *(rx_page->write_offset_addr) = 0; in atl1e_clean_rx_irq()
1490 AT_WRITE_REGB(&adapter->hw, reg_addr, 1); in atl1e_clean_rx_irq()
1494 write_offset = *(rx_page->write_offset_addr); in atl1e_clean_rx_irq()
1495 } while (rx_page->read_offset < write_offset); in atl1e_clean_rx_irq()
1501 if (!test_bit(__AT_DOWN, &adapter->flags)) in atl1e_clean_rx_irq()
1502 schedule_work(&adapter->reset_task); in atl1e_clean_rx_irq()
1506 * atl1e_clean - NAPI Rx polling callback
1518 if (!netif_carrier_ok(adapter->netdev)) in atl1e_clean()
1527 imr_data = AT_READ_REG(&adapter->hw, REG_IMR); in atl1e_clean()
1528 AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); in atl1e_clean()
1530 if (test_bit(__AT_DOWN, &adapter->flags)) { in atl1e_clean()
1531 atomic_dec(&adapter->irq_sem); in atl1e_clean()
1532 netdev_err(adapter->netdev, in atl1e_clean()
1545 * Polling 'interrupt' - used by things like netconsole to send skbs
1546 * without having to re-enable interrupts. It's not called while
1553 disable_irq(adapter->pdev->irq); in atl1e_netpoll()
1554 atl1e_intr(adapter->pdev->irq, netdev); in atl1e_netpoll()
1555 enable_irq(adapter->pdev->irq); in atl1e_netpoll()
1561 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; in atl1e_tpd_avail()
1565 next_to_clean = atomic_read(&tx_ring->next_to_clean); in atl1e_tpd_avail()
1566 next_to_use = tx_ring->next_to_use; in atl1e_tpd_avail()
1569 (next_to_clean - next_to_use - 1) : in atl1e_tpd_avail()
1570 (tx_ring->count + next_to_clean - next_to_use - 1); in atl1e_tpd_avail()
1580 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; in atl1e_get_tpd()
1583 next_to_use = tx_ring->next_to_use; in atl1e_get_tpd()
1584 if (++tx_ring->next_to_use == tx_ring->count) in atl1e_get_tpd()
1585 tx_ring->next_to_use = 0; in atl1e_get_tpd()
1587 memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); in atl1e_get_tpd()
1588 return &tx_ring->desc[next_to_use]; in atl1e_get_tpd()
1594 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; in atl1e_get_tx_buffer()
1596 return &tx_ring->tx_buffer[tpd - tx_ring->desc]; in atl1e_get_tx_buffer()
1607 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in atl1e_cal_tdp_req()
1608 fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in atl1e_cal_tdp_req()
1609 tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); in atl1e_cal_tdp_req()
1613 if (skb->protocol == htons(ETH_P_IP) || in atl1e_cal_tdp_req()
1614 (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) { in atl1e_cal_tdp_req()
1617 tpd_req += ((skb_headlen(skb) - proto_hdr_len + in atl1e_cal_tdp_req()
1618 MAX_TX_BUF_LEN - 1) >> in atl1e_cal_tdp_req()
1641 offload_type = skb_shinfo(skb)->gso_type; in atl1e_tso_csum()
1644 real_len = (((unsigned char *)ip_hdr(skb) - skb->data) in atl1e_tso_csum()
1645 + ntohs(ip_hdr(skb)->tot_len)); in atl1e_tso_csum()
1647 if (real_len < skb->len) { in atl1e_tso_csum()
1654 if (unlikely(skb->len == hdr_len)) { in atl1e_tso_csum()
1656 netdev_warn(adapter->netdev, in atl1e_tso_csum()
1660 ip_hdr(skb)->check = 0; in atl1e_tso_csum()
1661 ip_hdr(skb)->tot_len = 0; in atl1e_tso_csum()
1662 tcp_hdr(skb)->check = ~csum_tcpudp_magic( in atl1e_tso_csum()
1663 ip_hdr(skb)->saddr, in atl1e_tso_csum()
1664 ip_hdr(skb)->daddr, in atl1e_tso_csum()
1666 tpd->word3 |= (ip_hdr(skb)->ihl & in atl1e_tso_csum()
1669 tpd->word3 |= ((tcp_hdrlen(skb) >> 2) & in atl1e_tso_csum()
1672 tpd->word3 |= ((skb_shinfo(skb)->gso_size) & in atl1e_tso_csum()
1674 tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; in atl1e_tso_csum()
1681 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { in atl1e_tso_csum()
1686 netdev_err(adapter->netdev, in atl1e_tso_csum()
1688 return -1; in atl1e_tso_csum()
1690 css = cso + skb->csum_offset; in atl1e_tso_csum()
1691 tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) << in atl1e_tso_csum()
1693 tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) << in atl1e_tso_csum()
1695 tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT; in atl1e_tso_csum()
1714 int ring_start = adapter->tx_ring.next_to_use; in atl1e_tx_map()
1717 nr_frags = skb_shinfo(skb)->nr_frags; in atl1e_tx_map()
1718 segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; in atl1e_tx_map()
1726 tx_buffer->length = map_len; in atl1e_tx_map()
1727 tx_buffer->dma = dma_map_single(&adapter->pdev->dev, in atl1e_tx_map()
1728 skb->data, hdr_len, in atl1e_tx_map()
1730 if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) in atl1e_tx_map()
1731 return -ENOSPC; in atl1e_tx_map()
1735 use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); in atl1e_tx_map()
1736 use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | in atl1e_tx_map()
1737 ((cpu_to_le32(tx_buffer->length) & in atl1e_tx_map()
1751 tx_buffer->skb = NULL; in atl1e_tx_map()
1753 tx_buffer->length = map_len = in atl1e_tx_map()
1754 ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ? in atl1e_tx_map()
1755 MAX_TX_BUF_LEN : (buf_len - mapped_len); in atl1e_tx_map()
1756 tx_buffer->dma = in atl1e_tx_map()
1757 dma_map_single(&adapter->pdev->dev, in atl1e_tx_map()
1758 skb->data + mapped_len, map_len, in atl1e_tx_map()
1761 if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { in atl1e_tx_map()
1763 ring_end = adapter->tx_ring.next_to_use; in atl1e_tx_map()
1764 adapter->tx_ring.next_to_use = ring_start; in atl1e_tx_map()
1765 while (adapter->tx_ring.next_to_use != ring_end) { in atl1e_tx_map()
1768 dma_unmap_single(&adapter->pdev->dev, in atl1e_tx_map()
1769 tx_buffer->dma, in atl1e_tx_map()
1770 tx_buffer->length, in atl1e_tx_map()
1774 adapter->tx_ring.next_to_use = ring_start; in atl1e_tx_map()
1775 return -ENOSPC; in atl1e_tx_map()
1780 use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); in atl1e_tx_map()
1781 use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | in atl1e_tx_map()
1782 ((cpu_to_le32(tx_buffer->length) & in atl1e_tx_map()
1787 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in atl1e_tx_map()
1793 seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; in atl1e_tx_map()
1799 BUG_ON(tx_buffer->skb); in atl1e_tx_map()
1801 tx_buffer->skb = NULL; in atl1e_tx_map()
1802 tx_buffer->length = in atl1e_tx_map()
1805 buf_len -= tx_buffer->length; in atl1e_tx_map()
1807 tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev, in atl1e_tx_map()
1810 tx_buffer->length, in atl1e_tx_map()
1813 if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { in atl1e_tx_map()
1815 ring_end = adapter->tx_ring.next_to_use; in atl1e_tx_map()
1816 adapter->tx_ring.next_to_use = ring_start; in atl1e_tx_map()
1817 while (adapter->tx_ring.next_to_use != ring_end) { in atl1e_tx_map()
1820 dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma, in atl1e_tx_map()
1821 tx_buffer->length, DMA_TO_DEVICE); in atl1e_tx_map()
1825 adapter->tx_ring.next_to_use = ring_start; in atl1e_tx_map()
1826 return -ENOSPC; in atl1e_tx_map()
1830 use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); in atl1e_tx_map()
1831 use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | in atl1e_tx_map()
1832 ((cpu_to_le32(tx_buffer->length) & in atl1e_tx_map()
1837 if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK) in atl1e_tx_map()
1839 tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; in atl1e_tx_map()
1842 use_tpd->word3 |= 1 << TPD_EOP_SHIFT; in atl1e_tx_map()
1844 so it will be free after unmap */ in atl1e_tx_map()
1845 tx_buffer->skb = skb; in atl1e_tx_map()
1852 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; in atl1e_tx_queue()
1855 * applicable for weak-ordered memory model archs, in atl1e_tx_queue()
1856 * such as IA-64). */ in atl1e_tx_queue()
1858 AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use); in atl1e_tx_queue()
1868 if (test_bit(__AT_DOWN, &adapter->flags)) { in atl1e_xmit_frame()
1873 if (unlikely(skb->len <= 0)) { in atl1e_xmit_frame()
1891 tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; in atl1e_xmit_frame()
1893 tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) << in atl1e_xmit_frame()
1897 if (skb->protocol == htons(ETH_P_8021Q)) in atl1e_xmit_frame()
1898 tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT; in atl1e_xmit_frame()
1901 tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */ in atl1e_xmit_frame()
1921 struct net_device *netdev = adapter->netdev; in atl1e_free_irq()
1923 free_irq(adapter->pdev->irq, netdev); in atl1e_free_irq()
1928 struct pci_dev *pdev = adapter->pdev; in atl1e_request_irq()
1929 struct net_device *netdev = adapter->netdev; in atl1e_request_irq()
1932 err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, in atl1e_request_irq()
1935 netdev_dbg(adapter->netdev, in atl1e_request_irq()
1945 struct net_device *netdev = adapter->netdev; in atl1e_up()
1950 err = atl1e_init_hw(&adapter->hw); in atl1e_up()
1952 err = -EIO; in atl1e_up()
1960 err = -EIO; in atl1e_up()
1964 clear_bit(__AT_DOWN, &adapter->flags); in atl1e_up()
1965 napi_enable(&adapter->napi); in atl1e_up()
1967 val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL); in atl1e_up()
1968 AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, in atl1e_up()
1977 struct net_device *netdev = adapter->netdev; in atl1e_down()
1981 set_bit(__AT_DOWN, &adapter->flags); in atl1e_down()
1986 atl1e_reset_hw(&adapter->hw); in atl1e_down()
1989 napi_disable(&adapter->napi); in atl1e_down()
1994 adapter->link_speed = SPEED_0; in atl1e_down()
1995 adapter->link_duplex = -1; in atl1e_down()
2001 * atl1e_open - Called when a network interface is made active
2018 if (test_bit(__AT_TESTING, &adapter->flags)) in atl1e_open()
2019 return -EBUSY; in atl1e_open()
2041 atl1e_reset_hw(&adapter->hw); in atl1e_open()
2047 * atl1e_close - Disables a network interface
2052 * The close entry point is called when an interface is de-activated
2061 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); in atl1e_close()
2073 struct atl1e_hw *hw = &adapter->hw; in atl1e_suspend()
2080 u32 wufc = adapter->wol; in atl1e_suspend()
2087 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); in atl1e_suspend()
2109 netdev_dbg(adapter->netdev, "set phy register failed\n"); in atl1e_suspend()
2113 hw->phy_configured = false; /* re-init PHY when resume */ in atl1e_suspend()
2131 netdev_dbg(adapter->netdev, in atl1e_suspend()
2137 netdev_dbg(adapter->netdev, in atl1e_suspend()
2148 mac_ctrl_data |= (((u32)adapter->hw.preamble_len & in atl1e_suspend()
2152 __atl1e_vlan_mode(netdev->features, &mac_ctrl_data); in atl1e_suspend()
2158 netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n", in atl1e_suspend()
2181 hw->phy_configured = false; /* re-init PHY when resume */ in atl1e_suspend()
2209 netdev_err(adapter->netdev, in atl1e_resume()
2216 AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ in atl1e_resume()
2221 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); in atl1e_resume()
2229 atl1e_reset_hw(&adapter->hw); in atl1e_resume()
2266 SET_NETDEV_DEV(netdev, &pdev->dev); in atl1e_init_netdev()
2269 netdev->netdev_ops = &atl1e_netdev_ops; in atl1e_init_netdev()
2271 netdev->watchdog_timeo = AT_TX_WATCHDOG; in atl1e_init_netdev()
2272 /* MTU range: 42 - 8170 */ in atl1e_init_netdev()
2273 netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN); in atl1e_init_netdev()
2274 netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - in atl1e_init_netdev()
2278 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | in atl1e_init_netdev()
2280 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX; in atl1e_init_netdev()
2282 netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS; in atl1e_init_netdev()
2287 * atl1e_probe - Device Initialization Routine
2307 return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); in atl1e_probe()
2310 * The atl1e chip can DMA to 64-bit addresses, but it uses a single in atl1e_probe()
2314 * Supporting 64-bit DMA on this hardware is more trouble than it's in atl1e_probe()
2315 * worth. It is far easier to limit to 32-bit DMA than update in atl1e_probe()
2317 * fixed-high-32-bit system. in atl1e_probe()
2319 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in atl1e_probe()
2321 dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); in atl1e_probe()
2327 dev_err(&pdev->dev, "cannot obtain PCI resources\n"); in atl1e_probe()
2335 err = -ENOMEM; in atl1e_probe()
2345 adapter->bd_number = cards_found; in atl1e_probe()
2346 adapter->netdev = netdev; in atl1e_probe()
2347 adapter->pdev = pdev; in atl1e_probe()
2348 adapter->hw.adapter = adapter; in atl1e_probe()
2349 adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0); in atl1e_probe()
2350 if (!adapter->hw.hw_addr) { in atl1e_probe()
2351 err = -EIO; in atl1e_probe()
2357 adapter->mii.dev = netdev; in atl1e_probe()
2358 adapter->mii.mdio_read = atl1e_mdio_read; in atl1e_probe()
2359 adapter->mii.mdio_write = atl1e_mdio_write; in atl1e_probe()
2360 adapter->mii.phy_id_mask = 0x1f; in atl1e_probe()
2361 adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; in atl1e_probe()
2363 netif_napi_add(netdev, &adapter->napi, atl1e_clean); in atl1e_probe()
2365 timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0); in atl1e_probe()
2372 * Enables bus-mastering on the device and calls in atl1e_probe()
2384 atl1e_phy_init(&adapter->hw); in atl1e_probe()
2387 err = atl1e_reset_hw(&adapter->hw); in atl1e_probe()
2389 err = -EIO; in atl1e_probe()
2393 if (atl1e_read_mac_addr(&adapter->hw) != 0) { in atl1e_probe()
2394 err = -EIO; in atl1e_probe()
2399 eth_hw_addr_set(netdev, adapter->hw.mac_addr); in atl1e_probe()
2400 netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr); in atl1e_probe()
2402 INIT_WORK(&adapter->reset_task, atl1e_reset_task); in atl1e_probe()
2403 INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); in atl1e_probe()
2423 pci_iounmap(pdev, adapter->hw.hw_addr); in atl1e_probe()
2436 * atl1e_remove - Device Removal Routine
2441 * Hot-Plug event, or because the driver is going to be removed from
2453 set_bit(__AT_DOWN, &adapter->flags); in atl1e_remove()
2460 atl1e_force_ps(&adapter->hw); in atl1e_remove()
2461 pci_iounmap(pdev, adapter->hw.hw_addr); in atl1e_remove()
2468 * atl1e_io_error_detected - called when PCI error is detected
2496 * atl1e_io_slot_reset - called after the pci bus has been reset.
2499 * Restart the card from scratch, as if from a cold-boot. Implementation
2500 * resembles the first-half of the e1000_resume routine.
2508 netdev_err(adapter->netdev, in atl1e_io_slot_reset()
2509 "Cannot re-enable PCI device after reset\n"); in atl1e_io_slot_reset()
2517 atl1e_reset_hw(&adapter->hw); in atl1e_io_slot_reset()
2523 * atl1e_io_resume - called when traffic can start flowing again.
2528 * second-half of the atl1e_resume routine.
2537 netdev_err(adapter->netdev, in atl1e_io_resume()