1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2007 - 2018 Intel Corporation. */
104 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
106 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
130 "legacy-rx",
139 struct e1000_hw *hw = &adapter->hw; in igb_get_link_ksettings()
140 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; in igb_get_link_ksettings()
141 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; in igb_get_link_ksettings()
146 status = pm_runtime_suspended(&adapter->pdev->dev) ? in igb_get_link_ksettings()
148 if (hw->phy.media_type == e1000_media_type_copper) { in igb_get_link_ksettings()
160 if (hw->mac.autoneg == 1) { in igb_get_link_ksettings()
163 advertising |= hw->phy.autoneg_advertised; in igb_get_link_ksettings()
166 cmd->base.port = PORT_TP; in igb_get_link_ksettings()
167 cmd->base.phy_address = hw->phy.addr; in igb_get_link_ksettings()
175 if (hw->mac.type == e1000_i354) { in igb_get_link_ksettings()
176 if ((hw->device_id == in igb_get_link_ksettings()
185 if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { in igb_get_link_ksettings()
189 if (hw->mac.autoneg == 1) in igb_get_link_ksettings()
192 cmd->base.port = PORT_FIBRE; in igb_get_link_ksettings()
194 if (hw->mac.autoneg != 1) in igb_get_link_ksettings()
198 switch (hw->fc.requested_mode) { in igb_get_link_ksettings()
225 hw->phy.media_type != e1000_media_type_copper) in igb_get_link_ksettings()
226 cmd->base.duplex = DUPLEX_FULL; in igb_get_link_ksettings()
228 cmd->base.duplex = DUPLEX_HALF; in igb_get_link_ksettings()
231 cmd->base.duplex = DUPLEX_UNKNOWN; in igb_get_link_ksettings()
233 cmd->base.speed = speed; in igb_get_link_ksettings()
234 if ((hw->phy.media_type == e1000_media_type_fiber) || in igb_get_link_ksettings()
235 hw->mac.autoneg) in igb_get_link_ksettings()
236 cmd->base.autoneg = AUTONEG_ENABLE; in igb_get_link_ksettings()
238 cmd->base.autoneg = AUTONEG_DISABLE; in igb_get_link_ksettings()
240 /* MDI-X => 2; MDI =>1; Invalid =>0 */ in igb_get_link_ksettings()
241 if (hw->phy.media_type == e1000_media_type_copper) in igb_get_link_ksettings()
242 cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : in igb_get_link_ksettings()
245 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; in igb_get_link_ksettings()
247 if (hw->phy.mdix == AUTO_ALL_MODES) in igb_get_link_ksettings()
248 cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; in igb_get_link_ksettings()
250 cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; in igb_get_link_ksettings()
252 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, in igb_get_link_ksettings()
254 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, in igb_get_link_ksettings()
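
igb still tracks supported/advertised modes as legacy u32 ADVERTISED_* masks and converts them to ethtool link-mode bitmaps at the end of igb_get_link_ksettings(). As a companion, here is a minimal userspace sketch that queries the same information through the legacy ETHTOOL_GSET ioctl, which the kernel services via this driver callback. The interface name "eth0" is an assumption; DUPLEX_UNKNOWN handling is omitted for brevity.

/* Minimal userspace sketch: read speed/duplex/autoneg via ETHTOOL_GSET. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (char *)&ecmd;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GSET");
		return 1;
	}
	printf("speed %u Mb/s, duplex %s, autoneg %s, port %u\n",
	       ethtool_cmd_speed(&ecmd),
	       ecmd.duplex == DUPLEX_FULL ? "full" : "half",
	       ecmd.autoneg == AUTONEG_ENABLE ? "on" : "off",
	       ecmd.port);
	close(fd);
	return 0;
}
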
264 struct e1000_hw *hw = &adapter->hw; in igb_set_link_ksettings()
271 dev_err(&adapter->pdev->dev, in igb_set_link_ksettings()
273 return -EINVAL; in igb_set_link_ksettings()
280 if (cmd->base.eth_tp_mdix_ctrl) { in igb_set_link_ksettings()
281 if (hw->phy.media_type != e1000_media_type_copper) in igb_set_link_ksettings()
282 return -EOPNOTSUPP; in igb_set_link_ksettings()
284 if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && in igb_set_link_ksettings()
285 (cmd->base.autoneg != AUTONEG_ENABLE)) { in igb_set_link_ksettings()
286 …dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or dupl… in igb_set_link_ksettings()
287 return -EINVAL; in igb_set_link_ksettings()
291 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_set_link_ksettings()
295 cmd->link_modes.advertising); in igb_set_link_ksettings()
297 if (cmd->base.autoneg == AUTONEG_ENABLE) { in igb_set_link_ksettings()
298 hw->mac.autoneg = 1; in igb_set_link_ksettings()
299 if (hw->phy.media_type == e1000_media_type_fiber) { in igb_set_link_ksettings()
300 hw->phy.autoneg_advertised = advertising | in igb_set_link_ksettings()
303 switch (adapter->link_speed) { in igb_set_link_ksettings()
305 hw->phy.autoneg_advertised = in igb_set_link_ksettings()
309 hw->phy.autoneg_advertised = in igb_set_link_ksettings()
313 hw->phy.autoneg_advertised = in igb_set_link_ksettings()
320 hw->phy.autoneg_advertised = advertising | in igb_set_link_ksettings()
324 advertising = hw->phy.autoneg_advertised; in igb_set_link_ksettings()
325 if (adapter->fc_autoneg) in igb_set_link_ksettings()
326 hw->fc.requested_mode = e1000_fc_default; in igb_set_link_ksettings()
328 u32 speed = cmd->base.speed; in igb_set_link_ksettings()
330 if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) { in igb_set_link_ksettings()
331 clear_bit(__IGB_RESETTING, &adapter->state); in igb_set_link_ksettings()
332 return -EINVAL; in igb_set_link_ksettings()
336 /* MDI-X => 2; MDI => 1; Auto => 3 */ in igb_set_link_ksettings()
337 if (cmd->base.eth_tp_mdix_ctrl) { in igb_set_link_ksettings()
341 if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) in igb_set_link_ksettings()
342 hw->phy.mdix = AUTO_ALL_MODES; in igb_set_link_ksettings()
344 hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; in igb_set_link_ksettings()
348 if (netif_running(adapter->netdev)) { in igb_set_link_ksettings()
354 clear_bit(__IGB_RESETTING, &adapter->state); in igb_set_link_ksettings()
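
The set path above only accepts a forced MDI/MDI-X state on copper links with autoneg enabled, and the ethtool control value is folded directly into the PHY setting ("auto" widens to the PHY's any-mode value). A standalone sketch of that translation, with PHY_MDIX_AUTO standing in for the driver's AUTO_ALL_MODES constant:

#include <linux/ethtool.h>	/* ETH_TP_MDI* control values */

#define PHY_MDIX_AUTO 0		/* assumed stand-in for AUTO_ALL_MODES */

/* ETH_TP_MDI (1) = force MDI, ETH_TP_MDI_X (2) = force MDI-X,
 * ETH_TP_MDI_AUTO (3) = let the PHY pick. */
static unsigned char mdix_from_ethtool(unsigned char eth_tp_mdix_ctrl)
{
	if (eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
		return PHY_MDIX_AUTO;
	return eth_tp_mdix_ctrl;
}
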
361 struct e1000_mac_info *mac = &adapter->hw.mac; in igb_get_link()
370 mac->get_link_status = 1; in igb_get_link()
379 struct e1000_hw *hw = &adapter->hw; in igb_get_pauseparam()
381 pause->autoneg = in igb_get_pauseparam()
382 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); in igb_get_pauseparam()
384 if (hw->fc.current_mode == e1000_fc_rx_pause) in igb_get_pauseparam()
385 pause->rx_pause = 1; in igb_get_pauseparam()
386 else if (hw->fc.current_mode == e1000_fc_tx_pause) in igb_get_pauseparam()
387 pause->tx_pause = 1; in igb_get_pauseparam()
388 else if (hw->fc.current_mode == e1000_fc_full) { in igb_get_pauseparam()
389 pause->rx_pause = 1; in igb_get_pauseparam()
390 pause->tx_pause = 1; in igb_get_pauseparam()
398 struct e1000_hw *hw = &adapter->hw; in igb_set_pauseparam()
403 if (hw->dev_spec._82575.eth_flags.e100_base_fx) in igb_set_pauseparam()
404 return -EINVAL; in igb_set_pauseparam()
406 adapter->fc_autoneg = pause->autoneg; in igb_set_pauseparam()
408 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_set_pauseparam()
411 if (adapter->fc_autoneg == AUTONEG_ENABLE) { in igb_set_pauseparam()
412 hw->fc.requested_mode = e1000_fc_default; in igb_set_pauseparam()
413 if (netif_running(adapter->netdev)) { in igb_set_pauseparam()
420 if (pause->rx_pause && pause->tx_pause) in igb_set_pauseparam()
421 hw->fc.requested_mode = e1000_fc_full; in igb_set_pauseparam()
422 else if (pause->rx_pause && !pause->tx_pause) in igb_set_pauseparam()
423 hw->fc.requested_mode = e1000_fc_rx_pause; in igb_set_pauseparam()
424 else if (!pause->rx_pause && pause->tx_pause) in igb_set_pauseparam()
425 hw->fc.requested_mode = e1000_fc_tx_pause; in igb_set_pauseparam()
426 else if (!pause->rx_pause && !pause->tx_pause) in igb_set_pauseparam()
427 hw->fc.requested_mode = e1000_fc_none; in igb_set_pauseparam()
429 hw->fc.current_mode = hw->fc.requested_mode; in igb_set_pauseparam()
431 retval = ((hw->phy.media_type == e1000_media_type_copper) ? in igb_set_pauseparam()
435 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_set_pauseparam()
436 struct igb_ring *ring = adapter->rx_ring[i]; in igb_set_pauseparam()
442 clear_bit(__IGB_RESETTING, &adapter->state); in igb_set_pauseparam()
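
When flow-control autoneg is off, the two ethtool booleans select one of four requested modes, exactly as in the if/else ladder above. A standalone sketch of that mapping (the enum mirrors the driver's e1000_fc_* values but is local to this example):

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* rx_pause/tx_pause -> requested flow-control mode. */
static enum fc_mode fc_mode_from_pause(int rx_pause, int tx_pause)
{
	if (rx_pause && tx_pause)
		return FC_FULL;
	if (rx_pause)
		return FC_RX_PAUSE;
	if (tx_pause)
		return FC_TX_PAUSE;
	return FC_NONE;
}
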
449 return adapter->msg_enable; in igb_get_msglevel()
455 adapter->msg_enable = data; in igb_set_msglevel()
468 struct e1000_hw *hw = &adapter->hw; in igb_get_regs()
474 regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; in igb_get_regs()
553 regs_buff[54] = adapter->stats.crcerrs; in igb_get_regs()
554 regs_buff[55] = adapter->stats.algnerrc; in igb_get_regs()
555 regs_buff[56] = adapter->stats.symerrs; in igb_get_regs()
556 regs_buff[57] = adapter->stats.rxerrc; in igb_get_regs()
557 regs_buff[58] = adapter->stats.mpc; in igb_get_regs()
558 regs_buff[59] = adapter->stats.scc; in igb_get_regs()
559 regs_buff[60] = adapter->stats.ecol; in igb_get_regs()
560 regs_buff[61] = adapter->stats.mcc; in igb_get_regs()
561 regs_buff[62] = adapter->stats.latecol; in igb_get_regs()
562 regs_buff[63] = adapter->stats.colc; in igb_get_regs()
563 regs_buff[64] = adapter->stats.dc; in igb_get_regs()
564 regs_buff[65] = adapter->stats.tncrs; in igb_get_regs()
565 regs_buff[66] = adapter->stats.sec; in igb_get_regs()
566 regs_buff[67] = adapter->stats.htdpmc; in igb_get_regs()
567 regs_buff[68] = adapter->stats.rlec; in igb_get_regs()
568 regs_buff[69] = adapter->stats.xonrxc; in igb_get_regs()
569 regs_buff[70] = adapter->stats.xontxc; in igb_get_regs()
570 regs_buff[71] = adapter->stats.xoffrxc; in igb_get_regs()
571 regs_buff[72] = adapter->stats.xofftxc; in igb_get_regs()
572 regs_buff[73] = adapter->stats.fcruc; in igb_get_regs()
573 regs_buff[74] = adapter->stats.prc64; in igb_get_regs()
574 regs_buff[75] = adapter->stats.prc127; in igb_get_regs()
575 regs_buff[76] = adapter->stats.prc255; in igb_get_regs()
576 regs_buff[77] = adapter->stats.prc511; in igb_get_regs()
577 regs_buff[78] = adapter->stats.prc1023; in igb_get_regs()
578 regs_buff[79] = adapter->stats.prc1522; in igb_get_regs()
579 regs_buff[80] = adapter->stats.gprc; in igb_get_regs()
580 regs_buff[81] = adapter->stats.bprc; in igb_get_regs()
581 regs_buff[82] = adapter->stats.mprc; in igb_get_regs()
582 regs_buff[83] = adapter->stats.gptc; in igb_get_regs()
583 regs_buff[84] = adapter->stats.gorc; in igb_get_regs()
584 regs_buff[86] = adapter->stats.gotc; in igb_get_regs()
585 regs_buff[88] = adapter->stats.rnbc; in igb_get_regs()
586 regs_buff[89] = adapter->stats.ruc; in igb_get_regs()
587 regs_buff[90] = adapter->stats.rfc; in igb_get_regs()
588 regs_buff[91] = adapter->stats.roc; in igb_get_regs()
589 regs_buff[92] = adapter->stats.rjc; in igb_get_regs()
590 regs_buff[93] = adapter->stats.mgprc; in igb_get_regs()
591 regs_buff[94] = adapter->stats.mgpdc; in igb_get_regs()
592 regs_buff[95] = adapter->stats.mgptc; in igb_get_regs()
593 regs_buff[96] = adapter->stats.tor; in igb_get_regs()
594 regs_buff[98] = adapter->stats.tot; in igb_get_regs()
595 regs_buff[100] = adapter->stats.tpr; in igb_get_regs()
596 regs_buff[101] = adapter->stats.tpt; in igb_get_regs()
597 regs_buff[102] = adapter->stats.ptc64; in igb_get_regs()
598 regs_buff[103] = adapter->stats.ptc127; in igb_get_regs()
599 regs_buff[104] = adapter->stats.ptc255; in igb_get_regs()
600 regs_buff[105] = adapter->stats.ptc511; in igb_get_regs()
601 regs_buff[106] = adapter->stats.ptc1023; in igb_get_regs()
602 regs_buff[107] = adapter->stats.ptc1522; in igb_get_regs()
603 regs_buff[108] = adapter->stats.mptc; in igb_get_regs()
604 regs_buff[109] = adapter->stats.bptc; in igb_get_regs()
605 regs_buff[110] = adapter->stats.tsctc; in igb_get_regs()
606 regs_buff[111] = adapter->stats.iac; in igb_get_regs()
607 regs_buff[112] = adapter->stats.rpthc; in igb_get_regs()
608 regs_buff[113] = adapter->stats.hgptc; in igb_get_regs()
609 regs_buff[114] = adapter->stats.hgorc; in igb_get_regs()
610 regs_buff[116] = adapter->stats.hgotc; in igb_get_regs()
611 regs_buff[118] = adapter->stats.lenerrs; in igb_get_regs()
612 regs_buff[119] = adapter->stats.scvpc; in igb_get_regs()
613 regs_buff[120] = adapter->stats.hrmpc; in igb_get_regs()
680 if (hw->mac.type > e1000_82580) { in igb_get_regs()
681 regs_buff[551] = adapter->stats.o2bgptc; in igb_get_regs()
682 regs_buff[552] = adapter->stats.b2ospc; in igb_get_regs()
683 regs_buff[553] = adapter->stats.o2bspc; in igb_get_regs()
684 regs_buff[554] = adapter->stats.b2ogprc; in igb_get_regs()
687 if (hw->mac.type == e1000_82576) { in igb_get_regs()
723 if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) in igb_get_regs()
730 return adapter->hw.nvm.word_size * 2; in igb_get_eeprom_len()
737 struct e1000_hw *hw = &adapter->hw; in igb_get_eeprom()
743 if (eeprom->len == 0) in igb_get_eeprom()
744 return -EINVAL; in igb_get_eeprom()
746 eeprom->magic = hw->vendor_id | (hw->device_id << 16); in igb_get_eeprom()
748 first_word = eeprom->offset >> 1; in igb_get_eeprom()
749 last_word = (eeprom->offset + eeprom->len - 1) >> 1; in igb_get_eeprom()
751 eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), in igb_get_eeprom()
754 return -ENOMEM; in igb_get_eeprom()
756 if (hw->nvm.type == e1000_nvm_eeprom_spi) in igb_get_eeprom()
757 ret_val = hw->nvm.ops.read(hw, first_word, in igb_get_eeprom()
758 last_word - first_word + 1, in igb_get_eeprom()
761 for (i = 0; i < last_word - first_word + 1; i++) { in igb_get_eeprom()
762 ret_val = hw->nvm.ops.read(hw, first_word + i, 1, in igb_get_eeprom()
769 /* Device's eeprom is always little-endian, word addressable */ in igb_get_eeprom()
770 for (i = 0; i < last_word - first_word + 1; i++) in igb_get_eeprom()
773 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), in igb_get_eeprom()
774 eeprom->len); in igb_get_eeprom()
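
The NVM is word addressable, so a byte-granular ethtool request is widened to whole 16-bit words and the odd leading byte is skipped in the final memcpy. A standalone sketch of that arithmetic:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* How many 16-bit NVM words cover the byte range [offset, offset + len)? */
static size_t eeprom_word_span(uint32_t offset, uint32_t len,
			       uint32_t *first_word)
{
	uint32_t last_word = (offset + len - 1) >> 1;

	*first_word = offset >> 1;
	return last_word - *first_word + 1;
}

/* After reading that word span (and fixing endianness, as the driver does),
 * the caller's bytes start (offset & 1) bytes into the word buffer. */
static void eeprom_copy_out(const uint16_t *words, uint32_t offset,
			    uint32_t len, uint8_t *out)
{
	memcpy(out, (const uint8_t *)words + (offset & 1), len);
}
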
784 struct e1000_hw *hw = &adapter->hw; in igb_set_eeprom()
790 if (eeprom->len == 0) in igb_set_eeprom()
791 return -EOPNOTSUPP; in igb_set_eeprom()
793 if ((hw->mac.type >= e1000_i210) && in igb_set_eeprom()
795 return -EOPNOTSUPP; in igb_set_eeprom()
798 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) in igb_set_eeprom()
799 return -EFAULT; in igb_set_eeprom()
801 max_len = hw->nvm.word_size * 2; in igb_set_eeprom()
803 first_word = eeprom->offset >> 1; in igb_set_eeprom()
804 last_word = (eeprom->offset + eeprom->len - 1) >> 1; in igb_set_eeprom()
807 return -ENOMEM; in igb_set_eeprom()
811 if (eeprom->offset & 1) { in igb_set_eeprom()
815 ret_val = hw->nvm.ops.read(hw, first_word, 1, in igb_set_eeprom()
819 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { in igb_set_eeprom()
823 ret_val = hw->nvm.ops.read(hw, last_word, 1, in igb_set_eeprom()
824 &eeprom_buff[last_word - first_word]); in igb_set_eeprom()
829 /* Device's eeprom is always little-endian, word addressable */ in igb_set_eeprom()
830 for (i = 0; i < last_word - first_word + 1; i++) in igb_set_eeprom()
833 memcpy(ptr, bytes, eeprom->len); in igb_set_eeprom()
835 for (i = 0; i < last_word - first_word + 1; i++) in igb_set_eeprom()
838 ret_val = hw->nvm.ops.write(hw, first_word, in igb_set_eeprom()
839 last_word - first_word + 1, eeprom_buff); in igb_set_eeprom()
843 hw->nvm.ops.update(hw); in igb_set_eeprom()
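
Writes that start or end on an odd byte must preserve the untouched half of the boundary words, which is why the code above reads first_word and last_word before overlaying the caller's data. A sketch of the same read-modify-write idea (read_word()/write_words() are hypothetical stand-ins for the NVM ops; endianness fix-ups are omitted):

#include <stdint.h>
#include <string.h>

extern uint16_t read_word(uint32_t word_index);
extern void write_words(uint32_t first_word, const uint16_t *buf, size_t n);

static void eeprom_write_bytes(uint32_t offset, const uint8_t *bytes,
			       uint32_t len, uint16_t *buf)
{
	uint32_t first_word = offset >> 1;
	uint32_t last_word = (offset + len - 1) >> 1;

	if (offset & 1)				/* partial first word */
		buf[0] = read_word(first_word);
	if ((offset + len) & 1)			/* partial last word */
		buf[last_word - first_word] = read_word(last_word);

	memcpy((uint8_t *)buf + (offset & 1), bytes, len);
	write_words(first_word, buf, last_word - first_word + 1);
}
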
856 strscpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); in igb_get_drvinfo()
861 strscpy(drvinfo->fw_version, adapter->fw_version, in igb_get_drvinfo()
862 sizeof(drvinfo->fw_version)); in igb_get_drvinfo()
863 strscpy(drvinfo->bus_info, pci_name(adapter->pdev), in igb_get_drvinfo()
864 sizeof(drvinfo->bus_info)); in igb_get_drvinfo()
866 drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN; in igb_get_drvinfo()
876 ring->rx_max_pending = IGB_MAX_RXD; in igb_get_ringparam()
877 ring->tx_max_pending = IGB_MAX_TXD; in igb_get_ringparam()
878 ring->rx_pending = adapter->rx_ring_count; in igb_get_ringparam()
879 ring->tx_pending = adapter->tx_ring_count; in igb_get_ringparam()
892 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in igb_set_ringparam()
893 return -EINVAL; in igb_set_ringparam()
895 new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); in igb_set_ringparam()
899 new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); in igb_set_ringparam()
903 if ((new_tx_count == adapter->tx_ring_count) && in igb_set_ringparam()
904 (new_rx_count == adapter->rx_ring_count)) { in igb_set_ringparam()
909 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_set_ringparam()
912 if (!netif_running(adapter->netdev)) { in igb_set_ringparam()
913 for (i = 0; i < adapter->num_tx_queues; i++) in igb_set_ringparam()
914 adapter->tx_ring[i]->count = new_tx_count; in igb_set_ringparam()
915 for (i = 0; i < adapter->num_rx_queues; i++) in igb_set_ringparam()
916 adapter->rx_ring[i]->count = new_rx_count; in igb_set_ringparam()
917 adapter->tx_ring_count = new_tx_count; in igb_set_ringparam()
918 adapter->rx_ring_count = new_rx_count; in igb_set_ringparam()
922 if (adapter->num_tx_queues > adapter->num_rx_queues) in igb_set_ringparam()
924 adapter->num_tx_queues)); in igb_set_ringparam()
927 adapter->num_rx_queues)); in igb_set_ringparam()
930 err = -ENOMEM; in igb_set_ringparam()
937 * because the ISRs in MSI-X mode get passed pointers in igb_set_ringparam()
938 * to the Tx and Rx ring structs. in igb_set_ringparam()
940 if (new_tx_count != adapter->tx_ring_count) { in igb_set_ringparam()
941 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_set_ringparam()
942 memcpy(&temp_ring[i], adapter->tx_ring[i], in igb_set_ringparam()
949 i--; in igb_set_ringparam()
956 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_set_ringparam()
957 igb_free_tx_resources(adapter->tx_ring[i]); in igb_set_ringparam()
959 memcpy(adapter->tx_ring[i], &temp_ring[i], in igb_set_ringparam()
963 adapter->tx_ring_count = new_tx_count; in igb_set_ringparam()
966 if (new_rx_count != adapter->rx_ring_count) { in igb_set_ringparam()
967 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_set_ringparam()
968 memcpy(&temp_ring[i], adapter->rx_ring[i], in igb_set_ringparam()
975 i--; in igb_set_ringparam()
983 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_set_ringparam()
984 igb_free_rx_resources(adapter->rx_ring[i]); in igb_set_ringparam()
986 memcpy(adapter->rx_ring[i], &temp_ring[i], in igb_set_ringparam()
990 adapter->rx_ring_count = new_rx_count; in igb_set_ringparam()
996 clear_bit(__IGB_RESETTING, &adapter->state); in igb_set_ringparam()
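
Two points worth noting in the ring-resize path above: the requested counts are clamped into the supported window (the driver also rounds the result to the hardware's descriptor multiple; those lines are elided here), and the resize follows a prepare-then-swap pattern, building replacement rings in temp_ring first so a failed allocation leaves the live rings untouched. A standalone sketch of the clamp:

#include <stdint.h>

/* Clamp a requested descriptor count into [min_count, max_count]. */
static uint32_t clamp_ring_count(uint32_t requested, uint32_t min_count,
				 uint32_t max_count)
{
	if (requested < min_count)
		return min_count;
	if (requested > max_count)
		return max_count;
	return requested;
}
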
1013 * as a single-element array) and special-case the tables.
1017 * registers to be written without any read-back testing.
1035 /* RDH is read-only for i210, only test RDT. */
1069 /* RDH is read-only for i350, only test RDT. */
1112 /* RDH is read-only for 82580, only test RDT. */
1155 /* Enable all RX queues before testing. */
1160 /* RDH is read-only for 82576, only test RDT. */
1195 /* Enable all four RX queues before testing. */
1198 /* RDH is read-only for 82575, only test RDT. */
1221 struct e1000_hw *hw = &adapter->hw; in reg_pattern_test()
1229 dev_err(&adapter->pdev->dev, in reg_pattern_test()
1243 struct e1000_hw *hw = &adapter->hw; in reg_set_and_check()
1249 dev_err(&adapter->pdev->dev, in reg_set_and_check()
1273 struct e1000_hw *hw = &adapter->hw; in igb_reg_test()
1278 switch (adapter->hw.mac.type) { in igb_reg_test()
1305 * tests. Some bits are read-only, some toggle, and some in igb_reg_test()
1313 dev_err(&adapter->pdev->dev, in igb_reg_test()
1325 while (test->reg) { in igb_reg_test()
1326 for (i = 0; i < test->array_len; i++) { in igb_reg_test()
1327 switch (test->test_type) { in igb_reg_test()
1329 REG_PATTERN_TEST(test->reg + in igb_reg_test()
1330 (i * test->reg_offset), in igb_reg_test()
1331 test->mask, in igb_reg_test()
1332 test->write); in igb_reg_test()
1335 REG_SET_AND_CHECK(test->reg + in igb_reg_test()
1336 (i * test->reg_offset), in igb_reg_test()
1337 test->mask, in igb_reg_test()
1338 test->write); in igb_reg_test()
1341 writel(test->write, in igb_reg_test()
1342 (adapter->hw.hw_addr + test->reg) in igb_reg_test()
1343 + (i * test->reg_offset)); in igb_reg_test()
1346 REG_PATTERN_TEST(test->reg + (i * 4), in igb_reg_test()
1347 test->mask, in igb_reg_test()
1348 test->write); in igb_reg_test()
1351 REG_PATTERN_TEST(test->reg + (i * 8), in igb_reg_test()
1352 test->mask, in igb_reg_test()
1353 test->write); in igb_reg_test()
1356 REG_PATTERN_TEST((test->reg + 4) + (i * 8), in igb_reg_test()
1357 test->mask, in igb_reg_test()
1358 test->write); in igb_reg_test()
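
Each table entry names a register, a writable-bit mask and a read-back mask; the pattern test writes a handful of known patterns through the writable mask and fails if the masked read-back differs. A standalone sketch of that check, with reg_read()/reg_write() as hypothetical MMIO accessors standing in for rd32()/wr32():

#include <stdint.h>
#include <stdio.h>

extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t val);

static int reg_pattern_test(uint32_t reg, uint32_t mask, uint32_t write)
{
	static const uint32_t patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	unsigned int i;

	for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		uint32_t pat = patterns[i] & write;
		uint32_t val;

		reg_write(reg, pat);
		val = reg_read(reg) & mask;
		if (val != (pat & mask)) {
			fprintf(stderr,
				"reg 0x%08X: got 0x%08X, expected 0x%08X\n",
				reg, val, pat & mask);
			return 1;	/* register failed the pattern test */
		}
	}
	return 0;
}
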
1371 struct e1000_hw *hw = &adapter->hw; in igb_eeprom_test()
1376 switch (hw->mac.type) { in igb_eeprom_test()
1380 if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) in igb_eeprom_test()
1385 if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) in igb_eeprom_test()
1396 struct e1000_hw *hw = &adapter->hw; in igb_test_intr()
1398 adapter->test_icr |= rd32(E1000_ICR); in igb_test_intr()
1405 struct e1000_hw *hw = &adapter->hw; in igb_intr_test()
1406 struct net_device *netdev = adapter->netdev; in igb_intr_test()
1408 u32 irq = adapter->pdev->irq; in igb_intr_test()
1413 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_intr_test()
1414 if (request_irq(adapter->msix_entries[0].vector, in igb_intr_test()
1415 igb_test_intr, 0, netdev->name, adapter)) { in igb_intr_test()
1417 return -1; in igb_intr_test()
1421 } else if (adapter->flags & IGB_FLAG_HAS_MSI) { in igb_intr_test()
1424 igb_test_intr, 0, netdev->name, adapter)) { in igb_intr_test()
1426 return -1; in igb_intr_test()
1429 netdev->name, adapter)) { in igb_intr_test()
1432 netdev->name, adapter)) { in igb_intr_test()
1434 return -1; in igb_intr_test()
1436 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", in igb_intr_test()
1445 switch (hw->mac.type) { in igb_intr_test()
1481 adapter->test_icr = 0; in igb_intr_test()
1491 if (adapter->test_icr & mask) { in igb_intr_test()
1503 adapter->test_icr = 0; in igb_intr_test()
1513 if (!(adapter->test_icr & mask)) { in igb_intr_test()
1525 adapter->test_icr = 0; in igb_intr_test()
1535 if (adapter->test_icr & mask) { in igb_intr_test()
1548 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_intr_test()
1549 free_irq(adapter->msix_entries[0].vector, adapter); in igb_intr_test()
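
The interrupt self-test above installs a throwaway handler that accumulates ICR into adapter->test_icr, then walks the cause bits with a negative check (cause masked and forced: nothing may latch) followed by a positive check (cause enabled and forced: it must latch). A sketch of that per-cause logic, with mask_cause()/enable_cause()/trigger_cause()/latched_icr() as hypothetical helpers for the IMC/IMS/ICS writes and the accumulated ICR value:

#include <stdint.h>

extern void mask_cause(uint32_t mask);
extern void enable_cause(uint32_t mask);
extern void trigger_cause(uint32_t mask);
extern uint32_t latched_icr(void);	/* cleared before each trigger */

static int intr_cause_selftest(uint32_t mask)
{
	/* Negative test: a masked cause must not be delivered. */
	mask_cause(mask);
	trigger_cause(mask);
	if (latched_icr() & mask)
		return 1;

	/* Positive test: an enabled cause must be delivered. */
	enable_cause(mask);
	trigger_cause(mask);
	if (!(latched_icr() & mask))
		return 1;

	return 0;
}
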
1558 igb_free_tx_resources(&adapter->test_tx_ring); in igb_free_desc_rings()
1559 igb_free_rx_resources(&adapter->test_rx_ring); in igb_free_desc_rings()
1564 struct igb_ring *tx_ring = &adapter->test_tx_ring; in igb_setup_desc_rings()
1565 struct igb_ring *rx_ring = &adapter->test_rx_ring; in igb_setup_desc_rings()
1566 struct e1000_hw *hw = &adapter->hw; in igb_setup_desc_rings()
1570 tx_ring->count = IGB_DEFAULT_TXD; in igb_setup_desc_rings()
1571 tx_ring->dev = &adapter->pdev->dev; in igb_setup_desc_rings()
1572 tx_ring->netdev = adapter->netdev; in igb_setup_desc_rings()
1573 tx_ring->reg_idx = adapter->vfs_allocated_count; in igb_setup_desc_rings()
1583 /* Setup Rx descriptor ring and Rx buffers */ in igb_setup_desc_rings()
1584 rx_ring->count = IGB_DEFAULT_RXD; in igb_setup_desc_rings()
1585 rx_ring->dev = &adapter->pdev->dev; in igb_setup_desc_rings()
1586 rx_ring->netdev = adapter->netdev; in igb_setup_desc_rings()
1587 rx_ring->reg_idx = adapter->vfs_allocated_count; in igb_setup_desc_rings()
1595 wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); in igb_setup_desc_rings()
1612 struct e1000_hw *hw = &adapter->hw; in igb_phy_disable_receiver()
1623 struct e1000_hw *hw = &adapter->hw; in igb_integrated_phy_loopback()
1626 hw->mac.autoneg = false; in igb_integrated_phy_loopback()
1628 if (hw->phy.type == e1000_phy_m88) { in igb_integrated_phy_loopback()
1629 if (hw->phy.id != I210_I_PHY_ID) { in igb_integrated_phy_loopback()
1630 /* Auto-MDI/MDIX Off */ in igb_integrated_phy_loopback()
1632 /* reset to update Auto-MDI/MDIX */ in igb_integrated_phy_loopback()
1641 } else if (hw->phy.type == e1000_phy_82580) { in igb_integrated_phy_loopback()
1661 if (hw->phy.type == e1000_phy_m88) in igb_integrated_phy_loopback()
1669 if (hw->phy.type == e1000_phy_m88) in igb_integrated_phy_loopback()
1683 struct e1000_hw *hw = &adapter->hw; in igb_setup_loopback_test()
1690 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || in igb_setup_loopback_test()
1691 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || in igb_setup_loopback_test()
1692 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || in igb_setup_loopback_test()
1693 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || in igb_setup_loopback_test()
1694 (hw->device_id == E1000_DEV_ID_I354_SGMII) || in igb_setup_loopback_test()
1695 (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { in igb_setup_loopback_test()
1729 if (hw->mac.type >= e1000_82580) { in igb_setup_loopback_test()
1735 /* Set PCS register for forced speed */ in igb_setup_loopback_test()
1753 struct e1000_hw *hw = &adapter->hw; in igb_loopback_cleanup()
1757 if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || in igb_loopback_cleanup()
1758 (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || in igb_loopback_cleanup()
1759 (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || in igb_loopback_cleanup()
1760 (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || in igb_loopback_cleanup()
1761 (hw->device_id == E1000_DEV_ID_I354_SGMII)) { in igb_loopback_cleanup()
1779 hw->mac.autoneg = true; in igb_loopback_cleanup()
1791 memset(skb->data, 0xFF, frame_size); in igb_create_lbtest_frame()
1793 memset(&skb->data[frame_size], 0xAA, frame_size - 1); in igb_create_lbtest_frame()
1794 skb->data[frame_size + 10] = 0xBE; in igb_create_lbtest_frame()
1795 skb->data[frame_size + 12] = 0xAF; in igb_create_lbtest_frame()
1806 data = kmap_local_page(rx_buffer->page); in igb_check_lbtest_frame()
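
The loopback frame is stamped with a recognizable payload (a 0xFF half, a 0xAA half, and two marker bytes) so the receive side can cheaply confirm the Tx-to-Rx round trip without comparing whole buffers. A standalone sketch of the build/verify pair; the halving of frame_size is not visible in the listing above and is assumed here:

#include <stdint.h>
#include <string.h>

static void build_lbtest_frame(uint8_t *data, size_t frame_size)
{
	frame_size /= 2;
	memset(data, 0xFF, frame_size);
	memset(data + frame_size, 0xAA, frame_size - 1);
	data[frame_size + 10] = 0xBE;		/* marker bytes */
	data[frame_size + 12] = 0xAF;
}

static int check_lbtest_frame(const uint8_t *data, size_t frame_size)
{
	frame_size /= 2;
	return data[3] == 0xFF &&
	       data[frame_size + 10] == 0xBE &&
	       data[frame_size + 12] == 0xAF;
}
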
1828 rx_ntc = rx_ring->next_to_clean; in igb_clean_test_rings()
1829 tx_ntc = tx_ring->next_to_clean; in igb_clean_test_rings()
1832 while (rx_desc->wb.upper.length) { in igb_clean_test_rings()
1833 /* check Rx buffer */ in igb_clean_test_rings()
1834 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; in igb_clean_test_rings()
1836 /* sync Rx buffer for CPU read */ in igb_clean_test_rings()
1837 dma_sync_single_for_cpu(rx_ring->dev, in igb_clean_test_rings()
1838 rx_buffer_info->dma, in igb_clean_test_rings()
1846 /* sync Rx buffer for device write */ in igb_clean_test_rings()
1847 dma_sync_single_for_device(rx_ring->dev, in igb_clean_test_rings()
1848 rx_buffer_info->dma, in igb_clean_test_rings()
1853 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; in igb_clean_test_rings()
1856 dev_kfree_skb_any(tx_buffer_info->skb); in igb_clean_test_rings()
1859 dma_unmap_single(tx_ring->dev, in igb_clean_test_rings()
1865 /* increment Rx/Tx next to clean counters */ in igb_clean_test_rings()
1867 if (rx_ntc == rx_ring->count) in igb_clean_test_rings()
1870 if (tx_ntc == tx_ring->count) in igb_clean_test_rings()
1879 /* re-map buffers to ring, store next to clean values */ in igb_clean_test_rings()
1881 rx_ring->next_to_clean = rx_ntc; in igb_clean_test_rings()
1882 tx_ring->next_to_clean = tx_ntc; in igb_clean_test_rings()
1889 struct igb_ring *tx_ring = &adapter->test_tx_ring; in igb_run_loopback_test()
1890 struct igb_ring *rx_ring = &adapter->test_rx_ring; in igb_run_loopback_test()
1911 if (rx_ring->count <= tx_ring->count) in igb_run_loopback_test()
1912 lc = ((tx_ring->count / 64) * 2) + 1; in igb_run_loopback_test()
1914 lc = ((rx_ring->count / 64) * 2) + 1; in igb_run_loopback_test()
1933 /* allow 200 milliseconds for packets to go from Tx to Rx */ in igb_run_loopback_test()
1954 if (igb_check_reset_block(&adapter->hw)) { in igb_loopback_test()
1955 dev_err(&adapter->pdev->dev, in igb_loopback_test()
1961 if (adapter->hw.mac.type == e1000_i354) { in igb_loopback_test()
1962 dev_info(&adapter->pdev->dev, in igb_loopback_test()
1984 struct e1000_hw *hw = &adapter->hw; in igb_link_test()
1986 if (hw->phy.media_type == e1000_media_type_internal_serdes) { in igb_link_test()
1989 hw->mac.serdes_has_link = false; in igb_link_test()
1992 * could take as long as 2-3 minutes in igb_link_test()
1995 hw->mac.ops.check_for_link(&adapter->hw); in igb_link_test()
1996 if (hw->mac.serdes_has_link) in igb_link_test()
2003 hw->mac.ops.check_for_link(&adapter->hw); in igb_link_test()
2004 if (hw->mac.autoneg) in igb_link_test()
2021 set_bit(__IGB_TESTING, &adapter->state); in igb_diag_test()
2024 if (adapter->hw.dev_spec._82575.mas_capable) in igb_diag_test()
2025 eth_test->flags &= ~ETH_TEST_FL_OFFLINE; in igb_diag_test()
2026 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { in igb_diag_test()
2030 autoneg_advertised = adapter->hw.phy.autoneg_advertised; in igb_diag_test()
2031 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; in igb_diag_test()
2032 autoneg = adapter->hw.mac.autoneg; in igb_diag_test()
2034 dev_info(&adapter->pdev->dev, "offline testing starting\n"); in igb_diag_test()
2043 eth_test->flags |= ETH_TEST_FL_FAILED; in igb_diag_test()
2052 eth_test->flags |= ETH_TEST_FL_FAILED; in igb_diag_test()
2056 eth_test->flags |= ETH_TEST_FL_FAILED; in igb_diag_test()
2060 eth_test->flags |= ETH_TEST_FL_FAILED; in igb_diag_test()
2066 eth_test->flags |= ETH_TEST_FL_FAILED; in igb_diag_test()
2069 adapter->hw.phy.autoneg_advertised = autoneg_advertised; in igb_diag_test()
2070 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; in igb_diag_test()
2071 adapter->hw.mac.autoneg = autoneg; in igb_diag_test()
2074 adapter->hw.phy.autoneg_wait_to_complete = true; in igb_diag_test()
2076 adapter->hw.phy.autoneg_wait_to_complete = false; in igb_diag_test()
2078 clear_bit(__IGB_TESTING, &adapter->state); in igb_diag_test()
2082 dev_info(&adapter->pdev->dev, "online testing starting\n"); in igb_diag_test()
2086 eth_test->flags |= ETH_TEST_FL_FAILED; in igb_diag_test()
2096 clear_bit(__IGB_TESTING, &adapter->state); in igb_diag_test()
2105 wol->wolopts = 0; in igb_get_wol()
2107 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) in igb_get_wol()
2110 wol->supported = WAKE_UCAST | WAKE_MCAST | in igb_get_wol()
2115 switch (adapter->hw.device_id) { in igb_get_wol()
2120 if (adapter->wol & E1000_WUFC_EX) in igb_get_wol()
2121 wol->wolopts |= WAKE_UCAST; in igb_get_wol()
2122 if (adapter->wol & E1000_WUFC_MC) in igb_get_wol()
2123 wol->wolopts |= WAKE_MCAST; in igb_get_wol()
2124 if (adapter->wol & E1000_WUFC_BC) in igb_get_wol()
2125 wol->wolopts |= WAKE_BCAST; in igb_get_wol()
2126 if (adapter->wol & E1000_WUFC_MAG) in igb_get_wol()
2127 wol->wolopts |= WAKE_MAGIC; in igb_get_wol()
2128 if (adapter->wol & E1000_WUFC_LNKC) in igb_get_wol()
2129 wol->wolopts |= WAKE_PHY; in igb_get_wol()
2136 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) in igb_set_wol()
2137 return -EOPNOTSUPP; in igb_set_wol()
2139 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) in igb_set_wol()
2140 return wol->wolopts ? -EOPNOTSUPP : 0; in igb_set_wol()
2143 adapter->wol = 0; in igb_set_wol()
2145 if (wol->wolopts & WAKE_UCAST) in igb_set_wol()
2146 adapter->wol |= E1000_WUFC_EX; in igb_set_wol()
2147 if (wol->wolopts & WAKE_MCAST) in igb_set_wol()
2148 adapter->wol |= E1000_WUFC_MC; in igb_set_wol()
2149 if (wol->wolopts & WAKE_BCAST) in igb_set_wol()
2150 adapter->wol |= E1000_WUFC_BC; in igb_set_wol()
2151 if (wol->wolopts & WAKE_MAGIC) in igb_set_wol()
2152 adapter->wol |= E1000_WUFC_MAG; in igb_set_wol()
2153 if (wol->wolopts & WAKE_PHY) in igb_set_wol()
2154 adapter->wol |= E1000_WUFC_LNKC; in igb_set_wol()
2155 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); in igb_set_wol()
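
igb_set_wol() translates each ethtool WAKE_* option into the corresponding wake-up filter control bit (the reverse of the mapping shown in igb_get_wol()). A standalone sketch of that translation; the WUFC_* values here are placeholders, not the hardware's real bit positions:

#include <stdint.h>
#include <linux/ethtool.h>	/* WAKE_* option flags */

#define WUFC_EX   (1u << 0)	/* unicast (exact) match - placeholder */
#define WUFC_MC   (1u << 1)	/* multicast match       - placeholder */
#define WUFC_BC   (1u << 2)	/* broadcast match       - placeholder */
#define WUFC_MAG  (1u << 3)	/* magic packet          - placeholder */
#define WUFC_LNKC (1u << 4)	/* link change           - placeholder */

static uint32_t wufc_from_wolopts(uint32_t wolopts)
{
	uint32_t wufc = 0;

	if (wolopts & WAKE_UCAST)
		wufc |= WUFC_EX;
	if (wolopts & WAKE_MCAST)
		wufc |= WUFC_MC;
	if (wolopts & WAKE_BCAST)
		wufc |= WUFC_BC;
	if (wolopts & WAKE_MAGIC)
		wufc |= WUFC_MAG;
	if (wolopts & WAKE_PHY)
		wufc |= WUFC_LNKC;
	return wufc;
}
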
2160 /* bit defines for adapter->led_status */
2167 struct e1000_hw *hw = &adapter->hw; in igb_set_phys_id()
2181 clear_bit(IGB_LED_ON, &adapter->led_status); in igb_set_phys_id()
2197 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || in igb_set_coalesce()
2198 ((ec->rx_coalesce_usecs > 3) && in igb_set_coalesce()
2199 (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || in igb_set_coalesce()
2200 (ec->rx_coalesce_usecs == 2)) in igb_set_coalesce()
2201 return -EINVAL; in igb_set_coalesce()
2203 if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || in igb_set_coalesce()
2204 ((ec->tx_coalesce_usecs > 3) && in igb_set_coalesce()
2205 (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || in igb_set_coalesce()
2206 (ec->tx_coalesce_usecs == 2)) in igb_set_coalesce()
2207 return -EINVAL; in igb_set_coalesce()
2209 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) in igb_set_coalesce()
2210 return -EINVAL; in igb_set_coalesce()
2213 if (ec->rx_coalesce_usecs == 0) { in igb_set_coalesce()
2214 if (adapter->flags & IGB_FLAG_DMAC) in igb_set_coalesce()
2215 adapter->flags &= ~IGB_FLAG_DMAC; in igb_set_coalesce()
2219 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) in igb_set_coalesce()
2220 adapter->rx_itr_setting = ec->rx_coalesce_usecs; in igb_set_coalesce()
2222 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; in igb_set_coalesce()
2225 if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) in igb_set_coalesce()
2226 adapter->tx_itr_setting = adapter->rx_itr_setting; in igb_set_coalesce()
2227 else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) in igb_set_coalesce()
2228 adapter->tx_itr_setting = ec->tx_coalesce_usecs; in igb_set_coalesce()
2230 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; in igb_set_coalesce()
2232 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_set_coalesce()
2233 struct igb_q_vector *q_vector = adapter->q_vector[i]; in igb_set_coalesce()
2234 q_vector->tx.work_limit = adapter->tx_work_limit; in igb_set_coalesce()
2235 if (q_vector->rx.ring) in igb_set_coalesce()
2236 q_vector->itr_val = adapter->rx_itr_setting; in igb_set_coalesce()
2238 q_vector->itr_val = adapter->tx_itr_setting; in igb_set_coalesce()
2239 if (q_vector->itr_val && q_vector->itr_val <= 3) in igb_set_coalesce()
2240 q_vector->itr_val = IGB_START_ITR; in igb_set_coalesce()
2241 q_vector->set_itr = 1; in igb_set_coalesce()
2254 if (adapter->rx_itr_setting <= 3) in igb_get_coalesce()
2255 ec->rx_coalesce_usecs = adapter->rx_itr_setting; in igb_get_coalesce()
2257 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; in igb_get_coalesce()
2259 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { in igb_get_coalesce()
2260 if (adapter->tx_itr_setting <= 3) in igb_get_coalesce()
2261 ec->tx_coalesce_usecs = adapter->tx_itr_setting; in igb_get_coalesce()
2263 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; in igb_get_coalesce()
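
The coalescing bookkeeping above uses a small encoding trick: settings 1-3 select the driver's adaptive/low-latency modes and are stored as-is, while a real microsecond value is stored shifted left by two bits and shifted back when reported through ethtool -c. A standalone sketch of that encode/decode pair:

#include <stdint.h>

static uint32_t itr_setting_from_usecs(uint32_t usecs)
{
	return (usecs && usecs <= 3) ? usecs : usecs << 2;
}

static uint32_t usecs_from_itr_setting(uint32_t setting)
{
	return (setting <= 3) ? setting : setting >> 2;
}
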
2287 return -ENOTSUPP; in igb_get_sset_count()
2295 struct rtnl_link_stats64 *net_stats = &adapter->stats64; in igb_get_ethtool_stats()
2301 spin_lock(&adapter->stats64_lock); in igb_get_ethtool_stats()
2314 for (j = 0; j < adapter->num_tx_queues; j++) { in igb_get_ethtool_stats()
2317 ring = adapter->tx_ring[j]; in igb_get_ethtool_stats()
2319 start = u64_stats_fetch_begin(&ring->tx_syncp); in igb_get_ethtool_stats()
2320 data[i] = ring->tx_stats.packets; in igb_get_ethtool_stats()
2321 data[i+1] = ring->tx_stats.bytes; in igb_get_ethtool_stats()
2322 data[i+2] = ring->tx_stats.restart_queue; in igb_get_ethtool_stats()
2323 } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); in igb_get_ethtool_stats()
2325 start = u64_stats_fetch_begin(&ring->tx_syncp2); in igb_get_ethtool_stats()
2326 restart2 = ring->tx_stats.restart_queue2; in igb_get_ethtool_stats()
2327 } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); in igb_get_ethtool_stats()
2332 for (j = 0; j < adapter->num_rx_queues; j++) { in igb_get_ethtool_stats()
2333 ring = adapter->rx_ring[j]; in igb_get_ethtool_stats()
2335 start = u64_stats_fetch_begin(&ring->rx_syncp); in igb_get_ethtool_stats()
2336 data[i] = ring->rx_stats.packets; in igb_get_ethtool_stats()
2337 data[i+1] = ring->rx_stats.bytes; in igb_get_ethtool_stats()
2338 data[i+2] = ring->rx_stats.drops; in igb_get_ethtool_stats()
2339 data[i+3] = ring->rx_stats.csum_err; in igb_get_ethtool_stats()
2340 data[i+4] = ring->rx_stats.alloc_failed; in igb_get_ethtool_stats()
2341 } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); in igb_get_ethtool_stats()
2344 spin_unlock(&adapter->stats64_lock); in igb_get_ethtool_stats()
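
The per-ring counters are sampled under u64_stats_fetch_begin()/_retry() because 64-bit counters cannot be read atomically on 32-bit hosts: the reader snapshots the values and retries if the writer's sequence count changed (or was odd, meaning an update was in flight). A conceptual standalone sketch of that retry loop; memory barriers are omitted for brevity, which the real helpers take care of:

#include <stdint.h>

struct ring_stats {
	volatile unsigned int seq;	/* writer: ++ before and after update */
	uint64_t packets;
	uint64_t bytes;
};

static void snapshot_stats(const struct ring_stats *s,
			   uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;
		*packets = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != s->seq);
}
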
2362 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_get_strings()
2367 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_get_strings()
2374 /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ in igb_get_strings()
2388 if (adapter->ptp_clock) in igb_get_ts_info()
2389 info->phc_index = ptp_clock_index(adapter->ptp_clock); in igb_get_ts_info()
2391 switch (adapter->hw.mac.type) { in igb_get_ts_info()
2393 info->so_timestamping = in igb_get_ts_info()
2402 info->so_timestamping = in igb_get_ts_info()
2408 info->tx_types = in igb_get_ts_info()
2412 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); in igb_get_ts_info()
2415 if (adapter->hw.mac.type >= e1000_82580) in igb_get_ts_info()
2416 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); in igb_get_ts_info()
2418 info->rx_filters |= in igb_get_ts_info()
2425 return -EOPNOTSUPP; in igb_get_ts_info()
2433 struct ethtool_rx_flow_spec *fsp = &cmd->fs; in igb_get_ethtool_nfc_entry()
2437 cmd->data = IGB_MAX_RXNFC_FILTERS; in igb_get_ethtool_nfc_entry()
2439 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { in igb_get_ethtool_nfc_entry()
2440 if (fsp->location <= rule->sw_idx) in igb_get_ethtool_nfc_entry()
2444 if (!rule || fsp->location != rule->sw_idx) in igb_get_ethtool_nfc_entry()
2445 return -EINVAL; in igb_get_ethtool_nfc_entry()
2447 if (rule->filter.match_flags) { in igb_get_ethtool_nfc_entry()
2448 fsp->flow_type = ETHER_FLOW; in igb_get_ethtool_nfc_entry()
2449 fsp->ring_cookie = rule->action; in igb_get_ethtool_nfc_entry()
2450 if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { in igb_get_ethtool_nfc_entry()
2451 fsp->h_u.ether_spec.h_proto = rule->filter.etype; in igb_get_ethtool_nfc_entry()
2452 fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; in igb_get_ethtool_nfc_entry()
2454 if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) { in igb_get_ethtool_nfc_entry()
2455 fsp->flow_type |= FLOW_EXT; in igb_get_ethtool_nfc_entry()
2456 fsp->h_ext.vlan_tci = rule->filter.vlan_tci; in igb_get_ethtool_nfc_entry()
2457 fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); in igb_get_ethtool_nfc_entry()
2459 if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { in igb_get_ethtool_nfc_entry()
2460 ether_addr_copy(fsp->h_u.ether_spec.h_dest, in igb_get_ethtool_nfc_entry()
2461 rule->filter.dst_addr); in igb_get_ethtool_nfc_entry()
2465 eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); in igb_get_ethtool_nfc_entry()
2467 if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { in igb_get_ethtool_nfc_entry()
2468 ether_addr_copy(fsp->h_u.ether_spec.h_source, in igb_get_ethtool_nfc_entry()
2469 rule->filter.src_addr); in igb_get_ethtool_nfc_entry()
2473 eth_broadcast_addr(fsp->m_u.ether_spec.h_source); in igb_get_ethtool_nfc_entry()
2478 return -EINVAL; in igb_get_ethtool_nfc_entry()
2489 cmd->data = IGB_MAX_RXNFC_FILTERS; in igb_get_ethtool_nfc_all()
2491 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { in igb_get_ethtool_nfc_all()
2492 if (cnt == cmd->rule_cnt) in igb_get_ethtool_nfc_all()
2493 return -EMSGSIZE; in igb_get_ethtool_nfc_all()
2494 rule_locs[cnt] = rule->sw_idx; in igb_get_ethtool_nfc_all()
2498 cmd->rule_cnt = cnt; in igb_get_ethtool_nfc_all()
2506 cmd->data = 0; in igb_get_rss_hash_opts()
2509 switch (cmd->flow_type) { in igb_get_rss_hash_opts()
2511 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; in igb_get_rss_hash_opts()
2514 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) in igb_get_rss_hash_opts()
2515 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; in igb_get_rss_hash_opts()
2522 cmd->data |= RXH_IP_SRC | RXH_IP_DST; in igb_get_rss_hash_opts()
2525 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; in igb_get_rss_hash_opts()
2528 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) in igb_get_rss_hash_opts()
2529 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; in igb_get_rss_hash_opts()
2536 cmd->data |= RXH_IP_SRC | RXH_IP_DST; in igb_get_rss_hash_opts()
2539 return -EINVAL; in igb_get_rss_hash_opts()
2549 int ret = -EOPNOTSUPP; in igb_get_rxnfc()
2551 switch (cmd->cmd) { in igb_get_rxnfc()
2553 cmd->data = adapter->num_rx_queues; in igb_get_rxnfc()
2557 cmd->rule_cnt = adapter->nfc_filter_count; in igb_get_rxnfc()
2581 u32 flags = adapter->flags; in igb_set_rss_hash_opt()
2586 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | in igb_set_rss_hash_opt()
2588 return -EINVAL; in igb_set_rss_hash_opt()
2590 switch (nfc->flow_type) { in igb_set_rss_hash_opt()
2593 if (!(nfc->data & RXH_IP_SRC) || in igb_set_rss_hash_opt()
2594 !(nfc->data & RXH_IP_DST) || in igb_set_rss_hash_opt()
2595 !(nfc->data & RXH_L4_B_0_1) || in igb_set_rss_hash_opt()
2596 !(nfc->data & RXH_L4_B_2_3)) in igb_set_rss_hash_opt()
2597 return -EINVAL; in igb_set_rss_hash_opt()
2600 if (!(nfc->data & RXH_IP_SRC) || in igb_set_rss_hash_opt()
2601 !(nfc->data & RXH_IP_DST)) in igb_set_rss_hash_opt()
2602 return -EINVAL; in igb_set_rss_hash_opt()
2603 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { in igb_set_rss_hash_opt()
2611 return -EINVAL; in igb_set_rss_hash_opt()
2615 if (!(nfc->data & RXH_IP_SRC) || in igb_set_rss_hash_opt()
2616 !(nfc->data & RXH_IP_DST)) in igb_set_rss_hash_opt()
2617 return -EINVAL; in igb_set_rss_hash_opt()
2618 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { in igb_set_rss_hash_opt()
2626 return -EINVAL; in igb_set_rss_hash_opt()
2637 if (!(nfc->data & RXH_IP_SRC) || in igb_set_rss_hash_opt()
2638 !(nfc->data & RXH_IP_DST) || in igb_set_rss_hash_opt()
2639 (nfc->data & RXH_L4_B_0_1) || in igb_set_rss_hash_opt()
2640 (nfc->data & RXH_L4_B_2_3)) in igb_set_rss_hash_opt()
2641 return -EINVAL; in igb_set_rss_hash_opt()
2644 return -EINVAL; in igb_set_rss_hash_opt()
2648 if (flags != adapter->flags) { in igb_set_rss_hash_opt()
2649 struct e1000_hw *hw = &adapter->hw; in igb_set_rss_hash_opt()
2653 !(adapter->flags & UDP_RSS_FLAGS)) in igb_set_rss_hash_opt()
2654 dev_err(&adapter->pdev->dev, in igb_set_rss_hash_opt()
2657 adapter->flags = flags; in igb_set_rss_hash_opt()
2681 struct igb_nfc_filter *input) in igb_rxnfc_write_etype_filter() argument
2683 struct e1000_hw *hw = &adapter->hw; in igb_rxnfc_write_etype_filter()
2690 if (!adapter->etype_bitmap[i]) in igb_rxnfc_write_etype_filter()
2694 dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); in igb_rxnfc_write_etype_filter()
2695 return -EINVAL; in igb_rxnfc_write_etype_filter()
2698 adapter->etype_bitmap[i] = true; in igb_rxnfc_write_etype_filter()
2701 etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); in igb_rxnfc_write_etype_filter()
2708 etqf |= FIELD_PREP(E1000_ETQF_QUEUE_MASK, input->action); in igb_rxnfc_write_etype_filter()
2713 input->etype_reg_index = i; in igb_rxnfc_write_etype_filter()
2719 struct igb_nfc_filter *input) in igb_rxnfc_write_vlan_prio_filter() argument
2721 struct e1000_hw *hw = &adapter->hw; in igb_rxnfc_write_vlan_prio_filter()
2728 ntohs(input->filter.vlan_tci)); in igb_rxnfc_write_vlan_prio_filter()
2733 (queue_index != input->action)) { in igb_rxnfc_write_vlan_prio_filter()
2734 dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); in igb_rxnfc_write_vlan_prio_filter()
2735 return -EEXIST; in igb_rxnfc_write_vlan_prio_filter()
2739 vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action); in igb_rxnfc_write_vlan_prio_filter()
2746 int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) in igb_add_filter() argument
2748 struct e1000_hw *hw = &adapter->hw; in igb_add_filter()
2749 int err = -EINVAL; in igb_add_filter()
2751 if (hw->mac.type == e1000_i210 && in igb_add_filter()
2752 !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) { in igb_add_filter()
2753 dev_err(&adapter->pdev->dev, in igb_add_filter()
2755 return -EOPNOTSUPP; in igb_add_filter()
2758 if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { in igb_add_filter()
2759 err = igb_rxnfc_write_etype_filter(adapter, input); in igb_add_filter()
2764 if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { in igb_add_filter()
2766 input->filter.dst_addr, in igb_add_filter()
2767 input->action, 0); in igb_add_filter()
2773 if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { in igb_add_filter()
2775 input->filter.src_addr, in igb_add_filter()
2776 input->action, in igb_add_filter()
2783 if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) in igb_add_filter()
2784 err = igb_rxnfc_write_vlan_prio_filter(adapter, input); in igb_add_filter()
2792 struct e1000_hw *hw = &adapter->hw; in igb_clear_etype_filter_regs()
2801 adapter->etype_bitmap[reg_index] = false; in igb_clear_etype_filter_regs()
2807 struct e1000_hw *hw = &adapter->hw; in igb_clear_vlan_prio_filter()
2821 int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) in igb_erase_filter() argument
2823 if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) in igb_erase_filter()
2825 input->etype_reg_index); in igb_erase_filter()
2827 if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) in igb_erase_filter()
2829 ntohs(input->filter.vlan_tci)); in igb_erase_filter()
2831 if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) in igb_erase_filter()
2832 igb_del_mac_steering_filter(adapter, input->filter.src_addr, in igb_erase_filter()
2833 input->action, in igb_erase_filter()
2836 if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) in igb_erase_filter()
2837 igb_del_mac_steering_filter(adapter, input->filter.dst_addr, in igb_erase_filter()
2838 input->action, 0); in igb_erase_filter()
2844 struct igb_nfc_filter *input, in igb_update_ethtool_nfc_entry() argument
2848 int err = -EINVAL; in igb_update_ethtool_nfc_entry()
2853 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { in igb_update_ethtool_nfc_entry()
2855 if (rule->sw_idx >= sw_idx) in igb_update_ethtool_nfc_entry()
2861 if (rule && (rule->sw_idx == sw_idx)) { in igb_update_ethtool_nfc_entry()
2862 if (!input) in igb_update_ethtool_nfc_entry()
2865 hlist_del(&rule->nfc_node); in igb_update_ethtool_nfc_entry()
2867 adapter->nfc_filter_count--; in igb_update_ethtool_nfc_entry()
2870 /* If no input this was a delete, err should be 0 if a rule was in igb_update_ethtool_nfc_entry()
2871 * successfully found and removed from the list else -EINVAL in igb_update_ethtool_nfc_entry()
2873 if (!input) in igb_update_ethtool_nfc_entry()
2877 INIT_HLIST_NODE(&input->nfc_node); in igb_update_ethtool_nfc_entry()
2881 hlist_add_behind(&input->nfc_node, &parent->nfc_node); in igb_update_ethtool_nfc_entry()
2883 hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); in igb_update_ethtool_nfc_entry()
2886 adapter->nfc_filter_count++; in igb_update_ethtool_nfc_entry()
2894 struct net_device *netdev = adapter->netdev; in igb_add_ethtool_nfc_entry()
2896 (struct ethtool_rx_flow_spec *)&cmd->fs; in igb_add_ethtool_nfc_entry()
2897 struct igb_nfc_filter *input, *rule; in igb_add_ethtool_nfc_entry() local
2900 if (!(netdev->hw_features & NETIF_F_NTUPLE)) in igb_add_ethtool_nfc_entry()
2901 return -EOPNOTSUPP; in igb_add_ethtool_nfc_entry()
2904 * the number of online Rx queues. in igb_add_ethtool_nfc_entry()
2906 if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) || in igb_add_ethtool_nfc_entry()
2907 (fsp->ring_cookie >= adapter->num_rx_queues)) { in igb_add_ethtool_nfc_entry()
2908 dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); in igb_add_ethtool_nfc_entry()
2909 return -EINVAL; in igb_add_ethtool_nfc_entry()
2913 if (fsp->location >= IGB_MAX_RXNFC_FILTERS) { in igb_add_ethtool_nfc_entry()
2914 dev_err(&adapter->pdev->dev, "Location out of range\n"); in igb_add_ethtool_nfc_entry()
2915 return -EINVAL; in igb_add_ethtool_nfc_entry()
2918 if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) in igb_add_ethtool_nfc_entry()
2919 return -EINVAL; in igb_add_ethtool_nfc_entry()
2921 input = kzalloc(sizeof(*input), GFP_KERNEL); in igb_add_ethtool_nfc_entry()
2922 if (!input) in igb_add_ethtool_nfc_entry()
2923 return -ENOMEM; in igb_add_ethtool_nfc_entry()
2925 if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { in igb_add_ethtool_nfc_entry()
2926 input->filter.etype = fsp->h_u.ether_spec.h_proto; in igb_add_ethtool_nfc_entry()
2927 input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; in igb_add_ethtool_nfc_entry()
2931 if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { in igb_add_ethtool_nfc_entry()
2932 input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; in igb_add_ethtool_nfc_entry()
2933 ether_addr_copy(input->filter.src_addr, in igb_add_ethtool_nfc_entry()
2934 fsp->h_u.ether_spec.h_source); in igb_add_ethtool_nfc_entry()
2938 if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { in igb_add_ethtool_nfc_entry()
2939 input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; in igb_add_ethtool_nfc_entry()
2940 ether_addr_copy(input->filter.dst_addr, in igb_add_ethtool_nfc_entry()
2941 fsp->h_u.ether_spec.h_dest); in igb_add_ethtool_nfc_entry()
2944 if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { in igb_add_ethtool_nfc_entry()
2945 if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { in igb_add_ethtool_nfc_entry()
2946 err = -EINVAL; in igb_add_ethtool_nfc_entry()
2949 input->filter.vlan_tci = fsp->h_ext.vlan_tci; in igb_add_ethtool_nfc_entry()
2950 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; in igb_add_ethtool_nfc_entry()
2953 input->action = fsp->ring_cookie; in igb_add_ethtool_nfc_entry()
2954 input->sw_idx = fsp->location; in igb_add_ethtool_nfc_entry()
2956 spin_lock(&adapter->nfc_lock); in igb_add_ethtool_nfc_entry()
2958 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { in igb_add_ethtool_nfc_entry()
2959 if (!memcmp(&input->filter, &rule->filter, in igb_add_ethtool_nfc_entry()
2960 sizeof(input->filter))) { in igb_add_ethtool_nfc_entry()
2961 err = -EEXIST; in igb_add_ethtool_nfc_entry()
2962 dev_err(&adapter->pdev->dev, in igb_add_ethtool_nfc_entry()
2968 err = igb_add_filter(adapter, input); in igb_add_ethtool_nfc_entry()
2972 err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); in igb_add_ethtool_nfc_entry()
2976 spin_unlock(&adapter->nfc_lock); in igb_add_ethtool_nfc_entry()
2980 igb_erase_filter(adapter, input); in igb_add_ethtool_nfc_entry()
2982 spin_unlock(&adapter->nfc_lock); in igb_add_ethtool_nfc_entry()
2984 kfree(input); in igb_add_ethtool_nfc_entry()
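
For context on what igb_add_ethtool_nfc_entry() parses: an ntuple rule arrives as a struct ethtool_rx_flow_spec carrying the match values (h_u/h_ext), the masks (m_u/m_ext), the target queue in ring_cookie and the slot in location; the driver insists on a full EtherType mask, a VLAN mask equal to the priority bits, and a ring_cookie that names a live Rx queue. A minimal userspace sketch populating such a rule (matching the PTP EtherType 0x88F7 with VLAN priority 3, steered to Rx queue 1, slot 0); the values are illustrative and the ioctl plumbing is omitted:

#include <string.h>
#include <arpa/inet.h>		/* htons() */
#include <linux/if_ether.h>
#include <linux/ethtool.h>

static void fill_ptp_vlan_rule(struct ethtool_rxnfc *nfc)
{
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;

	memset(nfc, 0, sizeof(*nfc));
	nfc->cmd = ETHTOOL_SRXCLSRLINS;

	fsp->flow_type = ETHER_FLOW | FLOW_EXT;
	fsp->h_u.ether_spec.h_proto = htons(0x88F7);	/* match EtherType */
	fsp->m_u.ether_spec.h_proto = htons(0xFFFF);	/* full mask       */
	fsp->h_ext.vlan_tci = htons(3 << 13);		/* PCP = 3         */
	fsp->m_ext.vlan_tci = htons(0xE000);		/* priority bits   */
	fsp->ring_cookie = 1;				/* Rx queue 1      */
	fsp->location = 0;				/* filter slot 0   */
}
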
2992 (struct ethtool_rx_flow_spec *)&cmd->fs; in igb_del_ethtool_nfc_entry()
2995 spin_lock(&adapter->nfc_lock); in igb_del_ethtool_nfc_entry()
2996 err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location); in igb_del_ethtool_nfc_entry()
2997 spin_unlock(&adapter->nfc_lock); in igb_del_ethtool_nfc_entry()
3005 int ret = -EOPNOTSUPP; in igb_set_rxnfc()
3007 switch (cmd->cmd) { in igb_set_rxnfc()
3027 struct e1000_hw *hw = &adapter->hw; in igb_get_eee()
3031 if ((hw->mac.type < e1000_i350) || in igb_get_eee()
3032 (hw->phy.media_type != e1000_media_type_copper)) in igb_get_eee()
3033 return -EOPNOTSUPP; in igb_get_eee()
3036 edata->supported); in igb_get_eee()
3038 edata->supported); in igb_get_eee()
3039 if (!hw->dev_spec._82575.eee_disable) in igb_get_eee()
3040 mii_eee_cap1_mod_linkmode_t(edata->advertised, in igb_get_eee()
3041 adapter->eee_advert); in igb_get_eee()
3044 if (hw->mac.type == e1000_i354) { in igb_get_eee()
3045 igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); in igb_get_eee()
3053 edata->eee_active = true; in igb_get_eee()
3056 edata->tx_lpi_enabled = true; in igb_get_eee()
3060 switch (hw->mac.type) { in igb_get_eee()
3065 return -ENODATA; in igb_get_eee()
3067 mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data); in igb_get_eee()
3076 return -ENODATA; in igb_get_eee()
3078 mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data); in igb_get_eee()
3085 edata->eee_enabled = !hw->dev_spec._82575.eee_disable; in igb_get_eee()
3087 if ((hw->mac.type == e1000_i354) && in igb_get_eee()
3088 (edata->eee_enabled)) in igb_get_eee()
3089 edata->tx_lpi_enabled = true; in igb_get_eee()
3092 * wrongly report EEE at half-duplex in igb_get_eee()
3094 if (adapter->link_duplex == HALF_DUPLEX) { in igb_get_eee()
3095 edata->eee_enabled = false; in igb_get_eee()
3096 edata->eee_active = false; in igb_get_eee()
3097 edata->tx_lpi_enabled = false; in igb_get_eee()
3098 linkmode_zero(edata->advertised); in igb_get_eee()
3110 struct e1000_hw *hw = &adapter->hw; in igb_set_eee()
3115 if ((hw->mac.type < e1000_i350) || in igb_set_eee()
3116 (hw->phy.media_type != e1000_media_type_copper)) in igb_set_eee()
3117 return -EOPNOTSUPP; in igb_set_eee()
3126 if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { in igb_set_eee()
3127 dev_err(&adapter->pdev->dev, in igb_set_eee()
3128 "Setting EEE tx-lpi is not supported\n"); in igb_set_eee()
3129 return -EINVAL; in igb_set_eee()
3133 if (edata->tx_lpi_timer) { in igb_set_eee()
3134 dev_err(&adapter->pdev->dev, in igb_set_eee()
3136 return -EINVAL; in igb_set_eee()
3143 if (linkmode_andnot(tmp, edata->advertised, supported)) { in igb_set_eee()
3144 dev_err(&adapter->pdev->dev, in igb_set_eee()
3146 return -EINVAL; in igb_set_eee()
3150 edata->advertised); in igb_set_eee()
3153 edata->advertised); in igb_set_eee()
3155 } else if (!edata->eee_enabled) { in igb_set_eee()
3156 dev_err(&adapter->pdev->dev, in igb_set_eee()
3158 return -EINVAL; in igb_set_eee()
3161 adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised); in igb_set_eee()
3162 if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { in igb_set_eee()
3163 hw->dev_spec._82575.eee_disable = !edata->eee_enabled; in igb_set_eee()
3164 adapter->flags |= IGB_FLAG_EEE; in igb_set_eee()
3173 if (hw->mac.type == e1000_i354) in igb_set_eee()
3179 dev_err(&adapter->pdev->dev, in igb_set_eee()
3181 return -EINVAL; in igb_set_eee()
3191 struct e1000_hw *hw = &adapter->hw; in igb_get_module_info()
3196 if ((hw->phy.media_type == e1000_media_type_copper) || in igb_get_module_info()
3197 (hw->phy.media_type == e1000_media_type_unknown)) in igb_get_module_info()
3198 return -EOPNOTSUPP; in igb_get_module_info()
3200 /* Check whether we support SFF-8472 or not */ in igb_get_module_info()
3203 return -EIO; in igb_get_module_info()
3208 return -EIO; in igb_get_module_info()
3217 /* We have an SFP, but it does not support SFF-8472 */ in igb_get_module_info()
3218 modinfo->type = ETH_MODULE_SFF_8079; in igb_get_module_info()
3219 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; in igb_get_module_info()
3221 /* We have an SFP which supports a revision of SFF-8472 */ in igb_get_module_info()
3222 modinfo->type = ETH_MODULE_SFF_8472; in igb_get_module_info()
3223 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; in igb_get_module_info()
3233 struct e1000_hw *hw = &adapter->hw; in igb_get_module_eeprom()
3239 if (ee->len == 0) in igb_get_module_eeprom()
3240 return -EINVAL; in igb_get_module_eeprom()
3242 first_word = ee->offset >> 1; in igb_get_module_eeprom()
3243 last_word = (ee->offset + ee->len - 1) >> 1; in igb_get_module_eeprom()
3245 dataword = kmalloc_array(last_word - first_word + 1, sizeof(u16), in igb_get_module_eeprom()
3248 return -ENOMEM; in igb_get_module_eeprom()
3250 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ in igb_get_module_eeprom()
3251 for (i = 0; i < last_word - first_word + 1; i++) { in igb_get_module_eeprom()
3257 return -EIO; in igb_get_module_eeprom()
3263 memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); in igb_get_module_eeprom()
3280 rxfh->hfunc = ETH_RSS_HASH_TOP; in igb_get_rxfh()
3281 if (!rxfh->indir) in igb_get_rxfh()
3284 rxfh->indir[i] = adapter->rss_indir_tbl[i]; in igb_get_rxfh()
3291 struct e1000_hw *hw = &adapter->hw; in igb_write_rss_indir_tbl()
3296 switch (hw->mac.type) { in igb_write_rss_indir_tbl()
3301 /* 82576 supports 2 RSS queues for SR-IOV */ in igb_write_rss_indir_tbl()
3302 if (adapter->vfs_allocated_count) in igb_write_rss_indir_tbl()
3313 for (j = 3; j >= 0; j--) { in igb_write_rss_indir_tbl()
3315 val |= adapter->rss_indir_tbl[i + j]; in igb_write_rss_indir_tbl()
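
The indirection table is written four one-byte entries per 32-bit RETA register; the descending inner loop above packs entry i+3 into the top byte and entry i into the bottom byte (the shift line is elided from the listing). A standalone sketch of that packing:

#include <stdint.h>

static uint32_t pack_reta_word(const uint8_t *indir_tbl, unsigned int i)
{
	uint32_t val = 0;
	int j;

	for (j = 3; j >= 0; j--) {
		val <<= 8;
		val |= indir_tbl[i + j];	/* entry i+3 ends up on top */
	}
	return val;
}
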
3329 struct e1000_hw *hw = &adapter->hw; in igb_set_rxfh()
3334 if (rxfh->key || in igb_set_rxfh()
3335 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in igb_set_rxfh()
3336 rxfh->hfunc != ETH_RSS_HASH_TOP)) in igb_set_rxfh()
3337 return -EOPNOTSUPP; in igb_set_rxfh()
3338 if (!rxfh->indir) in igb_set_rxfh()
3341 num_queues = adapter->rss_queues; in igb_set_rxfh()
3343 switch (hw->mac.type) { in igb_set_rxfh()
3345 /* 82576 supports 2 RSS queues for SR-IOV */ in igb_set_rxfh()
3346 if (adapter->vfs_allocated_count) in igb_set_rxfh()
3353 /* Verify user input. */ in igb_set_rxfh()
3355 if (rxfh->indir[i] >= num_queues) in igb_set_rxfh()
3356 return -EINVAL; in igb_set_rxfh()
3360 adapter->rss_indir_tbl[i] = rxfh->indir[i]; in igb_set_rxfh()
3378 ch->max_combined = igb_max_channels(adapter); in igb_get_channels()
3381 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_get_channels()
3382 ch->max_other = NON_Q_VECTORS; in igb_get_channels()
3383 ch->other_count = NON_Q_VECTORS; in igb_get_channels()
3386 ch->combined_count = adapter->rss_queues; in igb_get_channels()
3393 unsigned int count = ch->combined_count; in igb_set_channels()
3397 if (!count || ch->rx_count || ch->tx_count) in igb_set_channels()
3398 return -EINVAL; in igb_set_channels()
3401 if (ch->other_count != NON_Q_VECTORS) in igb_set_channels()
3402 return -EINVAL; in igb_set_channels()
3407 return -EINVAL; in igb_set_channels()
3409 if (count != adapter->rss_queues) { in igb_set_channels()
3410 adapter->rss_queues = count; in igb_set_channels()
3427 if (adapter->flags & IGB_FLAG_RX_LEGACY) in igb_get_priv_flags()
3436 unsigned int flags = adapter->flags; in igb_set_priv_flags()
3442 if (flags != adapter->flags) { in igb_set_priv_flags()
3443 adapter->flags = flags; in igb_set_priv_flags()
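
The private-flags path ties the single "legacy-rx" string listed near the top of this file to bit 0 of the ethtool priv-flags word, which in turn toggles the driver's legacy receive flag. A standalone sketch of that bit juggling; the flag names are placeholders for the driver's IGB_* constants:

#include <stdint.h>

#define PRIV_FLAGS_LEGACY_RX (1u << 0)	/* slot of the "legacy-rx" string */

static uint32_t apply_priv_flags(uint32_t driver_flags, uint32_t priv,
				 uint32_t legacy_rx_flag)
{
	driver_flags &= ~legacy_rx_flag;
	if (priv & PRIV_FLAGS_LEGACY_RX)
		driver_flags |= legacy_rx_flag;
	return driver_flags;
}
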
3498 netdev->ethtool_ops = &igb_ethtool_ops; in igb_set_ethtool_ops()