Lines matching "sfc-no-dma" in drivers/net/ethernet/sfc/efx_common.c
// SPDX-License-Identifier: GPL-2.0-only

/* ... queued onto this work queue. This is not a per-nic work queue, because ... */
/* in efx_create_reset_workqueue() */
        return -ENOMEM;

/* in efx_queue_reset_work() */
        queue_work(reset_workqueue, &efx->reset_work);

/* in efx_flush_reset_workqueue() */
        cancel_work_sync(&efx->reset_work);
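/*
 * Sketch (not taken from the driver): the usual lifetime of a shared
 * reset workqueue like the one above -- created once at module init,
 * shared by all NICs so that resets serialise, flushed and destroyed
 * at module exit. All names except the workqueue API are hypothetical.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_reset_wq;       /* one for all NICs */

static int __init example_init(void)
{
        example_reset_wq = create_singlethread_workqueue("example_reset");
        if (!example_reset_wq)
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        destroy_workqueue(example_reset_wq);    /* waits for queued work */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");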
/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */

/* in efx_mac_reconfigure() */
        if (efx->type->reconfigure_mac) {
                down_read(&efx->filter_sem);
                efx->type->reconfigure_mac(efx, mtu_only);
                up_read(&efx->filter_sem);
/* in efx_mac_work() */
        mutex_lock(&efx->mac_lock);
        if (efx->port_enabled)
        mutex_unlock(&efx->mac_lock);
/* in efx_set_mac_address() */
        u8 *new_addr = addr->sa_data;
        netif_err(efx, drv, efx->net_dev,
        return -EADDRNOTAVAIL;
        ether_addr_copy(old_addr, net_dev->dev_addr);
        if (efx->type->set_mac_address) {
                rc = efx->type->set_mac_address(efx);
        mutex_lock(&efx->mac_lock);
        mutex_unlock(&efx->mac_lock);
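/*
 * Sketch of the generic .ndo_set_mac_address shape the excerpt above
 * follows, for a hypothetical driver "foo": validate the address,
 * remember the old one, roll back if the hardware rejects the new one.
 * Only the net core helpers used here are real; everything else is
 * illustrative.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int foo_set_mac_address(struct net_device *net_dev, void *data)
{
        struct sockaddr *addr = data;
        u8 old_addr[ETH_ALEN];
        int rc;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        ether_addr_copy(old_addr, net_dev->dev_addr);   /* for rollback */
        eth_hw_addr_set(net_dev, addr->sa_data);
        rc = 0;         /* push to hardware here (hypothetical step) */
        if (rc)
                eth_hw_addr_set(net_dev, old_addr);     /* undo on failure */
        return rc;
}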
/* in efx_set_rx_mode() */
        if (efx->port_enabled)
                queue_work(efx->workqueue, &efx->mac_work);

/* in efx_set_features() */
        /* If disabling RX n-tuple filtering, clear existing filters */
        if (net_dev->features & ~data & NETIF_F_NTUPLE) {
                rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
        /* ... If rx-fcs is changed, mac_reconfigure updates that too. */
        if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
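/*
 * Worked example (plain C, runnable): the two bit tests above.
 * "features & ~data & FLAG" is true only when FLAG is currently set
 * and the request clears it; "(features ^ data) & MASK" is true when
 * any bit in MASK changes in either direction. Values are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define F_NTUPLE (1u << 0)
#define F_VLAN   (1u << 1)

int main(void)
{
        uint32_t features = F_NTUPLE | F_VLAN;  /* currently enabled */
        uint32_t data     = F_VLAN;             /* requested new set */

        printf("ntuple being disabled: %d\n",
               !!(features & ~data & F_NTUPLE));        /* prints 1 */
        printf("vlan filter changed:   %d\n",
               !!((features ^ data) & F_VLAN));         /* prints 0 */
        return 0;
}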
/* in efx_link_status_changed() */
        struct efx_link_state *link_state = &efx->link_state;

        /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
         * that no events are triggered between unregister_netdev() and the
         * ... */
        if (!netif_running(efx->net_dev))
        if (link_state->up != netif_carrier_ok(efx->net_dev)) {
                efx->n_link_state_changes++;
                if (link_state->up)
                        netif_carrier_on(efx->net_dev);
                        netif_carrier_off(efx->net_dev);
        if (link_state->up)
                netif_info(efx, link, efx->net_dev,
                           "link up at %uMbps %s-duplex (MTU %d)\n",
                           link_state->speed, link_state->fd ? "full" : "half",
                           efx->net_dev->mtu);
                netif_info(efx, link, efx->net_dev, "link down\n");
/* in efx_xdp_max_mtu() */
                   efx->rx_prefix_size + efx->type->rx_buffer_padding +
                   efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM;
        return PAGE_SIZE - overhead;
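/*
 * Worked example (plain C, runnable): an XDP frame must fit in one
 * page, so the largest usable MTU is PAGE_SIZE minus every piece of
 * per-packet overhead. All constants are illustrative stand-ins, not
 * the driver's real values.
 */
#include <stdio.h>

int main(void)
{
        int page_size = 4096;
        int rx_prefix = 14;     /* hardware RX prefix (illustrative) */
        int padding   = 16;     /* rx_buffer_padding (illustrative) */
        int ip_align  = 2;      /* NET_IP_ALIGN-style fixup */
        int headroom  = 256;    /* XDP headroom (illustrative) */
        int tailroom  = 64;     /* XDP tailroom (illustrative) */

        int overhead = rx_prefix + padding + ip_align + headroom + tailroom;
        printf("max XDP MTU = %d\n", page_size - overhead);     /* 3744 */
        return 0;
}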
/* in efx_change_mtu() */
        if (rtnl_dereference(efx->xdp_prog) &&
                netif_err(efx, drv, efx->net_dev,
                return -EINVAL;
        netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
        mutex_lock(&efx->mac_lock);
        WRITE_ONCE(net_dev->mtu, new_mtu);
        mutex_unlock(&efx->mac_lock);
/* in efx_monitor() */
        netif_vdbg(efx, timer, efx->net_dev,
        BUG_ON(efx->type->monitor == NULL);
        if (mutex_trylock(&efx->mac_lock)) {
                if (efx->port_enabled && efx->type->monitor)
                        efx->type->monitor(efx);
                mutex_unlock(&efx->mac_lock);
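/*
 * Sketch (hypothetical, standard workqueue API only): a periodic
 * monitor using mutex_trylock() as above so the poll never blocks
 * behind a slow configuration path; skipping one tick is harmless
 * because the work re-arms itself.
 */
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(cfg_lock);
static struct delayed_work monitor_work;

static void monitor_fn(struct work_struct *work)
{
        if (mutex_trylock(&cfg_lock)) {
                /* poll the hardware here */
                mutex_unlock(&cfg_lock);
        }
        schedule_delayed_work(&monitor_work, HZ);       /* re-arm, ~1s */
}

static void monitor_start(void)
{
        INIT_DELAYED_WORK(&monitor_work, monitor_fn);
        schedule_delayed_work(&monitor_work, HZ);
}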
/* in efx_start_monitor() */
        if (efx->type->monitor)
                queue_delayed_work(efx->workqueue, &efx->monitor_work,
/* in efx_start_datapath() */
        netdev_features_t old_features = efx->net_dev->features;
        bool old_rx_scatter = efx->rx_scatter;

        efx->rx_dma_len = (efx->rx_prefix_size +
                           EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
                           efx->type->rx_buffer_padding);
                      efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM);

                efx->rx_scatter = efx->type->always_rx_scatter;
                efx->rx_buffer_order = 0;
        } else if (efx->type->can_rx_scatter) {
                efx->rx_scatter = true;
                efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
                efx->rx_buffer_order = 0;
                efx->rx_scatter = false;
                efx->rx_buffer_order = get_order(rx_buf_len);

        if (efx->rx_buffer_order)
                netif_dbg(efx, drv, efx->net_dev,
                          efx->rx_dma_len, efx->rx_buffer_order,
                          efx->rx_pages_per_batch);
                netif_dbg(efx, drv, efx->net_dev,
                          efx->rx_dma_len, efx->rx_page_buf_step,
                          efx->rx_bufs_per_page, efx->rx_pages_per_batch);

        efx->net_dev->hw_features |= efx->net_dev->features;
        efx->net_dev->hw_features &= ~efx->fixed_features;
        efx->net_dev->features |= efx->fixed_features;
        if (efx->net_dev->features != old_features)
                netdev_features_change(efx->net_dev);

        /* RX filters may also have scatter-enabled flags */
        if ((efx->rx_scatter != old_rx_scatter) &&
            efx->type->filter_update_rx_scatter)
                efx->type->filter_update_rx_scatter(efx);

        efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
        efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

        if (netif_device_present(efx->net_dev))
                netif_tx_wake_all_queues(efx->net_dev);
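/*
 * Worked example (plain C, runnable): the buffer-sizing decision
 * above. The DMA length is derived from the MTU plus prefix/padding;
 * if scatter is unavailable, the whole frame needs one contiguous
 * allocation, whose page order get_order() supplies. The constants
 * and the get_order() stand-in are illustrative.
 */
#include <stdio.h>

/* Smallest n such that (4096 << n) >= size: a 4K-page get_order(). */
static int get_order_4k(int size)
{
        int order = 0;

        while ((4096 << order) < size)
                order++;
        return order;
}

int main(void)
{
        int mtu        = 9000;                  /* jumbo frame */
        int max_frame  = mtu + 18;              /* + L2 header/FCS (approx.) */
        int rx_dma_len = 14 + max_frame + 16;   /* prefix + frame + padding */

        printf("rx_dma_len = %d, page order without scatter = %d\n",
               rx_dma_len, get_order_4k(rx_dma_len));   /* 9048, order 2 */
        /* With scatter enabled, a fixed ~2KB buffer works at order 0. */
        return 0;
}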
/* in efx_stop_datapath() */
        BUG_ON(efx->port_enabled);

/* Equivalent to efx_link_set_advertising with all-zeroes, except does not ... */

/* in efx_link_clear_advertising() */
        bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
        efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
/* in efx_link_set_wanted_fc() */
        efx->wanted_fc = wanted_fc;
        if (efx->link_advertising[0]) {
                efx->link_advertising[0] |= (ADVERTISED_Pause |
                efx->link_advertising[0] &= ~(ADVERTISED_Pause |
                efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
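/*
 * Worked example (plain C, runnable): the pause-advertising dance
 * above, assuming the elided halves of those lines are the matching
 * ADVERTISED_Asym_Pause bits (as the final XOR suggests). Wanting RX
 * pause advertises Pause+Asym; wanting TX as well flips Asym, which
 * yields the standard 802.3 mapping.
 */
#include <stdio.h>

#define FC_RX          (1u << 0)
#define FC_TX          (1u << 1)
#define ADV_PAUSE      (1u << 0)
#define ADV_ASYM_PAUSE (1u << 1)

static unsigned int fc_to_adv(unsigned int fc)
{
        unsigned int adv = 0;

        if (fc & FC_RX)
                adv |= ADV_PAUSE | ADV_ASYM_PAUSE;
        if (fc & FC_TX)
                adv ^= ADV_ASYM_PAUSE;
        return adv;
}

int main(void)
{
        for (unsigned int fc = 0; fc < 4; fc++) {
                unsigned int adv = fc_to_adv(fc);

                printf("rx=%d tx=%d -> pause=%d asym=%d\n",
                       !!(fc & FC_RX), !!(fc & FC_TX),
                       !!(adv & ADV_PAUSE), !!(adv & ADV_ASYM_PAUSE));
        }
        return 0;
}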
/* in efx_start_port() */
        netif_dbg(efx, ifup, efx->net_dev, "start port\n");
        BUG_ON(efx->port_enabled);
        mutex_lock(&efx->mac_lock);
        efx->port_enabled = true;
        mutex_unlock(&efx->mac_lock);

/* ... and the async self-test, wait for them to finish and prevent them ... */

/* in efx_stop_port() */
        netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
        mutex_lock(&efx->mac_lock);
        efx->port_enabled = false;
        mutex_unlock(&efx->mac_lock);
        netif_addr_lock_bh(efx->net_dev);
        netif_addr_unlock_bh(efx->net_dev);
        cancel_delayed_work_sync(&efx->monitor_work);
        cancel_work_sync(&efx->mac_work);
/* in efx_start_all() */
        BUG_ON(efx->state == STATE_DISABLED);
        if (efx->port_enabled || !netif_running(efx->net_dev) ||
            efx->reset_pending)
        /* Link state detection is normally event-driven; we have ... */
        mutex_lock(&efx->mac_lock);
        mutex_unlock(&efx->mac_lock);
        if (efx->type->start_stats) {
                efx->type->start_stats(efx);
                efx->type->pull_stats(efx);
                spin_lock_bh(&efx->stats_lock);
                efx->type->update_stats(efx, NULL, NULL);
                spin_unlock_bh(&efx->stats_lock);
/* in efx_stop_all() */
        if (!efx->port_enabled)
        if (efx->type->update_stats) {
                efx->type->pull_stats(efx);
                spin_lock_bh(&efx->stats_lock);
                efx->type->update_stats(efx, NULL, NULL);
                spin_unlock_bh(&efx->stats_lock);
                efx->type->stop_stats(efx);
        WARN_ON(netif_running(efx->net_dev) &&
                netif_device_present(efx->net_dev));
        netif_tx_disable(efx->net_dev);
/* Context: process, rcu_read_lock or RTNL held, non-blocking. */

/* in efx_net_stats() */
        spin_lock_bh(&efx->stats_lock);
        spin_unlock_bh(&efx->stats_lock);
/* ... through phy_op->set_settings(), and pushed asynchronously to the MAC ... */

/* in __efx_reconfigure_port() */
        WARN_ON(!mutex_is_locked(&efx->mac_lock));
        phy_mode = efx->phy_mode;
        efx->phy_mode |= PHY_MODE_TX_DISABLED;
        efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
        if (efx->type->reconfigure_port)
                rc = efx->type->reconfigure_port(efx);
        efx->phy_mode = phy_mode;

/* in efx_reconfigure_port() */
        mutex_lock(&efx->mac_lock);
        mutex_unlock(&efx->mac_lock);
/* in efx_wait_for_bist_end() */
        netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");

        /* Either way unset the BIST flag. If we found no reboot we probably ... */
        efx->mc_bist_for_other_fn = false;

/* ... Returns a non-zero value otherwise. */

/* in efx_try_recovery() */
        struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
/* in efx_reset_down() */
        efx->type->prepare_flr(efx);
        mutex_lock(&efx->mac_lock);
        down_write(&efx->filter_sem);
        mutex_lock(&efx->net_dev->ethtool->rss_lock);
        efx->type->fini(efx);

/* in efx_watchdog() */
        netif_err(efx, tx_err, efx->net_dev,
                  efx->port_enabled);
/* in efx_reset_up() */
        efx->type->finish_flr(efx);
        rc = efx->type->init(efx);
        netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
        if (rc && rc != -EPERM)
                netif_err(efx, drv, efx->net_dev,
        rc = efx->type->vswitching_restore(efx);
                netif_warn(efx, probe, efx->net_dev,
        if (efx->type->rx_restore_rss_contexts)
                efx->type->rx_restore_rss_contexts(efx);
        mutex_unlock(&efx->net_dev->ethtool->rss_lock);
        efx->type->filter_table_restore(efx);
        up_write(&efx->filter_sem);
        mutex_unlock(&efx->mac_lock);
        if (efx->type->udp_tnl_push_ports)
                efx->type->udp_tnl_push_ports(efx);
        efx->port_initialized = false;
        mutex_unlock(&efx->net_dev->ethtool->rss_lock);
        up_write(&efx->filter_sem);
        mutex_unlock(&efx->mac_lock);
/* in efx_reset() */
        netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
        rc = efx->type->reset(efx, method);
        netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");

        /* ... driver are now quiescent so that there is no race here. */
        efx->reset_pending &= -(1 << (method + 1));
        else /* it doesn't fit into the well-ordered scope hierarchy */
                __clear_bit(method, &efx->reset_pending);
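/*
 * Worked example (plain C, runnable): in two's complement,
 * -(1 << (method + 1)) is a mask with every bit at position
 * method + 1 and above set, so the "&=" above clears the reset just
 * performed and every lower-ordered one in a single step.
 */
#include <stdio.h>

int main(void)
{
        unsigned long pending = 0x2d;   /* methods 0, 2, 3 and 5 pending */
        int method = 3;                 /* the reset we just performed */

        pending &= -(1UL << (method + 1));      /* mask = ...11110000b */
        printf("still pending = %#lx\n", pending);      /* 0x20: only 5 */
        return 0;
}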
/* in efx_reset(), continued */
        /* Reinitialise bus-mastering, which may have been turned off before ... */
        pci_set_master(efx->pci_dev);
        dev_close(efx->net_dev);
        netif_err(efx, drv, efx->net_dev, "has been disabled\n");
        efx->state = STATE_DISABLED;
        netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
/* in efx_reset_work() */
        pending = READ_ONCE(efx->reset_pending);
        method = fls(pending) - 1;
        if (efx_net_active(efx->state))
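/*
 * Runnable illustration: fls() returns the 1-based index of the
 * highest set bit (0 for an empty word), so fls(pending) - 1 selects
 * the highest-numbered -- most drastic -- pending reset method. fls()
 * is a kernel helper, so a portable stand-in is used here.
 */
#include <stdio.h>

static int fls_stub(unsigned long x)
{
        int i = 0;

        while (x) {
                x >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        unsigned long pending = 0x2d;   /* methods 0, 2, 3 and 5 pending */

        printf("method = %d\n", fls_stub(pending) - 1); /* prints 5 */
        return 0;
}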
/* in efx_schedule_reset() */
        if (efx_recovering(efx->state)) {
                netif_dbg(efx, drv, efx->net_dev,
        netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
        method = efx->type->map_reset_reason(type);
        netif_dbg(efx, drv, efx->net_dev,
        set_bit(method, &efx->reset_pending);
        if (!efx_net_active(READ_ONCE(efx->state)))
        /* efx_process_channel() will no longer read events once a ... */

/* ... efx_nic (including all sub-structures). */
/* in efx_init_struct() */
        int rc = -ENOMEM;

        INIT_LIST_HEAD(&efx->node);
        INIT_LIST_HEAD(&efx->secondary_list);
        spin_lock_init(&efx->biu_lock);
        INIT_LIST_HEAD(&efx->mtd_list);
        INIT_WORK(&efx->reset_work, efx_reset_work);
        INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
        efx->pci_dev = pci_dev;
        efx->msg_enable = debug;
        efx->state = STATE_UNINIT;
        strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
        efx->rx_prefix_size = efx->type->rx_prefix_size;
        efx->rx_ip_align =
                NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
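/*
 * Runnable check of the rx_ip_align expression above: the adjustment
 * depends only on (rx_prefix_size + NET_IP_ALIGN) mod 4, since moving
 * the buffer start by whole 4-byte words changes nothing for IP
 * header alignment. Prefix sizes here are illustrative.
 */
#include <stdio.h>

int main(void)
{
        int net_ip_align = 2;   /* typical on alignment-sensitive CPUs */

        for (int prefix = 12; prefix <= 16; prefix += 2) {
                int rx_ip_align = net_ip_align ?
                        (prefix + net_ip_align) % 4 : 0;

                printf("rx_prefix_size %2d -> rx_ip_align %d\n",
                       prefix, rx_ip_align);
        }
        return 0;
}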
/* in efx_init_struct(), continued */
        efx->rx_packet_hash_offset =
                efx->type->rx_hash_offset - efx->type->rx_prefix_size;
        efx->rx_packet_ts_offset =
                efx->type->rx_ts_offset - efx->type->rx_prefix_size;
        efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
        efx->vport_id = EVB_PORT_ID_ASSIGNED;
        spin_lock_init(&efx->stats_lock);
        efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
        efx->num_mac_stats = MC_CMD_MAC_NSTATS;
        BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
        mutex_init(&efx->mac_lock);
        init_rwsem(&efx->filter_sem);
        mutex_init(&efx->rps_mutex);
        spin_lock_init(&efx->rps_hash_lock);
        efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
                                      sizeof(*efx->rps_hash_table), GFP_KERNEL);
        spin_lock_init(&efx->vf_reps_lock);
        INIT_LIST_HEAD(&efx->vf_reps);
        INIT_WORK(&efx->mac_work, efx_mac_work);
        init_waitqueue_head(&efx->flush_wq);
        efx->tx_queues_per_channel = 1;
        efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
        efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
        efx->mem_bar = UINT_MAX;
        snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
        efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
        if (!efx->workqueue) {
                rc = -ENOMEM;
/* in efx_fini_struct() */
        kfree(efx->rps_hash_table);
        kfree(efx->vpd_sn);
        if (efx->workqueue) {
                destroy_workqueue(efx->workqueue);
                efx->workqueue = NULL;

/* This configures the PCI device to enable I/O and DMA. */
/* in efx_init_io() */
        struct pci_dev *pci_dev = efx->pci_dev;

        efx->mem_bar = UINT_MAX;
        rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
        pci_err(efx->pci_dev, "could not find a suitable DMA mask\n");
        pci_dbg(efx->pci_dev, "using DMA mask %llx\n", (unsigned long long)dma_mask);

        efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
        if (!efx->membase_phys) {
                pci_err(efx->pci_dev,
                        "ERROR: No BAR%d mapping from the BIOS. Try pci=realloc on the kernel command line\n",
                rc = -ENODEV;
        rc = pci_request_region(pci_dev, bar, "sfc");
                pci_err(efx->pci_dev,
        rc = -EIO;
        efx->mem_bar = bar;
        efx->membase = ioremap(efx->membase_phys, mem_map_size);
        if (!efx->membase) {
                pci_err(efx->pci_dev,
                        (unsigned long long)efx->membase_phys, mem_map_size);
                rc = -ENOMEM;
        pci_dbg(efx->pci_dev,
                (unsigned long long)efx->membase_phys, mem_map_size,
                efx->membase);

        pci_release_region(efx->pci_dev, bar);
        efx->membase_phys = 0;
        pci_disable_device(efx->pci_dev);
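/*
 * Condensed sketch (hypothetical driver, standard PCI core calls
 * only) of the enable -> DMA mask -> request region -> ioremap ladder
 * above, with the usual goto unwind releasing resources in reverse
 * order on failure. The 46-bit mask is illustrative.
 */
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pci.h>

static int foo_init_io(struct pci_dev *pdev, int bar, size_t map_len)
{
        void __iomem *membase;
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
        if (rc)
                goto fail_disable;
        rc = pci_request_region(pdev, bar, "foo");
        if (rc)
                goto fail_disable;
        membase = ioremap(pci_resource_start(pdev, bar), map_len);
        if (!membase) {
                rc = -ENOMEM;
                goto fail_release;
        }
        pci_set_master(pdev);
        return 0;

fail_release:
        pci_release_region(pdev, bar);
fail_disable:
        pci_disable_device(pdev);
        return rc;
}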
/* in efx_fini_io() */
        pci_dbg(efx->pci_dev, "shutting down I/O\n");
        if (efx->membase) {
                iounmap(efx->membase);
                efx->membase = NULL;
        if (efx->membase_phys) {
                pci_release_region(efx->pci_dev, efx->mem_bar);
                efx->membase_phys = 0;
                efx->mem_bar = UINT_MAX;

        /* Don't disable bus-mastering if VFs are assigned */
        if (!pci_vfs_assigned(efx->pci_dev))
                pci_disable_device(efx->pci_dev);
/* in mcdi_logging_show() */
        return sysfs_emit(buf, "%d\n", mcdi->logging_enabled);

/* in mcdi_logging_store() */
        mcdi->logging_enabled = enable;

/* in efx_init_mcdi_logging() */
        int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
        netif_warn(efx, drv, efx->net_dev,

/* in efx_fini_mcdi_logging() */
        device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
/* ... At this point MMIO and DMA may be disabled. */

/* in efx_io_error_detected() */
        if (efx->state != STATE_DISABLED) {
                efx->state = efx_recover(efx->state);
                efx->reset_pending = 0;
        if (efx_net_active(efx->state)) {

/* in efx_io_slot_reset() */
        netif_err(efx, hw, efx->net_dev,
                  "Cannot re-enable PCI device after reset.\n");

/* in efx_io_resume() */
        if (efx->state == STATE_DISABLED)
        netif_err(efx, hw, efx->net_dev,
        efx->state = efx_recovered(efx->state);
        netif_dbg(efx, hw, efx->net_dev,
/* in efx_can_encap_offloads() */
        if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
        switch (skb->protocol) {
                ipproto = ip_hdr(skb)->protocol;
                ipproto = ipv6_hdr(skb)->nexthdr;
        if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
        if (ntohs(skb->inner_protocol) != ETH_P_TEB)
        if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
        return !(greh->flags & (GRE_CSUM | GRE_SEQ));
        dst_port = udp_hdr(skb)->dest;
        return efx->type->udp_tnl_has_port(efx, dst_port);
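/*
 * Runnable aside on the 8-byte check above: a GRE header is 4 bytes
 * plus 4 for each optional field (checksum, key, sequence). Requiring
 * exactly 8 bytes while also rejecting GRE_CSUM and GRE_SEQ admits
 * only key-present GRE, i.e. NVGRE-style framing. Flag values below
 * are illustrative, not the uapi bit positions.
 */
#include <stdio.h>

#define X_GRE_CSUM (1u << 0)
#define X_GRE_KEY  (1u << 1)
#define X_GRE_SEQ  (1u << 2)

static int gre_hdr_len(unsigned int flags)
{
        int len = 4;                    /* base header */

        if (flags & X_GRE_CSUM)
                len += 4;
        if (flags & X_GRE_KEY)
                len += 4;
        if (flags & X_GRE_SEQ)
                len += 4;
        return len;
}

int main(void)
{
        printf("key only: %d bytes\n", gre_hdr_len(X_GRE_KEY));  /* 8 */
        printf("key+csum: %d bytes\n",
               gre_hdr_len(X_GRE_KEY | X_GRE_CSUM));             /* 12 */
        return 0;
}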
/* in efx_features_check() */
        if (skb->encapsulation) {

/* in efx_get_phys_port_id() */
        if (efx->type->get_phys_port_id)
                return efx->type->get_phys_port_id(efx, ppid);
        return -EOPNOTSUPP;

/* in efx_get_phys_port_name() */
        if (snprintf(name, len, "p%u", efx->port_num) >= len)
                return -EINVAL;
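/*
 * Runnable note: snprintf() returns the length the full string would
 * have needed, so a result >= the buffer size means truncation; the
 * excerpt above turns that into -EINVAL instead of handing userspace
 * a clipped port name.
 */
#include <stdio.h>

int main(void)
{
        char name[4];
        unsigned int port = 1234;       /* "p1234" needs 5 chars + NUL */
        int n = snprintf(name, sizeof(name), "p%u", port);

        if (n >= (int)sizeof(name))
                printf("would truncate (needed %d bytes)\n", n + 1);
        else
                printf("name = %s\n", name);
        return 0;
}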
/* in efx_detach_reps() */
        netif_dbg(efx, drv, efx->net_dev, "Detaching VF representors\n");
        list_for_each_entry(efv, &efx->vf_reps, list) {
                rep_dev = efv->net_dev;

/* in efx_attach_reps() */
        netif_dbg(efx, drv, efx->net_dev, "Attaching VF representors\n");
        list_for_each_entry(efv, &efx->vf_reps, list) {
                rep_dev = efv->net_dev;