
2  * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
17 * - Redistributions of source code must retain the above
21 * - Redistributions in binary form must reproduce the above
42 #include <linux/dma-mapping.h>
74 * order MSI-X then MSI. This parameter determines which of these schemes the
77 * msi = 2: choose from among MSI-X and MSI
82 * the PCI-E SR-IOV standard).
91 MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
112 * list entries are 64-bit PCI DMA addresses. And since the state of
139 struct net_device *dev = adapter->port[pidx]; in t4vf_os_link_changed()
159 switch (pi->link_cfg.speed) { in t4vf_os_link_changed()
184 switch ((int)pi->link_cfg.fc) { in t4vf_os_link_changed()
202 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc); in t4vf_os_link_changed()
218 const struct net_device *dev = adapter->port[pidx]; in t4vf_os_portmod_changed()
221 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) in t4vf_os_portmod_changed()
222 dev_info(adapter->pdev_dev, "%s: port module unplugged\n", in t4vf_os_portmod_changed()
223 dev->name); in t4vf_os_portmod_changed()
224 else if (pi->mod_type < ARRAY_SIZE(mod_str)) in t4vf_os_portmod_changed()
225 dev_info(adapter->pdev_dev, "%s: %s port module inserted\n", in t4vf_os_portmod_changed()
226 dev->name, mod_str[pi->mod_type]); in t4vf_os_portmod_changed()
227 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) in t4vf_os_portmod_changed()
228 dev_info(adapter->pdev_dev, "%s: unsupported optical port " in t4vf_os_portmod_changed()
229 "module inserted\n", dev->name); in t4vf_os_portmod_changed()
230 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) in t4vf_os_portmod_changed()
231 dev_info(adapter->pdev_dev, "%s: unknown port module inserted, " in t4vf_os_portmod_changed()
232 "forcing TWINAX\n", dev->name); in t4vf_os_portmod_changed()
233 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) in t4vf_os_portmod_changed()
234 dev_info(adapter->pdev_dev, "%s: transceiver module error\n", in t4vf_os_portmod_changed()
235 dev->name); in t4vf_os_portmod_changed()
237 dev_info(adapter->pdev_dev, "%s: unknown module type %d " in t4vf_os_portmod_changed()
238 "inserted\n", dev->name, pi->mod_type); in t4vf_os_portmod_changed()
243 struct adapter *adapter = pi->adapter; in cxgb4vf_set_addr_hash()
249 list_for_each_entry(entry, &adapter->mac_hlist, list) { in cxgb4vf_set_addr_hash()
250 ucast |= is_unicast_ether_addr(entry->addr); in cxgb4vf_set_addr_hash()
251 vec |= (1ULL << hash_mac_addr(entry->addr)); in cxgb4vf_set_addr_hash()
253 return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false); in cxgb4vf_set_addr_hash()
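cxgb4vf_set_addr_hash() above walks the adapter's MAC list and folds every address into a single 64-bit vector, one bit per hash bucket, before handing it to the firmware. Below is a minimal user-space sketch of that folding; hash_mac_addr() is not part of this excerpt, so its body here is illustrative (an XOR fold of the 48-bit address down to a 6-bit bucket), and the sample addresses are made up.

#include <stdint.h>
#include <stdio.h>

/* Illustrative XOR-fold of a 48-bit MAC down to a 6-bit bucket (0..63). */
static unsigned int hash_mac_addr(const uint8_t *addr)
{
        uint32_t a = ((uint32_t)addr[0] << 16) | (addr[1] << 8) | addr[2];
        uint32_t b = ((uint32_t)addr[3] << 16) | (addr[4] << 8) | addr[5];

        a ^= b;
        a ^= (a >> 12);
        a ^= (a >> 6);
        return a & 0x3f;
}

int main(void)
{
        const uint8_t macs[2][6] = {
                { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 },
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },  /* multicast */
        };
        uint64_t vec = 0;

        /* One bit per bucket, exactly as the hash-vector loop above does. */
        for (int i = 0; i < 2; i++)
                vec |= 1ULL << hash_mac_addr(macs[i]);

        printf("hash vector = %#llx\n", (unsigned long long)vec);
        return 0;
}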
257 * cxgb4vf_change_mac - Update match filter for a MAC address.
260 * @tcam_idx: TCAM index of existing filter for old value of MAC address,
261 * or -1
269 * Addresses are programmed to hash region, if tcam runs out of entries.
276 struct adapter *adapter = pi->adapter; in cxgb4vf_change_mac()
280 /* We ran out of TCAM entries. try programming hash region. */ in cxgb4vf_change_mac()
281 if (ret == -ENOMEM) { in cxgb4vf_change_mac()
285 list_for_each_entry(entry, &adapter->mac_hlist, list) { in cxgb4vf_change_mac()
286 if (entry->iface_mac) { in cxgb4vf_change_mac()
287 ether_addr_copy(entry->addr, addr); in cxgb4vf_change_mac()
293 return -ENOMEM; in cxgb4vf_change_mac()
294 ether_addr_copy(new_entry->addr, addr); in cxgb4vf_change_mac()
295 new_entry->iface_mac = true; in cxgb4vf_change_mac()
296 list_add_tail(&new_entry->list, &adapter->mac_hlist); in cxgb4vf_change_mac()
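cxgb4vf_change_mac() tries the exact-match filter first and only falls back to the hash region when the firmware reports -ENOMEM, i.e. the TCAM is full. A hedged sketch of that "exact filter first, hash bucket on overflow" pattern follows; the fixed-size table, bucket choice and sample addresses are stand-ins, not the driver's real data structures.

#include <stdint.h>
#include <stdio.h>

#define TCAM_SLOTS 4

static uint64_t tcam[TCAM_SLOTS];       /* 0 means a free slot in this toy model */
static uint64_t hash_vec;               /* hash-region fallback, one bit per bucket */

/* Try an exact-match slot first; on overflow, fall back to a hash bucket. */
static int change_mac(uint64_t mac, int *tcam_idx)
{
        for (int i = 0; i < TCAM_SLOTS; i++) {
                if (!tcam[i]) {
                        tcam[i] = mac;
                        *tcam_idx = i;          /* exact-match slot, like *tcam_idx */
                        return 0;
                }
        }
        /* Table full: remember the address in the hash region instead. */
        hash_vec |= 1ULL << (mac & 0x3f);       /* crude bucket choice for the sketch */
        *tcam_idx = -1;                         /* no exact-match entry backs this MAC */
        return 0;
}

int main(void)
{
        for (int i = 1; i <= 6; i++) {
                int idx;

                change_mac(0x000743000000ULL + i, &idx);
                printf("mac %d -> tcam_idx %d\n", i, idx);
        }
        printf("hash_vec = %#llx\n", (unsigned long long)hash_vec);
        return 0;
}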
328 ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1, in link_start()
331 ret = cxgb4vf_change_mac(pi, pi->viid, in link_start()
332 &pi->xact_addr_filt, in link_start()
333 dev->dev_addr, true); in link_start()
341 ret = t4vf_enable_pi(pi->adapter, pi, true, true); in link_start()
347 * Name the MSI-X interrupts.
351 int namelen = sizeof(adapter->msix_info[0].desc) - 1; in name_msix_vecs()
357 snprintf(adapter->msix_info[MSIX_FW].desc, namelen, in name_msix_vecs()
358 "%s-FWeventq", adapter->name); in name_msix_vecs()
359 adapter->msix_info[MSIX_FW].desc[namelen] = 0; in name_msix_vecs()
365 struct net_device *dev = adapter->port[pidx]; in name_msix_vecs()
369 for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) { in name_msix_vecs()
370 snprintf(adapter->msix_info[msi].desc, namelen, in name_msix_vecs()
371 "%s-%d", dev->name, qs); in name_msix_vecs()
372 adapter->msix_info[msi].desc[namelen] = 0; in name_msix_vecs()
378 * Request all of our MSI-X resources.
382 struct sge *s = &adapter->sge; in request_msix_queue_irqs()
388 err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix, in request_msix_queue_irqs()
389 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq); in request_msix_queue_irqs()
398 err = request_irq(adapter->msix_info[msi].vec, in request_msix_queue_irqs()
400 adapter->msix_info[msi].desc, in request_msix_queue_irqs()
401 &s->ethrxq[rxq].rspq); in request_msix_queue_irqs()
409 while (--rxq >= 0) in request_msix_queue_irqs()
410 free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq); in request_msix_queue_irqs()
411 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); in request_msix_queue_irqs()
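request_msix_queue_irqs() grabs one vector per RX queue and, if any request fails, walks backwards releasing the vectors it already took before freeing the firmware event queue vector. A standalone sketch of that acquire-then-unwind pattern, with trivial stand-ins for request_irq()/free_irq():

#include <stdio.h>

#define NQUEUES 4

/* Stand-ins for request_irq()/free_irq(); the third request "fails". */
static int fake_request_irq(int vec) { return vec == 3 ? -1 : 0; }
static void fake_free_irq(int vec)   { printf("freed vec %d\n", vec); }

static int request_queue_irqs(void)
{
        int rxq, err = 0;

        for (rxq = 0; rxq < NQUEUES; rxq++) {
                err = fake_request_irq(rxq);
                if (err)
                        goto err_free_irqs;
        }
        return 0;

err_free_irqs:
        /* Unwind only what was successfully requested, in reverse order. */
        while (--rxq >= 0)
                fake_free_irq(rxq);
        return err;
}

int main(void)
{
        printf("request_queue_irqs() = %d\n", request_queue_irqs());
        return 0;
}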
416 * Free our MSI-X resources.
420 struct sge *s = &adapter->sge; in free_msix_queue_irqs()
423 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); in free_msix_queue_irqs()
426 free_irq(adapter->msix_info[msi++].vec, in free_msix_queue_irqs()
427 &s->ethrxq[rxq].rspq); in free_msix_queue_irqs()
435 napi_enable(&rspq->napi); in qenable()
438 * 0-increment the Going To Sleep register to start the timer and in qenable()
441 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, in qenable()
443 SEINTARM_V(rspq->intr_params) | in qenable()
444 INGRESSQID_V(rspq->cntxt_id)); in qenable()
453 struct sge *s = &adapter->sge; in enable_rx()
456 qenable(&s->ethrxq[rxq].rspq); in enable_rx()
457 qenable(&s->fw_evtq); in enable_rx()
460 * The interrupt queue doesn't use NAPI so we do the 0-increment of in enable_rx()
463 if (adapter->flags & CXGB4VF_USING_MSI) in enable_rx()
466 SEINTARM_V(s->intrq.intr_params) | in enable_rx()
467 INGRESSQID_V(s->intrq.cntxt_id)); in enable_rx()
476 struct sge *s = &adapter->sge; in quiesce_rx()
480 napi_disable(&s->ethrxq[rxq].rspq.napi); in quiesce_rx()
481 napi_disable(&s->fw_evtq.napi); in quiesce_rx()
493 struct adapter *adapter = rspq->adapter; in fwevtq_handler()
494 u8 opcode = ((const struct rss_header *)rsp)->opcode; in fwevtq_handler()
503 if (fw_msg->type == FW6_TYPE_CMD_RPL) in fwevtq_handler()
504 t4vf_handle_fw_rpl(adapter, fw_msg->data); in fwevtq_handler()
512 opcode = CPL_OPCODE_G(ntohl(p->opcode_qid)); in fwevtq_handler()
514 dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
533 unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid)); in fwevtq_handler()
534 struct sge *s = &adapter->sge; in fwevtq_handler()
548 dev_err(adapter->pdev_dev, in fwevtq_handler()
552 tq = s->egr_map[eq_idx]; in fwevtq_handler()
554 dev_err(adapter->pdev_dev, in fwevtq_handler()
559 if (unlikely(tq->abs_id != qid)) { in fwevtq_handler()
560 dev_err(adapter->pdev_dev, in fwevtq_handler()
562 qid, tq->abs_id); in fwevtq_handler()
570 txq->q.restarts++; in fwevtq_handler()
571 netif_tx_wake_queue(txq->txq); in fwevtq_handler()
576 dev_err(adapter->pdev_dev, in fwevtq_handler()
586 * we have MSI-X, otherwise just one queue set per port.
590 struct sge *s = &adapter->sge; in setup_sge_queues()
597 bitmap_zero(s->starving_fl, MAX_EGRQ); in setup_sge_queues()
607 if (adapter->flags & CXGB4VF_USING_MSI) { in setup_sge_queues()
608 err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false, in setup_sge_queues()
609 adapter->port[0], 0, NULL, NULL); in setup_sge_queues()
617 err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0], in setup_sge_queues()
630 struct net_device *dev = adapter->port[pidx]; in setup_sge_queues()
632 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
633 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; in setup_sge_queues()
636 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { in setup_sge_queues()
637 err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, in setup_sge_queues()
639 &rxq->fl, t4vf_ethrx_handler); in setup_sge_queues()
645 s->fw_evtq.cntxt_id); in setup_sge_queues()
649 rxq->rspq.idx = qs; in setup_sge_queues()
650 memset(&rxq->stats, 0, sizeof(rxq->stats)); in setup_sge_queues()
657 s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id; in setup_sge_queues()
658 s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id; in setup_sge_queues()
659 IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq; in setup_sge_queues()
661 struct net_device *dev = adapter->port[pidx]; in setup_sge_queues()
663 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
664 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; in setup_sge_queues()
667 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { in setup_sge_queues()
668 IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq; in setup_sge_queues()
669 EQ_MAP(s, txq->q.abs_id) = &txq->q; in setup_sge_queues()
675 * which are computed as Absolute - Base Queue ID, we in setup_sge_queues()
680 rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base; in setup_sge_queues()
681 EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl; in setup_sge_queues()
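setup_sge_queues() learns egr_base/ingr_base as the difference between a queue's adapter-wide absolute ID and its function-relative context ID, then indexes small lookup tables by absolute ID so fwevtq_handler() can map the ID carried in a firmware message back to a queue object. A minimal sketch of that translation, with made-up table size and IDs:

#include <stddef.h>
#include <stdio.h>

#define MAX_EGRQ 16

struct txq { unsigned int cntxt_id, abs_id; };

static struct txq *egr_map[MAX_EGRQ];   /* indexed by abs_id - egr_base */
static unsigned int egr_base;

static void register_txq(struct txq *q)
{
        egr_map[q->abs_id - egr_base] = q;
}

static struct txq *lookup_txq(unsigned int abs_id)
{
        unsigned int idx = abs_id - egr_base;

        return idx < MAX_EGRQ ? egr_map[idx] : NULL;
}

int main(void)
{
        struct txq q0 = { .cntxt_id = 0, .abs_id = 72 };
        struct txq q1 = { .cntxt_id = 1, .abs_id = 73 };

        /* The base is learned from the first queue, as the driver does. */
        egr_base = q0.abs_id - q0.cntxt_id;
        register_txq(&q0);
        register_txq(&q1);

        printf("abs 73 -> cntxt %u\n", lookup_txq(73)->cntxt_id);
        return 0;
}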
705 struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; in setup_rss()
709 for (qs = 0; qs < pi->nqsets; qs++) in setup_rss()
712 err = t4vf_config_rss_range(adapter, pi->viid, in setup_rss()
713 0, pi->rss_size, rss, pi->nqsets); in setup_rss()
718 * Perform Global RSS Mode-specific initialization. in setup_rss()
720 switch (adapter->params.rss.mode) { in setup_rss()
729 if (!adapter->params.rss.u.basicvirtual.tnlalllookup) { in setup_rss()
732 pi->viid, in setup_rss()
739 pi->viid, in setup_rss()
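setup_rss() hands the port's RX queue IDs to t4vf_config_rss_range(), which replicates them across the Virtual Interface's indirection table so flows are spread round-robin over the port's queue sets. The sketch below models the resulting table fill with a hypothetical table size and queue IDs:

#include <stdio.h>

#define RSS_SIZE 16     /* hypothetical per-VI indirection table size */

int main(void)
{
        unsigned int nqsets = 4;                        /* queue sets owned by this port */
        unsigned int qset_ids[4] = { 8, 9, 10, 11 };    /* absolute RX queue IDs */
        unsigned int rss[RSS_SIZE];

        /* Spread the table slots over the queue sets round-robin. */
        for (unsigned int i = 0; i < RSS_SIZE; i++)
                rss[i] = qset_ids[i % nqsets];

        for (unsigned int i = 0; i < RSS_SIZE; i++)
                printf("slot %2u -> qid %u\n", i, rss[i]);
        return 0;
}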
767 if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) { in adapter_up()
777 if (adapter->flags & CXGB4VF_USING_MSIX) in adapter_up()
780 adapter->flags |= CXGB4VF_FULL_INIT_DONE; in adapter_up()
784 * Acquire our interrupt resources. We only support MSI-X and MSI. in adapter_up()
786 BUG_ON((adapter->flags & in adapter_up()
788 if (adapter->flags & CXGB4VF_USING_MSIX) in adapter_up()
791 err = request_irq(adapter->pdev->irq, in adapter_up()
793 adapter->name, adapter); in adapter_up()
795 dev_err(adapter->pdev_dev, "request_irq failed, err %d\n", in adapter_up()
819 if (adapter->flags & CXGB4VF_USING_MSIX) in adapter_down()
822 free_irq(adapter->pdev->irq, adapter); in adapter_down()
837 struct adapter *adapter = pi->adapter; in cxgb4vf_open()
843 if (!(adapter->flags & CXGB4VF_FW_OK)) in cxgb4vf_open()
844 return -ENXIO; in cxgb4vf_open()
850 if (adapter->open_device_map == 0) { in cxgb4vf_open()
870 pi->vlan_id = t4vf_get_vf_vlan_acl(adapter); in cxgb4vf_open()
873 set_bit(pi->port_id, &adapter->open_device_map); in cxgb4vf_open()
877 if (adapter->open_device_map == 0) in cxgb4vf_open()
889 struct adapter *adapter = pi->adapter; in cxgb4vf_stop()
895 clear_bit(pi->port_id, &adapter->open_device_map); in cxgb4vf_stop()
896 if (adapter->open_device_map == 0) in cxgb4vf_stop()
908 struct adapter *adapter = pi->adapter; in cxgb4vf_get_stats()
909 struct net_device_stats *ns = &dev->stats; in cxgb4vf_get_stats()
912 spin_lock(&adapter->stats_lock); in cxgb4vf_get_stats()
913 err = t4vf_get_port_stats(adapter, pi->pidx, &stats); in cxgb4vf_get_stats()
914 spin_unlock(&adapter->stats_lock); in cxgb4vf_get_stats()
920 ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes + in cxgb4vf_get_stats()
922 ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames + in cxgb4vf_get_stats()
924 ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes + in cxgb4vf_get_stats()
926 ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames + in cxgb4vf_get_stats()
928 ns->multicast = stats.rx_mcast_frames; in cxgb4vf_get_stats()
929 ns->tx_errors = stats.tx_drop_frames; in cxgb4vf_get_stats()
930 ns->rx_errors = stats.rx_err_frames; in cxgb4vf_get_stats()
938 struct adapter *adapter = pi->adapter; in cxgb4vf_mac_sync()
947 ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist, in cxgb4vf_mac_sync()
958 return -ENOMEM; in cxgb4vf_mac_sync()
959 ether_addr_copy(new_entry->addr, mac_addr); in cxgb4vf_mac_sync()
960 list_add_tail(&new_entry->list, &adapter->mac_hlist); in cxgb4vf_mac_sync()
970 struct adapter *adapter = pi->adapter; in cxgb4vf_mac_unsync()
978 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) { in cxgb4vf_mac_unsync()
979 if (ether_addr_equal(entry->addr, mac_addr)) { in cxgb4vf_mac_unsync()
980 list_del(&entry->list); in cxgb4vf_mac_unsync()
986 ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false); in cxgb4vf_mac_unsync()
987 return ret < 0 ? -EINVAL : 0; in cxgb4vf_mac_unsync()
992 * If @mtu is -1 it is left unchanged.
1000 return t4vf_set_rxmode(pi->adapter, pi->viid, -1, in set_rxmode()
1001 (dev->flags & IFF_PROMISC) != 0, in set_rxmode()
1002 (dev->flags & IFF_ALLMULTI) != 0, in set_rxmode()
1003 1, -1, sleep_ok); in set_rxmode()
1012 set_rxmode(dev, -1, false); in cxgb4vf_set_rxmode()
1023 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { in closest_timer()
1024 int delta = us - s->timer_val[i]; in closest_timer()
1026 delta = -delta; in closest_timer()
1039 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { in closest_thres()
1040 delta = thres - s->counter_val[i]; in closest_thres()
1042 delta = -delta; in closest_thres()
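closest_timer() and closest_thres() both pick the table entry with the smallest absolute distance from the requested value, since the hardware only offers a handful of discrete holdoff timers and packet-count thresholds. A standalone sketch of that nearest-match search (the timer table below is hypothetical):

#include <stdio.h>

/* Return the index of the table entry closest to the requested value. */
static int closest_idx(const unsigned int *tbl, int n, int want)
{
        int best = 0, min_delta = -1;

        for (int i = 0; i < n; i++) {
                int delta = want - (int)tbl[i];

                if (delta < 0)
                        delta = -delta;
                if (min_delta < 0 || delta < min_delta) {
                        min_delta = delta;
                        best = i;
                }
        }
        return best;
}

int main(void)
{
        /* Hypothetical holdoff timer table in microseconds. */
        const unsigned int timer_val[] = { 1, 5, 10, 50, 100, 200 };

        printf("closest to 7us   -> index %d (5us)\n",
               closest_idx(timer_val, 6, 7));
        printf("closest to 120us -> index %d (100us)\n",
               closest_idx(timer_val, 6, 120));
        return 0;
}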
1052 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1057 unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params); in qtimer_val()
1060 ? adapter->sge.timer_val[timer_idx] in qtimer_val()
1065 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1068 * @us: the hold-off time in us, or 0 to disable timer
1069 * @cnt: the hold-off packet count, or 0 to disable counter
1071 * Sets an RX response queue's interrupt hold-off time and packet count.
1097 pktcnt_idx = closest_thres(&adapter->sge, cnt); in set_rxq_intr_params()
1098 if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) { in set_rxq_intr_params()
1102 FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id); in set_rxq_intr_params()
1107 rspq->pktcnt_idx = pktcnt_idx; in set_rxq_intr_params()
1116 : closest_timer(&adapter->sge, us)); in set_rxq_intr_params()
1122 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) | in set_rxq_intr_params()
1129 * - bits 0..9: chip version
1130 * - bits 10..15: chip revision
1137 return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10); in mk_adap_vers()
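mk_adap_vers() packs the chip version into bits 0..9 and the revision into bits 10..15 of the value reported via ethtool's register dump. A small sketch of packing and unpacking that layout; the 0x3f revision simply mirrors the all-ones placeholder in the excerpt, not a real silicon revision:

#include <stdio.h>

#define VERS_SHIFT 0
#define VERS_MASK  0x3ff        /* bits 0..9: chip version */
#define REV_SHIFT  10
#define REV_MASK   0x3f         /* bits 10..15: chip revision */

static unsigned int mk_vers(unsigned int version, unsigned int rev)
{
        return ((version & VERS_MASK) << VERS_SHIFT) |
               ((rev & REV_MASK) << REV_SHIFT);
}

int main(void)
{
        unsigned int v = mk_vers(4, 0x3f);      /* e.g. version 4, placeholder revision */

        printf("packed   = %#x\n", v);
        printf("version  = %u\n", (v >> VERS_SHIFT) & VERS_MASK);
        printf("revision = %#x\n", (v >> REV_SHIFT) & REV_MASK);
        return 0;
}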
1155 ret = -EOPNOTSUPP; in cxgb4vf_do_ioctl()
1169 ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu, in cxgb4vf_change_mtu()
1170 -1, -1, -1, -1, true); in cxgb4vf_change_mtu()
1172 WRITE_ONCE(dev->mtu, new_mtu); in cxgb4vf_change_mtu()
1195 netdev_features_t changed = dev->features ^ features; in cxgb4vf_set_features()
1198 t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, in cxgb4vf_set_features()
1213 if (!is_valid_ether_addr(addr->sa_data)) in cxgb4vf_set_mac_addr()
1214 return -EADDRNOTAVAIL; in cxgb4vf_set_mac_addr()
1216 ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt, in cxgb4vf_set_mac_addr()
1217 addr->sa_data, true); in cxgb4vf_set_mac_addr()
1221 eth_hw_addr_set(dev, addr->sa_data); in cxgb4vf_set_mac_addr()
1233 struct adapter *adapter = pi->adapter; in cxgb4vf_poll_controller()
1235 if (adapter->flags & CXGB4VF_USING_MSIX) { in cxgb4vf_poll_controller()
1239 rxq = &adapter->sge.ethrxq[pi->first_qset]; in cxgb4vf_poll_controller()
1240 for (nqsets = pi->nqsets; nqsets; nqsets--) { in cxgb4vf_poll_controller()
1241 t4vf_sge_intr_msix(0, &rxq->rspq); in cxgb4vf_poll_controller()
1258 * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
1302 * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
1440 struct ethtool_link_settings *base = &link_ksettings->base; in cxgb4vf_get_link_ksettings()
1453 base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); in cxgb4vf_get_link_ksettings()
1455 if (pi->mdio_addr >= 0) { in cxgb4vf_get_link_ksettings()
1456 base->phy_address = pi->mdio_addr; in cxgb4vf_get_link_ksettings()
1457 base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII in cxgb4vf_get_link_ksettings()
1461 base->phy_address = 255; in cxgb4vf_get_link_ksettings()
1462 base->mdio_support = 0; in cxgb4vf_get_link_ksettings()
1465 fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps, in cxgb4vf_get_link_ksettings()
1466 link_ksettings->link_modes.supported); in cxgb4vf_get_link_ksettings()
1467 fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps, in cxgb4vf_get_link_ksettings()
1468 link_ksettings->link_modes.advertising); in cxgb4vf_get_link_ksettings()
1469 fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps, in cxgb4vf_get_link_ksettings()
1470 link_ksettings->link_modes.lp_advertising); in cxgb4vf_get_link_ksettings()
1473 base->speed = pi->link_cfg.speed; in cxgb4vf_get_link_ksettings()
1474 base->duplex = DUPLEX_FULL; in cxgb4vf_get_link_ksettings()
1476 base->speed = SPEED_UNKNOWN; in cxgb4vf_get_link_ksettings()
1477 base->duplex = DUPLEX_UNKNOWN; in cxgb4vf_get_link_ksettings()
1480 base->autoneg = pi->link_cfg.autoneg; in cxgb4vf_get_link_ksettings()
1481 if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG) in cxgb4vf_get_link_ksettings()
1484 if (pi->link_cfg.autoneg) in cxgb4vf_get_link_ksettings()
1531 const struct link_config *lc = &pi->link_cfg; in cxgb4vf_get_fecparam()
1537 fec->fec = fwcap_to_eth_fec(lc->pcaps); in cxgb4vf_get_fecparam()
1538 if (fec->fec != ETHTOOL_FEC_OFF) in cxgb4vf_get_fecparam()
1539 fec->fec |= ETHTOOL_FEC_AUTO; in cxgb4vf_get_fecparam()
1544 fec->active_fec = cc_to_eth_fec(lc->fec); in cxgb4vf_get_fecparam()
1556 strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); in cxgb4vf_get_drvinfo()
1557 strscpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)), in cxgb4vf_get_drvinfo()
1558 sizeof(drvinfo->bus_info)); in cxgb4vf_get_drvinfo()
1559 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), in cxgb4vf_get_drvinfo()
1561 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev), in cxgb4vf_get_drvinfo()
1562 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev), in cxgb4vf_get_drvinfo()
1563 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev), in cxgb4vf_get_drvinfo()
1564 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev), in cxgb4vf_get_drvinfo()
1565 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev), in cxgb4vf_get_drvinfo()
1566 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev), in cxgb4vf_get_drvinfo()
1567 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev), in cxgb4vf_get_drvinfo()
1568 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev)); in cxgb4vf_get_drvinfo()
1576 return netdev2adap(dev)->msg_enable; in cxgb4vf_get_msglevel()
1584 netdev2adap(dev)->msg_enable = msglevel; in cxgb4vf_set_msglevel()
1590 * multi-queue devices, we just return the current values associated with the
1599 const struct sge *s = &pi->adapter->sge; in cxgb4vf_get_ringparam()
1601 rp->rx_max_pending = MAX_RX_BUFFERS; in cxgb4vf_get_ringparam()
1602 rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES; in cxgb4vf_get_ringparam()
1603 rp->rx_jumbo_max_pending = 0; in cxgb4vf_get_ringparam()
1604 rp->tx_max_pending = MAX_TXQ_ENTRIES; in cxgb4vf_get_ringparam()
1606 rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID; in cxgb4vf_get_ringparam()
1607 rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; in cxgb4vf_get_ringparam()
1608 rp->rx_jumbo_pending = 0; in cxgb4vf_get_ringparam()
1609 rp->tx_pending = s->ethtxq[pi->first_qset].q.size; in cxgb4vf_get_ringparam()
1616 * device -- after vetting them of course!
1624 struct adapter *adapter = pi->adapter; in cxgb4vf_set_ringparam()
1625 struct sge *s = &adapter->sge; in cxgb4vf_set_ringparam()
1628 if (rp->rx_pending > MAX_RX_BUFFERS || in cxgb4vf_set_ringparam()
1629 rp->rx_jumbo_pending || in cxgb4vf_set_ringparam()
1630 rp->tx_pending > MAX_TXQ_ENTRIES || in cxgb4vf_set_ringparam()
1631 rp->rx_mini_pending > MAX_RSPQ_ENTRIES || in cxgb4vf_set_ringparam()
1632 rp->rx_mini_pending < MIN_RSPQ_ENTRIES || in cxgb4vf_set_ringparam()
1633 rp->rx_pending < MIN_FL_ENTRIES || in cxgb4vf_set_ringparam()
1634 rp->tx_pending < MIN_TXQ_ENTRIES) in cxgb4vf_set_ringparam()
1635 return -EINVAL; in cxgb4vf_set_ringparam()
1637 if (adapter->flags & CXGB4VF_FULL_INIT_DONE) in cxgb4vf_set_ringparam()
1638 return -EBUSY; in cxgb4vf_set_ringparam()
1640 for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) { in cxgb4vf_set_ringparam()
1641 s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID; in cxgb4vf_set_ringparam()
1642 s->ethrxq[qs].rspq.size = rp->rx_mini_pending; in cxgb4vf_set_ringparam()
1643 s->ethtxq[qs].q.size = rp->tx_pending; in cxgb4vf_set_ringparam()
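cxgb4vf_set_ringparam() rejects out-of-range requests, refuses to resize once the adapter is fully initialized, and stores the free-list size as the requested value plus MIN_FL_RESID, the entries reserved to the hardware; cxgb4vf_get_ringparam() subtracts the same constant so users see what they asked for. A hedged sketch of that validate-and-adjust step, with made-up limits:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical limits standing in for the driver's MIN_/MAX_ constants. */
#define MIN_FL_ENTRIES  16
#define MAX_RX_BUFFERS  4096
#define MIN_FL_RESID    8       /* entries reserved to the hardware */

static int set_fl_size(unsigned int requested, bool full_init_done,
                       unsigned int *fl_size)
{
        if (requested < MIN_FL_ENTRIES || requested > MAX_RX_BUFFERS)
                return -EINVAL;
        if (full_init_done)
                return -EBUSY;          /* queues already allocated */

        *fl_size = requested + MIN_FL_RESID;    /* stored size includes the residue */
        return 0;
}

int main(void)
{
        unsigned int fl_size = 0;

        printf("set 1024 -> %d, stored %u, reported back %u\n",
               set_fl_size(1024, false, &fl_size), fl_size, fl_size - MIN_FL_RESID);
        printf("set 8    -> %d\n", set_fl_size(8, false, &fl_size));
        return 0;
}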
1659 const struct adapter *adapter = pi->adapter; in cxgb4vf_get_coalesce()
1660 const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq; in cxgb4vf_get_coalesce()
1662 coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq); in cxgb4vf_get_coalesce()
1663 coalesce->rx_max_coalesced_frames = in cxgb4vf_get_coalesce()
1664 ((rspq->intr_params & QINTR_CNT_EN_F) in cxgb4vf_get_coalesce()
1665 ? adapter->sge.counter_val[rspq->pktcnt_idx] in cxgb4vf_get_coalesce()
1681 struct adapter *adapter = pi->adapter; in cxgb4vf_set_coalesce()
1684 &adapter->sge.ethrxq[pi->first_qset].rspq, in cxgb4vf_set_coalesce()
1685 coalesce->rx_coalesce_usecs, in cxgb4vf_set_coalesce()
1686 coalesce->rx_max_coalesced_frames); in cxgb4vf_set_coalesce()
1697 pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; in cxgb4vf_get_pauseparam()
1698 pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0; in cxgb4vf_get_pauseparam()
1699 pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0; in cxgb4vf_get_pauseparam()
1716 return -EINVAL; in cxgb4vf_phys_id()
1718 return t4vf_identify_port(pi->adapter, pi->viid, val); in cxgb4vf_phys_id()
1735 * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
1761 * These are accumulated per-queue statistics and must match the
1782 return -EOPNOTSUPP; in cxgb4vf_get_sset_count()
1809 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset]; in collect_sge_port_stats()
1810 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; in collect_sge_port_stats()
1814 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { in collect_sge_port_stats()
1815 stats->tso += txq->tso; in collect_sge_port_stats()
1816 stats->tx_csum += txq->tx_cso; in collect_sge_port_stats()
1817 stats->rx_csum += rxq->stats.rx_cso; in collect_sge_port_stats()
1818 stats->vlan_ex += rxq->stats.vlan_ex; in collect_sge_port_stats()
1819 stats->vlan_ins += txq->vlan_ins; in collect_sge_port_stats()
1820 stats->lro_pkts += rxq->stats.lro_pkts; in collect_sge_port_stats()
1821 stats->lro_merged += rxq->stats.lro_merged; in collect_sge_port_stats()
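collect_sge_port_stats() simply walks the port's queue sets and sums each software counter into one per-port record. A trivial sketch of that accumulation with a cut-down set of counters:

#include <stdio.h>

struct qstats { unsigned long tso, tx_cso, rx_cso; };
struct pstats { unsigned long tso, tx_csum, rx_csum; };

static void collect(struct pstats *out, const struct qstats *q, int nqsets)
{
        for (int i = 0; i < nqsets; i++) {
                out->tso     += q[i].tso;
                out->tx_csum += q[i].tx_cso;
                out->rx_csum += q[i].rx_cso;
        }
}

int main(void)
{
        struct qstats q[2] = { { 10, 100, 200 }, { 5, 50, 75 } };
        struct pstats p = { 0, 0, 0 };

        collect(&p, q, 2);
        printf("tso=%lu tx_csum=%lu rx_csum=%lu\n", p.tso, p.tx_csum, p.rx_csum);
        return 0;
}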
1833 struct adapter *adapter = pi->adapter; in cxgb4vf_get_ethtool_stats()
1834 int err = t4vf_get_port_stats(adapter, pi->pidx, in cxgb4vf_get_ethtool_stats()
1857 u32 *bp = regbuf + start - T4VF_REGMAP_START; in reg_block_dump()
1881 regs->version = mk_adap_vers(adapter); in cxgb4vf_get_regs()
1899 T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) in cxgb4vf_get_regs()
1916 wol->supported = 0; in cxgb4vf_get_wol()
1917 wol->wolopts = 0; in cxgb4vf_get_wol()
1918 memset(&wol->sopass, 0, sizeof(wol->sopass)); in cxgb4vf_get_wol()
1969 struct adapter *adapter = seq->private; in mboxlog_show()
1970 struct mbox_cmd_log *log = adapter->mbox_log; in mboxlog_show()
1982 entry_idx = log->cursor + ((uintptr_t)v - 2); in mboxlog_show()
1983 if (entry_idx >= log->size) in mboxlog_show()
1984 entry_idx -= log->size; in mboxlog_show()
1988 if (entry->timestamp == 0) in mboxlog_show()
1992 entry->seqno, entry->timestamp, in mboxlog_show()
1993 entry->access, entry->execute); in mboxlog_show()
1995 u64 flit = entry->cmd[i]; in mboxlog_show()
2007 struct adapter *adapter = seq->private; in mboxlog_get_idx()
2008 struct mbox_cmd_log *log = adapter->mbox_log; in mboxlog_get_idx()
2010 return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL); in mboxlog_get_idx()
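The mboxlog seq_file treats the command log as a ring: display entry i is (cursor + i) wrapped modulo the log size, and slots with a zero timestamp are skipped as never written. A minimal sketch of that wrapped indexing with a hypothetical 8-entry log:

#include <stdio.h>

#define LOG_SIZE 8

struct mbox_entry { unsigned long long timestamp; int seqno; };

static struct mbox_entry log_ents[LOG_SIZE];
static unsigned int cursor;     /* index of the oldest entry */

static void show_log(void)
{
        for (unsigned int i = 0; i < LOG_SIZE; i++) {
                unsigned int idx = cursor + i;

                if (idx >= LOG_SIZE)
                        idx -= LOG_SIZE;        /* wrap, as the driver does */
                if (log_ents[idx].timestamp == 0)
                        continue;               /* slot never written */
                printf("seq %d @ %llu\n", log_ents[idx].seqno,
                       log_ents[idx].timestamp);
        }
}

int main(void)
{
        /* Pretend 10 commands were logged into 8 slots. */
        for (int s = 0; s < 10; s++) {
                log_ents[s % LOG_SIZE] = (struct mbox_entry){ 1000 + s, s };
                if (s >= LOG_SIZE)
                        cursor = (s + 1) % LOG_SIZE;
        }
        show_log();
        return 0;
}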
2043 struct adapter *adapter = seq->private; in sge_qinfo_show()
2044 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); in sge_qinfo_show()
2045 int qs, r = (uintptr_t)v - 1; in sge_qinfo_show()
2052 seq_printf(seq, "%-12s", s); \ in sge_qinfo_show()
2062 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; in sge_qinfo_show()
2063 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; in sge_qinfo_show()
2064 int n = min(QPL, adapter->sge.ethqsets - QPL * r); in sge_qinfo_show()
2069 ? rxq[qs].rspq.netdev->name in sge_qinfo_show()
2074 netdev_priv(rxq[qs].rspq.netdev))->port_id in sge_qinfo_show()
2075 : -1)); in sge_qinfo_show()
2086 adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]); in sge_qinfo_show()
2090 R("FL size:", fl.size - MIN_FL_RESID); in sge_qinfo_show()
2097 r -= eth_entries; in sge_qinfo_show()
2099 const struct sge_rspq *evtq = &adapter->sge.fw_evtq; in sge_qinfo_show()
2101 seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue"); in sge_qinfo_show()
2102 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id); in sge_qinfo_show()
2103 seq_printf(seq, "%-12s %16u\n", "Intr delay:", in sge_qinfo_show()
2105 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", in sge_qinfo_show()
2106 adapter->sge.counter_val[evtq->pktcnt_idx]); in sge_qinfo_show()
2107 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx); in sge_qinfo_show()
2108 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen); in sge_qinfo_show()
2110 const struct sge_rspq *intrq = &adapter->sge.intrq; in sge_qinfo_show()
2112 seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue"); in sge_qinfo_show()
2113 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id); in sge_qinfo_show()
2114 seq_printf(seq, "%-12s %16u\n", "Intr delay:", in sge_qinfo_show()
2116 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", in sge_qinfo_show()
2117 adapter->sge.counter_val[intrq->pktcnt_idx]); in sge_qinfo_show()
2118 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx); in sge_qinfo_show()
2119 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen); in sge_qinfo_show()
2131 * Return the number of "entries" in our "file". We group the multi-Queue
2140 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + in sge_queue_entries()
2141 ((adapter->flags & CXGB4VF_USING_MSI) != 0); in sge_queue_entries()
2146 int entries = sge_queue_entries(seq->private); in sge_queue_start()
2157 int entries = sge_queue_entries(seq->private); in sge_queue_next()
2179 struct adapter *adapter = seq->private; in sge_qstats_show()
2180 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); in sge_qstats_show()
2181 int qs, r = (uintptr_t)v - 1; in sge_qstats_show()
2188 seq_printf(seq, "%-16s", s); \ in sge_qstats_show()
2202 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; in sge_qstats_show()
2203 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; in sge_qstats_show()
2204 int n = min(QPL, adapter->sge.ethqsets - QPL * r); in sge_qstats_show()
2209 ? rxq[qs].rspq.netdev->name in sge_qstats_show()
2230 r -= eth_entries; in sge_qstats_show()
2232 const struct sge_rspq *evtq = &adapter->sge.fw_evtq; in sge_qstats_show()
2234 seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue"); in sge_qstats_show()
2235 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", in sge_qstats_show()
2236 evtq->unhandled_irqs); in sge_qstats_show()
2237 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx); in sge_qstats_show()
2238 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen); in sge_qstats_show()
2240 const struct sge_rspq *intrq = &adapter->sge.intrq; in sge_qstats_show()
2242 seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue"); in sge_qstats_show()
2243 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", in sge_qstats_show()
2244 intrq->unhandled_irqs); in sge_qstats_show()
2245 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx); in sge_qstats_show()
2246 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen); in sge_qstats_show()
2260 * Return the number of "entries" in our "file". We group the multi-Queue
2269 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + in sge_qstats_entries()
2270 ((adapter->flags & CXGB4VF_USING_MSI) != 0); in sge_qstats_entries()
2275 int entries = sge_qstats_entries(seq->private); in sge_qstats_start()
2286 int entries = sge_qstats_entries(seq->private); in sge_qstats_next()
2302 * Show PCI-E SR-IOV Virtual Function Resource Limits.
2306 struct adapter *adapter = seq->private; in resources_show()
2307 struct vf_resources *vfres = &adapter->params.vfres; in resources_show()
2310 seq_printf(seq, "%-60s " fmt "\n", \ in resources_show()
2311 desc " (" #var "):", vfres->var) in resources_show()
2338 struct adapter *adapter = seq->private; in interfaces_show()
2339 int pidx = (uintptr_t)v - 2; in interfaces_show()
2340 struct net_device *dev = adapter->port[pidx]; in interfaces_show()
2344 dev->name, pi->port_id, pi->viid); in interfaces_show()
2351 return pos <= adapter->params.nports in interfaces_get_idx()
2359 ? interfaces_get_idx(seq->private, *pos) in interfaces_start()
2366 return interfaces_get_idx(seq->private, *pos); in interfaces_next()
2405 * Set up out /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2412 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); in setup_debugfs()
2420 adapter->debugfs_root, adapter, in setup_debugfs()
2427 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2432 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); in cleanup_debugfs()
2445 * we fall back from MSI-X to MSI Interrupt Mode.
2449 struct vf_resources *vfres = &adapter->params.vfres; in size_nports_qsets()
2455 adapter->params.nports = vfres->nvi; in size_nports_qsets()
2456 if (adapter->params.nports > MAX_NPORTS) { in size_nports_qsets()
2457 dev_warn(adapter->pdev_dev, "only using %d of %d maximum" in size_nports_qsets()
2459 adapter->params.nports); in size_nports_qsets()
2460 adapter->params.nports = MAX_NPORTS; in size_nports_qsets()
2468 pmask_nports = hweight32(adapter->params.vfres.pmask); in size_nports_qsets()
2469 if (pmask_nports < adapter->params.nports) { in size_nports_qsets()
2470 dev_warn(adapter->pdev_dev, "only using %d of %d provisioned" in size_nports_qsets()
2472 " mask %#x\n", pmask_nports, adapter->params.nports, in size_nports_qsets()
2473 adapter->params.vfres.pmask); in size_nports_qsets()
2474 adapter->params.nports = pmask_nports; in size_nports_qsets()
2481 * The rest of the FL/Intr-capable ingress queues will be matched up in size_nports_qsets()
2482 * one-for-one with Ethernet/Control egress queues in order to form in size_nports_qsets()
2485 * Contexts -- one for the Ingress Queue Free List and one for the TX in size_nports_qsets()
2488 * Note that even if we're currently configured to use MSI-X in size_nports_qsets()
2490 * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that in size_nports_qsets()
2493 ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI); in size_nports_qsets()
2494 if (vfres->nethctrl != ethqsets) in size_nports_qsets()
2495 ethqsets = min(vfres->nethctrl, ethqsets); in size_nports_qsets()
2496 if (vfres->neq < ethqsets*2) in size_nports_qsets()
2497 ethqsets = vfres->neq/2; in size_nports_qsets()
2500 adapter->sge.max_ethqsets = ethqsets; in size_nports_qsets()
2502 if (adapter->sge.max_ethqsets < adapter->params.nports) { in size_nports_qsets()
2503 dev_warn(adapter->pdev_dev, "only using %d of %d available" in size_nports_qsets()
2505 adapter->sge.max_ethqsets, adapter->params.nports); in size_nports_qsets()
2506 adapter->params.nports = adapter->sge.max_ethqsets; in size_nports_qsets()
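size_nports_qsets() caps the port count by the provisioned Virtual Interfaces and the port-access mask, then bounds the Ethernet queue sets by the scarcest resource: interrupt-capable ingress queues (minus the firmware event queue and, under plain MSI, the forwarded-interrupt queue), Ethernet control/egress capacity, and two egress contexts per queue set. A sketch of that arithmetic with made-up provisioning values:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
        /* Hypothetical VF resource limits from the firmware. */
        unsigned int nvi = 2, niqflint = 10, nethctrl = 8, neq = 14;
        unsigned int nports = nvi;
        int using_plain_msi = 1;        /* one extra ingress queue is burned for MSI */

        /* Ingress: the FW event queue always costs one, the MSI intrq one more. */
        unsigned int ethqsets = niqflint - 1 - (using_plain_msi ? 1 : 0);

        /* Egress: bounded by control queues and by two egress contexts per set. */
        ethqsets = min_u(ethqsets, nethctrl);
        ethqsets = min_u(ethqsets, neq / 2);

        if (ethqsets < nports)
                nports = ethqsets;      /* can't bring up a port with no queue set */

        printf("nports=%u max_ethqsets=%u\n", nports, ethqsets);
        return 0;
}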
2517 struct sge_params *sge_params = &adapter->params.sge; in adap_init0()
2518 struct sge *s = &adapter->sge; in adap_init0()
2523 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux in adap_init0()
2525 * issue an FLR because of a self- deadlock on the device semaphore. in adap_init0()
2527 * cases where they're needed -- for instance, some versions of KVM in adap_init0()
2529 * use the firmware based reset in order to reset any per function in adap_init0()
2534 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err); in adap_init0()
2548 dev_err(adapter->pdev_dev, "unable to retrieve adapter" in adap_init0()
2554 dev_err(adapter->pdev_dev, "unable to retrieve adapter" in adap_init0()
2560 dev_err(adapter->pdev_dev, "unable to retrieve adapter" in adap_init0()
2566 dev_err(adapter->pdev_dev, "unable to retrieve adapter" in adap_init0()
2570 if (adapter->params.rss.mode != in adap_init0()
2572 dev_err(adapter->pdev_dev, "unable to operate with global RSS" in adap_init0()
2573 " mode %d\n", adapter->params.rss.mode); in adap_init0()
2574 return -EINVAL; in adap_init0()
2578 dev_err(adapter->pdev_dev, "unable to use adapter parameters:" in adap_init0()
2597 s->timer_val[0] = core_ticks_to_us(adapter, in adap_init0()
2598 TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1)); in adap_init0()
2599 s->timer_val[1] = core_ticks_to_us(adapter, in adap_init0()
2600 TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1)); in adap_init0()
2601 s->timer_val[2] = core_ticks_to_us(adapter, in adap_init0()
2602 TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3)); in adap_init0()
2603 s->timer_val[3] = core_ticks_to_us(adapter, in adap_init0()
2604 TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3)); in adap_init0()
2605 s->timer_val[4] = core_ticks_to_us(adapter, in adap_init0()
2606 TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5)); in adap_init0()
2607 s->timer_val[5] = core_ticks_to_us(adapter, in adap_init0()
2608 TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5)); in adap_init0()
2610 s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold); in adap_init0()
2611 s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold); in adap_init0()
2612 s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold); in adap_init0()
2613 s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold); in adap_init0()
2622 dev_err(adapter->pdev_dev, "unable to get virtual interface" in adap_init0()
2628 if (adapter->params.vfres.pmask == 0) { in adap_init0()
2629 dev_err(adapter->pdev_dev, "no port access configured\n" in adap_init0()
2631 return -EINVAL; in adap_init0()
2633 if (adapter->params.vfres.nvi == 0) { in adap_init0()
2634 dev_err(adapter->pdev_dev, "no virtual interfaces configured/" in adap_init0()
2636 return -EINVAL; in adap_init0()
2644 adapter->flags |= CXGB4VF_FW_OK; in adap_init0()
2652 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) | in init_rspq()
2655 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS in init_rspq()
2658 rspq->iqe_len = iqe_size; in init_rspq()
2659 rspq->size = size; in init_rspq()
2670 struct sge *s = &adapter->sge; in cfg_queues()
2679 BUG_ON((adapter->flags & in cfg_queues()
2687 n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); in cfg_queues()
2690 * We default to 1 queue per non-10G port and up to # of cores queues in cfg_queues()
2696 int n1g = (adapter->params.nports - n10g); in cfg_queues()
2697 q10g = (adapter->sge.max_ethqsets - n1g) / n10g; in cfg_queues()
2711 pi->first_qset = qidx; in cfg_queues()
2712 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; in cfg_queues()
2713 qidx += pi->nqsets; in cfg_queues()
2715 s->ethqsets = qidx; in cfg_queues()
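In cfg_queues(), each 10Gb/s-or-faster port gets an equal share of the queue sets left over after every slower port reserves one, capped by the number of online CPUs; slower ports keep a single set. A sketch of that split with hypothetical port speeds and counts:

#include <stdbool.h>
#include <stdio.h>

#define NPORTS 3

int main(void)
{
        /* Hypothetical layout: two 10G ports, one 1G port, 12 queue sets total. */
        bool is_10g[NPORTS] = { true, true, false };
        unsigned int max_ethqsets = 12, online_cpus = 4;
        unsigned int n10g = 0, qidx = 0, q10g = 1;

        for (int p = 0; p < NPORTS; p++)
                n10g += is_10g[p];

        if (n10g) {
                unsigned int n1g = NPORTS - n10g;

                /* Fast ports split what's left after each slow port keeps one set. */
                q10g = (max_ethqsets - n1g) / n10g;
                if (q10g > online_cpus)
                        q10g = online_cpus;
        }

        for (int p = 0; p < NPORTS; p++) {
                unsigned int nqsets = is_10g[p] ? q10g : 1;

                printf("port %d: first_qset=%u nqsets=%u\n", p, qidx, nqsets);
                qidx += nqsets;
        }
        printf("total ethqsets=%u\n", qidx);
        return 0;
}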
2728 for (qs = 0; qs < s->max_ethqsets; qs++) { in cfg_queues()
2729 struct sge_eth_rxq *rxq = &s->ethrxq[qs]; in cfg_queues()
2730 struct sge_eth_txq *txq = &s->ethtxq[qs]; in cfg_queues()
2732 init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size); in cfg_queues()
2733 rxq->fl.size = 72; in cfg_queues()
2734 txq->q.size = 1024; in cfg_queues()
2741 init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size); in cfg_queues()
2756 init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1, in cfg_queues()
2773 BUG_ON(n < adapter->params.nports); in reduce_ethqs()
2774 while (n < adapter->sge.ethqsets) in reduce_ethqs()
2777 if (pi->nqsets > 1) { in reduce_ethqs()
2778 pi->nqsets--; in reduce_ethqs()
2779 adapter->sge.ethqsets--; in reduce_ethqs()
2780 if (adapter->sge.ethqsets <= n) in reduce_ethqs()
2791 pi->first_qset = n; in reduce_ethqs()
2792 n += pi->nqsets; in reduce_ethqs()
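reduce_ethqs() trims the plan when fewer interrupt vectors are available than expected: it repeatedly takes one queue set from any port that still has more than one, round-robin, until the total fits, then recomputes each port's first_qset offset. A sketch of that fair reduction:

#include <stdio.h>

#define NPORTS 3

int main(void)
{
        unsigned int nqsets[NPORTS] = { 4, 4, 1 };      /* per-port plan */
        unsigned int total = 9, target = 6;             /* only 6 sets fit */

        /* Take one set at a time from ports that still have more than one. */
        while (total > target) {
                for (int p = 0; p < NPORTS && total > target; p++) {
                        if (nqsets[p] > 1) {
                                nqsets[p]--;
                                total--;
                        }
                }
        }

        /* Re-pack first_qset offsets now that the counts changed. */
        for (int p = 0, first = 0; p < NPORTS; p++) {
                printf("port %d: first_qset=%d nqsets=%u\n", p, first, nqsets[p]);
                first += nqsets[p];
        }
        return 0;
}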
2797 * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
2798 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2807 struct sge *s = &adapter->sge; in enable_msix()
2813 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets" in enable_msix()
2819 want = s->max_ethqsets + MSIX_EXTRAS; in enable_msix()
2820 need = adapter->params.nports + MSIX_EXTRAS; in enable_msix()
2822 want = pci_enable_msix_range(adapter->pdev, entries, need, want); in enable_msix()
2826 nqsets = want - MSIX_EXTRAS; in enable_msix()
2827 if (nqsets < s->max_ethqsets) { in enable_msix()
2828 dev_warn(adapter->pdev_dev, "only enough MSI-X vectors" in enable_msix()
2830 s->max_ethqsets = nqsets; in enable_msix()
2831 if (nqsets < s->ethqsets) in enable_msix()
2835 adapter->msix_info[i].vec = entries[i].vector; in enable_msix()
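enable_msix() asks for one vector per possible queue set plus the extras, will settle for one per port plus the extras, and if granted less than it wanted it shrinks max_ethqsets (calling reduce_ethqs(), above) to match. A sketch of that negotiation, with a stand-in for pci_enable_msix_range() and invented vector counts; MSIX_EXTRAS here covers only the firmware event queue:

#include <stdio.h>

#define MSIX_EXTRAS 1   /* firmware event queue vector in this sketch */

/* Stand-in for pci_enable_msix_range(): grant whatever the "hardware" has. */
static int fake_enable_msix_range(int need, int want, int available)
{
        if (available < need)
                return -1;              /* can't even meet the minimum */
        return available < want ? available : want;
}

int main(void)
{
        int nports = 2, max_ethqsets = 8, available = 6;
        int want = max_ethqsets + MSIX_EXTRAS;
        int need = nports + MSIX_EXTRAS;
        int got = fake_enable_msix_range(need, want, available);

        if (got < 0) {
                printf("fall back to MSI\n");
                return 0;
        }

        int nqsets = got - MSIX_EXTRAS;
        if (nqsets < max_ethqsets)
                max_ethqsets = nqsets;  /* shrink; reduce_ethqs() would re-spread sets */

        printf("vectors=%d max_ethqsets=%d\n", got, max_ethqsets);
        return 0;
}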
2858 * cxgb4vf_get_port_mask - Get port mask for the VF based on mac
2862 * Find the port mask for the VF based on the index of mac
2874 pmask = adapter->params.vfres.pmask; in cxgb4vf_get_port_mask()
2885 rmask = adapter->params.vfres.pmask; in cxgb4vf_get_port_mask()
2909 return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); in cxgb4vf_pci_probe()
2917 dev_err(&pdev->dev, "cannot obtain PCI resources\n"); in cxgb4vf_pci_probe()
2924 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in cxgb4vf_pci_probe()
2926 dev_err(&pdev->dev, "no usable DMA configuration\n"); in cxgb4vf_pci_probe()
2940 err = -ENOMEM; in cxgb4vf_pci_probe()
2944 adapter->pdev = pdev; in cxgb4vf_pci_probe()
2945 adapter->pdev_dev = &pdev->dev; in cxgb4vf_pci_probe()
2947 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + in cxgb4vf_pci_probe()
2951 if (!adapter->mbox_log) { in cxgb4vf_pci_probe()
2952 err = -ENOMEM; in cxgb4vf_pci_probe()
2955 adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS; in cxgb4vf_pci_probe()
2960 spin_lock_init(&adapter->stats_lock); in cxgb4vf_pci_probe()
2961 spin_lock_init(&adapter->mbox_lock); in cxgb4vf_pci_probe()
2962 INIT_LIST_HEAD(&adapter->mlist.list); in cxgb4vf_pci_probe()
2967 adapter->regs = pci_ioremap_bar(pdev, 0); in cxgb4vf_pci_probe()
2968 if (!adapter->regs) { in cxgb4vf_pci_probe()
2969 dev_err(&pdev->dev, "cannot map device registers\n"); in cxgb4vf_pci_probe()
2970 err = -ENOMEM; in cxgb4vf_pci_probe()
2978 dev_err(adapter->pdev_dev, "device didn't become ready:" in cxgb4vf_pci_probe()
2983 /* For T5 and later we want to use the new BAR-based User Doorbells, in cxgb4vf_pci_probe()
2986 if (!is_t4(adapter->params.chip)) { in cxgb4vf_pci_probe()
2987 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), in cxgb4vf_pci_probe()
2989 if (!adapter->bar2) { in cxgb4vf_pci_probe()
2990 dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n"); in cxgb4vf_pci_probe()
2991 err = -ENOMEM; in cxgb4vf_pci_probe()
2998 adapter->name = pci_name(pdev); in cxgb4vf_pci_probe()
2999 adapter->msg_enable = DFLT_MSG_ENABLE; in cxgb4vf_pci_probe()
3016 adapter->flags |= CXGB4VF_ROOT_NO_RELAXED_ORDERING; in cxgb4vf_pci_probe()
3020 dev_err(&pdev->dev, in cxgb4vf_pci_probe()
3025 INIT_LIST_HEAD(&adapter->mac_hlist); in cxgb4vf_pci_probe()
3044 port_id = ffs(pmask) - 1; in cxgb4vf_pci_probe()
3053 err = -ENOMEM; in cxgb4vf_pci_probe()
3056 adapter->port[pidx] = netdev; in cxgb4vf_pci_probe()
3057 SET_NETDEV_DEV(netdev, &pdev->dev); in cxgb4vf_pci_probe()
3059 pi->adapter = adapter; in cxgb4vf_pci_probe()
3060 pi->pidx = pidx; in cxgb4vf_pci_probe()
3061 pi->port_id = port_id; in cxgb4vf_pci_probe()
3067 pi->xact_addr_filt = -1; in cxgb4vf_pci_probe()
3068 netdev->irq = pdev->irq; in cxgb4vf_pci_probe()
3070 netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO | in cxgb4vf_pci_probe()
3073 netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; in cxgb4vf_pci_probe()
3074 netdev->vlan_features = netdev->features & VLAN_FEAT; in cxgb4vf_pci_probe()
3076 netdev->priv_flags |= IFF_UNICAST_FLT; in cxgb4vf_pci_probe()
3077 netdev->min_mtu = 81; in cxgb4vf_pci_probe()
3078 netdev->max_mtu = ETH_MAX_MTU; in cxgb4vf_pci_probe()
3080 netdev->netdev_ops = &cxgb4vf_netdev_ops; in cxgb4vf_pci_probe()
3081 netdev->ethtool_ops = &cxgb4vf_ethtool_ops; in cxgb4vf_pci_probe()
3082 netdev->dev_port = pi->port_id; in cxgb4vf_pci_probe()
3088 if (!(adapter->flags & CXGB4VF_FW_OK)) in cxgb4vf_pci_probe()
3093 dev_err(&pdev->dev, in cxgb4vf_pci_probe()
3099 pi->viid = viid; in cxgb4vf_pci_probe()
3106 dev_err(&pdev->dev, "cannot initialize port %d\n", in cxgb4vf_pci_probe()
3113 dev_err(&pdev->dev, in cxgb4vf_pci_probe()
3116 } else if (naddr && adapter->params.vfres.nvi == 1) { in cxgb4vf_pci_probe()
3122 dev_err(&pdev->dev, in cxgb4vf_pci_probe()
3127 dev_info(&pdev->dev, in cxgb4vf_pci_probe()
3133 * use MSI-X interrupts, try to enable them but fall back to using in cxgb4vf_pci_probe()
3134 * MSI interrupts if we can't enable MSI-X interrupts. If we can't in cxgb4vf_pci_probe()
3138 adapter->flags |= CXGB4VF_USING_MSIX; in cxgb4vf_pci_probe()
3141 dev_info(adapter->pdev_dev, in cxgb4vf_pci_probe()
3142 "Unable to use MSI-X Interrupts; falling " in cxgb4vf_pci_probe()
3154 dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;" in cxgb4vf_pci_probe()
3158 adapter->flags |= CXGB4VF_USING_MSI; in cxgb4vf_pci_probe()
3173 struct port_info *pi = netdev_priv(adapter->port[pidx]); in cxgb4vf_pci_probe()
3174 netdev = adapter->port[pidx]; in cxgb4vf_pci_probe()
3178 netif_set_real_num_tx_queues(netdev, pi->nqsets); in cxgb4vf_pci_probe()
3179 netif_set_real_num_rx_queues(netdev, pi->nqsets); in cxgb4vf_pci_probe()
3183 dev_warn(&pdev->dev, "cannot register net device %s," in cxgb4vf_pci_probe()
3184 " skipping\n", netdev->name); in cxgb4vf_pci_probe()
3189 set_bit(pidx, &adapter->registered_device_map); in cxgb4vf_pci_probe()
3191 if (adapter->registered_device_map == 0) { in cxgb4vf_pci_probe()
3192 dev_err(&pdev->dev, "could not register any net devices\n"); in cxgb4vf_pci_probe()
3193 err = -EINVAL; in cxgb4vf_pci_probe()
3201 adapter->debugfs_root = in cxgb4vf_pci_probe()
3212 dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n", in cxgb4vf_pci_probe()
3213 adapter->port[pidx]->name, in cxgb4vf_pci_probe()
3214 (adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" : in cxgb4vf_pci_probe()
3215 (adapter->flags & CXGB4VF_USING_MSI) ? "MSI" : ""); in cxgb4vf_pci_probe()
3228 if (adapter->flags & CXGB4VF_USING_MSIX) { in cxgb4vf_pci_probe()
3229 pci_disable_msix(adapter->pdev); in cxgb4vf_pci_probe()
3230 adapter->flags &= ~CXGB4VF_USING_MSIX; in cxgb4vf_pci_probe()
3231 } else if (adapter->flags & CXGB4VF_USING_MSI) { in cxgb4vf_pci_probe()
3232 pci_disable_msi(adapter->pdev); in cxgb4vf_pci_probe()
3233 adapter->flags &= ~CXGB4VF_USING_MSI; in cxgb4vf_pci_probe()
3238 netdev = adapter->port[pidx]; in cxgb4vf_pci_probe()
3242 if (pi->viid) in cxgb4vf_pci_probe()
3243 t4vf_free_vi(adapter, pi->viid); in cxgb4vf_pci_probe()
3244 if (test_bit(pidx, &adapter->registered_device_map)) in cxgb4vf_pci_probe()
3249 if (!is_t4(adapter->params.chip)) in cxgb4vf_pci_probe()
3250 iounmap(adapter->bar2); in cxgb4vf_pci_probe()
3253 iounmap(adapter->regs); in cxgb4vf_pci_probe()
3256 kfree(adapter->mbox_log); in cxgb4vf_pci_probe()
3289 if (test_bit(pidx, &adapter->registered_device_map)) in cxgb4vf_pci_remove()
3290 unregister_netdev(adapter->port[pidx]); in cxgb4vf_pci_remove()
3292 if (adapter->flags & CXGB4VF_USING_MSIX) { in cxgb4vf_pci_remove()
3293 pci_disable_msix(adapter->pdev); in cxgb4vf_pci_remove()
3294 adapter->flags &= ~CXGB4VF_USING_MSIX; in cxgb4vf_pci_remove()
3295 } else if (adapter->flags & CXGB4VF_USING_MSI) { in cxgb4vf_pci_remove()
3296 pci_disable_msi(adapter->pdev); in cxgb4vf_pci_remove()
3297 adapter->flags &= ~CXGB4VF_USING_MSI; in cxgb4vf_pci_remove()
3303 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { in cxgb4vf_pci_remove()
3305 debugfs_remove_recursive(adapter->debugfs_root); in cxgb4vf_pci_remove()
3313 struct net_device *netdev = adapter->port[pidx]; in cxgb4vf_pci_remove()
3320 if (pi->viid) in cxgb4vf_pci_remove()
3321 t4vf_free_vi(adapter, pi->viid); in cxgb4vf_pci_remove()
3324 iounmap(adapter->regs); in cxgb4vf_pci_remove()
3325 if (!is_t4(adapter->params.chip)) in cxgb4vf_pci_remove()
3326 iounmap(adapter->bar2); in cxgb4vf_pci_remove()
3327 kfree(adapter->mbox_log); in cxgb4vf_pci_remove()
3328 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, in cxgb4vf_pci_remove()
3330 list_del(&entry->list); in cxgb4vf_pci_remove()
3361 if (test_bit(pidx, &adapter->registered_device_map)) in cxgb4vf_pci_shutdown()
3362 unregister_netdev(adapter->port[pidx]); in cxgb4vf_pci_shutdown()
3368 if (adapter->flags & CXGB4VF_USING_MSIX) { in cxgb4vf_pci_shutdown()
3369 pci_disable_msix(adapter->pdev); in cxgb4vf_pci_shutdown()
3370 adapter->flags &= ~CXGB4VF_USING_MSIX; in cxgb4vf_pci_shutdown()
3371 } else if (adapter->flags & CXGB4VF_USING_MSI) { in cxgb4vf_pci_shutdown()
3372 pci_disable_msi(adapter->pdev); in cxgb4vf_pci_shutdown()
3373 adapter->flags &= ~CXGB4VF_USING_MSI; in cxgb4vf_pci_shutdown()
3421 pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n", in cxgb4vf_module_init()
3423 return -EINVAL; in cxgb4vf_module_init()