Lines Matching +full:dcb +full:- +full:algorithm

4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
109 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
126 #define FW4_CFNAME "cxgb4/t4-config.txt"
127 #define FW5_CFNAME "cxgb4/t5-config.txt"
128 #define FW6_CFNAME "cxgb4/t6-config.txt"
144 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
154 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
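Editor's aside: a minimal user-space sketch of the fallback order the "msi" parameter implies (try MSI-X first, then MSI, then legacy INTx). try_msix() and try_msi() are made-up stand-ins for the real PCI allocation calls, not driver functions.

#include <stdio.h>

/* Hypothetical stand-ins for the MSI-X/MSI allocation calls; 0 = success.
 * MSI-X is made to fail here so the fallback path is visible. */
static int try_msix(void) { return -1; }
static int try_msi(void)  { return 0; }

/* msi_param mirrors the module parameter: 2 = MSI-X, 1 = MSI, 0 = INTx */
static int pick_interrupt_mode(int msi_param)
{
	if (msi_param >= 2 && try_msix() == 0)
		return 2;
	if (msi_param >= 1 && try_msi() == 0)
		return 1;
	return 0;
}

int main(void)
{
	printf("selected mode: %d\n", pick_interrupt_mode(2));	/* prints 1 */
	return 0;
}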
158 * offset by 2 bytes in order to have the IP headers line up on 4-byte
160 * a machine check fault if an attempt is made to access one of the 4-byte IP
161 * header fields on a non-4-byte boundary. And it's a major performance issue
164 * edge-case performance sensitive applications (like forwarding large volumes
166 * PCI-E Bus transfers enough to measurably affect performance.
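Editor's aside: the alignment argument above, restated in a standalone form. With a 14-byte Ethernet header, padding the start of the RX buffer by 2 bytes puts the IP header on a 4-byte boundary; the demo below only prints that arithmetic and is not driver code.

#include <stdio.h>

int main(void)
{
	const unsigned int eth_hlen = 14;	/* Ethernet header length */
	unsigned int pad;

	for (pad = 0; pad < 4; pad++)
		printf("pad %u -> IP header at offset %u (%s)\n",
		       pad, pad + eth_hlen,
		       (pad + eth_hlen) % 4 ? "misaligned" : "4-byte aligned");
	return 0;
}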
170 /* TX Queue select used to determine what algorithm to use for selecting TX
199 switch (p->link_cfg.speed) { in link_report()
223 dev->name, p->link_cfg.speed); in link_report()
227 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, in link_report()
228 fc[p->link_cfg.fc]); in link_report()
237 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable()
238 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
241 /* We use a simple mapping of Port TX Queue Index to DCB in dcb_tx_queue_prio_enable()
242 * Priority when we're enabling DCB. in dcb_tx_queue_prio_enable()
244 for (i = 0; i < pi->nqsets; i++, txq++) { in dcb_tx_queue_prio_enable()
251 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); in dcb_tx_queue_prio_enable()
258 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in dcb_tx_queue_prio_enable()
260 -FW_CMD_MAX_TIMEOUT); in dcb_tx_queue_prio_enable()
263 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
264 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n", in dcb_tx_queue_prio_enable()
265 enable ? "set" : "unset", pi->port_id, i, -err); in dcb_tx_queue_prio_enable()
267 txq->dcb_prio = enable ? value : 0; in dcb_tx_queue_prio_enable()
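Editor's aside: the mapping described in the comment above, as a standalone sketch; when DCB is enabled, TX queue i on the port carries DCB priority i, and disabling clears the priority to 0. The queue count below is an assumed value.

#include <stdio.h>

int main(void)
{
	int nqsets = 8, enable = 1, i;	/* assumed number of queue sets */

	for (i = 0; i < nqsets; i++)
		printf("txq %d -> dcb_prio %d\n", i, enable ? i : 0);
	return 0;
}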
275 if (!pi->dcb.enabled) in cxgb4_dcb_enabled()
278 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || in cxgb4_dcb_enabled()
279 (pi->dcb.state == CXGB4_DCB_STATE_HOST)); in cxgb4_dcb_enabled()
285 struct net_device *dev = adapter->port[port_id]; in t4_os_link_changed()
311 struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
314 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) in t4_os_portmod_changed()
316 else if (pi->mod_type < ARRAY_SIZE(mod_str)) in t4_os_portmod_changed()
317 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); in t4_os_portmod_changed()
318 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) in t4_os_portmod_changed()
320 dev->name); in t4_os_portmod_changed()
321 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) in t4_os_portmod_changed()
323 dev->name); in t4_os_portmod_changed()
324 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) in t4_os_portmod_changed()
325 netdev_info(dev, "%s: transceiver module error\n", dev->name); in t4_os_portmod_changed()
328 dev->name, pi->mod_type); in t4_os_portmod_changed()
333 pi->link_cfg.redo_l1cfg = netif_running(dev); in t4_os_portmod_changed()
350 struct adapter *adap = pi->adapter; in cxgb4_set_addr_hash()
356 list_for_each_entry(entry, &adap->mac_hlist, list) { in cxgb4_set_addr_hash()
357 ucast |= is_unicast_ether_addr(entry->addr); in cxgb4_set_addr_hash()
358 vec |= (1ULL << hash_mac_addr(entry->addr)); in cxgb4_set_addr_hash()
360 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, in cxgb4_set_addr_hash()
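Editor's aside: a user-space sketch of the hash-filter vector being built above; each address hashes to a bit position in a 64-bit vector and a "unicast seen" flag is tracked alongside it. toy_hash() is a placeholder, not the driver's hash_mac_addr().

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static unsigned int toy_hash(const unsigned char *mac)
{
	unsigned int h = 0, i;

	for (i = 0; i < 6; i++)
		h ^= mac[i];
	return h & 0x3f;		/* bit index 0..63 */
}

int main(void)
{
	const unsigned char macs[2][6] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },	/* multicast */
		{ 0x02, 0x12, 0x34, 0x56, 0x78, 0x9a },	/* unicast   */
	};
	uint64_t vec = 0;
	bool ucast = false;
	int i;

	for (i = 0; i < 2; i++) {
		ucast |= !(macs[i][0] & 1);	/* unicast: LSB of first octet clear */
		vec |= 1ULL << toy_hash(macs[i]);
	}
	printf("ucast=%d vec=%#llx\n", ucast, (unsigned long long)vec);
	return 0;
}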
367 struct adapter *adap = pi->adapter; in cxgb4_mac_sync()
382 ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist, in cxgb4_mac_sync()
393 return -ENOMEM; in cxgb4_mac_sync()
394 ether_addr_copy(new_entry->addr, mac_addr); in cxgb4_mac_sync()
395 list_add_tail(&new_entry->list, &adap->mac_hlist); in cxgb4_mac_sync()
405 struct adapter *adap = pi->adapter; in cxgb4_mac_unsync()
413 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { in cxgb4_mac_unsync()
414 if (ether_addr_equal(entry->addr, mac_addr)) { in cxgb4_mac_unsync()
415 list_del(&entry->list); in cxgb4_mac_unsync()
421 ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false); in cxgb4_mac_unsync()
422 return ret < 0 ? -EINVAL : 0; in cxgb4_mac_unsync()
427 * If @mtu is -1 it is left unchanged.
432 struct adapter *adapter = pi->adapter; in set_rxmode()
437 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror, in set_rxmode()
438 mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, in set_rxmode()
439 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, in set_rxmode()
444 * cxgb4_change_mac - Update match filter for a MAC address.
448 * or -1
464 struct adapter *adapter = pi->adapter; in cxgb4_change_mac()
468 ret = t4_change_mac(adapter, adapter->mbox, viid, in cxgb4_change_mac()
471 if (ret == -ENOMEM) { in cxgb4_change_mac()
475 list_for_each_entry(entry, &adapter->mac_hlist, list) { in cxgb4_change_mac()
476 if (entry->iface_mac) { in cxgb4_change_mac()
477 ether_addr_copy(entry->addr, addr); in cxgb4_change_mac()
483 return -ENOMEM; in cxgb4_change_mac()
484 ether_addr_copy(new_entry->addr, addr); in cxgb4_change_mac()
485 new_entry->iface_mac = true; in cxgb4_change_mac()
486 list_add_tail(&new_entry->list, &adapter->mac_hlist); in cxgb4_change_mac()
498 * link_start - enable a port
506 unsigned int mb = pi->adapter->mbox; in link_start()
513 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror, in link_start()
514 dev->mtu, -1, -1, -1, in link_start()
515 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); in link_start()
517 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, in link_start()
518 dev->dev_addr, true, &pi->smt_idx); in link_start()
520 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan, in link_start()
521 &pi->link_cfg); in link_start()
524 ret = t4_enable_pi_params(pi->adapter, mb, pi, true, in link_start()
536 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); in dcb_rpl()
537 struct net_device *dev = adap->port[adap->chan_map[port]]; in dcb_rpl()
544 /* If the DCB has become enabled or disabled on the port then we're in dcb_rpl()
545 * going to need to set up/tear down DCB Priority parameters for the in dcb_rpl()
558 u8 opcode = ((const struct rss_header *)rsp)->opcode; in fwevtq_handler()
565 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) { in fwevtq_handler()
567 opcode = ((const struct rss_header *)rsp)->opcode; in fwevtq_handler()
570 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
578 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid)); in fwevtq_handler()
581 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
582 txq->restarts++; in fwevtq_handler()
583 if (txq->q_type == CXGB4_TXQ_ETH) { in fwevtq_handler()
587 t4_sge_eth_txq_egress_update(q->adap, eq, -1); in fwevtq_handler()
592 tasklet_schedule(&oq->qresume_tsk); in fwevtq_handler()
598 const struct fw_port_cmd *pcmd = (const void *)p->data; in fwevtq_handler()
599 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid)); in fwevtq_handler()
601 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); in fwevtq_handler()
607 be32_to_cpu(pcmd->op_to_portid)); in fwevtq_handler()
611 dev = q->adap->port[q->adap->chan_map[port]]; in fwevtq_handler()
613 ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F) in fwevtq_handler()
614 : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32) in fwevtq_handler()
625 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
628 if (p->type == 0) in fwevtq_handler()
629 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
633 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
637 do_smt_write_rpl(q->adap, p); in fwevtq_handler()
641 filter_rpl(q->adap, p); in fwevtq_handler()
645 hash_filter_rpl(q->adap, p); in fwevtq_handler()
649 hash_del_filter_rpl(q->adap, p); in fwevtq_handler()
653 do_srq_table_rpl(q->adap, p); in fwevtq_handler()
655 dev_err(q->adap->pdev_dev, in fwevtq_handler()
663 if (adapter->flags & CXGB4_USING_MSIX) { in disable_msi()
664 pci_disable_msix(adapter->pdev); in disable_msi()
665 adapter->flags &= ~CXGB4_USING_MSIX; in disable_msi()
666 } else if (adapter->flags & CXGB4_USING_MSI) { in disable_msi()
667 pci_disable_msi(adapter->pdev); in disable_msi()
668 adapter->flags &= ~CXGB4_USING_MSI; in disable_msi()
673 * Interrupt handler for non-data events used with MSI-X.
681 adap->swintr = 1; in t4_nondata_intr()
684 if (adap->flags & CXGB4_MASTER_PF) in t4_nondata_intr()
695 dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n"); in cxgb4_set_msix_aff()
696 return -ENOMEM; in cxgb4_set_msix_aff()
699 cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)), in cxgb4_set_msix_aff()
704 dev_warn(adap->pdev_dev, in cxgb4_set_msix_aff()
719 struct sge *s = &adap->sge; in request_msix_queue_irqs()
723 if (s->fwevtq_msix_idx < 0) in request_msix_queue_irqs()
724 return -ENOMEM; in request_msix_queue_irqs()
726 err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec, in request_msix_queue_irqs()
728 adap->msix_info[s->fwevtq_msix_idx].desc, in request_msix_queue_irqs()
729 &s->fw_evtq); in request_msix_queue_irqs()
734 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
735 err = request_irq(minfo->vec, in request_msix_queue_irqs()
737 minfo->desc, in request_msix_queue_irqs()
738 &s->ethrxq[ethqidx].rspq); in request_msix_queue_irqs()
742 cxgb4_set_msix_aff(adap, minfo->vec, in request_msix_queue_irqs()
743 &minfo->aff_mask, ethqidx); in request_msix_queue_irqs()
748 while (--ethqidx >= 0) { in request_msix_queue_irqs()
749 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
750 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); in request_msix_queue_irqs()
751 free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq); in request_msix_queue_irqs()
753 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in request_msix_queue_irqs()
759 struct sge *s = &adap->sge; in free_msix_queue_irqs()
763 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in free_msix_queue_irqs()
765 minfo = s->ethrxq[i].msix; in free_msix_queue_irqs()
766 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); in free_msix_queue_irqs()
767 free_irq(minfo->vec, &s->ethrxq[i].rspq); in free_msix_queue_irqs()
785 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
787 dev_warn(adap->pdev_dev, in setup_ppod_edram()
790 return -1; in setup_ppod_edram()
794 return -1; in setup_ppod_edram()
796 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
798 dev_err(adap->pdev_dev, in setup_ppod_edram()
800 return -1; in setup_ppod_edram()
814 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, in adap_config_hpfilter()
821 dev_err(adapter->pdev_dev, in adap_config_hpfilter()
828 struct adapter *adap = pi->adapter; in cxgb4_config_rss()
831 ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss, in cxgb4_config_rss()
841 return t4_config_vi_rss(adap, adap->mbox, viid, in cxgb4_config_rss()
851 * cxgb4_write_rss - write the RSS table for a given port
861 struct adapter *adapter = pi->adapter; in cxgb4_write_rss()
866 rxq = &adapter->sge.ethrxq[pi->first_qset]; in cxgb4_write_rss()
867 rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL); in cxgb4_write_rss()
869 return -ENOMEM; in cxgb4_write_rss()
872 for (i = 0; i < pi->rss_size; i++, queues++) in cxgb4_write_rss()
875 err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid); in cxgb4_write_rss()
881 * setup_rss - configure RSS
894 for (j = 0; j < pi->rss_size; j++) in setup_rss()
895 pi->rss[j] = j % pi->nqsets; in setup_rss()
897 err = cxgb4_write_rss(pi, pi->rss); in setup_rss()
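Editor's aside: the round-robin fill in setup_rss() above, as a standalone example; an RSS indirection table of rss_size slots is spread evenly over nqsets queues. The sizes below are assumptions for illustration.

#include <stdio.h>

int main(void)
{
	unsigned short rss[16];			/* assume rss_size = 16 */
	const unsigned int nqsets = 4;		/* assume 4 queue sets  */
	unsigned int j;

	for (j = 0; j < 16; j++)
		rss[j] = j % nqsets;		/* 0,1,2,3,0,1,2,3,... */

	for (j = 0; j < 16; j++)
		printf("%u ", rss[j]);
	printf("\n");
	return 0;
}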
909 qid -= p->ingr_start; in rxq_to_chan()
910 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; in rxq_to_chan()
915 if (q->handler) in cxgb4_quiesce_rx()
916 napi_disable(&q->napi); in cxgb4_quiesce_rx()
926 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
927 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
939 struct sge *s = &adap->sge; in disable_interrupts()
941 if (adap->flags & CXGB4_FULL_INIT_DONE) { in disable_interrupts()
943 if (adap->flags & CXGB4_USING_MSIX) { in disable_interrupts()
945 free_irq(adap->msix_info[s->nd_msix_idx].vec, in disable_interrupts()
948 free_irq(adap->pdev->irq, adap); in disable_interrupts()
956 if (q->handler) in cxgb4_enable_rx()
957 napi_enable(&q->napi); in cxgb4_enable_rx()
959 /* 0-increment GTS to start the timer and enable interrupts */ in cxgb4_enable_rx()
961 SEINTARM_V(q->intr_params) | in cxgb4_enable_rx()
962 INGRESSQID_V(q->cntxt_id)); in cxgb4_enable_rx()
972 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
973 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
986 adap->sge.nd_msix_idx = -1; in setup_non_data_intr()
987 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_non_data_intr()
990 /* Request MSI-X vector for non-data interrupt */ in setup_non_data_intr()
993 return -ENOMEM; in setup_non_data_intr()
995 snprintf(adap->msix_info[msix].desc, in setup_non_data_intr()
996 sizeof(adap->msix_info[msix].desc), in setup_non_data_intr()
997 "%s", adap->port[0]->name); in setup_non_data_intr()
999 adap->sge.nd_msix_idx = msix; in setup_non_data_intr()
1005 struct sge *s = &adap->sge; in setup_fw_sge_queues()
1008 bitmap_zero(s->starving_fl, s->egr_sz); in setup_fw_sge_queues()
1009 bitmap_zero(s->txq_maperr, s->egr_sz); in setup_fw_sge_queues()
1011 if (adap->flags & CXGB4_USING_MSIX) { in setup_fw_sge_queues()
1012 s->fwevtq_msix_idx = -1; in setup_fw_sge_queues()
1015 return -ENOMEM; in setup_fw_sge_queues()
1017 snprintf(adap->msix_info[msix].desc, in setup_fw_sge_queues()
1018 sizeof(adap->msix_info[msix].desc), in setup_fw_sge_queues()
1019 "%s-FWeventq", adap->port[0]->name); in setup_fw_sge_queues()
1021 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_fw_sge_queues()
1022 NULL, NULL, NULL, -1); in setup_fw_sge_queues()
1025 msix = -((int)s->intrq.abs_id + 1); in setup_fw_sge_queues()
1028 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_fw_sge_queues()
1029 msix, NULL, fwevtq_handler, NULL, -1); in setup_fw_sge_queues()
1033 s->fwevtq_msix_idx = msix; in setup_fw_sge_queues()
1038 * setup_sge_queues - configure SGE Tx/Rx/response queues
1042 * We support multiple queue sets per port if we have MSI-X, otherwise
1048 struct sge *s = &adap->sge; in setup_sge_queues()
1053 rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; in setup_sge_queues()
1055 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_sge_queues()
1056 msix = -((int)s->intrq.abs_id + 1); in setup_sge_queues()
1059 struct net_device *dev = adap->port[i]; in setup_sge_queues()
1061 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
1062 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; in setup_sge_queues()
1064 for (j = 0; j < pi->nqsets; j++, q++) { in setup_sge_queues()
1072 snprintf(adap->msix_info[msix].desc, in setup_sge_queues()
1073 sizeof(adap->msix_info[msix].desc), in setup_sge_queues()
1074 "%s-Rx%d", dev->name, j); in setup_sge_queues()
1075 q->msix = &adap->msix_info[msix]; in setup_sge_queues()
1078 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
1079 msix, &q->fl, in setup_sge_queues()
1083 pi->tx_chan)); in setup_sge_queues()
1086 q->rspq.idx = j; in setup_sge_queues()
1087 memset(&q->stats, 0, sizeof(q->stats)); in setup_sge_queues()
1090 q = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
1091 for (j = 0; j < pi->nqsets; j++, t++, q++) { in setup_sge_queues()
1094 q->rspq.cntxt_id, in setup_sge_queues()
1095 !!(adap->flags & CXGB4_SGE_DBQ_TIMER)); in setup_sge_queues()
1106 cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id; in setup_sge_queues()
1108 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
1109 s->fw_evtq.cntxt_id, cmplqid); in setup_sge_queues()
1114 if (!is_t4(adap->params.chip)) { in setup_sge_queues()
1115 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], in setup_sge_queues()
1116 netdev_get_tx_queue(adap->port[0], 0) in setup_sge_queues()
1117 , s->fw_evtq.cntxt_id, false); in setup_sge_queues()
1122 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
1125 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
1126 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id)); in setup_sge_queues()
1129 dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); in setup_sge_queues()
1153 "TX Packet without VLAN Tag on DCB Link\n"); in cxgb_select_queue()
1158 if (skb->protocol == htons(ETH_P_FCOE)) in cxgb_select_queue()
1159 txq = skb->priority & 0x7; in cxgb_select_queue()
1166 if (dev->num_tc) { in cxgb_select_queue()
1170 ver = ip_hdr(skb)->version; in cxgb_select_queue()
1171 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : in cxgb_select_queue()
1172 ip_hdr(skb)->protocol; in cxgb_select_queue()
1177 skb->encapsulation || in cxgb_select_queue()
1180 txq = txq % pi->nqsets; in cxgb_select_queue()
1190 while (unlikely(txq >= dev->real_num_tx_queues)) in cxgb_select_queue()
1191 txq -= dev->real_num_tx_queues; in cxgb_select_queue()
1196 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in cxgb_select_queue()
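Editor's aside: the wrap-around loop in cxgb_select_queue() above is just a cheap reduction of the queue index into range, equivalent to a modulo; a tiny standalone version with assumed numbers:

#include <stdio.h>

int main(void)
{
	unsigned int txq = 11, nq = 4;	/* assume 4 real TX queues */

	while (txq >= nq)
		txq -= nq;		/* same result as txq % nq */
	printf("txq -> %u\n", txq);	/* prints 3 */
	return 0;
}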
1203 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { in closest_timer()
1204 delta = time - s->timer_val[i]; in closest_timer()
1206 delta = -delta; in closest_timer()
1219 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { in closest_thres()
1220 delta = thres - s->counter_val[i]; in closest_thres()
1222 delta = -delta; in closest_thres()
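Editor's aside: closest_timer() and closest_thres() above both do the same "closest entry" scan, keeping the index with the minimum absolute difference from the requested value. A standalone re-expression (the table contents are examples, not the hardware defaults):

#include <stdio.h>

static int closest_index(const unsigned int *tbl, int n, unsigned int x)
{
	unsigned int best_delta = (unsigned int)-1;
	int i, best = 0;

	for (i = 0; i < n; i++) {
		unsigned int delta = tbl[i] > x ? tbl[i] - x : x - tbl[i];

		if (delta < best_delta) {
			best_delta = delta;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	unsigned int timers_us[] = { 1, 5, 10, 50, 100, 200 };

	printf("closest to 35us: index %d\n", closest_index(timers_us, 6, 35));
	return 0;
}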
1232 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1234 * @us: the hold-off time in us, or 0 to disable timer
1235 * @cnt: the hold-off packet count, or 0 to disable counter
1237 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1243 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params()
1252 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1253 if (q->desc && q->pktcnt_idx != new_idx) { in cxgb4_set_rspq_intr_params()
1258 FW_PARAMS_PARAM_YZ_V(q->cntxt_id); in cxgb4_set_rspq_intr_params()
1259 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in cxgb4_set_rspq_intr_params()
1264 q->pktcnt_idx = new_idx; in cxgb4_set_rspq_intr_params()
1267 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
1268 q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0); in cxgb4_set_rspq_intr_params()
1274 netdev_features_t changed = dev->features ^ features; in cxgb_set_features()
1281 err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, in cxgb_set_features()
1282 pi->viid_mirror, -1, -1, -1, -1, in cxgb_set_features()
1285 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; in cxgb_set_features()
1291 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1292 return -1; in setup_debugfs()
1303 if ((adap->flags & CXGB4_FULL_INIT_DONE) && in cxgb4_port_mirror_free_rxq()
1304 !(adap->flags & CXGB4_SHUTTING_DOWN)) in cxgb4_port_mirror_free_rxq()
1305 cxgb4_quiesce_rx(&mirror_rxq->rspq); in cxgb4_port_mirror_free_rxq()
1307 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_free_rxq()
1308 cxgb4_clear_msix_aff(mirror_rxq->msix->vec, in cxgb4_port_mirror_free_rxq()
1309 mirror_rxq->msix->aff_mask); in cxgb4_port_mirror_free_rxq()
1310 free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq); in cxgb4_port_mirror_free_rxq()
1311 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_free_rxq()
1314 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_free_rxq()
1322 struct sge *s = &adap->sge; in cxgb4_port_mirror_alloc_queues()
1327 if (!pi->vi_mirror_count) in cxgb4_port_mirror_alloc_queues()
1330 if (s->mirror_rxq[pi->port_id]) in cxgb4_port_mirror_alloc_queues()
1333 mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL); in cxgb4_port_mirror_alloc_queues()
1335 return -ENOMEM; in cxgb4_port_mirror_alloc_queues()
1337 s->mirror_rxq[pi->port_id] = mirror_rxq; in cxgb4_port_mirror_alloc_queues()
1339 if (!(adap->flags & CXGB4_USING_MSIX)) in cxgb4_port_mirror_alloc_queues()
1340 msix = -((int)adap->sge.intrq.abs_id + 1); in cxgb4_port_mirror_alloc_queues()
1342 for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) { in cxgb4_port_mirror_alloc_queues()
1343 mirror_rxq = &s->mirror_rxq[pi->port_id][i]; in cxgb4_port_mirror_alloc_queues()
1353 mirror_rxq->msix = &adap->msix_info[msix]; in cxgb4_port_mirror_alloc_queues()
1354 snprintf(mirror_rxq->msix->desc, in cxgb4_port_mirror_alloc_queues()
1355 sizeof(mirror_rxq->msix->desc), in cxgb4_port_mirror_alloc_queues()
1356 "%s-mirrorrxq%d", dev->name, i); in cxgb4_port_mirror_alloc_queues()
1359 init_rspq(adap, &mirror_rxq->rspq, in cxgb4_port_mirror_alloc_queues()
1365 mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM; in cxgb4_port_mirror_alloc_queues()
1367 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false, in cxgb4_port_mirror_alloc_queues()
1368 dev, msix, &mirror_rxq->fl, in cxgb4_port_mirror_alloc_queues()
1373 /* Setup MSI-X vectors for Mirror Rxqs */ in cxgb4_port_mirror_alloc_queues()
1374 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_alloc_queues()
1375 ret = request_irq(mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1377 mirror_rxq->msix->desc, in cxgb4_port_mirror_alloc_queues()
1378 &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1382 cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1383 &mirror_rxq->msix->aff_mask, i); in cxgb4_port_mirror_alloc_queues()
1387 cxgb4_enable_rx(adap, &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1391 rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); in cxgb4_port_mirror_alloc_queues()
1393 ret = -ENOMEM; in cxgb4_port_mirror_alloc_queues()
1397 mirror_rxq = &s->mirror_rxq[pi->port_id][0]; in cxgb4_port_mirror_alloc_queues()
1398 for (i = 0; i < pi->rss_size; i++) in cxgb4_port_mirror_alloc_queues()
1399 rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id; in cxgb4_port_mirror_alloc_queues()
1401 ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror); in cxgb4_port_mirror_alloc_queues()
1409 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_alloc_queues()
1412 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_alloc_queues()
1415 while (rxqid-- > 0) in cxgb4_port_mirror_alloc_queues()
1417 &s->mirror_rxq[pi->port_id][rxqid]); in cxgb4_port_mirror_alloc_queues()
1419 kfree(s->mirror_rxq[pi->port_id]); in cxgb4_port_mirror_alloc_queues()
1420 s->mirror_rxq[pi->port_id] = NULL; in cxgb4_port_mirror_alloc_queues()
1428 struct sge *s = &adap->sge; in cxgb4_port_mirror_free_queues()
1431 if (!pi->vi_mirror_count) in cxgb4_port_mirror_free_queues()
1434 if (!s->mirror_rxq[pi->port_id]) in cxgb4_port_mirror_free_queues()
1437 for (i = 0; i < pi->nmirrorqsets; i++) in cxgb4_port_mirror_free_queues()
1439 &s->mirror_rxq[pi->port_id][i]); in cxgb4_port_mirror_free_queues()
1441 kfree(s->mirror_rxq[pi->port_id]); in cxgb4_port_mirror_free_queues()
1442 s->mirror_rxq[pi->port_id] = NULL; in cxgb4_port_mirror_free_queues()
1449 int ret, idx = -1; in cxgb4_port_mirror_start()
1451 if (!pi->vi_mirror_count) in cxgb4_port_mirror_start()
1459 ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror, in cxgb4_port_mirror_start()
1460 dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, in cxgb4_port_mirror_start()
1461 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, in cxgb4_port_mirror_start()
1462 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); in cxgb4_port_mirror_start()
1464 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1466 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1474 ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx, in cxgb4_port_mirror_start()
1475 dev->dev_addr, true, NULL); in cxgb4_port_mirror_start()
1477 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1479 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1490 ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true, in cxgb4_port_mirror_start()
1494 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1496 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1506 if (!pi->vi_mirror_count) in cxgb4_port_mirror_stop()
1509 t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false, in cxgb4_port_mirror_stop()
1519 if (!pi->nmirrorqsets) in cxgb4_port_mirror_alloc()
1520 return -EOPNOTSUPP; in cxgb4_port_mirror_alloc()
1522 mutex_lock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1523 if (pi->viid_mirror) { in cxgb4_port_mirror_alloc()
1524 pi->vi_mirror_count++; in cxgb4_port_mirror_alloc()
1528 ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0, in cxgb4_port_mirror_alloc()
1529 &pi->viid_mirror); in cxgb4_port_mirror_alloc()
1533 pi->vi_mirror_count = 1; in cxgb4_port_mirror_alloc()
1535 if (adap->flags & CXGB4_FULL_INIT_DONE) { in cxgb4_port_mirror_alloc()
1545 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1552 pi->vi_mirror_count = 0; in cxgb4_port_mirror_alloc()
1553 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_alloc()
1554 pi->viid_mirror = 0; in cxgb4_port_mirror_alloc()
1557 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1566 mutex_lock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_free()
1567 if (!pi->viid_mirror) in cxgb4_port_mirror_free()
1570 if (pi->vi_mirror_count > 1) { in cxgb4_port_mirror_free()
1571 pi->vi_mirror_count--; in cxgb4_port_mirror_free()
1578 pi->vi_mirror_count = 0; in cxgb4_port_mirror_free()
1579 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_free()
1580 pi->viid_mirror = 0; in cxgb4_port_mirror_free()
1583 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_free()
1587 * upper-layer driver support
1591 * Allocate an active-open TID and set it to the supplied value.
1595 int atid = -1; in cxgb4_alloc_atid()
1597 spin_lock_bh(&t->atid_lock); in cxgb4_alloc_atid()
1598 if (t->afree) { in cxgb4_alloc_atid()
1599 union aopen_entry *p = t->afree; in cxgb4_alloc_atid()
1601 atid = (p - t->atid_tab) + t->atid_base; in cxgb4_alloc_atid()
1602 t->afree = p->next; in cxgb4_alloc_atid()
1603 p->data = data; in cxgb4_alloc_atid()
1604 t->atids_in_use++; in cxgb4_alloc_atid()
1606 spin_unlock_bh(&t->atid_lock); in cxgb4_alloc_atid()
1612 * Release an active-open TID.
1616 union aopen_entry *p = &t->atid_tab[atid - t->atid_base]; in cxgb4_free_atid()
1618 spin_lock_bh(&t->atid_lock); in cxgb4_free_atid()
1619 p->next = t->afree; in cxgb4_free_atid()
1620 t->afree = p; in cxgb4_free_atid()
1621 t->atids_in_use--; in cxgb4_free_atid()
1622 spin_unlock_bh(&t->atid_lock); in cxgb4_free_atid()
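Editor's aside: a user-space sketch of the free-list scheme the atid table above uses; unused slots chain to the next free slot through a union, allocated slots hold the caller's data pointer, and the slot index is the atid. Simplified (no locking, no atid_base offset), with a made-up table size.

#include <stdio.h>

#define NATIDS 8

union aopen_slot {
	void *data;			/* while the atid is in use */
	union aopen_slot *next;		/* while on the free list   */
};

static union aopen_slot tab[NATIDS];
static union aopen_slot *afree;

static void init_atids(void)
{
	int i;

	for (i = 0; i < NATIDS - 1; i++)
		tab[i].next = &tab[i + 1];
	afree = &tab[0];		/* last slot's next stays NULL */
}

static int alloc_atid(void *data)
{
	union aopen_slot *p = afree;

	if (!p)
		return -1;
	afree = p->next;
	p->data = data;
	return (int)(p - tab);		/* the index is the atid */
}

static void free_atid(int atid)
{
	tab[atid].next = afree;
	afree = &tab[atid];
}

int main(void)
{
	static int cookie;
	int atid;

	init_atids();
	atid = alloc_atid(&cookie);
	printf("allocated atid %d\n", atid);
	free_atid(atid);
	return 0;
}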
1633 spin_lock_bh(&t->stid_lock); in cxgb4_alloc_stid()
1635 stid = find_first_zero_bit(t->stid_bmap, t->nstids); in cxgb4_alloc_stid()
1636 if (stid < t->nstids) in cxgb4_alloc_stid()
1637 __set_bit(stid, t->stid_bmap); in cxgb4_alloc_stid()
1639 stid = -1; in cxgb4_alloc_stid()
1641 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1); in cxgb4_alloc_stid()
1643 stid = -1; in cxgb4_alloc_stid()
1646 t->stid_tab[stid].data = data; in cxgb4_alloc_stid()
1647 stid += t->stid_base; in cxgb4_alloc_stid()
1653 t->stids_in_use += 2; in cxgb4_alloc_stid()
1654 t->v6_stids_in_use += 2; in cxgb4_alloc_stid()
1656 t->stids_in_use++; in cxgb4_alloc_stid()
1659 spin_unlock_bh(&t->stid_lock); in cxgb4_alloc_stid()
1670 spin_lock_bh(&t->stid_lock); in cxgb4_alloc_sftid()
1672 stid = find_next_zero_bit(t->stid_bmap, in cxgb4_alloc_sftid()
1673 t->nstids + t->nsftids, t->nstids); in cxgb4_alloc_sftid()
1674 if (stid < (t->nstids + t->nsftids)) in cxgb4_alloc_sftid()
1675 __set_bit(stid, t->stid_bmap); in cxgb4_alloc_sftid()
1677 stid = -1; in cxgb4_alloc_sftid()
1679 stid = -1; in cxgb4_alloc_sftid()
1682 t->stid_tab[stid].data = data; in cxgb4_alloc_sftid()
1683 stid -= t->nstids; in cxgb4_alloc_sftid()
1684 stid += t->sftid_base; in cxgb4_alloc_sftid()
1685 t->sftids_in_use++; in cxgb4_alloc_sftid()
1687 spin_unlock_bh(&t->stid_lock); in cxgb4_alloc_sftid()
1697 if (t->nsftids && (stid >= t->sftid_base)) { in cxgb4_free_stid()
1698 stid -= t->sftid_base; in cxgb4_free_stid()
1699 stid += t->nstids; in cxgb4_free_stid()
1701 stid -= t->stid_base; in cxgb4_free_stid()
1704 spin_lock_bh(&t->stid_lock); in cxgb4_free_stid()
1706 __clear_bit(stid, t->stid_bmap); in cxgb4_free_stid()
1708 bitmap_release_region(t->stid_bmap, stid, 1); in cxgb4_free_stid()
1709 t->stid_tab[stid].data = NULL; in cxgb4_free_stid()
1710 if (stid < t->nstids) { in cxgb4_free_stid()
1712 t->stids_in_use -= 2; in cxgb4_free_stid()
1713 t->v6_stids_in_use -= 2; in cxgb4_free_stid()
1715 t->stids_in_use--; in cxgb4_free_stid()
1718 t->sftids_in_use--; in cxgb4_free_stid()
1721 spin_unlock_bh(&t->stid_lock); in cxgb4_free_stid()
1747 void **p = &t->tid_tab[tid - t->tid_base]; in cxgb4_queue_tid_release()
1749 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1750 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1752 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1753 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1754 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1755 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1757 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1770 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1771 while (adap->tid_release_head) { in process_tid_release_list()
1772 void **p = adap->tid_release_head; in process_tid_release_list()
1774 p = (void *)p - chan; in process_tid_release_list()
1776 adap->tid_release_head = *p; in process_tid_release_list()
1778 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1784 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1786 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1788 adap->tid_release_task_busy = false; in process_tid_release_list()
1789 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
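Editor's aside: cxgb4_queue_tid_release() and process_tid_release_list() above stash a small channel number in the low bits of a slot pointer (the slots are pointer-aligned, so those bits are otherwise zero). A standalone illustration of that tagging trick, with assumed values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	void *slots[4];				/* stands in for tid_tab */
	unsigned int chan = 2;			/* must fit in the low, always-zero bits */
	void **p = &slots[1];
	void **tagged = (void **)((uintptr_t)p | chan);

	/* recover both pieces */
	unsigned int got_chan = (uintptr_t)tagged & 3;
	void **got_p = (void **)((uintptr_t)tagged & ~(uintptr_t)3);

	printf("chan=%u, slot index=%ld\n", got_chan, (long)(got_p - slots));
	return 0;
}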
1802 WARN_ON(tid_out_of_range(&adap->tids, tid)); in cxgb4_remove_tid()
1804 if (t->tid_tab[tid - adap->tids.tid_base]) { in cxgb4_remove_tid()
1805 t->tid_tab[tid - adap->tids.tid_base] = NULL; in cxgb4_remove_tid()
1806 atomic_dec(&t->conns_in_use); in cxgb4_remove_tid()
1807 if (t->hash_base && (tid >= t->hash_base)) { in cxgb4_remove_tid()
1809 atomic_sub(2, &t->hash_tids_in_use); in cxgb4_remove_tid()
1811 atomic_dec(&t->hash_tids_in_use); in cxgb4_remove_tid()
1814 atomic_sub(2, &t->tids_in_use); in cxgb4_remove_tid()
1816 atomic_dec(&t->tids_in_use); in cxgb4_remove_tid()
1835 unsigned int max_ftids = t->nftids + t->nsftids; in tid_init()
1836 unsigned int natids = t->natids; in tid_init()
1843 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); in tid_init()
1844 ftid_bmap_size = BITS_TO_LONGS(t->nftids); in tid_init()
1845 hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids); in tid_init()
1846 eotid_bmap_size = BITS_TO_LONGS(t->neotids); in tid_init()
1847 size = t->ntids * sizeof(*t->tid_tab) + in tid_init()
1848 natids * sizeof(*t->atid_tab) + in tid_init()
1849 t->nstids * sizeof(*t->stid_tab) + in tid_init()
1850 t->nsftids * sizeof(*t->stid_tab) + in tid_init()
1852 t->nhpftids * sizeof(*t->hpftid_tab) + in tid_init()
1854 max_ftids * sizeof(*t->ftid_tab) + in tid_init()
1856 t->neotids * sizeof(*t->eotid_tab) + in tid_init()
1859 t->tid_tab = kvzalloc(size, GFP_KERNEL); in tid_init()
1860 if (!t->tid_tab) in tid_init()
1861 return -ENOMEM; in tid_init()
1863 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; in tid_init()
1864 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; in tid_init()
1865 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; in tid_init()
1866 t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; in tid_init()
1867 t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids]; in tid_init()
1868 t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size]; in tid_init()
1869 t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids]; in tid_init()
1870 t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size]; in tid_init()
1871 t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids]; in tid_init()
1872 spin_lock_init(&t->stid_lock); in tid_init()
1873 spin_lock_init(&t->atid_lock); in tid_init()
1874 spin_lock_init(&t->ftid_lock); in tid_init()
1876 t->stids_in_use = 0; in tid_init()
1877 t->v6_stids_in_use = 0; in tid_init()
1878 t->sftids_in_use = 0; in tid_init()
1879 t->afree = NULL; in tid_init()
1880 t->atids_in_use = 0; in tid_init()
1881 atomic_set(&t->tids_in_use, 0); in tid_init()
1882 atomic_set(&t->conns_in_use, 0); in tid_init()
1883 atomic_set(&t->hash_tids_in_use, 0); in tid_init()
1884 atomic_set(&t->eotids_in_use, 0); in tid_init()
1888 while (--natids) in tid_init()
1889 t->atid_tab[natids - 1].next = &t->atid_tab[natids]; in tid_init()
1890 t->afree = t->atid_tab; in tid_init()
1894 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); in tid_init()
1896 if (!t->stid_base && in tid_init()
1897 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in tid_init()
1898 __set_bit(0, t->stid_bmap); in tid_init()
1900 if (t->neotids) in tid_init()
1901 bitmap_zero(t->eotid_bmap, t->neotids); in tid_init()
1904 if (t->nhpftids) in tid_init()
1905 bitmap_zero(t->hpftid_bmap, t->nhpftids); in tid_init()
1906 bitmap_zero(t->ftid_bmap, t->nftids); in tid_init()
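Editor's aside: tid_init() above makes one large allocation and carves it into several tables laid end to end, each pointer derived from the end of the previous array. A standalone, user-space sketch of that layout with made-up element types and counts:

#include <stdio.h>
#include <stdlib.h>

struct entry_a { void *data; };
struct entry_b { int state; };

int main(void)
{
	size_t na = 8, nb = 4;
	size_t size = na * sizeof(struct entry_a) + nb * sizeof(struct entry_b);
	char *blob = calloc(1, size);
	struct entry_a *a;
	struct entry_b *b;

	if (!blob)
		return 1;
	a = (struct entry_a *)blob;	/* first table at the start  */
	b = (struct entry_b *)&a[na];	/* next table right after it */
	printf("a=%p b=%p (offset %zu bytes)\n",
	       (void *)a, (void *)b, (size_t)((char *)b - (char *)a));
	free(blob);
	return 0;
}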
1911 * cxgb4_create_server - create an IP server
1934 return -ENOMEM; in cxgb4_create_server()
1940 req->local_port = sport; in cxgb4_create_server()
1941 req->peer_port = htons(0); in cxgb4_create_server()
1942 req->local_ip = sip; in cxgb4_create_server()
1943 req->peer_ip = htonl(0); in cxgb4_create_server()
1944 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1945 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); in cxgb4_create_server()
1946 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | in cxgb4_create_server()
1953 /* cxgb4_create_server6 - create an IPv6 server
1975 return -ENOMEM; in cxgb4_create_server6()
1981 req->local_port = sport; in cxgb4_create_server6()
1982 req->peer_port = htons(0); in cxgb4_create_server6()
1983 req->local_ip_hi = *(__be64 *)(sip->s6_addr); in cxgb4_create_server6()
1984 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); in cxgb4_create_server6()
1985 req->peer_ip_hi = cpu_to_be64(0); in cxgb4_create_server6()
1986 req->peer_ip_lo = cpu_to_be64(0); in cxgb4_create_server6()
1987 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1988 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); in cxgb4_create_server6()
1989 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | in cxgb4_create_server6()
2008 return -ENOMEM; in cxgb4_remove_server()
2013 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) : in cxgb4_remove_server()
2021 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2035 while (i < NMTUS - 1 && mtus[i + 1] <= mtu) in cxgb4_best_mtu()
2044 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
2066 unsigned short data_size_align_mask = data_size_align - 1; in cxgb4_best_aligned_mtu()
2074 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) { in cxgb4_best_aligned_mtu()
2075 unsigned short data_size = mtus[mtu_idx] - header_size; in cxgb4_best_aligned_mtu()
2095 mtu_idx--; in cxgb4_best_aligned_mtu()
2102 mtu_idx - aligned_mtu_idx <= 1) in cxgb4_best_aligned_mtu()
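Editor's aside: cxgb4_best_mtu() above walks a sorted 16-entry MTU table and stops at the last entry that still fits the requested MTU, and cxgb4_best_aligned_mtu() layers a data-size alignment preference on top of that. A standalone version of the basic table walk (the table contents are example values, not the firmware defaults):

#include <stdio.h>

#define NMTUS 16

int main(void)
{
	const unsigned short mtus[NMTUS] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned short mtu = 1400;
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	printf("best MTU for %u: %u (index %u)\n", mtu, mtus[i], i);
	return 0;
}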
2115 * cxgb4_port_chan - get the HW channel of a port
2122 return netdev2pinfo(dev)->tx_chan; in cxgb4_port_chan()
2127 * cxgb4_port_e2cchan - get the HW c-channel of a port
2130 * Return the HW RX c-channel of the given port.
2134 return netdev2pinfo(dev)->rx_cchan; in cxgb4_port_e2cchan()
2145 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
2157 * cxgb4_port_viid - get the VI id of a port
2164 return netdev2pinfo(dev)->viid; in cxgb4_port_viid()
2169 * cxgb4_port_idx - get the index of a port
2176 return netdev2pinfo(dev)->port_id; in cxgb4_port_idx()
2185 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2187 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2207 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS); in cxgb4_flush_eq_cache()
2217 spin_lock(&adap->win0_lock); in read_eq_indices()
2221 spin_unlock(&adap->win0_lock); in read_eq_indices()
2245 delta = pidx - hw_pidx; in cxgb4_sync_txq_pidx()
2247 delta = size - hw_pidx + pidx; in cxgb4_sync_txq_pidx()
2249 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
2273 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
2301 memaddr = offset - edc0_end; in cxgb4_read_tpte()
2305 memaddr = offset - edc1_end; in cxgb4_read_tpte()
2308 memaddr = offset - edc1_end; in cxgb4_read_tpte()
2309 } else if (is_t5(adap->params.chip)) { in cxgb4_read_tpte()
2315 memaddr = offset - mc0_end; in cxgb4_read_tpte()
2326 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
2328 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
2332 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
2334 return -EINVAL; in cxgb4_read_tpte()
2374 const struct net_device *netdev = neigh->dev; in check_neigh_update()
2378 parent = netdev->dev.parent; in check_neigh_update()
2379 if (parent && parent->driver == &cxgb4_driver.driver) in check_neigh_update()
2409 if (is_t4(adap->params.chip)) { in drain_db_fifo()
2428 spin_lock_irqsave(&q->db_lock, flags); in disable_txq_db()
2429 q->db_disabled = 1; in disable_txq_db()
2430 spin_unlock_irqrestore(&q->db_lock, flags); in disable_txq_db()
2435 spin_lock_irq(&q->db_lock); in enable_txq_db()
2436 if (q->db_pidx_inc) { in enable_txq_db()
2442 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc)); in enable_txq_db()
2443 q->db_pidx_inc = 0; in enable_txq_db()
2445 q->db_disabled = 0; in enable_txq_db()
2446 spin_unlock_irq(&q->db_lock); in enable_txq_db()
2453 for_each_ethrxq(&adap->sge, i) in disable_dbs()
2454 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
2457 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in disable_dbs()
2460 for_each_ofldtxq(&adap->sge, i) { in disable_dbs()
2461 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in disable_dbs()
2463 disable_txq_db(&txq->q); in disable_dbs()
2468 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
2475 for_each_ethrxq(&adap->sge, i) in enable_dbs()
2476 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
2479 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in enable_dbs()
2482 for_each_ofldtxq(&adap->sge, i) { in enable_dbs()
2483 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in enable_dbs()
2485 enable_txq_db(adap, &txq->q); in enable_dbs()
2490 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
2497 if (adap->uld && adap->uld[type].handle) in notify_rdma_uld()
2498 adap->uld[type].control(adap->uld[type].handle, cmd); in notify_rdma_uld()
2510 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_full()
2524 spin_lock_irq(&q->db_lock); in sync_txq_pidx()
2525 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
2528 if (q->db_pidx != hw_pidx) { in sync_txq_pidx()
2532 if (q->db_pidx >= hw_pidx) in sync_txq_pidx()
2533 delta = q->db_pidx - hw_pidx; in sync_txq_pidx()
2535 delta = q->size - hw_pidx + q->db_pidx; in sync_txq_pidx()
2537 if (is_t4(adap->params.chip)) in sync_txq_pidx()
2543 QID_V(q->cntxt_id) | val); in sync_txq_pidx()
2546 q->db_disabled = 0; in sync_txq_pidx()
2547 q->db_pidx_inc = 0; in sync_txq_pidx()
2548 spin_unlock_irq(&q->db_lock); in sync_txq_pidx()
2557 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2558 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2561 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in recover_all_queues()
2563 for_each_ofldtxq(&adap->sge, i) { in recover_all_queues()
2564 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in recover_all_queues()
2566 sync_txq_pidx(adap, &txq->q); in recover_all_queues()
2571 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2580 if (is_t4(adap->params.chip)) { in process_db_drop()
2588 } else if (is_t5(adap->params.chip)) { in process_db_drop()
2599 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2603 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2605 /* Re-enable BAR2 WC */ in process_db_drop()
2609 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_drop()
2615 if (is_t4(adap->params.chip)) { in t4_db_full()
2620 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2626 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2630 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
2649 list_del(&adap->list_node); in detach_ulds()
2652 if (adap->uld && adap->uld[i].handle) in detach_ulds()
2653 adap->uld[i].state_change(adap->uld[i].handle, in detach_ulds()
2669 if (adap->uld && adap->uld[i].handle) in notify_ulds()
2670 adap->uld[i].state_change(adap->uld[i].handle, in notify_ulds()
2680 struct net_device *event_dev = ifa->idev->dev; in cxgb4_inet6addr_handler()
2688 if (event_dev->flags & IFF_MASTER) { in cxgb4_inet6addr_handler()
2692 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2696 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2708 parent = event_dev->dev.parent; in cxgb4_inet6addr_handler()
2710 if (parent && parent->driver == &cxgb4_driver.driver) { in cxgb4_inet6addr_handler()
2739 dev = adap->port[i]; in update_clip()
2753 * cxgb_up - enable the adapter
2764 struct sge *s = &adap->sge; in cxgb_up()
2775 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_up()
2776 if (s->nd_msix_idx < 0) { in cxgb_up()
2777 err = -ENOMEM; in cxgb_up()
2781 err = request_irq(adap->msix_info[s->nd_msix_idx].vec, in cxgb_up()
2783 adap->msix_info[s->nd_msix_idx].desc, adap); in cxgb_up()
2791 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2792 (adap->flags & CXGB4_USING_MSI) ? 0 in cxgb_up()
2794 adap->port[0]->name, adap); in cxgb_up()
2802 adap->flags |= CXGB4_FULL_INIT_DONE; in cxgb_up()
2812 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap); in cxgb_up()
2814 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2824 cancel_work_sync(&adapter->tid_release_task); in cxgb_down()
2825 cancel_work_sync(&adapter->db_full_task); in cxgb_down()
2826 cancel_work_sync(&adapter->db_drop_task); in cxgb_down()
2827 adapter->tid_release_task_busy = false; in cxgb_down()
2828 adapter->tid_release_head = NULL; in cxgb_down()
2833 adapter->flags &= ~CXGB4_FULL_INIT_DONE; in cxgb_down()
2842 struct adapter *adapter = pi->adapter; in cxgb_open()
2847 if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_open()
2864 if (pi->nmirrorqsets) { in cxgb_open()
2865 mutex_lock(&pi->vi_mirror_mutex); in cxgb_open()
2873 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_open()
2883 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_open()
2890 struct adapter *adapter = pi->adapter; in cxgb_close()
2895 ret = t4_enable_pi_params(adapter, adapter->pf, pi, in cxgb_close()
2904 if (pi->nmirrorqsets) { in cxgb_close()
2905 mutex_lock(&pi->vi_mirror_mutex); in cxgb_close()
2908 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_close()
2927 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2928 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2932 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2940 if (f->valid) in cxgb4_create_server_filter()
2944 memset(&f->fs, 0, sizeof(struct ch_filter_specification)); in cxgb4_create_server_filter()
2945 f->fs.val.lport = be16_to_cpu(sport); in cxgb4_create_server_filter()
2946 f->fs.mask.lport = ~0; in cxgb4_create_server_filter()
2950 f->fs.val.lip[i] = val[i]; in cxgb4_create_server_filter()
2951 f->fs.mask.lip[i] = ~0; in cxgb4_create_server_filter()
2953 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2954 f->fs.val.iport = port; in cxgb4_create_server_filter()
2955 f->fs.mask.iport = mask; in cxgb4_create_server_filter()
2959 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2960 f->fs.val.proto = IPPROTO_TCP; in cxgb4_create_server_filter()
2961 f->fs.mask.proto = ~0; in cxgb4_create_server_filter()
2964 f->fs.dirsteer = 1; in cxgb4_create_server_filter()
2965 f->fs.iq = queue; in cxgb4_create_server_filter()
2967 f->locked = 1; in cxgb4_create_server_filter()
2968 f->fs.rpttid = 1; in cxgb4_create_server_filter()
2973 f->tid = stid + adap->tids.ftid_base; in cxgb4_create_server_filter()
2993 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2994 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2996 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
2998 f->locked = 0; in cxgb4_remove_server_filter()
3009 struct adapter *adapter = p->adapter; in cxgb_get_stats()
3015 spin_lock(&adapter->stats_lock); in cxgb_get_stats()
3017 spin_unlock(&adapter->stats_lock); in cxgb_get_stats()
3020 t4_get_port_stats_offset(adapter, p->tx_chan, &stats, in cxgb_get_stats()
3021 &p->stats_base); in cxgb_get_stats()
3022 spin_unlock(&adapter->stats_lock); in cxgb_get_stats()
3024 ns->tx_bytes = stats.tx_octets; in cxgb_get_stats()
3025 ns->tx_packets = stats.tx_frames; in cxgb_get_stats()
3026 ns->rx_bytes = stats.rx_octets; in cxgb_get_stats()
3027 ns->rx_packets = stats.rx_frames; in cxgb_get_stats()
3028 ns->multicast = stats.rx_mcast_frames; in cxgb_get_stats()
3031 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + in cxgb_get_stats()
3033 ns->rx_over_errors = 0; in cxgb_get_stats()
3034 ns->rx_crc_errors = stats.rx_fcs_err; in cxgb_get_stats()
3035 ns->rx_frame_errors = stats.rx_symbol_err; in cxgb_get_stats()
3036 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 + in cxgb_get_stats()
3040 ns->rx_missed_errors = 0; in cxgb_get_stats()
3043 ns->tx_aborted_errors = 0; in cxgb_get_stats()
3044 ns->tx_carrier_errors = 0; in cxgb_get_stats()
3045 ns->tx_fifo_errors = 0; in cxgb_get_stats()
3046 ns->tx_heartbeat_errors = 0; in cxgb_get_stats()
3047 ns->tx_window_errors = 0; in cxgb_get_stats()
3049 ns->tx_errors = stats.tx_error_frames; in cxgb_get_stats()
3050 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + in cxgb_get_stats()
3051 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; in cxgb_get_stats()
3059 struct adapter *adapter = pi->adapter; in cxgb_ioctl()
3060 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; in cxgb_ioctl()
3064 if (pi->mdio_addr < 0) in cxgb_ioctl()
3065 return -EOPNOTSUPP; in cxgb_ioctl()
3066 data->phy_id = pi->mdio_addr; in cxgb_ioctl()
3070 if (mdio_phy_id_is_c45(data->phy_id)) { in cxgb_ioctl()
3071 prtad = mdio_phy_id_prtad(data->phy_id); in cxgb_ioctl()
3072 devad = mdio_phy_id_devad(data->phy_id); in cxgb_ioctl()
3073 } else if (data->phy_id < 32) { in cxgb_ioctl()
3074 prtad = data->phy_id; in cxgb_ioctl()
3076 data->reg_num &= 0x1f; in cxgb_ioctl()
3078 return -EINVAL; in cxgb_ioctl()
3080 mbox = pi->adapter->pf; in cxgb_ioctl()
3082 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, in cxgb_ioctl()
3083 data->reg_num, &data->val_out); in cxgb_ioctl()
3085 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, in cxgb_ioctl()
3086 data->reg_num, data->val_in); in cxgb_ioctl()
3089 return copy_to_user(req->ifr_data, &pi->tstamp_config, in cxgb_ioctl()
3090 sizeof(pi->tstamp_config)) ? in cxgb_ioctl()
3091 -EFAULT : 0; in cxgb_ioctl()
3093 if (copy_from_user(&pi->tstamp_config, req->ifr_data, in cxgb_ioctl()
3094 sizeof(pi->tstamp_config))) in cxgb_ioctl()
3095 return -EFAULT; in cxgb_ioctl()
3097 if (!is_t4(adapter->params.chip)) { in cxgb_ioctl()
3098 switch (pi->tstamp_config.tx_type) { in cxgb_ioctl()
3103 return -ERANGE; in cxgb_ioctl()
3106 switch (pi->tstamp_config.rx_filter) { in cxgb_ioctl()
3108 pi->rxtstamp = false; in cxgb_ioctl()
3112 cxgb4_ptprx_timestamping(pi, pi->port_id, in cxgb_ioctl()
3116 cxgb4_ptprx_timestamping(pi, pi->port_id, in cxgb_ioctl()
3124 pi->rxtstamp = true; in cxgb_ioctl()
3127 pi->tstamp_config.rx_filter = in cxgb_ioctl()
3129 return -ERANGE; in cxgb_ioctl()
3132 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) && in cxgb_ioctl()
3133 (pi->tstamp_config.rx_filter == in cxgb_ioctl()
3135 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) in cxgb_ioctl()
3136 pi->ptp_enable = false; in cxgb_ioctl()
3139 if (pi->tstamp_config.rx_filter != in cxgb_ioctl()
3143 pi->ptp_enable = true; in cxgb_ioctl()
3147 switch (pi->tstamp_config.rx_filter) { in cxgb_ioctl()
3149 pi->rxtstamp = false; in cxgb_ioctl()
3152 pi->rxtstamp = true; in cxgb_ioctl()
3155 pi->tstamp_config.rx_filter = in cxgb_ioctl()
3157 return -ERANGE; in cxgb_ioctl()
3160 return copy_to_user(req->ifr_data, &pi->tstamp_config, in cxgb_ioctl()
3161 sizeof(pi->tstamp_config)) ? in cxgb_ioctl()
3162 -EFAULT : 0; in cxgb_ioctl()
3164 return -EOPNOTSUPP; in cxgb_ioctl()
3172 set_rxmode(dev, -1, false); in cxgb_set_rxmode()
3180 ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, in cxgb_change_mtu()
3181 pi->viid_mirror, new_mtu, -1, -1, -1, -1, true); in cxgb_change_mtu()
3183 WRITE_ONCE(dev->mtu, new_mtu); in cxgb_change_mtu()
3206 err = t4_get_raw_vpd_params(adap, &adap->params.vpd); in cxgb4_mgmt_fill_vf_station_mac_addr()
3210 na = adap->params.vpd.na; in cxgb4_mgmt_fill_vf_station_mac_addr()
3226 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); in cxgb4_mgmt_fill_vf_station_mac_addr()
3228 macaddr[5] = adap->pf * nvfs + vf; in cxgb4_mgmt_fill_vf_station_mac_addr()
3229 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); in cxgb4_mgmt_fill_vf_station_mac_addr()
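Editor's aside: the loop above gives every VF a station MAC whose final octet is pf * total_vfs + vf, so PFs and their VFs end up with distinct addresses. A standalone sketch with an assumed, locally administered base address:

#include <stdio.h>

int main(void)
{
	unsigned char macaddr[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 };
	unsigned int pf = 1, nvfs = 16, vf;	/* assumed PF index and VF count */

	for (vf = 0; vf < 3; vf++) {
		macaddr[5] = pf * nvfs + vf;
		printf("VF%u -> %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       macaddr[0], macaddr[1], macaddr[2],
		       macaddr[3], macaddr[4], macaddr[5]);
	}
	return 0;
}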
3236 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_mac()
3241 dev_err(pi->adapter->pdev_dev, in cxgb4_mgmt_set_vf_mac()
3244 return -EINVAL; in cxgb4_mgmt_set_vf_mac()
3247 dev_info(pi->adapter->pdev_dev, in cxgb4_mgmt_set_vf_mac()
3251 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); in cxgb4_mgmt_set_vf_mac()
3259 struct adapter *adap = pi->adapter; in cxgb4_mgmt_get_vf_config()
3262 if (vf >= adap->num_vfs) in cxgb4_mgmt_get_vf_config()
3263 return -EINVAL; in cxgb4_mgmt_get_vf_config()
3264 vfinfo = &adap->vfinfo[vf]; in cxgb4_mgmt_get_vf_config()
3266 ivi->vf = vf; in cxgb4_mgmt_get_vf_config()
3267 ivi->max_tx_rate = vfinfo->tx_rate; in cxgb4_mgmt_get_vf_config()
3268 ivi->min_tx_rate = 0; in cxgb4_mgmt_get_vf_config()
3269 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr); in cxgb4_mgmt_get_vf_config()
3270 ivi->vlan = vfinfo->vlan; in cxgb4_mgmt_get_vf_config()
3271 ivi->linkstate = vfinfo->link_state; in cxgb4_mgmt_get_vf_config()
3281 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; in cxgb4_mgmt_get_phys_port_id()
3282 ppid->id_len = sizeof(phy_port_id); in cxgb4_mgmt_get_phys_port_id()
3283 memcpy(ppid->id, &phy_port_id, ppid->id_len); in cxgb4_mgmt_get_phys_port_id()
3291 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_rate()
3298 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_rate()
3299 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3302 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3305 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3314 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_rate()
3317 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3319 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3320 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3322 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3324 adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3325 adap->vfinfo[vf].tx_rate = 0; in cxgb4_mgmt_set_vf_rate()
3331 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3333 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3337 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); in cxgb4_mgmt_set_vf_rate()
3338 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3342 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3343 "Max tx rate %d for VF %d can't be > link-speed %u", in cxgb4_mgmt_set_vf_rate()
3345 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3350 pktsize = pktsize - sizeof(struct ethhdr) - 4; in cxgb4_mgmt_set_vf_rate()
3352 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr); in cxgb4_mgmt_set_vf_rate()
3353 /* configure Traffic Class for rate-limiting */ in cxgb4_mgmt_set_vf_rate()
3359 pi->tx_chan, class_id, 0, in cxgb4_mgmt_set_vf_rate()
3362 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", in cxgb4_mgmt_set_vf_rate()
3364 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3366 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3374 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, in cxgb4_mgmt_set_vf_rate()
3377 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3379 ret, adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3380 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3382 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", in cxgb4_mgmt_set_vf_rate()
3383 adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3384 adap->vfinfo[vf].tx_rate = max_tx_rate; in cxgb4_mgmt_set_vf_rate()
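Editor's aside: a worked version of the packet-size arithmetic above, assuming an MTU of 1500 and that the 4 trailing bytes are the frame CRC; the rate limiter is configured against the typical TCP payload rather than the full frame.

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int pktsize = mtu - 14 - 4;	/* Ethernet header + assumed CRC */

	pktsize -= 20 + 20;			/* IPv4 header + TCP header */
	printf("typical payload: %u bytes\n", pktsize);	/* 1442 */
	return 0;
}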
3392 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_vlan()
3395 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7) in cxgb4_mgmt_set_vf_vlan()
3396 return -EINVAL; in cxgb4_mgmt_set_vf_vlan()
3399 return -EPROTONOSUPPORT; in cxgb4_mgmt_set_vf_vlan()
3401 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan); in cxgb4_mgmt_set_vf_vlan()
3403 adap->vfinfo[vf].vlan = vlan; in cxgb4_mgmt_set_vf_vlan()
3407 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n", in cxgb4_mgmt_set_vf_vlan()
3408 ret, (vlan ? "setting" : "clearing"), adap->pf, vf); in cxgb4_mgmt_set_vf_vlan()
3416 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_link_state()
3420 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_link_state()
3421 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3437 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3442 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_link_state()
3445 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_link_state()
3447 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_link_state()
3448 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3451 adap->vfinfo[vf].link_state = link; in cxgb4_mgmt_set_vf_link_state()
3462 if (!is_valid_ether_addr(addr->sa_data)) in cxgb_set_mac_addr()
3463 return -EADDRNOTAVAIL; in cxgb_set_mac_addr()
3465 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, in cxgb_set_mac_addr()
3466 addr->sa_data, true, &pi->smt_idx); in cxgb_set_mac_addr()
3470 eth_hw_addr_set(dev, addr->sa_data); in cxgb_set_mac_addr()
3478 struct adapter *adap = pi->adapter; in cxgb_netpoll()
3480 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_netpoll()
3482 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
3484 for (i = pi->nqsets; i; i--, rx++) in cxgb_netpoll()
3485 t4_sge_intr_msix(0, &rx->rspq); in cxgb_netpoll()
3494 struct adapter *adap = pi->adapter; in cxgb_set_tx_maxrate()
3502 return -ENOTSUPP; in cxgb_set_tx_maxrate()
3504 if (index < 0 || index > pi->nqsets - 1) in cxgb_set_tx_maxrate()
3505 return -EINVAL; in cxgb_set_tx_maxrate()
3507 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_set_tx_maxrate()
3508 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3511 return -EINVAL; in cxgb_set_tx_maxrate()
3516 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) { in cxgb_set_tx_maxrate()
3517 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3519 index, e->idx, e->info.u.params.level); in cxgb_set_tx_maxrate()
3520 return -EBUSY; in cxgb_set_tx_maxrate()
3528 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3531 return -ERANGE; in cxgb_set_tx_maxrate()
3541 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3543 index, pi->port_id, err); in cxgb_set_tx_maxrate()
3557 p.u.params.channel = pi->tx_chan; in cxgb_set_tx_maxrate()
3562 p.u.params.pktsize = dev->mtu; in cxgb_set_tx_maxrate()
3566 return -ENOMEM; in cxgb_set_tx_maxrate()
3571 qe.class = e->idx; in cxgb_set_tx_maxrate()
3575 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3583 switch (cls_flower->command) { in cxgb_setup_tc_flower()
3591 return -EOPNOTSUPP; in cxgb_setup_tc_flower()
3598 switch (cls_u32->command) { in cxgb_setup_tc_cls_u32()
3605 return -EOPNOTSUPP; in cxgb_setup_tc_cls_u32()
3615 if (!adap->tc_matchall) in cxgb_setup_tc_matchall()
3616 return -ENOMEM; in cxgb_setup_tc_matchall()
3618 switch (cls_matchall->command) { in cxgb_setup_tc_matchall()
3631 return -EOPNOTSUPP; in cxgb_setup_tc_matchall()
3641 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_ingress_cb()
3642 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_ingress_cb()
3644 pi->port_id); in cxgb_setup_tc_block_ingress_cb()
3645 return -EINVAL; in cxgb_setup_tc_block_ingress_cb()
3649 return -EOPNOTSUPP; in cxgb_setup_tc_block_ingress_cb()
3659 return -EOPNOTSUPP; in cxgb_setup_tc_block_ingress_cb()
3670 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_egress_cb()
3671 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_egress_cb()
3673 pi->port_id); in cxgb_setup_tc_block_egress_cb()
3674 return -EINVAL; in cxgb_setup_tc_block_egress_cb()
3678 return -EOPNOTSUPP; in cxgb_setup_tc_block_egress_cb()
3687 return -EOPNOTSUPP; in cxgb_setup_tc_block_egress_cb()
3695 if (!is_ethofld(adap) || !adap->tc_mqprio) in cxgb_setup_tc_mqprio()
3696 return -ENOMEM; in cxgb_setup_tc_mqprio()
3710 pi->tc_block_shared = f->block_shared; in cxgb_setup_tc_block()
3711 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { in cxgb_setup_tc_block()
3732 return -EOPNOTSUPP; in cxgb_setup_tc()
3741 struct adapter *adapter = pi->adapter; in cxgb_udp_tunnel_unset_port()
3745 switch (ti->type) { in cxgb_udp_tunnel_unset_port()
3747 adapter->vxlan_port = 0; in cxgb_udp_tunnel_unset_port()
3751 adapter->geneve_port = 0; in cxgb_udp_tunnel_unset_port()
3755 return -EINVAL; in cxgb_udp_tunnel_unset_port()
3761 if (!adapter->rawf_cnt) in cxgb_udp_tunnel_unset_port()
3765 ret = t4_free_raw_mac_filt(adapter, pi->viid, in cxgb_udp_tunnel_unset_port()
3767 adapter->rawf_start + pi->port_id, in cxgb_udp_tunnel_unset_port()
3768 1, pi->port_id, false); in cxgb_udp_tunnel_unset_port()
3784 struct adapter *adapter = pi->adapter; in cxgb_udp_tunnel_set_port()
3788 switch (ti->type) { in cxgb_udp_tunnel_set_port()
3790 adapter->vxlan_port = ti->port; in cxgb_udp_tunnel_set_port()
3792 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); in cxgb_udp_tunnel_set_port()
3795 adapter->geneve_port = ti->port; in cxgb_udp_tunnel_set_port()
3797 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); in cxgb_udp_tunnel_set_port()
3800 return -EINVAL; in cxgb_udp_tunnel_set_port()
3812 ret = t4_alloc_raw_mac_filt(adapter, pi->viid, in cxgb_udp_tunnel_set_port()
3815 adapter->rawf_start + pi->port_id, in cxgb_udp_tunnel_set_port()
3816 1, pi->port_id, false); in cxgb_udp_tunnel_set_port()
3819 be16_to_cpu(ti->port)); in cxgb_udp_tunnel_set_port()
3841 struct adapter *adapter = pi->adapter; in cxgb_features_check()
3843 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) in cxgb_features_check()
3847 if (!skb->encapsulation || cxgb_encap_offload_supported(skb)) in cxgb_features_check()
3905 strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); in cxgb4_mgmt_get_drvinfo()
3906 strscpy(info->bus_info, pci_name(adapter->pdev), in cxgb4_mgmt_get_drvinfo()
3907 sizeof(info->bus_info)); in cxgb4_mgmt_get_drvinfo()
3927 if (pci_channel_offline(adap->pdev)) in t4_fatal_err()
3935 struct net_device *dev = adap->port[port]; in t4_fatal_err()
3946 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
3947 queue_work(adap->workq, &adap->fatal_err_notify_task); in t4_fatal_err()
3959 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3965 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3966 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3972 adap->vres.ocq.start); in setup_memwin_rdma()
4005 if (!adapter->hma.sgt) in adap_free_hma_mem()
4008 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) { in adap_free_hma_mem()
4009 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl, in adap_free_hma_mem()
4010 adapter->hma.sgt->nents, DMA_BIDIRECTIONAL); in adap_free_hma_mem()
4011 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG; in adap_free_hma_mem()
4014 for_each_sg(adapter->hma.sgt->sgl, iter, in adap_free_hma_mem()
4015 adapter->hma.sgt->orig_nents, i) { in adap_free_hma_mem()
4021 kfree(adapter->hma.phy_addr); in adap_free_hma_mem()
4022 sg_free_table(adapter->hma.sgt); in adap_free_hma_mem()
4023 kfree(adapter->hma.sgt); in adap_free_hma_mem()
4024 adapter->hma.sgt = NULL; in adap_free_hma_mem()
4043 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) in adap_config_hma()
4049 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, in adap_config_hma()
4059 dev_err(adapter->pdev_dev, in adap_config_hma()
4060 "HMA size %uMB beyond bounds(%u-%lu)MB\n", in adap_config_hma()
4062 return -EINVAL; in adap_config_hma()
4067 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL); in adap_config_hma()
4068 if (unlikely(!adapter->hma.sgt)) { in adap_config_hma()
4069 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n"); in adap_config_hma()
4070 return -ENOMEM; in adap_config_hma()
4072 sgt = adapter->hma.sgt; in adap_config_hma()
4075 sgt->orig_nents = (hma_size << 20) / (page_size << page_order); in adap_config_hma()
4076 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) { in adap_config_hma()
4077 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n"); in adap_config_hma()
4078 kfree(adapter->hma.sgt); in adap_config_hma()
4079 adapter->hma.sgt = NULL; in adap_config_hma()
4080 return -ENOMEM; in adap_config_hma()
4083 sgl = adapter->hma.sgt->sgl; in adap_config_hma()
4084 node = dev_to_node(adapter->pdev_dev); in adap_config_hma()
4085 for_each_sg(sgl, iter, sgt->orig_nents, i) { in adap_config_hma()
4089 dev_err(adapter->pdev_dev, in adap_config_hma()
4091 ret = -ENOMEM; in adap_config_hma()
4097 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents, in adap_config_hma()
4099 if (!sgt->nents) { in adap_config_hma()
4100 dev_err(adapter->pdev_dev, in adap_config_hma()
4102 ret = -ENOMEM; in adap_config_hma()
4105 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG; in adap_config_hma()
4107 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t), in adap_config_hma()
4109 if (unlikely(!adapter->hma.phy_addr)) in adap_config_hma()
4112 for_each_sg(sgl, iter, sgt->nents, i) { in adap_config_hma()
4114 adapter->hma.phy_addr[i] = sg_dma_address(iter); in adap_config_hma()
4117 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD); in adap_config_hma()
4126 eoc = (i == ncmds - 1) ? 1 : 0; in adap_config_hma()
4131 if (i == ncmds - 1) { in adap_config_hma()
4132 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD; in adap_config_hma()
4157 cpu_to_be64(adapter->hma.phy_addr[j + k]); in adap_config_hma()
4159 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd, in adap_config_hma()
4162 dev_err(adapter->pdev_dev, in adap_config_hma()
4169 dev_info(adapter->pdev_dev, in adap_config_hma()
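Editor's note: adap_config_hma() above hands the DMA addresses of the Host Memory Access pages to firmware in batches of at most HMA_MAX_ADDR_IN_CMD per mailbox command, with the last command carrying the remainder and an end-of-command flag. The self-contained sketch below models that chunking; MAX_ADDR_PER_CMD and send_cmd() are made-up stand-ins, not driver APIs.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch only: split a long address list into fixed-size
 * batches, marking the last batch and sizing it by the remainder. */
#define MAX_ADDR_PER_CMD 5

static void send_cmd(const uint64_t *addrs, unsigned int naddr, int eoc)
{
	printf("cmd: %u addresses, end-of-command=%d\n", naddr, eoc);
	for (unsigned int i = 0; i < naddr; i++)
		printf("  addr[%u] = 0x%llx\n", i, (unsigned long long)addrs[i]);
}

int main(void)
{
	uint64_t addrs[12];
	unsigned int nents = 12;

	for (unsigned int i = 0; i < nents; i++)
		addrs[i] = 0x100000000ull + ((uint64_t)i << 21);	/* fake 2MB pages */

	unsigned int ncmds = (nents + MAX_ADDR_PER_CMD - 1) / MAX_ADDR_PER_CMD;
	for (unsigned int i = 0, done = 0; i < ncmds; i++) {
		int last = (i == ncmds - 1);
		unsigned int naddr = MAX_ADDR_PER_CMD;

		if (last && nents % MAX_ADDR_PER_CMD)
			naddr = nents % MAX_ADDR_PER_CMD;	/* remainder on the last command */

		send_cmd(&addrs[done], naddr, last);
		done += naddr;
	}
	return 0;
}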
4188 dev_err(adap->pdev_dev, in adap_init1()
4195 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | in adap_init1()
4197 c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); in adap_init1()
4198 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); in adap_init1()
4202 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | in adap_init1()
4204 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); in adap_init1()
4208 ret = t4_config_glbl_rss(adap, adap->pf, in adap_init1()
4215 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, in adap_init1()
4225 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
4231 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
4233 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
4259 return t4_early_init(adap, adap->pf); in adap_init1()
4277 * them) but need to be explicitly set if we're using hard-coded
4281 * Configuration Files and hard-coded initialization ...
4286 * Fix up various Host-Dependent Parameters like Page Size, Cache in adap_init0_tweaks()
4296 dev_err(&adapter->pdev->dev, in adap_init0_tweaks()
4315 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
4378 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
4391 phy_info = find_phy_info(adap->pdev->device); in adap_init0_phy()
4393 dev_warn(adap->pdev_dev, in adap_init0_phy()
4395 return -EOPNOTSUPP; in adap_init0_phy()
4403 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file, in adap_init0_phy()
4404 adap->pdev_dev); in adap_init0_phy()
4412 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " in adap_init0_phy()
4414 phy_info->phy_fw_file, -ret); in adap_init0_phy()
4415 if (phy_info->phy_flash) { in adap_init0_phy()
4419 dev_warn(adap->pdev_dev, "continuing with, on-adapter " in adap_init0_phy()
4429 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, in adap_init0_phy()
4430 (u8 *)phyf->data, phyf->size); in adap_init0_phy()
4432 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", in adap_init0_phy()
4433 -ret); in adap_init0_phy()
4437 if (phy_info->phy_fw_version) in adap_init0_phy()
4438 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data, in adap_init0_phy()
4439 phyf->size); in adap_init0_phy()
4440 dev_info(adap->pdev_dev, "Successfully transferred PHY " in adap_init0_phy()
4442 phy_info->phy_fw_file, new_phy_fw_ver); in adap_init0_phy()
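Editor's note: adap_init0_phy() above requests an external PHY firmware image and, if the image is missing but the adapter already holds a copy in flash, warns and continues instead of failing. The userspace sketch below captures only that fallback decision; the file name and have_flash_copy flag are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch only: load an external firmware image, falling back
 * to an on-adapter copy with a warning when the file is absent. */
static int load_phy_firmware(const char *path, int have_flash_copy)
{
	FILE *f = fopen(path, "rb");

	if (!f) {
		if (have_flash_copy) {
			fprintf(stderr,
				"warning: %s not found, continuing with on-adapter copy\n",
				path);
			return 0;
		}
		fprintf(stderr, "error: %s not found and no flash copy\n", path);
		return -1;
	}

	/* ... transfer the file contents to the PHY here ... */
	fclose(f);
	return 0;
}

int main(void)
{
	return load_phy_firmware("phy_fw.bin", 1) ? EXIT_FAILURE : EXIT_SUCCESS;
}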
4468 ret = t4_fw_reset(adapter, adapter->mbox, in adap_init0_config()
4474 /* If this is a 10Gb/s-BT adapter make sure the chip-external in adap_init0_config()
4475 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs in adap_init0_config()
4479 if (is_10gbt_device(adapter->pdev->device)) { in adap_init0_config()
4489 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { in adap_init0_config()
4500 dev_err(adapter->pdev_dev, "Device %d is not supported\n", in adap_init0_config()
4501 adapter->pdev->device); in adap_init0_config()
4502 ret = -EINVAL; in adap_init0_config()
4506 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); in adap_init0_config()
4518 if (cf->size >= FLASH_CFG_MAX_SIZE) in adap_init0_config()
4519 ret = -ENOMEM; in adap_init0_config()
4523 ret = t4_query_params(adapter, adapter->mbox, in adap_init0_config()
4524 adapter->pf, 0, 1, params, val); in adap_init0_config()
4536 size_t resid = cf->size & 0x3; in adap_init0_config()
4537 size_t size = cf->size & ~0x3; in adap_init0_config()
4538 __be32 *data = (__be32 *)cf->data; in adap_init0_config()
4543 spin_lock(&adapter->win0_lock); in adap_init0_config()
4561 spin_unlock(&adapter->win0_lock); in adap_init0_config()
4577 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, in adap_init0_config()
4584 dev_warn(adapter->pdev_dev, in adap_init0_config()
4604 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0_config()
4613 if (ret == -ENOENT) { in adap_init0_config()
4620 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, in adap_init0_config()
4633 dev_warn(adapter->pdev_dev, "Configuration File checksum "\ in adap_init0_config()
4645 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0_config()
4661 dev_err(adapter->pdev_dev, in adap_init0_config()
4664 if (is_t6(adapter->params.chip)) { in adap_init0_config()
4668 dev_info(adapter->pdev_dev, "Successfully enabled " in adap_init0_config()
4676 ret = t4_fw_initialize(adapter, adapter->mbox); in adap_init0_config()
4683 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ in adap_init0_config()
4694 if (config_issued && ret != -ENOENT) in adap_init0_config()
4695 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", in adap_init0_config()
4696 config_name, -ret); in adap_init0_config()
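Editor's note: when adap_init0_config() above copies the Configuration File through the 4-byte memory window, it splits the image into whole 32-bit words (size & ~0x3) and a residual tail (size & 0x3). The sketch below is a self-contained model of that split; write targets are just printf() stand-ins.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative sketch only: copy a buffer as whole 32-bit words plus a
 * zero-padded residual word, as the memory-window copy above does. */
static void copy_config(const uint8_t *buf, size_t len)
{
	size_t resid = len & 0x3;		/* trailing bytes short of a word */
	size_t size  = len & ~(size_t)0x3;	/* whole 32-bit words */

	for (size_t off = 0; off < size; off += 4) {
		uint32_t word;

		memcpy(&word, buf + off, 4);
		printf("write word 0x%08x at offset %zu\n", (unsigned int)word, off);
	}

	if (resid) {
		uint32_t last = 0;

		memcpy(&last, buf + size, resid);	/* zero-padded tail */
		printf("write %zu residual bytes (padded word 0x%08x)\n",
		       resid, (unsigned int)last);
	}
}

int main(void)
{
	const uint8_t cfg[] = "# example configuration text\n";

	copy_config(cfg, sizeof(cfg) - 1);
	return 0;
}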
4778 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, in adap_init0()
4781 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
4785 if (ret == adap->mbox) in adap_init0()
4786 adap->flags |= CXGB4_MASTER_PF; in adap_init0()
4801 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
4811 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4813 dev_err(adap->pdev_dev, in adap_init0()
4815 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4816 return -EINVAL; in adap_init0()
4824 ret = -ENOMEM; in adap_init0()
4829 ret = request_firmware(&fw, fw_info->fw_mod_name, in adap_init0()
4830 adap->pdev_dev); in adap_init0()
4832 dev_err(adap->pdev_dev, in adap_init0()
4834 fw_info->fw_mod_name, ret); in adap_init0()
4836 fw_data = fw->data; in adap_init0()
4837 fw_size = fw->size; in adap_init0()
4858 dev_err(adap->pdev_dev, in adap_init0()
4861 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
4863 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
4865 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
4873 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in adap_init0()
4880 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
4890 if (ret == -ENOENT) { in adap_init0()
4891 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
4896 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
4897 "adapter, error %d\n", -ret); in adap_init0()
4908 dev_err(adap->pdev_dev, in adap_init0()
4924 ret = t4_get_vpd_params(adap, &adap->params.vpd); in adap_init0()
4936 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); in adap_init0()
4940 adap->params.nports = hweight32(port_vec); in adap_init0()
4941 adap->params.portvec = port_vec; in adap_init0()
4956 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4960 adap->sge.dbqtimer_tick = val[0]; in adap_init0()
4962 ARRAY_SIZE(adap->sge.dbqtimer_val), in adap_init0()
4963 adap->sge.dbqtimer_val); in adap_init0()
4967 adap->flags |= CXGB4_SGE_DBQ_TIMER; in adap_init0()
4969 if (is_bypass_device(adap->pdev->device)) in adap_init0()
4970 adap->params.bypass = 1; in adap_init0()
4981 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); in adap_init0()
4984 adap->sge.egr_start = val[0]; in adap_init0()
4985 adap->l2t_start = val[1]; in adap_init0()
4986 adap->l2t_end = val[2]; in adap_init0()
4987 adap->tids.ftid_base = val[3]; in adap_init0()
4988 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
4989 adap->sge.ingr_start = val[5]; in adap_init0()
4991 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { in adap_init0()
4994 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
4999 adap->tids.hpftid_base = val[0]; in adap_init0()
5000 adap->tids.nhpftids = val[1] - val[0] + 1; in adap_init0()
5007 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5010 adap->rawf_start = val[0]; in adap_init0()
5011 adap->rawf_cnt = val[1] - val[0] + 1; in adap_init0()
5014 adap->tids.tid_base = in adap_init0()
5026 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5029 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
5030 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
5032 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
5033 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
5034 if (!adap->sge.egr_map) { in adap_init0()
5035 ret = -ENOMEM; in adap_init0()
5039 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
5040 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
5041 if (!adap->sge.ingr_map) { in adap_init0()
5042 ret = -ENOMEM; in adap_init0()
5049 adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5050 if (!adap->sge.starving_fl) { in adap_init0()
5051 ret = -ENOMEM; in adap_init0()
5055 adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5056 if (!adap->sge.txq_maperr) { in adap_init0()
5057 ret = -ENOMEM; in adap_init0()
5062 adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5063 if (!adap->sge.blocked_fl) { in adap_init0()
5064 ret = -ENOMEM; in adap_init0()
5071 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5074 adap->clipt_start = val[0]; in adap_init0()
5075 adap->clipt_end = val[1]; in adap_init0()
5079 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5085 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; in adap_init0()
5087 adap->params.nsched_cls = val[0]; in adap_init0()
5093 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5098 adap->flags |= CXGB4_FW_OFLD_CONN; in adap_init0()
5099 adap->tids.aftid_base = val[0]; in adap_init0()
5100 adap->tids.aftid_end = val[1]; in adap_init0()
5110 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5118 if (is_t4(adap->params.chip)) { in adap_init0()
5119 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
5122 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5124 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
5129 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5131 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5134 if (is_t4(adap->params.chip)) { in adap_init0()
5135 adap->params.filter2_wr_support = false; in adap_init0()
5138 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5140 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5148 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5150 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); in adap_init0()
5160 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
5170 adap->params.offload = 1; in adap_init0()
5175 /* query offload-related parameters */ in adap_init0()
5182 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5186 adap->tids.ntids = val[0]; in adap_init0()
5187 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
5188 adap->tids.stid_base = val[1]; in adap_init0()
5189 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
5199 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
5200 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
5201 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5202 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
5203 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5204 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
5205 adap->tids.ftid_base; in adap_init0()
5207 adap->vres.ddp.start = val[3]; in adap_init0()
5208 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
5209 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
5214 adap->num_ofld_uld += 1; in adap_init0()
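Editor's note: a few lines above, the filter TID range is split so that roughly the first third stays as ordinary filters and the remainder becomes server filters (sftid_base, nsftids). The sketch below reproduces just that arithmetic with made-up input values so the resulting regions are easy to see.

#include <stdio.h>

/* Illustrative sketch only: carve the server-filter region out of the
 * filter TID range the way the fragment above does. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ftid_base = 1024, nftids = 496;	/* example inputs */

	unsigned int sftid_base = ftid_base + DIV_ROUND_UP(nftids, 3);
	unsigned int nsftids    = nftids - DIV_ROUND_UP(nftids, 3);

	nftids = sftid_base - ftid_base;	/* what remains for plain filters */

	printf("filters:        base %u, count %u\n", ftid_base, nftids);
	printf("server filters: base %u, count %u\n", sftid_base, nsftids);
	return 0;
}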
5220 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5223 adap->tids.eotid_base = val[0]; in adap_init0()
5224 adap->tids.neotids = min_t(u32, MAX_ATIDS, in adap_init0()
5225 val[1] - val[0] + 1); in adap_init0()
5226 adap->params.ethofld = 1; in adap_init0()
5237 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5241 adap->vres.stag.start = val[0]; in adap_init0()
5242 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
5243 adap->vres.rq.start = val[2]; in adap_init0()
5244 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
5245 adap->vres.pbl.start = val[4]; in adap_init0()
5246 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
5250 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5253 adap->vres.srq.start = val[0]; in adap_init0()
5254 adap->vres.srq.size = val[1] - val[0] + 1; in adap_init0()
5256 if (adap->vres.srq.size) { in adap_init0()
5257 adap->srq = t4_init_srq(adap->vres.srq.size); in adap_init0()
5258 if (!adap->srq) in adap_init0()
5259 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n"); in adap_init0()
5268 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, in adap_init0()
5272 adap->vres.qp.start = val[0]; in adap_init0()
5273 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
5274 adap->vres.cq.start = val[2]; in adap_init0()
5275 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
5276 adap->vres.ocq.start = val[4]; in adap_init0()
5277 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
5281 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, in adap_init0()
5284 adap->params.max_ordird_qp = 8; in adap_init0()
5285 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
5288 adap->params.max_ordird_qp = val[0]; in adap_init0()
5289 adap->params.max_ird_adapter = val[1]; in adap_init0()
5291 dev_info(adap->pdev_dev, in adap_init0()
5293 adap->params.max_ordird_qp, in adap_init0()
5294 adap->params.max_ird_adapter); in adap_init0()
5298 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5300 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0); in adap_init0()
5304 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5306 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0); in adap_init0()
5307 adap->num_ofld_uld += 2; in adap_init0()
5312 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5316 adap->vres.iscsi.start = val[0]; in adap_init0()
5317 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
5318 if (is_t6(adap->params.chip)) { in adap_init0()
5321 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5324 adap->vres.ppod_edram.start = val[0]; in adap_init0()
5325 adap->vres.ppod_edram.size = in adap_init0()
5326 val[1] - val[0] + 1; in adap_init0()
5328 dev_info(adap->pdev_dev, in adap_init0()
5331 adap->vres.ppod_edram.size); in adap_init0()
5335 adap->num_ofld_uld += 2; in adap_init0()
5341 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5344 if (ret != -EINVAL) in adap_init0()
5347 adap->vres.ncrypto_fc = val[0]; in adap_init0()
5349 adap->num_ofld_uld += 1; in adap_init0()
5355 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5359 adap->vres.key.start = val[0]; in adap_init0()
5360 adap->vres.key.size = val[1] - val[0] + 1; in adap_init0()
5361 adap->num_uld += 1; in adap_init0()
5363 adap->params.crypto = ntohs(caps_cmd.cryptocaps); in adap_init0()
5371 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
5377 * a multiple of 8 +/- 4 bytes apart near this popular MTU. in adap_init0()
5382 * options are in use, then we have a 20-byte IP header and a in adap_init0()
5383 * 20-byte TCP header. In this case, a 1500-byte MSS would in adap_init0()
5384 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes in adap_init0()
5387 * is a multiple of 8. On the other hand, if 12-byte TCP Time in adap_init0()
5393 if (adap->params.mtus[i] == 1492) { in adap_init0()
5394 adap->params.mtus[i] = 1488; in adap_init0()
5398 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
5399 adap->params.b_wnd); in adap_init0()
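Editor's note: the loop above replaces the default MTU table entry 1492 with 1488 so that, with a plain 20-byte IP header plus 20-byte TCP header, the data payload (MTU - 40) is a multiple of 8: 1488 - 40 = 1448 is, while 1492 - 40 = 1452 is not. The tiny sketch below just demonstrates that arithmetic.

#include <stdio.h>

/* Illustrative sketch only: payload = MTU - 40 for a bare IPv4+TCP header;
 * check which table entries leave a payload that is a multiple of 8. */
int main(void)
{
	const unsigned int mtus[] = { 1492, 1488, 1500 };

	for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
		unsigned int payload = mtus[i] - 40;

		printf("MTU %u -> payload %u, multiple of 8: %s\n",
		       mtus[i], payload, (payload % 8) ? "no" : "yes");
	}
	return 0;
}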
5402 adap->flags |= CXGB4_FW_OK; in adap_init0()
5413 kfree(adap->sge.egr_map); in adap_init0()
5414 kfree(adap->sge.ingr_map); in adap_init0()
5415 bitmap_free(adap->sge.starving_fl); in adap_init0()
5416 bitmap_free(adap->sge.txq_maperr); in adap_init0()
5418 bitmap_free(adap->sge.blocked_fl); in adap_init0()
5420 if (ret != -ETIMEDOUT && ret != -EIO) in adap_init0()
5421 t4_fw_bye(adap, adap->mbox); in adap_init0()
5437 adap->flags &= ~CXGB4_FW_OK; in eeh_err_detected()
5439 spin_lock(&adap->stats_lock); in eeh_err_detected()
5441 struct net_device *dev = adap->port[i]; in eeh_err_detected()
5447 spin_unlock(&adap->stats_lock); in eeh_err_detected()
5449 if (adap->flags & CXGB4_FULL_INIT_DONE) in eeh_err_detected()
5452 if ((adap->flags & CXGB4_DEV_ENABLED)) { in eeh_err_detected()
5454 adap->flags &= ~CXGB4_DEV_ENABLED; in eeh_err_detected()
5472 if (!(adap->flags & CXGB4_DEV_ENABLED)) { in eeh_slot_reset()
5474 dev_err(&pdev->dev, "Cannot reenable PCI " in eeh_slot_reset()
5478 adap->flags |= CXGB4_DEV_ENABLED; in eeh_slot_reset()
5485 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
5487 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
5489 adap->flags |= CXGB4_FW_OK; in eeh_slot_reset()
5497 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1, in eeh_slot_reset()
5501 pi->viid = ret; in eeh_slot_reset()
5502 pi->xact_addr_filt = -1; in eeh_slot_reset()
5506 if (adap->params.viid_smt_extn_support) { in eeh_slot_reset()
5507 pi->vivld = vivld; in eeh_slot_reset()
5508 pi->vin = vin; in eeh_slot_reset()
5511 pi->vivld = FW_VIID_VIVLD_G(pi->viid); in eeh_slot_reset()
5512 pi->vin = FW_VIID_VIN_G(pi->viid); in eeh_slot_reset()
5516 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
5517 adap->params.b_wnd); in eeh_slot_reset()
5534 struct net_device *dev = adap->port[i]; in eeh_resume()
5551 if (adapter->pf != 4) in eeh_reset_prepare()
5554 adapter->flags &= ~CXGB4_FW_OK; in eeh_reset_prepare()
5559 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in eeh_reset_prepare()
5560 cxgb_close(adapter->port[i]); in eeh_reset_prepare()
5567 if (adapter->flags & CXGB4_FULL_INIT_DONE) in eeh_reset_prepare()
5576 if (adapter->pf != 4) in eeh_reset_done()
5579 err = t4_wait_dev_ready(adapter->regs); in eeh_reset_done()
5581 dev_err(adapter->pdev_dev, in eeh_reset_done()
5590 dev_err(adapter->pdev_dev, in eeh_reset_done()
5597 if (adapter->flags & CXGB4_FW_OK) { in eeh_reset_done()
5598 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0); in eeh_reset_done()
5600 dev_err(adapter->pdev_dev, in eeh_reset_done()
5608 dev_err(adapter->pdev_dev, in eeh_reset_done()
5617 dev_err(adapter->pdev_dev, in eeh_reset_done()
5623 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in eeh_reset_done()
5624 cxgb_open(adapter->port[i]); in eeh_reset_done()
5642 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); in is_x_10g_port()
5658 struct sge *s = &adap->sge; in cfg_queues()
5664 adap->params.offload = 0; in cfg_queues()
5665 adap->params.crypto = 0; in cfg_queues()
5666 adap->params.ethofld = 0; in cfg_queues()
5681 niqflint = adap->params.pfres.niqflint - 1; in cfg_queues()
5682 if (!(adap->flags & CXGB4_USING_MSIX)) in cfg_queues()
5683 niqflint--; in cfg_queues()
5684 neq = adap->params.pfres.neq / 2; in cfg_queues()
5687 if (avail_qsets < adap->params.nports) { in cfg_queues()
5688 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", in cfg_queues()
5689 avail_qsets, adap->params.nports); in cfg_queues()
5690 return -ENOMEM; in cfg_queues()
5695 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
5699 /* We default to 1 queue per non-10G port and up to # of cores queues in cfg_queues()
5703 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; in cfg_queues()
5708 * own TX Queue in order to prevent Head-Of-Line Blocking. in cfg_queues()
5711 if (adap->params.nports * 8 > avail_eth_qsets) { in cfg_queues()
5712 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", in cfg_queues()
5713 avail_eth_qsets, adap->params.nports * 8); in cfg_queues()
5714 return -ENOMEM; in cfg_queues()
5717 if (adap->params.nports * ncpus < avail_eth_qsets) in cfg_queues()
5723 (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) in cfg_queues()
5724 q10g--; in cfg_queues()
5738 pi->first_qset = qidx; in cfg_queues()
5739 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g; in cfg_queues()
5740 qidx += pi->nqsets; in cfg_queues()
5743 s->ethqsets = qidx; in cfg_queues()
5744 s->max_ethqsets = qidx; /* MSI-X may lower it later */ in cfg_queues()
5745 avail_qsets -= qidx; in cfg_queues()
5752 num_ulds = adap->num_uld + adap->num_ofld_uld; in cfg_queues()
5754 avail_uld_qsets = roundup(i, adap->params.nports); in cfg_queues()
5755 if (avail_qsets < num_ulds * adap->params.nports) { in cfg_queues()
5756 adap->params.offload = 0; in cfg_queues()
5757 adap->params.crypto = 0; in cfg_queues()
5758 s->ofldqsets = 0; in cfg_queues()
5760 s->ofldqsets = adap->params.nports; in cfg_queues()
5762 s->ofldqsets = avail_uld_qsets; in cfg_queues()
5765 avail_qsets -= num_ulds * s->ofldqsets; in cfg_queues()
5772 if (avail_qsets < s->max_ethqsets) { in cfg_queues()
5773 adap->params.ethofld = 0; in cfg_queues()
5774 s->eoqsets = 0; in cfg_queues()
5776 s->eoqsets = s->max_ethqsets; in cfg_queues()
5778 avail_qsets -= s->eoqsets; in cfg_queues()
5786 if (avail_qsets >= s->max_ethqsets) in cfg_queues()
5787 s->mirrorqsets = s->max_ethqsets; in cfg_queues()
5788 else if (avail_qsets >= adap->params.nports) in cfg_queues()
5789 s->mirrorqsets = adap->params.nports; in cfg_queues()
5791 s->mirrorqsets = 0; in cfg_queues()
5792 avail_qsets -= s->mirrorqsets; in cfg_queues()
5794 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { in cfg_queues()
5795 struct sge_eth_rxq *r = &s->ethrxq[i]; in cfg_queues()
5797 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
5798 r->fl.size = 72; in cfg_queues()
5801 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) in cfg_queues()
5802 s->ethtxq[i].q.size = 1024; in cfg_queues()
5804 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) in cfg_queues()
5805 s->ctrlq[i].q.size = 512; in cfg_queues()
5807 if (!is_t4(adap->params.chip)) in cfg_queues()
5808 s->ptptxq.q.size = 8; in cfg_queues()
5810 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
5811 init_rspq(adap, &s->intrq, 0, 1, 512, 64); in cfg_queues()
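Editor's note: cfg_queues() above budgets Ethernet queue sets by giving each high-speed port up to one queue set per CPU and each remaining port one, then trimming the per-port count until the total fits the available pool. The sketch below models that heuristic with invented numbers; it is not the driver's exact DCB/kdump handling.

#include <stdio.h>

/* Illustrative sketch only: distribute a queue-set budget across ports,
 * favouring high-speed ports but capping at the CPU count. */
int main(void)
{
	unsigned int avail = 32, nports = 4, n10g = 2, ncpus = 8;
	unsigned int q1g = 1;
	unsigned int q10g = (avail - (nports - n10g) * q1g) / n10g;

	if (q10g > ncpus)
		q10g = ncpus;		/* no point in more queues than cores */

	while (q10g > 1 &&
	       q10g * n10g + (nports - n10g) * q1g > avail)
		q10g--;			/* shrink until the budget fits */

	unsigned int qidx = 0;
	for (unsigned int p = 0; p < nports; p++) {
		unsigned int nq = (p < n10g) ? q10g : q1g;

		printf("port %u: first_qset=%u nqsets=%u\n", p, qidx, nq);
		qidx += nq;
	}
	printf("total ethqsets=%u of %u available\n", qidx, avail);
	return 0;
}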
5825 while (n < adap->sge.ethqsets) in reduce_ethqs()
5828 if (pi->nqsets > 1) { in reduce_ethqs()
5829 pi->nqsets--; in reduce_ethqs()
5830 adap->sge.ethqsets--; in reduce_ethqs()
5831 if (adap->sge.ethqsets <= n) in reduce_ethqs()
5839 pi->first_qset = n; in reduce_ethqs()
5840 n += pi->nqsets; in reduce_ethqs()
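Editor's note: reduce_ethqs() above repeatedly takes one queue set away from ports that still have more than one until the total fits the new limit, then renumbers each port's first queue index. A self-contained model of that trimming loop, with made-up starting counts:

#include <stdio.h>

/* Illustrative sketch only: shrink per-port queue counts round-robin until
 * the total fits, then recompute first-queue offsets. */
int main(void)
{
	unsigned int nqsets[4] = { 8, 8, 1, 1 };	/* made-up starting split */
	unsigned int nports = 4, total = 18, budget = 12;

	while (total > budget) {
		for (unsigned int p = 0; p < nports && total > budget; p++) {
			if (nqsets[p] > 1) {
				nqsets[p]--;
				total--;
			}
		}
	}

	for (unsigned int p = 0, first = 0; p < nports; p++) {
		printf("port %u: first_qset=%u nqsets=%u\n", p, first, nqsets[p]);
		first += nqsets[p];
	}
	return 0;
}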
5850 return -ENOMEM; in alloc_msix_info()
5852 adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL); in alloc_msix_info()
5853 if (!adap->msix_bmap.msix_bmap) { in alloc_msix_info()
5855 return -ENOMEM; in alloc_msix_info()
5858 spin_lock_init(&adap->msix_bmap.lock); in alloc_msix_info()
5859 adap->msix_bmap.mapsize = num_vec; in alloc_msix_info()
5861 adap->msix_info = msix_info; in alloc_msix_info()
5867 bitmap_free(adap->msix_bmap.msix_bmap); in free_msix_info()
5868 kfree(adap->msix_info); in free_msix_info()
5873 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_get_msix_idx_from_bmap()
5877 spin_lock_irqsave(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5878 msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); in cxgb4_get_msix_idx_from_bmap()
5879 if (msix_idx < bmap->mapsize) { in cxgb4_get_msix_idx_from_bmap()
5880 __set_bit(msix_idx, bmap->msix_bmap); in cxgb4_get_msix_idx_from_bmap()
5882 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5883 return -ENOSPC; in cxgb4_get_msix_idx_from_bmap()
5886 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5893 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_free_msix_idx_in_bmap()
5896 spin_lock_irqsave(&bmap->lock, flags); in cxgb4_free_msix_idx_in_bmap()
5897 __clear_bit(msix_idx, bmap->msix_bmap); in cxgb4_free_msix_idx_in_bmap()
5898 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_free_msix_idx_in_bmap()
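Editor's note: the two helpers above hand out MSI-X vector indices from a bitmap under a spinlock (find the first clear bit, set it to allocate, clear it to free). The userspace sketch below is an analogue using a pthread mutex and a plain word array instead of the kernel's spinlock and bitmap helpers.

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

/* Illustrative sketch only: a locked bitmap index allocator. */
#define MAP_SIZE 64
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

static unsigned long bmap[MAP_SIZE / BITS_PER_WORD + 1];
static pthread_mutex_t bmap_lock = PTHREAD_MUTEX_INITIALIZER;

static int get_idx(void)
{
	int idx = -1;

	pthread_mutex_lock(&bmap_lock);
	for (int i = 0; i < MAP_SIZE; i++) {
		if (!(bmap[i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD)))) {
			bmap[i / BITS_PER_WORD] |= 1UL << (i % BITS_PER_WORD);
			idx = i;
			break;
		}
	}
	pthread_mutex_unlock(&bmap_lock);
	return idx;		/* -1 plays the role of -ENOSPC above */
}

static void put_idx(int idx)
{
	pthread_mutex_lock(&bmap_lock);
	bmap[idx / BITS_PER_WORD] &= ~(1UL << (idx % BITS_PER_WORD));
	pthread_mutex_unlock(&bmap_lock);
}

int main(void)
{
	int a = get_idx(), b = get_idx();

	printf("allocated %d and %d\n", a, b);
	put_idx(a);
	printf("after free, next is %d\n", get_idx());
	return 0;
}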
5901 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5908 u8 num_uld = 0, nchan = adap->params.nports; in enable_msix()
5910 struct sge *s = &adap->sge; in enable_msix()
5915 want = s->max_ethqsets; in enable_msix()
5926 num_uld = adap->num_ofld_uld + adap->num_uld; in enable_msix()
5927 want += num_uld * s->ofldqsets; in enable_msix()
5933 want += s->eoqsets; in enable_msix()
5938 if (s->mirrorqsets) { in enable_msix()
5939 want += s->mirrorqsets; in enable_msix()
5949 return -ENOMEM; in enable_msix()
5954 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
5959 want = s->max_ethqsets + EXTRA_VECS; in enable_msix()
5961 allocated = pci_enable_msix_range(adap->pdev, entries, in enable_msix()
5964 dev_info(adap->pdev_dev, in enable_msix()
5965 "Disabling MSI-X due to insufficient MSI-X vectors\n"); in enable_msix()
5970 dev_info(adap->pdev_dev, in enable_msix()
5971 "Disabling offload due to insufficient MSI-X vectors\n"); in enable_msix()
5972 adap->params.offload = 0; in enable_msix()
5973 adap->params.crypto = 0; in enable_msix()
5974 adap->params.ethofld = 0; in enable_msix()
5975 s->ofldqsets = 0; in enable_msix()
5976 s->eoqsets = 0; in enable_msix()
5977 s->mirrorqsets = 0; in enable_msix()
5994 if (s->mirrorqsets) in enable_msix()
5997 num_vec -= need; in enable_msix()
6000 ethqsets > s->max_ethqsets) in enable_msix()
6005 if (pi->nqsets < 2) in enable_msix()
6009 num_vec--; in enable_msix()
6012 num_vec--; in enable_msix()
6020 ofldqsets > s->ofldqsets) in enable_msix()
6024 num_vec -= uld_need; in enable_msix()
6028 if (s->mirrorqsets) { in enable_msix()
6031 mirrorqsets > s->mirrorqsets) in enable_msix()
6035 num_vec -= mirror_need; in enable_msix()
6039 ethqsets = s->max_ethqsets; in enable_msix()
6041 ofldqsets = s->ofldqsets; in enable_msix()
6043 eoqsets = s->eoqsets; in enable_msix()
6044 if (s->mirrorqsets) in enable_msix()
6045 mirrorqsets = s->mirrorqsets; in enable_msix()
6048 if (ethqsets < s->max_ethqsets) { in enable_msix()
6049 s->max_ethqsets = ethqsets; in enable_msix()
6054 s->ofldqsets = ofldqsets; in enable_msix()
6055 s->nqs_per_uld = s->ofldqsets; in enable_msix()
6059 s->eoqsets = eoqsets; in enable_msix()
6061 if (s->mirrorqsets) { in enable_msix()
6062 s->mirrorqsets = mirrorqsets; in enable_msix()
6065 pi->nmirrorqsets = s->mirrorqsets / nchan; in enable_msix()
6066 mutex_init(&pi->vi_mirror_mutex); in enable_msix()
6076 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
6077 adap->msix_info[i].idx = i; in enable_msix()
6080 dev_info(adap->pdev_dev, in enable_msix()
6081 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n", in enable_msix()
6082 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld, in enable_msix()
6083 s->mirrorqsets); in enable_msix()
6089 pci_disable_msix(adap->pdev); in enable_msix()
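Editor's note: enable_msix() above asks for everything it could use ("want") but accepts anything down to a bare minimum ("need"), then shrinks the queue plan to whatever pci_enable_msix_range() actually granted. The sketch below captures only that pattern; alloc_vectors() and the counts are made-up stand-ins for the PCI call and the driver's bookkeeping.

#include <stdio.h>

/* Illustrative sketch only: the want/need vector negotiation. */
static int alloc_vectors(int need, int want, int msix_available)
{
	if (msix_available < need)
		return -1;			/* caller falls back to MSI/INTx */
	return msix_available < want ? msix_available : want;
}

int main(void)
{
	int max_ethqsets = 16, extra = 2;	/* FW event + non-data interrupt */
	int want = max_ethqsets + extra;
	int need = 4 + extra;			/* at least one queue set per port */

	int got = alloc_vectors(need, want, 10);
	if (got < 0) {
		printf("not enough MSI-X vectors, falling back\n");
		return 0;
	}

	int ethqsets = got - extra;
	if (ethqsets < max_ethqsets)
		printf("reducing eth queue sets from %d to %d\n",
		       max_ethqsets, ethqsets);
	printf("using %d vectors: %d data + %d extra\n", got, ethqsets, extra);
	return 0;
}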
6103 err = t4_init_rss_mode(adap, adap->mbox); in init_rss()
6110 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); in init_rss()
6111 if (!pi->rss) in init_rss()
6112 return -ENOMEM; in init_rss()
6124 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", in print_adapter_info()
6126 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" : in print_adapter_info()
6127 (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""), in print_adapter_info()
6128 is_offload(adapter) ? "Offload" : "non-Offload"); in print_adapter_info()
6136 const struct adapter *adap = pi->adapter; in print_port_info()
6138 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) in print_port_info()
6140 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) in print_port_info()
6142 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) in print_port_info()
6144 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) in print_port_info()
6146 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) in print_port_info()
6148 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) in print_port_info()
6150 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) in print_port_info()
6152 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G) in print_port_info()
6154 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G) in print_port_info()
6157 --bufp; in print_port_info()
6158 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); in print_port_info()
6160 netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf); in print_port_info()
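Editor's note: print_port_info() above builds the supported-speed summary by appending one token per capability bit and then letting the "BASE-<type>" suffix overwrite the trailing separator. A small self-contained sketch of that string-building idiom; the bit values and port type here are invented.

#include <stdio.h>

/* Illustrative sketch only: build "1G/10G/25GBASE-..." from a capability mask. */
#define SPEED_1G   0x1
#define SPEED_10G  0x2
#define SPEED_25G  0x4
#define SPEED_100G 0x8

int main(void)
{
	unsigned int pcaps = SPEED_1G | SPEED_10G | SPEED_25G;
	char buf[64];
	char *bufp = buf;

	if (pcaps & SPEED_1G)
		bufp += sprintf(bufp, "1G/");
	if (pcaps & SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pcaps & SPEED_25G)
		bufp += sprintf(bufp, "25G/");
	if (pcaps & SPEED_100G)
		bufp += sprintf(bufp, "100G/");

	if (bufp != buf)
		--bufp;				/* back over the final '/' */
	sprintf(bufp, "BASE-%s", "SFP28");	/* example port-type suffix */

	printf("%s\n", buf);
	return 0;
}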
6165 * - memory used for tables
6166 * - MSI/MSI-X
6167 * - net devices
6168 * - resources FW is holding for us
6174 kvfree(adapter->smt); in free_some_resources()
6175 kvfree(adapter->l2t); in free_some_resources()
6176 kvfree(adapter->srq); in free_some_resources()
6178 kvfree(adapter->tids.tid_tab); in free_some_resources()
6184 kfree(adapter->sge.egr_map); in free_some_resources()
6185 kfree(adapter->sge.ingr_map); in free_some_resources()
6186 bitmap_free(adapter->sge.starving_fl); in free_some_resources()
6187 bitmap_free(adapter->sge.txq_maperr); in free_some_resources()
6189 bitmap_free(adapter->sge.blocked_fl); in free_some_resources()
6194 if (adapter->port[i]) { in free_some_resources()
6197 if (pi->viid != 0) in free_some_resources()
6198 t4_free_vi(adapter, adapter->mbox, adapter->pf, in free_some_resources()
6199 0, pi->viid); in free_some_resources()
6200 kfree(adap2pinfo(adapter, i)->rss); in free_some_resources()
6201 free_netdev(adapter->port[i]); in free_some_resources()
6203 if (adapter->flags & CXGB4_FW_OK) in free_some_resources()
6204 t4_fw_bye(adapter, adapter->pf); in free_some_resources()
6227 return -EINVAL; in t4_get_chip_type()
6233 dev->type = ARPHRD_NONE; in cxgb4_mgmt_setup()
6234 dev->mtu = 0; in cxgb4_mgmt_setup()
6235 dev->hard_header_len = 0; in cxgb4_mgmt_setup()
6236 dev->addr_len = 0; in cxgb4_mgmt_setup()
6237 dev->tx_queue_len = 0; in cxgb4_mgmt_setup()
6238 dev->flags |= IFF_NOARP; in cxgb4_mgmt_setup()
6239 dev->priv_flags |= IFF_NO_QUEUE; in cxgb4_mgmt_setup()
6242 dev->netdev_ops = &cxgb4_mgmt_netdev_ops; in cxgb4_mgmt_setup()
6243 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; in cxgb4_mgmt_setup()
6253 pcie_fw = readl(adap->regs + PCIE_FW_A); in cxgb4_iov_configure()
6256 dev_warn(&pdev->dev, "Device not initialized\n"); in cxgb4_iov_configure()
6257 return -EOPNOTSUPP; in cxgb4_iov_configure()
6264 dev_err(&pdev->dev, in cxgb4_iov_configure()
6265 "Cannot modify SR-IOV while VFs are assigned\n"); in cxgb4_iov_configure()
6268 /* Note that the upper-level code ensures that we're never called with in cxgb4_iov_configure()
6269 * a non-zero "num_vfs" when we already have VFs instantiated. But in cxgb4_iov_configure()
6273 return -EBUSY; in cxgb4_iov_configure()
6283 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6284 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6285 adap->port[0] = NULL; in cxgb4_iov_configure()
6288 adap->num_vfs = 0; in cxgb4_iov_configure()
6289 kfree(adap->vfinfo); in cxgb4_iov_configure()
6290 adap->vfinfo = NULL; in cxgb4_iov_configure()
6305 * parent bridge's PCI-E needs to support Alternative Routing in cxgb4_iov_configure()
6309 pbridge = pdev->bus->self; in cxgb4_iov_configure()
6319 	dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n", in cxgb4_iov_configure()
6320 pbridge->bus->number, PCI_SLOT(pbridge->devfn), in cxgb4_iov_configure()
6321 PCI_FUNC(pbridge->devfn)); in cxgb4_iov_configure()
6322 return -ENOTSUPP; in cxgb4_iov_configure()
6328 FW_PFVF_CMD_PFN_V(adap->pf) | in cxgb4_iov_configure()
6331 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), in cxgb4_iov_configure()
6336 port = ffs(pmask) - 1; in cxgb4_iov_configure()
6338 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx, in cxgb4_iov_configure()
6339 adap->pf); in cxgb4_iov_configure()
6343 return -ENOMEM; in cxgb4_iov_configure()
6346 pi->adapter = adap; in cxgb4_iov_configure()
6347 pi->lport = port; in cxgb4_iov_configure()
6348 pi->tx_chan = port; in cxgb4_iov_configure()
6349 SET_NETDEV_DEV(netdev, &pdev->dev); in cxgb4_iov_configure()
6351 adap->port[0] = netdev; in cxgb4_iov_configure()
6352 pi->port_id = 0; in cxgb4_iov_configure()
6354 err = register_netdev(adap->port[0]); in cxgb4_iov_configure()
6357 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6358 adap->port[0] = NULL; in cxgb4_iov_configure()
6362 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev), in cxgb4_iov_configure()
6364 if (!adap->vfinfo) { in cxgb4_iov_configure()
6365 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6366 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6367 adap->port[0] = NULL; in cxgb4_iov_configure()
6368 return -ENOMEM; in cxgb4_iov_configure()
6377 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6378 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6379 adap->port[0] = NULL; in cxgb4_iov_configure()
6380 kfree(adap->vfinfo); in cxgb4_iov_configure()
6381 adap->vfinfo = NULL; in cxgb4_iov_configure()
6386 adap->num_vfs = num_vfs; in cxgb4_iov_configure()
6399 if (!adap->uld[CXGB4_ULD_KTLS].handle) { in chcr_offload_state()
6400 dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n"); in chcr_offload_state()
6401 return -EOPNOTSUPP; in chcr_offload_state()
6403 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) { in chcr_offload_state()
6404 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6406 return -EOPNOTSUPP; in chcr_offload_state()
6412 if (!adap->uld[CXGB4_ULD_IPSEC].handle) { in chcr_offload_state()
6413 dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n"); in chcr_offload_state()
6414 return -EOPNOTSUPP; in chcr_offload_state()
6416 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) { in chcr_offload_state()
6417 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6419 return -EOPNOTSUPP; in chcr_offload_state()
6424 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6426 return -EOPNOTSUPP; in chcr_offload_state()
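Editor's note: chcr_offload_state() above refuses to dispatch kTLS/IPsec work unless the corresponding upper-layer driver has registered (handle set) and exposes the needed ops table. The sketch below models that two-step guard with plain function pointers; the enum values and ops are invented for illustration.

#include <stdio.h>

enum uld_type { ULD_KTLS, ULD_IPSEC, ULD_MAX };

struct uld_entry {
	void *handle;			/* NULL until the upper-layer driver registers */
	int (*offload_op)(void);	/* NULL if it does not provide this hook */
};

static struct uld_entry ulds[ULD_MAX];

/* Illustrative sketch only: check "loaded" and "has ops" before calling. */
static int dispatch(enum uld_type type)
{
	if (!ulds[type].handle) {
		fprintf(stderr, "ULD %d is not loaded\n", type);
		return -95;		/* stands in for -EOPNOTSUPP */
	}
	if (!ulds[type].offload_op) {
		fprintf(stderr, "ULD %d has no offload ops\n", type);
		return -95;
	}
	return ulds[type].offload_op();
}

static int ktls_op(void) { return 0; }
static int ktls_cookie;

int main(void)
{
	printf("before registration: %d\n", dispatch(ULD_KTLS));

	ulds[ULD_KTLS].handle = &ktls_cookie;
	ulds[ULD_KTLS].offload_op = ktls_op;
	printf("after registration:  %d\n", dispatch(ULD_KTLS));
	return 0;
}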
6453 ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk, in cxgb4_ktls_dev_add()
6476 adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx, in cxgb4_ktls_dev_del()
6495 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_add_state()
6500 return -EBUSY; in cxgb4_xfrm_add_state()
6506 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack); in cxgb4_xfrm_add_state()
6516 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_del_state()
6519 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_del_state()
6526 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); in cxgb4_xfrm_del_state()
6534 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_free_state()
6537 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_free_state()
6544 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); in cxgb4_xfrm_free_state()
6552 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_ipsec_offload_ok()
6556 dev_dbg(adap->pdev_dev, in cxgb4_ipsec_offload_ok()
6563 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); in cxgb4_ipsec_offload_ok()
6572 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_advance_esn_state()
6575 dev_dbg(adap->pdev_dev, in cxgb4_advance_esn_state()
6582 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x); in cxgb4_advance_esn_state()
6615 dev_info(&pdev->dev, "cannot obtain PCI resources\n"); in init_one()
6621 dev_err(&pdev->dev, "cannot enable PCI device\n"); in init_one()
6627 dev_err(&pdev->dev, "cannot map device registers\n"); in init_one()
6628 err = -ENOMEM; in init_one()
6634 err = -ENOMEM; in init_one()
6638 adapter->regs = regs; in init_one()
6648 dev_err(&pdev->dev, "Device %d is not supported\n", device_id); in init_one()
6656 adapter->pdev = pdev; in init_one()
6657 adapter->pdev_dev = &pdev->dev; in init_one()
6658 adapter->name = pci_name(pdev); in init_one()
6659 adapter->mbox = func; in init_one()
6660 adapter->pf = func; in init_one()
6661 adapter->params.chip = chip; in init_one()
6662 adapter->adap_idx = adap_idx; in init_one()
6663 adapter->msg_enable = DFLT_MSG_ENABLE; in init_one()
6664 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + in init_one()
6668 if (!adapter->mbox_log) { in init_one()
6669 err = -ENOMEM; in init_one()
6672 spin_lock_init(&adapter->mbox_lock); in init_one()
6673 INIT_LIST_HEAD(&adapter->mlist.list); in init_one()
6674 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; in init_one()
6677 if (func != ent->driver_data) { in init_one()
6679 pci_save_state(pdev); /* to restore SR-IOV later */ in init_one()
6683 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in init_one()
6685 dev_err(&pdev->dev, "no usable DMA configuration\n"); in init_one()
6692 adapter->workq = create_singlethread_workqueue("cxgb4"); in init_one()
6693 if (!adapter->workq) { in init_one()
6694 err = -ENOMEM; in init_one()
6699 adapter->flags |= CXGB4_DEV_ENABLED; in init_one()
6700 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); in init_one()
6717 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING; in init_one()
6719 spin_lock_init(&adapter->stats_lock); in init_one()
6720 spin_lock_init(&adapter->tid_release_lock); in init_one()
6721 spin_lock_init(&adapter->win0_lock); in init_one()
6723 INIT_WORK(&adapter->tid_release_task, process_tid_release_list); in init_one()
6724 INIT_WORK(&adapter->db_full_task, process_db_full); in init_one()
6725 INIT_WORK(&adapter->db_drop_task, process_db_drop); in init_one()
6726 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err); in init_one()
6736 dev_warn(adapter->pdev_dev, in init_one()
6743 if (!is_t4(adapter->params.chip)) { in init_one()
6745 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * in init_one()
6746 adapter->pf); in init_one()
6757 dev_err(&pdev->dev, in init_one()
6759 err = -EINVAL; in init_one()
6762 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), in init_one()
6764 if (!adapter->bar2) { in init_one()
6765 dev_err(&pdev->dev, "cannot map device bar2 region\n"); in init_one()
6766 err = -ENOMEM; in init_one()
6779 if (!is_t4(adapter->params.chip)) in init_one()
6781 (is_t5(adapter->params.chip) ? STATMODE_V(0) : in init_one()
6785 INIT_LIST_HEAD(&adapter->mac_hlist); in init_one()
6797 err = -ENOMEM; in init_one()
6801 SET_NETDEV_DEV(netdev, &pdev->dev); in init_one()
6803 adapter->port[i] = netdev; in init_one()
6805 pi->adapter = adapter; in init_one()
6806 pi->xact_addr_filt = -1; in init_one()
6807 pi->port_id = i; in init_one()
6808 netdev->irq = pdev->irq; in init_one()
6810 netdev->hw_features = NETIF_F_SG | TSO_FLAGS | in init_one()
6817 netdev->hw_enc_features |= NETIF_F_IP_CSUM | in init_one()
6824 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | in init_one()
6828 if (adapter->rawf_cnt) in init_one()
6829 netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels; in init_one()
6832 netdev->features |= netdev->hw_features; in init_one()
6833 netdev->vlan_features = netdev->features & VLAN_FEAT; in init_one()
6835 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) { in init_one()
6836 netdev->hw_features |= NETIF_F_HW_TLS_TX; in init_one()
6837 netdev->tlsdev_ops = &cxgb4_ktls_ops; in init_one()
6839 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0); in init_one()
6843 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) { in init_one()
6844 netdev->hw_enc_features |= NETIF_F_HW_ESP; in init_one()
6845 netdev->features |= NETIF_F_HW_ESP; in init_one()
6846 netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops; in init_one()
6850 netdev->priv_flags |= IFF_UNICAST_FLT; in init_one()
6852 /* MTU range: 81 - 9600 */ in init_one()
6853 netdev->min_mtu = 81; /* accommodate SACK */ in init_one()
6854 netdev->max_mtu = MAX_MTU; in init_one()
6856 netdev->netdev_ops = &cxgb4_netdev_ops; in init_one()
6858 netdev->dcbnl_ops = &cxgb4_dcb_ops; in init_one()
6869 if (adapter->flags & CXGB4_FW_OK) { in init_one()
6873 } else if (adapter->params.nports == 1) { in init_one()
6874 /* If we don't have a connection to the firmware -- possibly in init_one()
6875 * because of an error -- grab the raw VPD parameters so we in init_one()
6880 u8 *na = adapter->params.vpd.na; in init_one()
6882 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd); in init_one()
6891 if (!(adapter->flags & CXGB4_FW_OK)) in init_one()
6901 adapter->smt = t4_init_smt(); in init_one()
6902 if (!adapter->smt) { in init_one()
6904 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n"); in init_one()
6907 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end); in init_one()
6908 if (!adapter->l2t) { in init_one()
6910 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); in init_one()
6911 adapter->params.offload = 0; in init_one()
6920 dev_warn(&pdev->dev, in init_one()
6922 adapter->params.offload = 0; in init_one()
6924 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, in init_one()
6925 adapter->clipt_end); in init_one()
6926 if (!adapter->clipt) { in init_one()
6930 dev_warn(&pdev->dev, in init_one()
6932 adapter->params.offload = 0; in init_one()
6939 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls); in init_one()
6940 if (!pi->sched_tbl) in init_one()
6941 dev_warn(&pdev->dev, in init_one()
6952 adapter->tids.nhash = 1 << HASHTIDSIZE_G(v); in init_one()
6954 adapter->tids.hash_base = v / 4; in init_one()
6956 adapter->tids.nhash = HASHTBLSIZE_G(v) << 3; in init_one()
6959 adapter->tids.hash_base = v; in init_one()
6964 if (tid_init(&adapter->tids) < 0) { in init_one()
6965 dev_warn(&pdev->dev, "could not allocate TID table, " in init_one()
6967 adapter->params.offload = 0; in init_one()
6969 adapter->tc_u32 = cxgb4_init_tc_u32(adapter); in init_one()
6970 if (!adapter->tc_u32) in init_one()
6971 dev_warn(&pdev->dev, in init_one()
6975 dev_warn(&pdev->dev, in init_one()
6979 dev_warn(&pdev->dev, in init_one()
6983 dev_warn(&pdev->dev, in init_one()
6986 dev_warn(&pdev->dev, in init_one()
6992 adapter->flags |= CXGB4_USING_MSIX; in init_one()
6994 adapter->flags |= CXGB4_USING_MSI; in init_one()
7010 dev_err(adapter->pdev_dev, in init_one()
7017 dev_err(adapter->pdev_dev, in init_one()
7031 adapter->port[i]->dev_port = pi->lport; in init_one()
7032 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); in init_one()
7033 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); in init_one()
7035 netif_carrier_off(adapter->port[i]); in init_one()
7037 err = register_netdev(adapter->port[i]); in init_one()
7040 adapter->chan_map[pi->tx_chan] = i; in init_one()
7041 print_port_info(adapter->port[i]); in init_one()
7044 dev_err(&pdev->dev, "could not register any net devices\n"); in init_one()
7048 dev_warn(&pdev->dev, "only %d net devices registered\n", i); in init_one()
7053 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), in init_one()
7059 pdev->needs_freset = 1; in init_one()
7064 if (!is_t4(adapter->params.chip)) in init_one()
7068 !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK)) in init_one()
7077 if (adapter->flags & CXGB4_USING_MSIX) in init_one()
7079 if (adapter->num_uld || adapter->num_ofld_uld) in init_one()
7082 if (!is_t4(adapter->params.chip)) in init_one()
7083 iounmap(adapter->bar2); in init_one()
7085 if (adapter->workq) in init_one()
7086 destroy_workqueue(adapter->workq); in init_one()
7088 kfree(adapter->mbox_log); in init_one()
7114 adapter->flags |= CXGB4_SHUTTING_DOWN; in remove_one()
7116 if (adapter->pf == 4) { in remove_one()
7119 /* Tear down per-adapter Work Queue first since it can contain in remove_one()
7122 destroy_workqueue(adapter->workq); in remove_one()
7127 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in remove_one()
7128 unregister_netdev(adapter->port[i]); in remove_one()
7138 debugfs_remove_recursive(adapter->debugfs_root); in remove_one()
7140 if (!is_t4(adapter->params.chip)) in remove_one()
7145 if (adapter->flags & CXGB4_FULL_INIT_DONE) in remove_one()
7148 if (adapter->flags & CXGB4_USING_MSIX) in remove_one()
7150 if (adapter->num_uld || adapter->num_ofld_uld) in remove_one()
7153 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, in remove_one()
7155 list_del(&entry->list); in remove_one()
7162 if (!is_t4(adapter->params.chip)) in remove_one()
7163 iounmap(adapter->bar2); in remove_one()
7167 cxgb4_iov_configure(adapter->pdev, 0); in remove_one()
7170 iounmap(adapter->regs); in remove_one()
7171 if ((adapter->flags & CXGB4_DEV_ENABLED)) { in remove_one()
7173 adapter->flags &= ~CXGB4_DEV_ENABLED; in remove_one()
7176 kfree(adapter->mbox_log); in remove_one()
7199 adapter->flags |= CXGB4_SHUTTING_DOWN; in shutdown_one()
7201 if (adapter->pf == 4) { in shutdown_one()
7205 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in shutdown_one()
7206 cxgb_close(adapter->port[i]); in shutdown_one()
7221 if (adapter->flags & CXGB4_FW_OK) in shutdown_one()
7222 t4_fw_bye(adapter, adapter->mbox); in shutdown_one()