Lines matching refs: vf_rep (drivers/net/ethernet/broadcom/bnxt/bnxt_vf_rep.c)

70 static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,  in bnxt_hwrm_vfr_qcfg()  argument
82 req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); in bnxt_hwrm_vfr_qcfg()
98 struct bnxt_vf_rep *vf_rep = netdev_priv(dev); in bnxt_vf_rep_open() local
99 struct bnxt *bp = vf_rep->bp; in bnxt_vf_rep_open()
120 struct bnxt_vf_rep *vf_rep = netdev_priv(dev); in bnxt_vf_rep_xmit() local
124 dst_hold((struct dst_entry *)vf_rep->dst); in bnxt_vf_rep_xmit()
125 skb_dst_set(skb, (struct dst_entry *)vf_rep->dst); in bnxt_vf_rep_xmit()
126 skb->dev = vf_rep->dst->u.port_info.lower_dev; in bnxt_vf_rep_xmit()
130 vf_rep->tx_stats.packets++; in bnxt_vf_rep_xmit()
131 vf_rep->tx_stats.bytes += len; in bnxt_vf_rep_xmit()
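
These four lines are the heart of the representor TX path: a packet sent on the VF representor is retargeted at the PF uplink with a METADATA_HW_PORT_MUX dst whose port_id is the VF's tx_cfa_action, so the hardware muxes it to the right VF. A minimal sketch built around the listed lines; the dst drop, the dev_queue_xmit() call and the return handling are inferred, not shown in the listing:

static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
	int rc, len = skb->len;

	/* attach the metadata dst carrying tx_cfa_action and
	 * re-point the skb at the PF's real netdev
	 */
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)vf_rep->dst);
	skb_dst_set(skb, (struct dst_entry *)vf_rep->dst);
	skb->dev = vf_rep->dst->u.port_info.lower_dev;

	rc = dev_queue_xmit(skb);
	if (!rc) {
		vf_rep->tx_stats.packets++;
		vf_rep->tx_stats.bytes += len;
	}
	return rc;
}
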
140 struct bnxt_vf_rep *vf_rep = netdev_priv(dev); in bnxt_vf_rep_get_stats64() local
142 stats->rx_packets = vf_rep->rx_stats.packets; in bnxt_vf_rep_get_stats64()
143 stats->rx_bytes = vf_rep->rx_stats.bytes; in bnxt_vf_rep_get_stats64()
144 stats->tx_packets = vf_rep->tx_stats.packets; in bnxt_vf_rep_get_stats64()
145 stats->tx_bytes = vf_rep->tx_stats.bytes; in bnxt_vf_rep_get_stats64()
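
The rx_stats/tx_stats counters updated in the datapath are what the representor reports through its .ndo_get_stats64 hook; the handler is essentially the four assignments listed, as in this sketch:

static void bnxt_vf_rep_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

	/* report the software counters kept by the rep's own datapath */
	stats->rx_packets = vf_rep->rx_stats.packets;
	stats->rx_bytes = vf_rep->rx_stats.bytes;
	stats->tx_packets = vf_rep->tx_stats.packets;
	stats->tx_bytes = vf_rep->tx_stats.bytes;
}
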
152 struct bnxt_vf_rep *vf_rep = cb_priv; in bnxt_vf_rep_setup_tc_block_cb() local
153 struct bnxt *bp = vf_rep->bp; in bnxt_vf_rep_setup_tc_block_cb()
154 int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; in bnxt_vf_rep_setup_tc_block_cb()
156 if (!bnxt_tc_flower_enabled(vf_rep->bp) || in bnxt_vf_rep_setup_tc_block_cb()
173 struct bnxt_vf_rep *vf_rep = netdev_priv(dev); in bnxt_vf_rep_setup_tc() local
180 vf_rep, vf_rep, true); in bnxt_vf_rep_setup_tc()
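
For TC offload the representor registers a block callback whose private data is the vf_rep itself; the callback resolves the VF's firmware fid and hands flower rules to the bnxt TC layer. A hedged sketch around the listed lines; the CLSFLOWER dispatch to bnxt_tc_setup_flower() is inferred from the driver's flower support and is not shown in the listing:

static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct bnxt_vf_rep *vf_rep = cb_priv;
	struct bnxt *bp = vf_rep->bp;
	int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;

	if (!bnxt_tc_flower_enabled(vf_rep->bp) ||
	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		/* offload the flower rule on behalf of the VF's fid */
		return bnxt_tc_setup_flower(bp, vf_fid, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
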
200 struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev); in bnxt_vf_rep_rx() local
202 vf_rep->rx_stats.bytes += skb->len; in bnxt_vf_rep_rx()
203 vf_rep->rx_stats.packets++; in bnxt_vf_rep_rx()
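
On receive, the PF datapath has already used the packet's rx_cfa_code to re-point skb->dev at the matching representor before calling in here, so the function only accounts the packet and pushes it up the stack. A sketch completing the listed lines; the netif_receive_skb() hand-off is inferred:

void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev);

	vf_rep->rx_stats.bytes += skb->len;
	vf_rep->rx_stats.packets++;

	netif_receive_skb(skb);
}
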
211 struct bnxt_vf_rep *vf_rep = netdev_priv(dev); in bnxt_vf_rep_get_phys_port_name() local
212 struct pci_dev *pf_pdev = vf_rep->bp->pdev; in bnxt_vf_rep_get_phys_port_name()
216 vf_rep->vf_idx); in bnxt_vf_rep_get_phys_port_name()
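
The physical port name combines the PF's PCI function number with the VF index, which is why both pf_pdev and vf_idx appear here. A sketch of the handler; the exact "pfXvfY" format string is an assumption drawn from the listed arguments:

static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
					   size_t len)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
	struct pci_dev *pf_pdev = vf_rep->bp->pdev;
	int rc;

	/* e.g. "pf0vf3" for VF 3 behind PCI function 0 */
	rc = snprintf(buf, len, "pf%dvf%d", PCI_FUNC(pf_pdev->devfn),
		      vf_rep->vf_idx);
	if (rc >= len)
		return -EOPNOTSUPP;
	return 0;
}
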
231 struct bnxt_vf_rep *vf_rep = netdev_priv(dev); in bnxt_vf_rep_get_port_parent_id() local
236 return bnxt_get_port_parent_id(vf_rep->bp->dev, ppid); in bnxt_vf_rep_get_port_parent_id()
265 struct bnxt_vf_rep *vf_rep; in bnxt_vf_reps_close() local
273 vf_rep = bp->vf_reps[i]; in bnxt_vf_reps_close()
274 if (netif_running(vf_rep->dev)) in bnxt_vf_reps_close()
275 bnxt_vf_rep_close(vf_rep->dev); in bnxt_vf_reps_close()
298 static void __bnxt_free_one_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep) in __bnxt_free_one_vf_rep() argument
300 if (!vf_rep) in __bnxt_free_one_vf_rep()
303 if (vf_rep->dst) { in __bnxt_free_one_vf_rep()
304 dst_release((struct dst_entry *)vf_rep->dst); in __bnxt_free_one_vf_rep()
305 vf_rep->dst = NULL; in __bnxt_free_one_vf_rep()
307 if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) { in __bnxt_free_one_vf_rep()
308 hwrm_cfa_vfr_free(bp, vf_rep->vf_idx); in __bnxt_free_one_vf_rep()
309 vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; in __bnxt_free_one_vf_rep()
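
Freeing one representor mirrors the alloc path: release the metadata dst, return the CFA VF-rep resource to firmware, and invalidate tx_cfa_action so a second call is a no-op. Filling out the listed lines gives roughly:

static void __bnxt_free_one_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep)
{
	if (!vf_rep)
		return;

	if (vf_rep->dst) {
		dst_release((struct dst_entry *)vf_rep->dst);
		vf_rep->dst = NULL;
	}
	if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) {
		hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
		vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
	}
}
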
316 struct bnxt_vf_rep *vf_rep; in __bnxt_vf_reps_destroy() local
320 vf_rep = bp->vf_reps[i]; in __bnxt_vf_reps_destroy()
321 if (vf_rep) { in __bnxt_vf_reps_destroy()
322 __bnxt_free_one_vf_rep(bp, vf_rep); in __bnxt_vf_reps_destroy()
323 if (vf_rep->dev) { in __bnxt_vf_reps_destroy()
327 if (vf_rep->dev->netdev_ops) in __bnxt_vf_reps_destroy()
328 unregister_netdev(vf_rep->dev); in __bnxt_vf_reps_destroy()
329 free_netdev(vf_rep->dev); in __bnxt_vf_reps_destroy()
393 static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, in bnxt_alloc_vf_rep() argument
397 if (hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, &vf_rep->tx_cfa_action, in bnxt_alloc_vf_rep()
398 &vf_rep->rx_cfa_code)) in bnxt_alloc_vf_rep()
401 cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx; in bnxt_alloc_vf_rep()
402 vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); in bnxt_alloc_vf_rep()
403 if (!vf_rep->dst) in bnxt_alloc_vf_rep()
407 vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action; in bnxt_alloc_vf_rep()
408 vf_rep->dst->u.port_info.lower_dev = bp->dev; in bnxt_alloc_vf_rep()
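
Allocation gets the tx_cfa_action/rx_cfa_code pair from firmware, records the rx_cfa_code-to-vf_idx mapping used by the RX demux, and builds the METADATA_HW_PORT_MUX dst consumed by the xmit path above. A sketch completing the listed lines; the specific error codes are assumptions:

static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
			     u16 *cfa_code_map)
{
	/* get the CFA handles for this VF from firmware */
	if (hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, &vf_rep->tx_cfa_action,
			       &vf_rep->rx_cfa_code))
		return -ENOLINK;

	cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
	vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
	if (!vf_rep->dst)
		return -ENOMEM;

	/* only the cfa_action is needed to mux a packet on TX */
	vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
	vf_rep->dst->u.port_info.lower_dev = bp->dev;

	return 0;
}
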
421 struct bnxt_vf_rep *vf_rep; in bnxt_vf_reps_alloc() local
434 vf_rep = bp->vf_reps[i]; in bnxt_vf_reps_alloc()
435 vf_rep->vf_idx = i; in bnxt_vf_reps_alloc()
437 rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map); in bnxt_vf_reps_alloc()
465 static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, in bnxt_vf_rep_netdev_init() argument
482 bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, in bnxt_vf_rep_netdev_init()
486 if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu)) in bnxt_vf_rep_netdev_init()
494 struct bnxt_vf_rep *vf_rep; in bnxt_vf_reps_create() local
501 bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); in bnxt_vf_reps_create()
516 dev = alloc_etherdev(sizeof(*vf_rep)); in bnxt_vf_reps_create()
522 vf_rep = netdev_priv(dev); in bnxt_vf_reps_create()
523 bp->vf_reps[i] = vf_rep; in bnxt_vf_reps_create()
524 vf_rep->dev = dev; in bnxt_vf_reps_create()
525 vf_rep->bp = bp; in bnxt_vf_reps_create()
526 vf_rep->vf_idx = i; in bnxt_vf_reps_create()
527 vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; in bnxt_vf_reps_create()
529 rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map); in bnxt_vf_reps_create()
533 bnxt_vf_rep_netdev_init(bp, vf_rep, dev); in bnxt_vf_reps_create()
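
Creation walks every VF: allocate an etherdev with the representor private area, fill in the back-pointers, mark tx_cfa_action invalid, then run the alloc/netdev-init/register sequence. A condensed sketch of the per-VF loop body; the register_netdev() call and the error unwinding are inferred from the destroy path in the listing:

	for (i = 0; i < num_vfs; i++) {
		dev = alloc_etherdev(sizeof(*vf_rep));
		if (!dev) {
			rc = -ENOMEM;
			goto err;
		}

		vf_rep = netdev_priv(dev);
		bp->vf_reps[i] = vf_rep;
		vf_rep->dev = dev;
		vf_rep->bp = bp;
		vf_rep->vf_idx = i;
		vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;

		rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map);
		if (rc)
			goto err;

		bnxt_vf_rep_netdev_init(bp, vf_rep, dev);
		rc = register_netdev(dev);
		if (rc) {
			/* no unregister needed in cleanup if register failed */
			dev->netdev_ops = NULL;
			goto err;
		}
	}
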