
Searched refs: num_vfs (Results 1 – 25 of 152), sorted by relevance


/linux-6.12.1/drivers/gpu/drm/xe/
xe_pci_sriov.c
16 static int pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs) in pf_needs_provisioning() argument
20 for (n = 1; n <= num_vfs; n++) in pf_needs_provisioning()
27 static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_provision_vfs() argument
34 if (!pf_needs_provisioning(gt, num_vfs)) in pf_provision_vfs()
36 err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs); in pf_provision_vfs()
43 static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_unprovision_vfs() argument
50 for (n = 1; n <= num_vfs; n++) in pf_unprovision_vfs()
54 static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs) in pf_reset_vfs() argument
61 for (n = 1; n <= num_vfs; n++) in pf_reset_vfs()
65 static int pf_enable_vfs(struct xe_device *xe, int num_vfs) in pf_enable_vfs() argument
[all …]
xe_gt_sriov_pf_config.h
18 unsigned int vfid, unsigned int num_vfs);
20 unsigned int vfid, unsigned int num_vfs, u64 size);
24 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
25 int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
30 int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
31 int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
36 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
37 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
52 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
xe_gt_sriov_pf_config.c
538 static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs, in pf_config_bulk_set_u64_done() argument
545 xe_gt_assert(gt, num_vfs); in pf_config_bulk_set_u64_done()
548 if (num_vfs == 1) in pf_config_bulk_set_u64_done()
553 first, first + num_vfs - 1, what); in pf_config_bulk_set_u64_done()
564 first, first + num_vfs - 1, value, size, what); in pf_config_bulk_set_u64_done()
580 unsigned int num_vfs, u64 size) in xe_gt_sriov_pf_config_bulk_set_ggtt() argument
588 if (!num_vfs) in xe_gt_sriov_pf_config_bulk_set_ggtt()
592 for (n = vfid; n < vfid + num_vfs; n++) { in xe_gt_sriov_pf_config_bulk_set_ggtt()
599 return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size, in xe_gt_sriov_pf_config_bulk_set_ggtt()
619 static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs) in pf_estimate_fair_ggtt() argument
[all …]
xe_pci_sriov.h
12 int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
14 static inline int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) in xe_pci_sriov_configure() argument
/linux-6.12.1/drivers/crypto/cavium/nitrox/
nitrox_sriov.c
15 static inline bool num_vfs_valid(int num_vfs) in num_vfs_valid() argument
19 switch (num_vfs) { in num_vfs_valid()
31 static inline enum vf_mode num_vfs_to_mode(int num_vfs) in num_vfs_to_mode() argument
35 switch (num_vfs) { in num_vfs_to_mode()
153 static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs) in nitrox_sriov_enable() argument
158 if (!num_vfs_valid(num_vfs)) { in nitrox_sriov_enable()
159 dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs); in nitrox_sriov_enable()
163 if (pci_num_vf(pdev) == num_vfs) in nitrox_sriov_enable()
164 return num_vfs; in nitrox_sriov_enable()
166 err = pci_enable_sriov(pdev, num_vfs); in nitrox_sriov_enable()
[all …]
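
Aside: the nitrox entry above shows a device that only accepts a fixed set of VF counts. A minimal sketch of that validation step, assuming hypothetical supported counts of 8/16/32 (not the actual nitrox modes) and an illustrative helper name:

#include <linux/types.h>

/* Accept only the VF counts the (hypothetical) hardware supports. */
static bool example_num_vfs_valid(int num_vfs)
{
	switch (num_vfs) {
	case 8:
	case 16:
	case 32:
		return true;
	default:
		return false;
	}
}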
nitrox_isr.h
13 int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
15 static inline int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs) in nitrox_sriov_configure() argument
/linux-6.12.1/drivers/crypto/marvell/octeontx2/
otx2_cptpf_main.c
21 int num_vfs) in cptpf_enable_vfpf_mbox_intr() argument
32 ena_bits = ((num_vfs - 1) % 64); in cptpf_enable_vfpf_mbox_intr()
37 if (num_vfs > 64) { in cptpf_enable_vfpf_mbox_intr()
39 ena_bits = num_vfs - 64 - 1; in cptpf_enable_vfpf_mbox_intr()
47 int num_vfs) in cptpf_disable_vfpf_mbox_intr() argument
63 if (num_vfs > 64) { in cptpf_disable_vfpf_mbox_intr()
72 int num_vfs) in cptpf_enable_vf_flr_me_intrs() argument
76 INTR_MASK(num_vfs)); in cptpf_enable_vf_flr_me_intrs()
80 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs)); in cptpf_enable_vf_flr_me_intrs()
83 INTR_MASK(num_vfs)); in cptpf_enable_vf_flr_me_intrs()
[all …]
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/
sriov.c
72 static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) in mlx5_device_enable_sriov() argument
78 err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs); in mlx5_device_enable_sriov()
85 num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs); in mlx5_device_enable_sriov()
86 for (vf = 0; vf < num_vfs; vf++) { in mlx5_device_enable_sriov()
126 mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf, bool num_vf_change) in mlx5_device_disable_sriov() argument
134 for (vf = num_vfs - 1; vf >= 0; vf--) { in mlx5_device_disable_sriov()
179 static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs) in mlx5_sriov_enable() argument
186 err = mlx5_device_enable_sriov(dev, num_vfs); in mlx5_sriov_enable()
193 err = pci_enable_sriov(pdev, num_vfs); in mlx5_sriov_enable()
196 mlx5_device_disable_sriov(dev, num_vfs, true, true); in mlx5_sriov_enable()
[all …]
eswitch.c
1063 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { in mlx5_eswitch_clear_vf_vports_info()
1182 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs) in mlx5_eswitch_unload_vf_vports() argument
1187 mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { in mlx5_eswitch_unload_vf_vports()
1207 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, in mlx5_eswitch_load_vf_vports() argument
1214 mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { in mlx5_eswitch_load_vf_vports()
1223 mlx5_eswitch_unload_vf_vports(esw, num_vfs); in mlx5_eswitch_load_vf_vports()
1307 ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs, in mlx5_eswitch_enable_pf_vf_vports()
1332 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); in mlx5_eswitch_disable_pf_vf_vports()
1336 mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs); in mlx5_eswitch_disable_pf_vf_vports()
1367 mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs) in mlx5_eswitch_update_num_of_vfs() argument
[all …]
/linux-6.12.1/arch/powerpc/platforms/powernv/
pci-sriov.c
284 static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs) in pnv_pci_vf_release_m64() argument
415 static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) in pnv_pci_vf_assign_m64() argument
454 for (j = 0; j < num_vfs; j++) { in pnv_pci_vf_assign_m64()
471 pnv_pci_vf_release_m64(pdev, num_vfs); in pnv_pci_vf_assign_m64()
508 u16 num_vfs; in pnv_pci_vf_resource_shift() local
523 num_vfs = iov->num_vfs; in pnv_pci_vf_resource_shift()
540 res2.end = res2.start + (size * num_vfs) - 1; in pnv_pci_vf_resource_shift()
544 i, &res2, res, num_vfs, offset); in pnv_pci_vf_resource_shift()
570 num_vfs, offset); in pnv_pci_vf_resource_shift()
593 u16 num_vfs, base_pe; in pnv_pci_sriov_disable() local
[all …]
/linux-6.12.1/drivers/net/ethernet/broadcom/bnxt/
bnxt_sriov.c
369 static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) in bnxt_set_vf_attr() argument
374 for (i = 0; i < num_vfs; i++) { in bnxt_set_vf_attr()
381 static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) in bnxt_hwrm_func_vf_resource_free() argument
392 for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { in bnxt_hwrm_func_vf_resource_free()
424 static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) in bnxt_alloc_vf_resources() argument
429 bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); in bnxt_alloc_vf_resources()
433 bnxt_set_vf_attr(bp, num_vfs); in bnxt_alloc_vf_resources()
435 size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE; in bnxt_alloc_vf_resources()
449 for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) { in bnxt_alloc_vf_resources()
526 static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) in bnxt_hwrm_func_vf_resc_cfg() argument
[all …]
bnxt_vfr.c
266 u16 num_vfs, i; in bnxt_vf_reps_close() local
271 num_vfs = pci_num_vf(bp->pdev); in bnxt_vf_reps_close()
272 for (i = 0; i < num_vfs; i++) { in bnxt_vf_reps_close()
315 u16 num_vfs = pci_num_vf(bp->pdev); in __bnxt_vf_reps_destroy() local
319 for (i = 0; i < num_vfs; i++) { in __bnxt_vf_reps_destroy()
383 u16 num_vfs = pci_num_vf(bp->pdev); in bnxt_vf_reps_free() local
389 for (i = 0; i < num_vfs; i++) in bnxt_vf_reps_free()
420 u16 *cfa_code_map = bp->cfa_code_map, num_vfs = pci_num_vf(bp->pdev); in bnxt_vf_reps_alloc() local
433 for (i = 0; i < num_vfs; i++) { in bnxt_vf_reps_alloc()
493 u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev); in bnxt_vf_reps_create() local
[all …]
/linux-6.12.1/drivers/net/ethernet/intel/fm10k/
fm10k_iov.c
179 i = iov_data->num_vfs; in fm10k_iov_event()
226 for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) { in fm10k_iov_mbx()
287 int num_vfs, i; in fm10k_iov_suspend() local
290 num_vfs = iov_data ? iov_data->num_vfs : 0; in fm10k_iov_suspend()
297 for (i = 0; i < num_vfs; i++) { in fm10k_iov_suspend()
332 int num_vfs, i; in fm10k_iov_resume() local
335 num_vfs = iov_data ? iov_data->num_vfs : 0; in fm10k_iov_resume()
348 hw->iov.ops.assign_resources(hw, num_vfs, num_vfs); in fm10k_iov_resume()
362 for (i = 0; i < num_vfs; i++) { in fm10k_iov_resume()
395 if (vf_idx >= iov_data->num_vfs) in fm10k_iov_update_pvid()
[all …]
fm10k_pf.c
444 if (!hw->iov.num_vfs) in fm10k_update_int_moderator_pf()
586 u16 num_vfs = hw->iov.num_vfs; in fm10k_vf_queue_index() local
589 vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx); in fm10k_vf_queue_index()
620 static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, in fm10k_iov_assign_resources_pf() argument
632 if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs)) in fm10k_iov_assign_resources_pf()
636 hw->iov.num_vfs = num_vfs; in fm10k_iov_assign_resources_pf()
640 qmap_stride = (num_vfs > 8) ? 32 : 256; in fm10k_iov_assign_resources_pf()
649 for (i = 0; i < num_vfs; i++) { in fm10k_iov_assign_resources_pf()
657 for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;) in fm10k_iov_assign_resources_pf()
684 fm10k_vf_vector_index(hw, num_vfs - 1)); in fm10k_iov_assign_resources_pf()
[all …]
/linux-6.12.1/arch/powerpc/platforms/pseries/
pci.c
30 static int pseries_send_map_pe(struct pci_dev *pdev, u16 num_vfs, in pseries_send_map_pe() argument
50 num_vfs * sizeof(struct pe_map_bar_entry)); in pseries_send_map_pe()
76 static int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs) in pseries_associate_pes() argument
90 for (vf_index = 0; vf_index < num_vfs; vf_index++) { in pseries_associate_pes()
109 rc = pseries_send_map_pe(pdev, num_vfs, vf_pe_array); in pseries_associate_pes()
112 for (vf_index = 0; vf_index < num_vfs; vf_index++) in pseries_associate_pes()
120 static int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) in pseries_pci_sriov_enable() argument
135 if (max_config_vfs < num_vfs && num_vfs > MAX_VFS_FOR_MAP_PE) { in pseries_pci_sriov_enable()
138 num_vfs, (num_vfs > MAX_VFS_FOR_MAP_PE) ? in pseries_pci_sriov_enable()
144 pdn->pe_num_map = kmalloc_array(num_vfs, in pseries_pci_sriov_enable()
[all …]
/linux-6.12.1/drivers/net/ethernet/sfc/
ef100_sriov.c
16 static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs) in efx_ef100_pci_sriov_enable() argument
23 efx->vf_count = num_vfs; in efx_ef100_pci_sriov_enable()
24 rc = pci_enable_sriov(dev, num_vfs); in efx_ef100_pci_sriov_enable()
31 for (i = 0; i < num_vfs; i++) { in efx_ef100_pci_sriov_enable()
66 int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs) in efx_ef100_sriov_configure() argument
68 if (num_vfs == 0) in efx_ef100_sriov_configure()
71 return efx_ef100_pci_sriov_enable(efx, num_vfs); in efx_ef100_sriov_configure()
/linux-6.12.1/drivers/crypto/intel/qat/qat_common/
adf_pfvf_pf_msg.c
17 int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); in adf_pf2vf_notify_restarting() local
20 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { in adf_pf2vf_notify_restarting()
37 int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); in adf_pf2vf_wait_for_restarting_complete() local
45 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) in adf_pf2vf_wait_for_restarting_complete()
60 int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); in adf_pf2vf_notify_restarted() local
64 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { in adf_pf2vf_notify_restarted()
75 int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); in adf_pf2vf_notify_fatal_error() local
79 for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { in adf_pf2vf_notify_fatal_error()
/linux-6.12.1/drivers/net/ethernet/pensando/ionic/
ionic_bus_pci.c
120 for (i = ionic->num_vfs - 1; i >= 0; i--) { in ionic_vf_dealloc_locked()
134 ionic->num_vfs = 0; in ionic_vf_dealloc_locked()
144 static int ionic_vf_alloc(struct ionic *ionic, int num_vfs) in ionic_vf_alloc() argument
153 ionic->vfs = kcalloc(num_vfs, sizeof(struct ionic_vf), GFP_KERNEL); in ionic_vf_alloc()
159 for (i = 0; i < num_vfs; i++) { in ionic_vf_alloc()
169 ionic->num_vfs++; in ionic_vf_alloc()
183 static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs) in ionic_sriov_configure() argument
193 if (num_vfs > 0) { in ionic_sriov_configure()
194 ret = pci_enable_sriov(pdev, num_vfs); in ionic_sriov_configure()
200 ret = ionic_vf_alloc(ionic, num_vfs); in ionic_sriov_configure()
[all …]
/linux-6.12.1/drivers/net/ethernet/intel/ixgbe/
ixgbe_sriov.c
25 unsigned int num_vfs) in ixgbe_alloc_vf_macvlans() argument
35 (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); in ixgbe_alloc_vf_macvlans()
52 unsigned int num_vfs) in __ixgbe_enable_sriov() argument
67 adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), in __ixgbe_enable_sriov()
72 adapter->num_vfs = num_vfs; in __ixgbe_enable_sriov()
74 ixgbe_alloc_vf_macvlans(adapter, num_vfs); in __ixgbe_enable_sriov()
75 adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; in __ixgbe_enable_sriov()
82 if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) { in __ixgbe_enable_sriov()
85 } else if (num_vfs < 32) { in __ixgbe_enable_sriov()
97 for (i = 0; i < num_vfs; i++) { in __ixgbe_enable_sriov()
[all …]
/linux-6.12.1/drivers/net/ethernet/intel/ice/
ice_sriov.c
429 static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) in ice_set_per_vf_res() argument
439 if (!num_vfs) in ice_set_per_vf_res()
445 msix_avail_per_vf = msix_avail_for_sriov / num_vfs; in ice_set_per_vf_res()
457 num_vfs); in ice_set_per_vf_res()
463 avail_qs = ice_get_avail_txq_count(pf) / num_vfs; in ice_set_per_vf_res()
471 avail_qs = ice_get_avail_rxq_count(pf) / num_vfs; in ice_set_per_vf_res()
479 ICE_MIN_QS_PER_VF, num_vfs); in ice_set_per_vf_res()
483 err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs); in ice_set_per_vf_res()
486 num_vfs, err); in ice_set_per_vf_res()
494 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per); in ice_set_per_vf_res()
[all …]
/linux-6.12.1/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_sriov_pf.c
75 u32 num_vfs, max, temp; in qlcnic_sriov_pf_cal_res_limit() local
81 num_vfs = sriov->num_vfs; in qlcnic_sriov_pf_cal_res_limit()
82 max = num_vfs + 1; in qlcnic_sriov_pf_cal_res_limit()
95 temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs; in qlcnic_sriov_pf_cal_res_limit()
97 temp = res->num_tx_mac_filters - num_macs * num_vfs; in qlcnic_sriov_pf_cal_res_limit()
99 temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC; in qlcnic_sriov_pf_cal_res_limit()
103 info->max_tx_ques = res->num_tx_queues - sriov->num_vfs; in qlcnic_sriov_pf_cal_res_limit()
162 total_fn = sriov->num_vfs + 1; in qlcnic_sriov_set_vf_max_vlan()
412 for (i = 0; i < sriov->num_vfs; i++) in qlcnic_sriov_pf_del_flr_queue()
566 static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs) in qlcnic_sriov_pf_enable() argument
[all …]
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/
nfp_main.c
242 static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) in nfp_pcie_sriov_enable() argument
249 if (num_vfs > pf->limit_vfs) { in nfp_pcie_sriov_enable()
255 err = pci_enable_sriov(pdev, num_vfs); in nfp_pcie_sriov_enable()
264 err = nfp_app_sriov_enable(pf->app, num_vfs); in nfp_pcie_sriov_enable()
272 pf->num_vfs = num_vfs; in nfp_pcie_sriov_enable()
274 dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs); in nfp_pcie_sriov_enable()
277 return num_vfs; in nfp_pcie_sriov_enable()
308 pf->num_vfs = 0; in nfp_pcie_sriov_disable()
318 static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) in nfp_pcie_sriov_configure() argument
323 if (num_vfs == 0) in nfp_pcie_sriov_configure()
[all …]
/linux-6.12.1/drivers/net/ethernet/huawei/hinic/
hinic_sriov.c
604 if (vf >= sriov_info->num_vfs) in hinic_ndo_get_vf_config()
649 if (!is_valid_ether_addr(mac) || vf >= sriov_info->num_vfs) in hinic_ndo_set_vf_mac()
796 if (vf >= sriov_info->num_vfs || vlan >= VLAN_N_VID || qos > HINIC_MAX_QOS) in hinic_ndo_set_vf_vlan()
837 if (vf >= sriov_info->num_vfs) in hinic_ndo_set_vf_trust()
868 if (vf >= nic_dev->sriov_info.num_vfs) { in hinic_ndo_set_vf_bw()
870 nic_dev->sriov_info.num_vfs); in hinic_ndo_set_vf_bw()
956 if (vf >= sriov_info->num_vfs) in hinic_ndo_set_vf_spoofchk()
1020 if (vf_id >= sriov_info->num_vfs) { in hinic_ndo_set_vf_link_state()
1302 tmp_vfs = (u16)sriov_info->num_vfs; in hinic_pci_sriov_disable()
1303 sriov_info->num_vfs = 0; in hinic_pci_sriov_disable()
[all …]
/linux-6.12.1/drivers/pci/
iov.c
398 u16 num_vfs; in sriov_numvfs_show() local
402 num_vfs = pdev->sriov->num_VFs; in sriov_numvfs_show()
405 return sysfs_emit(buf, "%u\n", num_vfs); in sriov_numvfs_show()
421 u16 num_vfs; in sriov_numvfs_store() local
423 if (kstrtou16(buf, 0, &num_vfs) < 0) in sriov_numvfs_store()
426 if (num_vfs > pci_sriov_get_totalvfs(pdev)) in sriov_numvfs_store()
431 if (num_vfs == pdev->sriov->num_VFs) in sriov_numvfs_store()
448 if (num_vfs == 0) { in sriov_numvfs_store()
457 pdev->sriov->num_VFs, num_vfs); in sriov_numvfs_store()
462 ret = pdev->driver->sriov_configure(pdev, num_vfs); in sriov_numvfs_store()
[all …]
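
Aside: drivers/pci/iov.c above is the sysfs side of this interface; a write to /sys/bus/pci/devices/.../sriov_numvfs lands in the owning driver's sriov_configure callback via sriov_numvfs_store(). Below is a minimal sketch of that callback contract, assuming a hypothetical "mydrv" driver; the pci_enable_sriov()/pci_disable_sriov()/pci_vfs_assigned()/pci_sriov_get_totalvfs() calls are the standard kernel API.

#include <linux/module.h>
#include <linux/pci.h>

static int mydrv_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0) {
		/* Refuse to disable while VFs are still assigned to guests. */
		if (pci_vfs_assigned(pdev))
			return -EBUSY;
		pci_disable_sriov(pdev);
		return 0;
	}

	if (num_vfs > pci_sriov_get_totalvfs(pdev))
		return -EINVAL;	/* more than the device advertises */

	/* On success report how many VFs were enabled, as the core expects. */
	return pci_enable_sriov(pdev, num_vfs) ?: num_vfs;
}

static struct pci_driver mydrv_driver = {
	.name			= "mydrv",
	.sriov_configure	= mydrv_sriov_configure,
	/* .id_table, .probe and .remove omitted from this sketch */
};
module_pci_driver(mydrv_driver);
MODULE_LICENSE("GPL");

Returning the enabled VF count rather than 0 on success is what sriov_numvfs_store() expects, which is why several enable paths above (nitrox, nfp) end with "return num_vfs;".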
/linux-6.12.1/drivers/crypto/cavium/cpt/
cptpf_main.c
19 static u32 num_vfs = 4; /* Default 4 VF enabled */ variable
20 module_param(num_vfs, uint, 0444);
21 MODULE_PARM_DESC(num_vfs, "Number of VFs to enable(1-16)");
502 static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs) in cpt_sriov_init() argument
515 cpt->num_vf_en = num_vfs; /* User requested VFs */ in cpt_sriov_init()
548 if (num_vfs > 16 || num_vfs < 4) { in cpt_probe()
550 num_vfs); in cpt_probe()
551 num_vfs = 4; in cpt_probe()
600 err = cpt_sriov_init(cpt, num_vfs); in cpt_probe()
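
Aside: cptpf_main.c above takes the VF count from a module parameter and clamps it at probe time. A minimal sketch of that pattern with illustrative names, mirroring the 4..16 bounds shown in the listing:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

static unsigned int example_num_vfs = 4;	/* default: 4 VFs */
module_param(example_num_vfs, uint, 0444);
MODULE_PARM_DESC(example_num_vfs, "Number of VFs to enable (4-16)");

static int __init example_init(void)
{
	/* Out-of-range requests fall back to the default, as in the listing. */
	if (example_num_vfs > 16 || example_num_vfs < 4)
		example_num_vfs = 4;
	pr_info("example: enabling %u VFs\n", example_num_vfs);
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");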
