Searched refs:MLX5_CAP_QOS (Results 1 – 10 of 10) sorted by relevance

/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/

qos.c
    12   if (!MLX5_CAP_QOS(mdev, nic_sq_scheduling))   in mlx5_qos_is_supported()
    14   if (!MLX5_CAP_QOS(mdev, nic_bw_share))   in mlx5_qos_is_supported()
    16   if (!MLX5_CAP_QOS(mdev, nic_rate_limit))   in mlx5_qos_is_supported()
    23   return 1 << MLX5_CAP_QOS(mdev, log_max_qos_nic_queue_group);   in mlx5_qos_max_leaf_nodes()
    31   if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))   in mlx5_qos_create_leaf_node()
    50   if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||   in mlx5_qos_create_inner_node()
    51   !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))   in mlx5_qos_create_inner_node()
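
For context, every qos.c hit above belongs to the same gating pattern: a NIC scheduling feature is only used when its QoS capability bit or field is set. A minimal sketch of that pattern, reconstructed from the snippets listed here (the function names appear in the results, but the bodies below are a paraphrase, not the verbatim kernel source):

    #include <linux/mlx5/device.h>
    #include <linux/mlx5/driver.h>

    /* Each NIC scheduler feature has its own bit in the QoS capability page. */
    static bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev)
    {
        if (!MLX5_CAP_QOS(mdev, nic_sq_scheduling))
            return false;
        if (!MLX5_CAP_QOS(mdev, nic_bw_share))
            return false;
        if (!MLX5_CAP_QOS(mdev, nic_rate_limit))
            return false;
        return true;
    }

    /* The capability stores a log2 value; the usable queue-group count is 2^cap. */
    static int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
    {
        return 1 << MLX5_CAP_QOS(mdev, log_max_qos_nic_queue_group);
    }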

rl.c
    327  MLX5_CAP_QOS(dev, packet_pacing_uid) ?   in mlx5_rl_add_rate()
    351  MLX5_CAP_QOS(dev, packet_pacing_uid) ?   in mlx5_rl_remove_rate()
    369  if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {   in mlx5_init_rl_table()
    377  table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;   in mlx5_init_rl_table()
    378  table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);   in mlx5_init_rl_table()
    379  table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);   in mlx5_init_rl_table()
    393  if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing))   in mlx5_cleanup_rl_table()

fw.c
    213  err = mlx5_core_get_caps_mode(dev, MLX5_CAP_QOS, HCA_CAP_OPMOD_GET_CUR);   in mlx5_query_hca_caps()

main.c
    1780 MLX5_CAP_QOS,

/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/esw/

qos.c
    30   if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))   in esw_qos_tsar_config()
    94   u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);   in esw_qos_calculate_min_rate_divider()
    139  u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);   in esw_qos_normalize_vports_min_rate()
    167  u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);   in esw_qos_normalize_groups_min_rate()
    204  fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);   in esw_qos_set_vport_min_rate()
    205  min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&   in esw_qos_set_vport_min_rate()
    229  max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);   in esw_qos_set_vport_max_rate()
    253  u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);   in esw_qos_set_group_min_rate()
    258  if (!(MLX5_CAP_QOS(dev, esw_bw_share) && fw_max_bw_share >= MLX5_MIN_BW_SHARE))   in esw_qos_set_group_min_rate()
    319  return MLX5_CAP_QOS(dev, esw_element_type) &   in esw_qos_element_type_supported()
    [all …]
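
The eswitch hits gate vport rate configuration the same way: min-rate support requires the esw_bw_share capability plus a usable TSAR bandwidth-share range, while max-rate support only needs esw_rate_limit. A hedged sketch of those two checks, based solely on the snippets above (the helper names below are illustrative, not kernel functions):

    #include <linux/mlx5/device.h>
    #include <linux/mlx5/driver.h>
    #include <linux/mlx5/mlx5_ifc.h>

    /* Min-rate (bandwidth share) needs esw_bw_share and a sane max_tsar_bw_share. */
    static bool esw_min_rate_supported(struct mlx5_core_dev *dev)
    {
        u32 fw_max_bw_share = MLX5_CAP_QOS(dev, max_tsar_bw_share);

        return MLX5_CAP_QOS(dev, esw_bw_share) &&
               fw_max_bw_share >= MLX5_MIN_BW_SHARE;
    }

    /* Max-rate (rate limiting) is gated on a single capability bit. */
    static bool esw_max_rate_supported(struct mlx5_core_dev *dev)
    {
        return MLX5_CAP_QOS(dev, esw_rate_limit);
    }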

/linux-6.12.1/drivers/infiniband/hw/mlx5/

qos.c
    20   MLX5_CAP_QOS(dev->mdev, packet_pacing) &&   in pp_is_supported()
    21   MLX5_CAP_QOS(dev->mdev, packet_pacing_uid));   in pp_is_supported()

main.c
    1141 if (MLX5_CAP_QOS(mdev, packet_pacing) &&   in mlx5_ib_query_device()
    1144 MLX5_CAP_QOS(mdev, packet_pacing_max_rate);   in mlx5_ib_query_device()
    1146 MLX5_CAP_QOS(mdev, packet_pacing_min_rate);   in mlx5_ib_query_device()
    1149 if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&   in mlx5_ib_query_device()
    1150 MLX5_CAP_QOS(mdev, packet_pacing_typical_size))   in mlx5_ib_query_device()

qp.c
    4350 MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {   in __mlx5_ib_modify_qp()
    4361 MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {   in __mlx5_ib_modify_qp()

/linux-6.12.1/include/linux/mlx5/

device.h
    1238 MLX5_CAP_QOS = 0xc,   (enumerator)
    1405 #define MLX5_CAP_QOS(mdev, cap)\   (macro)
    1406 MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
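
The device.h hit shows both sides of the mechanism: MLX5_CAP_QOS is a capability-type enumerator (0xc) that indexes the driver's cached HCA capability pages, and a macro of the same name that reads one field from the current ("cur") qos_cap layout via MLX5_GET. Callers therefore tend to check the general qos bit first and then a specific QoS field, as in the mlx5_init_rl_table() hit above. A hedged sketch of that usage (the helpers below are illustrative, not kernel functions):

    #include <linux/mlx5/device.h>
    #include <linux/mlx5/driver.h>

    /*
     * Packet pacing is usable only when the general "qos" capability bit is
     * set and the QoS capability page advertises packet_pacing, mirroring
     * the guard seen in mlx5_init_rl_table().
     */
    static bool packet_pacing_usable(struct mlx5_core_dev *dev)
    {
        return MLX5_CAP_GEN(dev, qos) && MLX5_CAP_QOS(dev, packet_pacing);
    }

    /* The supported rate range is read straight from the cached qos_cap fields. */
    static void packet_pacing_limits(struct mlx5_core_dev *dev,
                                     u32 *min_rate, u32 *max_rate)
    {
        *max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
        *min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
    }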

/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/en/tc/

meter.c
    559  MLX5_CAP_QOS(mdev, log_meter_aso_max_alloc));   in mlx5e_flow_meters_init()