Lines matching refs: ucmd in drivers/infiniband/hw/mlx5/qp.c
434 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) in set_rq_size() argument
452 if (ucmd) { in set_rq_size()
453 qp->rq.wqe_cnt = ucmd->rq_wqe_count; in set_rq_size()
454 if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift)) in set_rq_size()
456 qp->rq.wqe_shift = ucmd->rq_wqe_shift; in set_rq_size()
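
set_rq_size() trusts nothing in ucmd: line 454 rejects an rq_wqe_shift wider than its own field before the value is ever used as a shift amount. A minimal userspace sketch of the same bound check (helper name hypothetical, not the kernel code):

#include <limits.h>   /* CHAR_BIT stands in for the kernel's BITS_PER_BYTE */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the check on line 454: rq_wqe_shift is copied
 * from untrusted userspace, so it is checked against the bit width of
 * its own type before the kernel ever uses it as a shift amount. */
static int validate_wqe_shift(uint32_t rq_wqe_shift)
{
    if (rq_wqe_shift > CHAR_BIT * sizeof(rq_wqe_shift))
        return -1;                      /* the kernel returns -EINVAL here */
    return 0;
}

int main(void)
{
    printf("shift 6:  %s\n", validate_wqe_shift(6)  ? "rejected" : "ok");
    printf("shift 40: %s\n", validate_wqe_shift(40) ? "rejected" : "ok");
    return 0;
}
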
638 struct mlx5_ib_create_qp *ucmd, in set_user_buf_size() argument
650 if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) { in set_user_buf_size()
652 ucmd->sq_wqe_count); in set_user_buf_size()
656 qp->sq.wqe_cnt = ucmd->sq_wqe_count; in set_user_buf_size()
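
The check on line 650 requires sq_wqe_count to be a power of two, since work queue rings are addressed with a wqe_cnt - 1 mask rather than a modulo. A runnable sketch of the same test (the kernel uses its own is_power_of_2() helper):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_power_of_2(uint64_t n)   /* userspace stand-in for the kernel helper */
{
    return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
    uint32_t sq_wqe_count = 384;        /* hypothetical user-supplied value */

    /* Mirrors line 650: a nonzero count that is not a power of two is
     * rejected, because ring slots are indexed as idx & (wqe_cnt - 1). */
    if (sq_wqe_count && !is_power_of_2(sq_wqe_count)) {
        fprintf(stderr, "sq_wqe_count %u is not a power of 2\n", sq_wqe_count);
        return 1;
    }
    return 0;
}
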
882 struct mlx5_ib_create_wq *ucmd) in create_user_rq() argument
890 if (!ucmd->buf_addr) in create_user_rq()
893 rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0); in create_user_rq()
912 rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE); in create_user_rq()
917 (unsigned long long)ucmd->buf_addr, rwq->buf_size, in create_user_rq()
921 err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db); in create_user_rq()
946 struct mlx5_ib_create_qp *ucmd) in _create_user_qp() argument
967 uar_index = ucmd->bfreg_index; in _create_user_qp()
972 ucmd->bfreg_index, true); in _create_user_qp()
997 err = set_user_buf_size(dev, qp, ucmd, base, attr); in _create_user_qp()
1001 if (ucmd->buf_addr && ubuffer->buf_size) { in _create_user_qp()
1002 ubuffer->buf_addr = ucmd->buf_addr; in _create_user_qp()
1047 err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db); in _create_user_qp()
1706 void *ucmd; member
1718 struct mlx5_ib_create_qp_rss *ucmd = params->ucmd; in create_rss_raw_qp_tir() local
1734 if (ucmd->comp_mask) { in create_rss_raw_qp_tir()
1739 if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER && in create_rss_raw_qp_tir()
1740 !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) { in create_rss_raw_qp_tir()
1771 if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) in create_rss_raw_qp_tir()
1776 if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER) in create_rss_raw_qp_tir()
1781 switch (ucmd->rx_hash_function) { in create_rss_raw_qp_tir()
1787 if (len != ucmd->rx_key_len) { in create_rss_raw_qp_tir()
1793 memcpy(rss_key, ucmd->rx_hash_key, len); in create_rss_raw_qp_tir()
1801 if (!ucmd->rx_hash_fields_mask) { in create_rss_raw_qp_tir()
1809 if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || in create_rss_raw_qp_tir()
1810 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) && in create_rss_raw_qp_tir()
1811 ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || in create_rss_raw_qp_tir()
1812 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) { in create_rss_raw_qp_tir()
1818 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || in create_rss_raw_qp_tir()
1819 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) in create_rss_raw_qp_tir()
1822 else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || in create_rss_raw_qp_tir()
1823 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) in create_rss_raw_qp_tir()
1827 outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || in create_rss_raw_qp_tir()
1828 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) in create_rss_raw_qp_tir()
1830 ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || in create_rss_raw_qp_tir()
1831 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) in create_rss_raw_qp_tir()
1833 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2; in create_rss_raw_qp_tir()
1842 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || in create_rss_raw_qp_tir()
1843 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) in create_rss_raw_qp_tir()
1846 else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || in create_rss_raw_qp_tir()
1847 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) in create_rss_raw_qp_tir()
1851 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || in create_rss_raw_qp_tir()
1852 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6)) in create_rss_raw_qp_tir()
1855 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) || in create_rss_raw_qp_tir()
1856 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) in create_rss_raw_qp_tir()
1859 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || in create_rss_raw_qp_tir()
1860 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP)) in create_rss_raw_qp_tir()
1863 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) || in create_rss_raw_qp_tir()
1864 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) in create_rss_raw_qp_tir()
1867 if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) in create_rss_raw_qp_tir()
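
create_rss_raw_qp_tir() spends most of these lines translating ucmd->rx_hash_fields_mask into TIR hash fields: IPv4 and IPv6 selectors are mutually exclusive (lines 1809-1812), and the L4 selectors are folded into an outer_l4 bitmap that may carry at most one bit (lines 1827 onward). A self-contained sketch of that validation, with the flag values invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit values; the real MLX5_RX_HASH_* flags live in
 * include/uapi/rdma/mlx5-abi.h. */
#define RX_HASH_SRC_IPV4      (1 << 0)
#define RX_HASH_DST_IPV4      (1 << 1)
#define RX_HASH_SRC_IPV6      (1 << 2)
#define RX_HASH_DST_IPV6      (1 << 3)
#define RX_HASH_SRC_PORT_TCP  (1 << 4)
#define RX_HASH_DST_PORT_TCP  (1 << 5)
#define RX_HASH_SRC_PORT_UDP  (1 << 6)
#define RX_HASH_DST_PORT_UDP  (1 << 7)
#define RX_HASH_IPSEC_SPI     (1 << 8)

static int check_rx_hash_fields(uint64_t mask)
{
    /* Lines 1809-1812: a TIR hashes IPv4 or IPv6 headers, never both. */
    if ((mask & (RX_HASH_SRC_IPV4 | RX_HASH_DST_IPV4)) &&
        (mask & (RX_HASH_SRC_IPV6 | RX_HASH_DST_IPV6)))
        return -1;

    /* Lines 1827-1833: fold the L4 selectors into one bitmap
     * (bit 0: TCP ports, bit 1: UDP ports, bit 2: IPsec SPI). */
    unsigned outer_l4 =
        !!(mask & (RX_HASH_SRC_PORT_TCP | RX_HASH_DST_PORT_TCP)) << 0 |
        !!(mask & (RX_HASH_SRC_PORT_UDP | RX_HASH_DST_PORT_UDP)) << 1 |
        !!(mask & RX_HASH_IPSEC_SPI) << 2;

    /* At most one L4 selector may be set: reject any two-bit combination. */
    if (outer_l4 & (outer_l4 - 1))
        return -1;

    return 0;
}

int main(void)
{
    printf("%d\n", check_rx_hash_fields(RX_HASH_SRC_IPV4 | RX_HASH_SRC_PORT_TCP)); /* 0 */
    printf("%d\n", check_rx_hash_fields(RX_HASH_SRC_IPV4 | RX_HASH_DST_IPV6));     /* -1 */
    return 0;
}
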
2071 struct mlx5_ib_create_qp *ucmd = params->ucmd; in create_dci() local
2101 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd); in create_dci()
2107 if (ucmd->rq_wqe_shift != qp->rq.wqe_shift || in create_dci()
2108 ucmd->rq_wqe_count != qp->rq.wqe_cnt) in create_dci()
2111 if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz))) in create_dci()
2121 &inlen, base, ucmd); in create_dci()
2126 MLX5_SET(create_qp_in, in, ece, ucmd->ece_options); in create_dci()
2150 ucmd->dci_streams.log_num_concurent); in create_dci()
2152 ucmd->dci_streams.log_num_errored); in create_dci()
2232 struct mlx5_ib_create_qp *ucmd = params->ucmd; in create_user_qp() local
2268 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd); in create_user_qp()
2274 if (ucmd->rq_wqe_shift != qp->rq.wqe_shift || in create_user_qp()
2275 ucmd->rq_wqe_count != qp->rq.wqe_cnt) in create_user_qp()
2278 if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz))) in create_user_qp()
2289 &inlen, base, ucmd); in create_user_qp()
2297 MLX5_SET(create_qp_in, in, ece, ucmd->ece_options); in create_user_qp()
2390 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr; in create_user_qp()
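
create_dci() and create_user_qp() share a handshake: set_rq_size() recomputes the RQ geometry from the verbs attributes, then lines 2107-2108 and 2274-2275 insist that userspace arrived at the same wqe_cnt and wqe_shift, since both sides address the same user-mapped ring. A hypothetical mirror of that agreement check:

#include <stdint.h>
#include <stdio.h>

/* The kernel recomputes the RQ geometry and requires userspace, which
 * laid out the shared buffer, to have computed the very same values. */
struct rq_geom {
    uint32_t wqe_cnt;
    uint32_t wqe_shift;
};

static int check_rq_agreement(const struct rq_geom *kern,
                              uint32_t user_wqe_cnt, uint32_t user_wqe_shift)
{
    if (user_wqe_shift != kern->wqe_shift || user_wqe_cnt != kern->wqe_cnt)
        return -1;                      /* kernel returns -EINVAL */
    return 0;
}

int main(void)
{
    struct rq_geom k = { .wqe_cnt = 256, .wqe_shift = 6 };
    printf("%d\n", check_rq_agreement(&k, 256, 6));   /* 0: layouts agree */
    printf("%d\n", check_rq_agreement(&k, 128, 6));   /* -1: mismatch */
    return 0;
}
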
2732 struct mlx5_ib_create_qp *ucmd = params->ucmd; in create_dct() local
2748 MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key); in create_dct()
2751 MLX5_SET(dctc, dctc, ece, ucmd->ece_options); in create_dct()
2871 void *ucmd, struct ib_qp_init_attr *attr) in process_vendor_flags() argument
2878 flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags; in process_vendor_flags()
2880 flags = ((struct mlx5_ib_create_qp *)ucmd)->flags; in process_vendor_flags()
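
process_vendor_flags() takes ucmd as a bare void * because the create command layout differs between RSS and ordinary QPs; lines 2878 and 2880 cast to whichever struct matches the QP type before reading flags. A reduced sketch of that dispatch (struct names hypothetical):

#include <stdint.h>
#include <stdio.h>

struct create_qp     { uint32_t flags; /* ... */ };
struct create_qp_rss { uint32_t flags; /* ... */ };

static uint32_t vendor_flags(int is_rss_qp, void *ucmd)
{
    /* The flags word is read through the struct matching the QP type. */
    return is_rss_qp ? ((struct create_qp_rss *)ucmd)->flags
                     : ((struct create_qp *)ucmd)->flags;
}

int main(void)
{
    struct create_qp cmd = { .flags = 0x4 };
    printf("flags: 0x%x\n", vendor_flags(0, &cmd));
    return 0;
}
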
3034 size_t ucmd = sizeof(struct mlx5_ib_create_qp); in process_udata_size() local
3040 params->ucmd_size = ucmd; in process_udata_size()
3050 params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd); in process_udata_size()
3061 ucmd = sizeof(struct mlx5_ib_create_qp_rss); in process_udata_size()
3062 params->ucmd_size = ucmd; in process_udata_size()
3063 if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd)) in process_udata_size()
3066 params->inlen = min(ucmd, inlen); in process_udata_size()
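
process_udata_size() implements the usual extensible-ABI rule: the kernel consumes at most sizeof() of the command struct it knows, and a longer input is only legal if every byte past that point is zero (lines 3063-3066, via ib_is_udata_cleared()). A userspace sketch of the same negotiation (helper names hypothetical):

#include <stddef.h>
#include <stdio.h>

static int tail_is_cleared(const unsigned char *buf, size_t off, size_t len)
{
    while (len--)
        if (buf[off++])
            return 0;
    return 1;
}

static long accept_inlen(const unsigned char *udata, size_t inlen,
                         size_t ucmd_size)
{
    /* Extra bytes from a newer userspace must be zero, so unknown
     * future fields can only mean "not requested". */
    if (inlen > ucmd_size &&
        !tail_is_cleared(udata, ucmd_size, inlen - ucmd_size))
        return -1;                      /* nonzero bytes we can't parse */
    return inlen < ucmd_size ? (long)inlen : (long)ucmd_size;  /* min() */
}

int main(void)
{
    unsigned char buf[64] = {0};
    printf("%ld\n", accept_inlen(buf, sizeof(buf), 48));  /* 48 */
    buf[60] = 1;
    printf("%ld\n", accept_inlen(buf, sizeof(buf), 48));  /* -1 */
    return 0;
}
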
3164 struct mlx5_ib_create_qp *ucmd = params->ucmd; in get_qp_uidx() local
3172 return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx); in get_qp_uidx()

3263 params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL); in mlx5_ib_create_qp()
3264 if (!params.ucmd) in mlx5_ib_create_qp()
3267 err = ib_copy_from_udata(params.ucmd, udata, params.inlen); in mlx5_ib_create_qp()
3275 err = process_vendor_flags(dev, qp, params.ucmd, attr); in mlx5_ib_create_qp()
3295 kfree(params.ucmd); in mlx5_ib_create_qp()
3296 params.ucmd = NULL; in mlx5_ib_create_qp()
3323 kfree(params.ucmd); in mlx5_ib_create_qp()
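
mlx5_ib_create_qp() owns the ucmd buffer for the whole call: kzalloc() on line 3263, ib_copy_from_udata() of params.inlen bytes on line 3267, then kfree() plus a NULL store on lines 3295-3296 once the command has been consumed, which keeps the common exit path's kfree() on line 3323 harmless. A hypothetical userspace mirror of that lifecycle:

#include <stdlib.h>
#include <string.h>

struct qp_params { void *ucmd; size_t ucmd_size; size_t inlen; };

static int load_ucmd(struct qp_params *p, const void *udata)
{
    p->ucmd = calloc(1, p->ucmd_size);  /* kzalloc() analogue: zeroed */
    if (!p->ucmd)
        return -1;
    memcpy(p->ucmd, udata, p->inlen);   /* ib_copy_from_udata() analogue */
    return 0;
}

int main(void)
{
    unsigned char udata[32] = {0};
    struct qp_params p = { .ucmd_size = 64, .inlen = sizeof(udata) };

    if (load_ucmd(&p, udata))
        return 1;
    /* ... the QP would be created from p.ucmd here ... */
    free(p.ucmd);
    p.ucmd = NULL;      /* lines 3295-3296: disarm the later cleanup */
    free(p.ucmd);       /* the exit-path kfree(); free(NULL) is a no-op */
    return 0;
}
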
4097 const struct mlx5_ib_modify_qp *ucmd, in __mlx5_ib_modify_qp() argument
4348 if (ucmd->burst_info.max_burst_sz) { in __mlx5_ib_modify_qp()
4352 ucmd->burst_info.max_burst_sz; in __mlx5_ib_modify_qp()
4359 if (ucmd->burst_info.typical_pkt_sz) { in __mlx5_ib_modify_qp()
4363 ucmd->burst_info.typical_pkt_sz; in __mlx5_ib_modify_qp()
4379 ucmd->ece_options : 0; in __mlx5_ib_modify_qp()
4488 int attr_mask, struct mlx5_ib_modify_qp *ucmd, in mlx5_ib_modify_dct() argument
4505 if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options) in mlx5_ib_modify_dct()
4512 MLX5_SET(dctc, dctc, ece, ucmd->ece_options); in mlx5_ib_modify_dct()
4652 struct mlx5_ib_modify_qp ucmd = {}; in mlx5_ib_modify_qp() local
4667 if (udata->inlen < offsetofend(typeof(ucmd), ece_options)) in mlx5_ib_modify_qp()
4670 if (udata->inlen > sizeof(ucmd) && in mlx5_ib_modify_qp()
4671 !ib_is_udata_cleared(udata, sizeof(ucmd), in mlx5_ib_modify_qp()
4672 udata->inlen - sizeof(ucmd))) in mlx5_ib_modify_qp()
4675 if (ib_copy_from_udata(&ucmd, udata, in mlx5_ib_modify_qp()
4676 min(udata->inlen, sizeof(ucmd)))) in mlx5_ib_modify_qp()
4679 if (ucmd.comp_mask || in mlx5_ib_modify_qp()
4680 memchr_inv(&ucmd.burst_info.reserved, 0, in mlx5_ib_modify_qp()
4681 sizeof(ucmd.burst_info.reserved))) in mlx5_ib_modify_qp()
4692 return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata); in mlx5_ib_modify_qp()
4742 new_state, &ucmd, &resp, udata); in mlx5_ib_modify_qp()
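
mlx5_ib_modify_qp() validates udata in three steps before acting on it: a minimum length via offsetofend() (line 4667), the cleared-tail rule for longer inputs (lines 4670-4673), and a zero check on comp_mask and the reserved bytes (lines 4679-4681). A reduced, runnable sketch of the pattern (the struct layout here is hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* offsetofend(), as used on line 4667: one past the end of a member. */
#define offsetofend(TYPE, MEMBER) \
    (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Hypothetical reduced layout standing in for struct mlx5_ib_modify_qp. */
struct modify_qp_cmd {
    uint32_t comp_mask;
    uint32_t ece_options;
    uint8_t  reserved[4];
};

static int all_zero(const void *p, size_t n)    /* memchr_inv() analogue */
{
    const unsigned char *b = p;
    while (n--)
        if (*b++)
            return 0;
    return 1;
}

int main(void)
{
    unsigned char udata[sizeof(struct modify_qp_cmd)] = {0};
    size_t inlen = sizeof(udata);
    struct modify_qp_cmd cmd = {0};

    /* Line 4667: the command must at least reach ece_options. */
    if (inlen < offsetofend(struct modify_qp_cmd, ece_options))
        return 1;
    /* Lines 4675-4676: copy only the overlap of the two ABI sizes. */
    memcpy(&cmd, udata, inlen < sizeof(cmd) ? inlen : sizeof(cmd));
    /* Lines 4679-4681: known-but-unsupported fields must stay zero. */
    if (cmd.comp_mask || !all_zero(cmd.reserved, sizeof(cmd.reserved)))
        return 1;
    puts("modify command accepted");
    return 0;
}
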
5303 struct mlx5_ib_create_wq *ucmd, in set_user_rq_size() argument
5310 if (!ucmd->rq_wqe_count) in set_user_rq_size()
5313 rwq->wqe_count = ucmd->rq_wqe_count; in set_user_rq_size()
5314 rwq->wqe_shift = ucmd->rq_wqe_shift; in set_user_rq_size()
5342 struct mlx5_ib_create_wq ucmd = {}; in prepare_user_rq() local
5353 if (udata->inlen > sizeof(ucmd) && in prepare_user_rq()
5354 !ib_is_udata_cleared(udata, sizeof(ucmd), in prepare_user_rq()
5355 udata->inlen - sizeof(ucmd))) { in prepare_user_rq()
5360 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) { in prepare_user_rq()
5365 if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) { in prepare_user_rq()
5368 } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) { in prepare_user_rq()
5373 if ((ucmd.single_stride_log_num_of_bytes < in prepare_user_rq()
5375 (ucmd.single_stride_log_num_of_bytes > in prepare_user_rq()
5378 ucmd.single_stride_log_num_of_bytes, in prepare_user_rq()
5384 ucmd.single_wqe_log_num_of_strides)) { in prepare_user_rq()
5388 ucmd.single_wqe_log_num_of_strides, in prepare_user_rq()
5396 ucmd.single_stride_log_num_of_bytes; in prepare_user_rq()
5397 rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides; in prepare_user_rq()
5398 rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en; in prepare_user_rq()
5402 err = set_user_rq_size(dev, init_attr, &ucmd, rwq); in prepare_user_rq()
5408 err = create_user_rq(dev, pd, udata, rwq, &ucmd); in prepare_user_rq()
5414 rwq->user_index = ucmd.user_index; in prepare_user_rq()
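
prepare_user_rq() layers striding-RQ validation on top of the same udata rules: comp_mask may carry only MLX5_IB_CREATE_WQ_STRIDING_RQ, and the stride geometry logs must fall inside device limits (lines 5373-5388). A sketch of those range checks with illustrative bounds (the real limits come from the MLX5_*_SINGLE_STRIDE/WQE macros and the device caps):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bounds standing in for the kernel's stride limits. */
#define MIN_STRIDE_LOG_BYTES   6
#define MAX_STRIDE_LOG_BYTES   13
#define MIN_LOG_NUM_STRIDES    9
#define MAX_LOG_NUM_STRIDES    16

static int check_striding(uint32_t log_stride_bytes, uint32_t log_num_strides)
{
    /* Mirrors lines 5373-5378: stride size must fit the device window. */
    if (log_stride_bytes < MIN_STRIDE_LOG_BYTES ||
        log_stride_bytes > MAX_STRIDE_LOG_BYTES)
        return -1;
    /* Mirrors lines 5384-5388: so must the strides-per-WQE count. */
    if (log_num_strides < MIN_LOG_NUM_STRIDES ||
        log_num_strides > MAX_LOG_NUM_STRIDES)
        return -1;
    return 0;
}

int main(void)
{
    printf("%d\n", check_striding(6, 9));    /* 0: within bounds */
    printf("%d\n", check_striding(5, 9));    /* -1: stride too small */
    return 0;
}
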
5580 struct mlx5_ib_modify_wq ucmd = {}; in mlx5_ib_modify_wq() local
5593 if (udata->inlen > sizeof(ucmd) && in mlx5_ib_modify_wq()
5594 !ib_is_udata_cleared(udata, sizeof(ucmd), in mlx5_ib_modify_wq()
5595 udata->inlen - sizeof(ucmd))) in mlx5_ib_modify_wq()
5598 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) in mlx5_ib_modify_wq()
5601 if (ucmd.comp_mask || ucmd.reserved) in mlx5_ib_modify_wq()