Lines Matching refs:rwq

865 struct mlx5_ib_rwq *rwq, struct ib_udata *udata) in destroy_user_rq() argument
873 if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP) in destroy_user_rq()
876 mlx5_ib_db_unmap_user(context, &rwq->db); in destroy_user_rq()
877 ib_umem_release(rwq->umem); in destroy_user_rq()
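
The three matches above are the whole user-resource teardown path. The identifiers are consistent with drivers/infiniband/hw/mlx5/qp.c in the Linux kernel; a minimal sketch of how the fragments fit together follows. The rdma_udata_to_drv_context() lookup and the delay-drop counter decrement are reconstructions from the surrounding driver code, not shown in the listing:

/* Sketch only, reconstructed from the matched lines above. */
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{
        struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);

        if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
                atomic_dec(&dev->delay_drop.rqs_cnt);   /* assumed counter */

        mlx5_ib_db_unmap_user(context, &rwq->db);       /* doorbell record */
        ib_umem_release(rwq->umem);                     /* unpin WQ buffer */
}
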
881 struct ib_udata *udata, struct mlx5_ib_rwq *rwq, in create_user_rq() argument
893 rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0); in create_user_rq()
894 if (IS_ERR(rwq->umem)) { in create_user_rq()
896 err = PTR_ERR(rwq->umem); in create_user_rq()
901 rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT, in create_user_rq()
902 page_offset, 64, &rwq->rq_page_offset); in create_user_rq()
909 rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size); in create_user_rq()
910 rwq->page_shift = order_base_2(page_size); in create_user_rq()
911 rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT; in create_user_rq()
912 rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE); in create_user_rq()
917 (unsigned long long)ucmd->buf_addr, rwq->buf_size, in create_user_rq()
918 ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas, in create_user_rq()
921 err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db); in create_user_rq()
930 ib_umem_release(rwq->umem); in create_user_rq()
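
Stitched together, the create_user_rq() matches describe the standard mlx5 user-buffer flow: pin the user WQ buffer, pick the best DMA page size that the RQ context can express (with the buffer offset quantized for the page_offset field), derive the PAS count and page shift, then map the user doorbell record. A hedged sketch, with input validation and the debug print elided; in the macro call, wq, log_wq_pg_sz and page_offset are PRM field tokens, not variables:

static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                          struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
                          struct mlx5_ib_create_wq *ucmd)
{
        struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);
        unsigned long page_size;
        int err;

        /* Pin the user-space WQ buffer for DMA. */
        rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
        if (IS_ERR(rwq->umem))
                return PTR_ERR(rwq->umem);

        /* Best page size expressible in log_wq_pg_sz; 64 is the
         * quantization scale for the page_offset field. */
        page_size = mlx5_umem_find_best_quantized_pgoff(
                rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
                page_offset, 64, &rwq->rq_page_offset);
        if (!page_size) {
                err = -EINVAL;
                goto err_umem;
        }

        rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size);
        rwq->page_shift = order_base_2(page_size);
        rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

        /* Map the user doorbell record page. */
        err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db);
        if (err)
                goto err_umem;
        return 0;

err_umem:
        ib_umem_release(rwq->umem);
        return err;
}
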
5145 struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp); in mlx5_ib_wq_event() local
5146 struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device); in mlx5_ib_wq_event()
5149 if (rwq->ibwq.event_handler) { in mlx5_ib_wq_event()
5150 event.device = rwq->ibwq.device; in mlx5_ib_wq_event()
5151 event.element.wq = &rwq->ibwq; in mlx5_ib_wq_event()
5161 rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context); in mlx5_ib_wq_event()
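
mlx5_ib_wq_event() is the firmware-event bridge: it maps an event on the tracked core QP back to the owning ib_wq and invokes the consumer's handler. A sketch, assuming the usual single fatal-error mapping for the event type:

static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
        struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
        struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
        struct ib_event event;

        if (rwq->ibwq.event_handler) {
                event.device = rwq->ibwq.device;
                event.element.wq = &rwq->ibwq;
                switch (type) {
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:    /* assumed mapping */
                        event.event = IB_EVENT_WQ_FATAL;
                        break;
                default:
                        mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n",
                                     type, core_qp->qpn);
                        return;
                }

                rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
        }
}
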
5186 static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, in create_rq() argument
5205 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; in create_rq()
5215 MLX5_SET(rqc, rqc, user_index, rwq->user_index); in create_rq()
5221 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ? in create_rq()
5232 MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride); in create_rq()
5233 if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) { in create_rq()
5242 MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en); in create_rq()
5244 rwq->single_stride_log_num_of_bytes - in create_rq()
5247 fw_map[rwq->log_num_strides - in create_rq()
5250 MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size); in create_rq()
5252 MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset); in create_rq()
5253 MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size); in create_rq()
5254 MLX5_SET(wq, wq, wq_signature, rwq->wq_sig); in create_rq()
5255 MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma); in create_rq()
5284 mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0); in create_rq()
5285 err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp); in create_rq()
5291 mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp); in create_rq()
5293 rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP; in create_rq()
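
create_rq() assembles the PRM CREATE_RQ command: the inline buffer is sized as the create_rq_in struct plus one 64-bit physical-address (PAS) entry per DMA block, the rqc context and its nested wq context are filled from the rwq fields captured above, the PAS array is populated from the umem, and the RQ is created as a tracked core QP so mlx5_ib_wq_event() can find it later. A condensed sketch; the elision comments mark fields the listing does not show and whose exact handling is an assumption:

static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
                     struct ib_wq_init_attr *init_attr)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        __be64 *rq_pas0;
        void *in, *rqc, *wq;
        int inlen, err;

        /* Header plus one PAS entry per DMA block of the pinned buffer. */
        inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        MLX5_SET(rqc, rqc, user_index, rwq->user_index);
        /* ... rq state, cqn and the MLX5_WQ_TYPE selection driven by
         * MLX5_IB_WQ_FLAGS_STRIDING_RQ elided ... */

        wq = MLX5_ADDR_OF(rqc, rqc, wq);
        MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
        /* ... striding-RQ fields (two_byte_shift_en, stride byte/count
         * sizes mapped through fw_map) when striding is enabled ... */
        MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
        MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
        MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
        MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
        MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);

        rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
        mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);
        err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
        /* ... on success, optional delay-drop enablement sets
         * MLX5_IB_WQ_FLAGS_DELAY_DROP; if that step fails the RQ is torn
         * down again with mlx5_core_destroy_rq_tracked() ... */
        kvfree(in);
        return err;
}
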
5304 struct mlx5_ib_rwq *rwq) in set_user_rq_size() argument
5313 rwq->wqe_count = ucmd->rq_wqe_count; in set_user_rq_size()
5314 rwq->wqe_shift = ucmd->rq_wqe_shift; in set_user_rq_size()
5315 if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size)) in set_user_rq_size()
5318 rwq->log_rq_stride = rwq->wqe_shift; in set_user_rq_size()
5319 rwq->log_rq_size = ilog2(rwq->wqe_count); in set_user_rq_size()
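
set_user_rq_size() is a pure derivation: buf_size = wqe_count << wqe_shift, rejected if the shift overflows the u32 destination (check_shl_overflow() stores the shifted value through its third argument and returns true on overflow). Worked through with hypothetical inputs, for illustration only:

        /* Hypothetical values: 1024 WQEs of 64 bytes each. */
        rwq->wqe_count = 1024;          /* ucmd->rq_wqe_count */
        rwq->wqe_shift = 6;             /* ucmd->rq_wqe_shift, 64-byte stride */
        check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size);
        /* buf_size      = 1024 << 6 = 65536 bytes, no overflow      */
        /* log_rq_stride = 6,  log_rq_size = ilog2(1024) = 10        */
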
5339 struct mlx5_ib_rwq *rwq) in prepare_user_rq() argument
5395 rwq->single_stride_log_num_of_bytes = in prepare_user_rq()
5397 rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides; in prepare_user_rq()
5398 rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en; in prepare_user_rq()
5399 rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ; in prepare_user_rq()
5402 err = set_user_rq_size(dev, init_attr, &ucmd, rwq); in prepare_user_rq()
5408 err = create_user_rq(dev, pd, udata, rwq, &ucmd); in prepare_user_rq()
5414 rwq->user_index = ucmd.user_index; in prepare_user_rq()
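
prepare_user_rq() glues the pieces together: it copies struct mlx5_ib_create_wq in from udata, captures the striding-RQ parameters when the corresponding comp_mask bit is set, then sizes and pins the queue. A sketch under those assumptions; the udata copy, size checks, and device capability checks are elided, and MLX5_IB_CREATE_WQ_STRIDING_RQ is the uapi comp_mask bit this is assumed to test:

static int prepare_user_rq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr,
                           struct ib_udata *udata, struct mlx5_ib_rwq *rwq)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_create_wq ucmd = {};
        int err;

        /* ... ib_copy_from_udata(&ucmd, udata, ...) plus required-size
         * and capability validation elided ... */

        if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
                rwq->single_stride_log_num_of_bytes =
                        ucmd.single_stride_log_num_of_bytes;
                rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
                rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
                rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
        }

        err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
        if (err)
                return err;

        err = create_user_rq(dev, pd, udata, rwq, &ucmd);
        if (err)
                return err;

        rwq->user_index = ucmd.user_index;
        return 0;
}
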
5423 struct mlx5_ib_rwq *rwq; in mlx5_ib_create_wq() local
5442 rwq = kzalloc(sizeof(*rwq), GFP_KERNEL); in mlx5_ib_create_wq()
5443 if (!rwq) in mlx5_ib_create_wq()
5445 err = prepare_user_rq(pd, init_attr, udata, rwq); in mlx5_ib_create_wq()
5448 err = create_rq(rwq, pd, init_attr); in mlx5_ib_create_wq()
5458 rwq->ibwq.wq_num = rwq->core_qp.qpn; in mlx5_ib_create_wq()
5459 rwq->ibwq.state = IB_WQS_RESET; in mlx5_ib_create_wq()
5468 rwq->core_qp.event = mlx5_ib_wq_event; in mlx5_ib_create_wq()
5469 rwq->ibwq.event_handler = init_attr->event_handler; in mlx5_ib_create_wq()
5470 return &rwq->ibwq; in mlx5_ib_create_wq()
5473 mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp); in mlx5_ib_create_wq()
5475 destroy_user_rq(dev, pd, rwq, udata); in mlx5_ib_create_wq()
5477 kfree(rwq); in mlx5_ib_create_wq()
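
The matches in mlx5_ib_create_wq() show a two-stage constructor with an unwind ladder: prepare_user_rq() (sizing and pinning) then create_rq() (firmware object), each failure undoing exactly the stages already completed. A sketch of that control flow; the response copy to udata is elided and the label names are assumptions:

        rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
        if (!rwq)
                return ERR_PTR(-ENOMEM);

        err = prepare_user_rq(pd, init_attr, udata, rwq);
        if (err)
                goto err;
        err = create_rq(rwq, pd, init_attr);
        if (err)
                goto err_user_rq;

        rwq->ibwq.wq_num = rwq->core_qp.qpn;    /* expose HW RQ number */
        rwq->ibwq.state = IB_WQS_RESET;
        /* ... copy response to udata; on failure goto err_copy ... */
        rwq->core_qp.event = mlx5_ib_wq_event;  /* hook FW events */
        rwq->ibwq.event_handler = init_attr->event_handler;
        return &rwq->ibwq;

err_copy:
        mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
err_user_rq:
        destroy_user_rq(dev, pd, rwq, udata);
err:
        kfree(rwq);
        return ERR_PTR(err);
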
5484 struct mlx5_ib_rwq *rwq = to_mrwq(wq); in mlx5_ib_destroy_wq() local
5487 ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp); in mlx5_ib_destroy_wq()
5490 destroy_user_rq(dev, wq->pd, rwq, udata); in mlx5_ib_destroy_wq()
5491 kfree(rwq); in mlx5_ib_destroy_wq()
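
mlx5_ib_destroy_wq() runs the same ladder in reverse, releasing user resources only once the firmware object is gone. A sketch, assuming the int-returning mlx5_core_destroy_rq_tracked() that the ret assignment above implies:

static int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(wq->device);
        struct mlx5_ib_rwq *rwq = to_mrwq(wq);
        int ret;

        ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
        if (ret)
                return ret;     /* firmware still owns the RQ; keep resources */
        destroy_user_rq(dev, wq->pd, rwq, udata);
        kfree(rwq);
        return 0;
}
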
5579 struct mlx5_ib_rwq *rwq = to_mrwq(wq); in mlx5_ib_modify_wq() local
5656 err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in); in mlx5_ib_modify_wq()
5658 rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state; in mlx5_ib_modify_wq()
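
mlx5_ib_modify_wq() translates the ib_wq state names to RQC states, issues MODIFY_RQ, and mirrors the result back into ibwq.state. The ternary on the last matched line exists because the two enums agree for RESET/RDY but not for the error state (IB_WQS_ERR and MLX5_RQC_STATE_ERR have different values). A sketch of the core of the function; vlan/counter-set handling is elided and deriving the states from wq_attr is an assumption:

        void *rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
        int curr_wq_state = wq_attr->curr_wq_state;     /* assumed source */
        int wq_state = wq_attr->wq_state;

        /* IB and RQC states agree for RESET/RDY but not for ERR. */
        if (curr_wq_state == IB_WQS_ERR)
                curr_wq_state = MLX5_RQC_STATE_ERR;
        if (wq_state == IB_WQS_ERR)
                wq_state = MLX5_RQC_STATE_ERR;
        MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
        MLX5_SET(rqc, rqc, state, wq_state);

        err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
        if (!err)
                rwq->ibwq.state =
                        (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
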