Lines Matching full:rdma

Each entry below shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark the entries where the matched rdma variable is declared.

5  * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
8 #include <rdma/rw.h>
20 /* Each R/W context contains state for one chain of RDMA Read or
27 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
55 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges) in svc_rdma_get_rw_ctxt() argument
57 struct ib_device *dev = rdma->sc_cm_id->device; in svc_rdma_get_rw_ctxt()
62 spin_lock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
63 node = llist_del_first(&rdma->sc_rw_ctxts); in svc_rdma_get_rw_ctxt()
64 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
87 trace_svcrdma_rwctx_empty(rdma, sges); in svc_rdma_get_rw_ctxt()
98 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_put_rw_ctxt() argument
101 __svc_rdma_put_rw_ctxt(ctxt, &rdma->sc_rw_ctxts); in svc_rdma_put_rw_ctxt()
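
The get/put pair above suggests a per-transport cache of R/W contexts kept on a lock-free llist: the consumer takes sc_rw_ctxt_lock and pops with llist_del_first(), falling back to a fresh allocation (probably NUMA-local, given the ibdev_to_node() usage visible at line 207) when the list is empty, and trace_svcrdma_rwctx_empty() reports the case where no context could be produced. A minimal sketch of that recycle-or-allocate pattern follows; the struct and its fields are hypothetical stand-ins, and the later sketches assume the same kernel headers.

/*
 * Minimal sketch of the recycle-or-allocate pattern suggested by the
 * fragments above. struct demo_rw_ctxt and its fields are hypothetical
 * stand-ins; only sc_rw_ctxts, sc_rw_ctxt_lock, and sc_cm_id come from
 * the fragments themselves.
 */
#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sunrpc/svc_rdma.h>
#include <rdma/ib_verbs.h>

struct demo_rw_ctxt {
	struct llist_node	node;	/* links free contexts together */
	/* ... per-chain scatterlist and rdma_rw_ctx state ... */
};

static struct demo_rw_ctxt *demo_get_rw_ctxt(struct svcxprt_rdma *rdma)
{
	struct llist_node *n;

	/* Producers push with plain llist_add(); only the consumer side
	 * needs the spinlock, because llist_del_first() must not race
	 * with other deleters.
	 */
	spin_lock(&rdma->sc_rw_ctxt_lock);
	n = llist_del_first(&rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
	if (n)
		return llist_entry(n, struct demo_rw_ctxt, node);

	/* Cache miss: allocate close to the device's NUMA node. A NULL
	 * return here is the condition trace_svcrdma_rwctx_empty() reports.
	 */
	return kmalloc_node(sizeof(struct demo_rw_ctxt), GFP_KERNEL,
			    ibdev_to_node(rdma->sc_cm_id->device));
}

static void demo_put_rw_ctxt(struct svcxprt_rdma *rdma,
			     struct demo_rw_ctxt *ctxt)
{
	/* The return path is lock-free: llist_add() is safe against both
	 * concurrent producers and the locked consumer above.
	 */
	llist_add(&ctxt->node, &rdma->sc_rw_ctxts);
}
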
106 * @rdma: transport about to be destroyed
109 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) in svc_rdma_destroy_rw_ctxts() argument
114 while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) { in svc_rdma_destroy_rw_ctxts()
122 * @rdma: controlling transport instance
124 * @offset: RDMA offset
125 * @handle: RDMA tag/handle
131 static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma, in svc_rdma_rw_ctx_init() argument
138 ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num, in svc_rdma_rw_ctx_init()
142 trace_svcrdma_dma_map_rw_err(rdma, offset, handle, in svc_rdma_rw_ctx_init()
144 svc_rdma_put_rw_ctxt(rdma, ctxt); in svc_rdma_rw_ctx_init()
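
The wrapper at line 131 looks like a thin front end for rdma_rw_ctx_init() from <rdma/rw.h>: it DMA-maps the context's scatterlist against the transport's QP and port for a given RDMA offset and handle, and on failure emits the dma_map_rw_err tracepoint and puts the context straight back on the cache. A hedged reconstruction follows; the svc_rdma_rw_ctxt field names and the trailing tracepoint arguments are assumptions, while the rdma_rw_ctx_init() call matches the signature in <rdma/rw.h>.

#include <rdma/rw.h>

/* Hedged reconstruction; rw_ctx, rw_sg_table, and rw_nents are assumed
 * field names.
 */
static int demo_rw_ctx_init(struct svcxprt_rdma *rdma,
			    struct svc_rdma_rw_ctxt *ctxt,
			    u64 offset, u32 handle,
			    enum dma_data_direction direction)
{
	int ret;

	/* Map the scatterlist and prepare RDMA Read/Write WRs that target
	 * <handle, offset> on the peer. On success rdma_rw_ctx_init()
	 * returns the number of send-queue entries the chain will need,
	 * which callers can fold into their SQ budget.
	 */
	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, handle, direction);
	if (ret < 0) {
		trace_svcrdma_dma_map_rw_err(rdma, offset, handle,
					     ctxt->rw_nents, ret);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	return ret;
}
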
151 * @rdma: controlling transport instance
154 void svc_rdma_cc_init(struct svcxprt_rdma *rdma, in svc_rdma_cc_init() argument
160 svc_rdma_send_cid_init(rdma, cid); in svc_rdma_cc_init()
168 * @rdma: controlling transport instance
172 void svc_rdma_cc_release(struct svcxprt_rdma *rdma, in svc_rdma_cc_release() argument
186 rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_cc_release()
187 rdma->sc_port_num, ctxt->rw_sg_table.sgl, in svc_rdma_cc_release()
197 llist_add_batch(first, last, &rdma->sc_rw_ctxts); in svc_rdma_cc_release()
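
svc_rdma_cc_release() appears to tear down each chained rdma_rw_ctx and then hand the whole chain of contexts back to the cache with one llist_add_batch() call, paying a single atomic operation instead of one per context. A sketch of that batching idiom follows; the svc_rdma_chunk_ctxt type and the cc_rwctxts, rw_list, rw_node, and rw_nents names are assumptions.

static void demo_cc_release(struct svcxprt_rdma *rdma,
			    struct svc_rdma_chunk_ctxt *cc,
			    enum dma_data_direction dir)
{
	struct llist_node *first = NULL, *last = NULL;
	struct svc_rdma_rw_ctxt *ctxt, *next;

	list_for_each_entry_safe(ctxt, next, &cc->cc_rwctxts, rw_list) {
		list_del(&ctxt->rw_list);

		/* Unmap the scatterlist and release the WR resources that
		 * rdma_rw_ctx_init() set up for this context.
		 */
		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);

		/* Thread the context onto a private singly-linked chain. */
		ctxt->rw_node.next = first;
		if (!first)
			last = &ctxt->rw_node;
		first = &ctxt->rw_node;
	}

	/* One atomic operation returns the entire chain to the cache. */
	if (first)
		llist_add_batch(first, last, &rdma->sc_rw_ctxts);
}
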
201 svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, in svc_rdma_write_info_alloc() argument
207 ibdev_to_node(rdma->sc_cm_id->device)); in svc_rdma_write_info_alloc()
211 info->wi_rdma = rdma; in svc_rdma_write_info_alloc()
213 svc_rdma_cc_init(rdma, &info->wi_cc); in svc_rdma_write_info_alloc()
235 * @rdma: controlling transport
238 void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma, in svc_rdma_reply_chunk_release() argument
245 svc_rdma_cc_release(rdma, cc, DMA_TO_DEVICE); in svc_rdma_reply_chunk_release()
260 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_reply_done() local
273 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_reply_done()
285 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_write_done() local
303 svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount); in svc_rdma_write_done()
306 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_write_done()
312 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
319 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_wc_read_done() local
325 svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount); in svc_rdma_wc_read_done()
333 spin_lock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_read_done()
334 list_add_tail(&ctxt->rc_list, &rdma->sc_read_complete_q); in svc_rdma_wc_read_done()
336 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags); in svc_rdma_wc_read_done()
337 spin_unlock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_read_done()
338 svc_xprt_enqueue(&rdma->sc_xprt); in svc_rdma_wc_read_done()
347 /* The RDMA Read has flushed, so the incoming RPC message in svc_rdma_wc_read_done()
351 svc_rdma_cc_release(rdma, cc, DMA_FROM_DEVICE); in svc_rdma_wc_read_done()
352 svc_rdma_recv_ctxt_put(rdma, ctxt); in svc_rdma_wc_read_done()
353 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_wc_read_done()
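
The three completion handlers above (svc_rdma_reply_done, svc_rdma_write_done, svc_rdma_wc_read_done) share a shape: recover the chunk context from the work completion, return the chain's send-queue credits via svc_rdma_wake_send_waiters(), and force the transport closed with svc_xprt_deferred_close() on any error path. The Read handler additionally queues the now-complete receive context so the upper layer can resume parsing the RPC message. A hedged sketch of that Read-completion flow follows; the container_of() chain (cc_cqe inside svc_rdma_chunk_ctxt, rc_cc inside svc_rdma_recv_ctxt) is an assumption, everything else appears in the fragments.

static void demo_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct svc_rdma_chunk_ctxt *cc =
		container_of(wc->wr_cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svc_rdma_recv_ctxt *ctxt =
		container_of(cc, struct svc_rdma_recv_ctxt, rc_cc);

	/* Give back the SQEs this Read chain consumed and wake anyone
	 * blocked in the SQ-full path of the post function.
	 */
	svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);

	if (wc->status == IB_WC_SUCCESS) {
		/* The pulled Read chunks are in place: queue the receive
		 * context so svc_xprt processing can finish the RPC.
		 */
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&ctxt->rc_list, &rdma->sc_read_complete_q);
		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		spin_unlock(&rdma->sc_rq_dto_lock);
		svc_xprt_enqueue(&rdma->sc_xprt);
		return;
	}

	/* The Read flushed: the RPC message is incomplete, so release the
	 * chain, drop the receive context, and close the transport.
	 */
	svc_rdma_cc_release(rdma, cc, DMA_FROM_DEVICE);
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}
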
362 static int svc_rdma_post_chunk_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_post_chunk_ctxt() argument
373 if (cc->cc_sqecount > rdma->sc_sq_depth) in svc_rdma_post_chunk_ctxt()
382 first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_post_chunk_ctxt()
383 rdma->sc_port_num, cqe, first_wr); in svc_rdma_post_chunk_ctxt()
389 &rdma->sc_sq_avail) > 0) { in svc_rdma_post_chunk_ctxt()
391 ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); in svc_rdma_post_chunk_ctxt()
398 trace_svcrdma_sq_full(rdma, &cc->cc_cid); in svc_rdma_post_chunk_ctxt()
399 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
400 wait_event(rdma->sc_send_wait, in svc_rdma_post_chunk_ctxt()
401 atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount); in svc_rdma_post_chunk_ctxt()
402 trace_svcrdma_sq_retry(rdma, &cc->cc_cid); in svc_rdma_post_chunk_ctxt()
405 trace_svcrdma_sq_post_err(rdma, &cc->cc_cid, ret); in svc_rdma_post_chunk_ctxt()
406 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_post_chunk_ctxt()
412 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
413 wake_up(&rdma->sc_send_wait); in svc_rdma_post_chunk_ctxt()
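
svc_rdma_post_chunk_ctxt() appears to do two things: fold every context's WRs into a single chain with rdma_rw_ctx_wrs(), then reserve send-queue entries on an atomic counter before calling ib_post_send(), sleeping on sc_send_wait when the reservation fails (the sq_full/sq_retry tracepoints). The tail of the fragment (atomic_add plus wake_up) is the give-back path when the post itself fails. A hedged reconstruction follows; cc_rwctxts and cc_cqe are assumed names, and the exact loop shape is a guess.

static int demo_post_chunk_ctxt(struct svcxprt_rdma *rdma,
				struct svc_rdma_chunk_ctxt *cc)
{
	struct ib_send_wr *first_wr = NULL;
	const struct ib_send_wr *bad_wr;
	struct ib_cqe *cqe = &cc->cc_cqe;
	struct svc_rdma_rw_ctxt *ctxt;
	int ret;

	/* A chain larger than the whole Send Queue can never be posted. */
	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	/* Link every context's WRs into one chain. Only one WR in the
	 * chain carries the completion cqe, so the done handler runs
	 * once per chain rather than once per context.
	 */
	list_for_each_entry(ctxt, &cc->cc_rwctxts, rw_list) {
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		/* Reserve SQEs up front; post only if the reservation held. */
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		/* SQ full: undo the reservation and sleep until completion
		 * handlers have returned enough credits.
		 */
		trace_svcrdma_sq_full(rdma, &cc->cc_cid);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma, &cc->cc_cid);
	} while (1);

	/* ib_post_send() failed: record it, close the transport, and hand
	 * the reserved credits back to any waiters.
	 */
	trace_svcrdma_sq_post_err(rdma, &cc->cc_cid, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return ret;
}
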
464 /* Construct RDMA Write WRs to send a portion of an xdr_buf containing
475 struct svcxprt_rdma *rdma = info->wi_rdma; in svc_rdma_build_writes() local
491 ctxt = svc_rdma_get_rw_ctxt(rdma, in svc_rdma_build_writes()
498 ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, seg->rs_handle, in svc_rdma_build_writes()
524 * svc_rdma_iov_write - Construct RDMA Writes from an iov
532 * %-EIO if an rdma-rw error occurred
543 * svc_rdma_pages_write - Construct RDMA Writes from pages
553 * %-EIO if an rdma-rw error occurred
567 * svc_rdma_xb_write - Construct RDMA Writes to write an xdr_buf
575 * %-EIO if an rdma-rw error occurred
604 static int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, in svc_rdma_send_write_chunk() argument
617 info = svc_rdma_write_info_alloc(rdma, chunk); in svc_rdma_send_write_chunk()
627 ret = svc_rdma_post_chunk_ctxt(rdma, cc); in svc_rdma_send_write_chunk()
639 * @rdma: controlling RDMA transport
646 int svc_rdma_send_write_list(struct svcxprt_rdma *rdma, in svc_rdma_send_write_list() argument
656 ret = svc_rdma_send_write_chunk(rdma, chunk, xdr); in svc_rdma_send_write_list()
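
svc_rdma_send_write_list() looks like the per-RPC entry point for the Write list: it walks the parsed chunk list carried by the receive context and pushes each chunk's payload with svc_rdma_send_write_chunk(), which in turn allocates a write_info, builds the Write WRs, and posts them via svc_rdma_post_chunk_ctxt() (lines 604-627 above). A sketch of that walk follows; pcl_for_each_chunk() is the parsed-chunk-list iterator from linux/sunrpc/svc_rdma_pcl.h, while the rctxt parameter, rc_write_pcl, and ch_payload_length are assumed.

static int demo_send_write_list(struct svcxprt_rdma *rdma,
				const struct svc_rdma_recv_ctxt *rctxt,
				const struct xdr_buf *xdr)
{
	struct svc_rdma_chunk *chunk;
	int ret;

	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		/* Stop at the first chunk that carries no payload. */
		if (!chunk->ch_payload_length)
			break;
		ret = svc_rdma_send_write_chunk(rdma, chunk, xdr);
		if (ret < 0)
			return ret;
	}
	return 0;
}
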
665 * @rdma: controlling RDMA transport
678 int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma, in svc_rdma_prepare_reply_chunk() argument
691 info->wi_rdma = rdma; in svc_rdma_prepare_reply_chunk()
708 first_wr = rdma_rw_ctx_wrs(&rwc->rw_ctx, rdma->sc_qp, in svc_rdma_prepare_reply_chunk()
709 rdma->sc_port_num, cqe, first_wr); in svc_rdma_prepare_reply_chunk()
720 * svc_rdma_build_read_segment - Build RDMA Read WQEs to pull one RDMA segment
735 struct svcxprt_rdma *rdma = svc_rdma_rqst_rdma(rqstp); in svc_rdma_build_read_segment() local
744 ctxt = svc_rdma_get_rw_ctxt(rdma, sge_no); in svc_rdma_build_read_segment()
772 ret = svc_rdma_rw_ctx_init(rdma, ctxt, segment->rs_offset, in svc_rdma_build_read_segment()
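
svc_rdma_build_read_segment() appears to turn one RPC-over-RDMA segment into an RDMA Read: size an R/W context by the number of pages the segment spans, point the context's scatterlist at receive pages, then call svc_rdma_rw_ctx_init() with DMA_FROM_DEVICE so the pulled data lands directly in the request buffer. A simplified sketch follows; the svc_rdma_segment type name, the rc_curpage/rc_pageoff/rc_cc bookkeeping, and rw_nents are assumptions, while svc_rdma_rqst_rdma() and the rs_offset/rs_handle segment fields come from the fragments.

static int demo_build_read_segment(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *head,
				   const struct svc_rdma_segment *segment)
{
	struct svcxprt_rdma *rdma = svc_rdma_rqst_rdma(rqstp);
	unsigned int len = segment->rs_length;
	unsigned int sge_no, seg_len;
	struct svc_rdma_rw_ctxt *ctxt;
	struct scatterlist *sg;
	int ret;

	/* One SGE per page the segment touches, given the current offset
	 * into the first page.
	 */
	sge_no = PAGE_ALIGN(head->rc_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(rdma, sge_no);
	if (!ctxt)
		return -ENOMEM;
	ctxt->rw_nents = sge_no;

	/* Point the scatterlist at the receive pages the Read payload
	 * will land in.
	 */
	sg = ctxt->rw_sg_table.sgl;
	while (len) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - head->rc_pageoff);
		sg_set_page(sg, rqstp->rq_pages[head->rc_curpage],
			    seg_len, head->rc_pageoff);
		sg = sg_next(sg);

		head->rc_pageoff += seg_len;
		if (head->rc_pageoff == PAGE_SIZE) {
			head->rc_curpage++;
			head->rc_pageoff = 0;
		}
		len -= seg_len;
	}

	/* DMA-map the pages and prepare Read WRs that target the client's
	 * <rs_handle, rs_offset> region.
	 */
	ret = svc_rdma_rw_ctx_init(rdma, ctxt, segment->rs_offset,
				   segment->rs_handle, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	/* Chain this context onto the chunk context and account for the
	 * send-queue entries its Read WRs will consume.
	 */
	list_add(&ctxt->rw_list, &head->rc_cc.cc_rwctxts);
	head->rc_cc.cc_sqecount += ret;
	return 0;
}
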
788 * svc_rdma_build_read_chunk - Build RDMA Read WQEs to pull one RDMA chunk
825 * head->rc_curpage and head->rc_pageoff so that the next RDMA Read
867 * svc_rdma_read_multiple_chunks - Construct RDMA Reads to pull data item Read chunks
875 * %0: RDMA Read WQEs were successfully built
919 * svc_rdma_read_data_item - Construct RDMA Reads to pull data item Read chunks
930 * %0: RDMA Read WQEs were successfully built
944 * svc_rdma_read_chunk_range - Build RDMA Read WRs for portion of a chunk
952 * %0: RDMA Read WQEs were successfully built
991 * svc_rdma_read_call_chunk - Build RDMA Read WQEs to pull a Long Message
996 * %0: RDMA Read WQEs were successfully built
1047 * svc_rdma_read_special - Build RDMA Read WQEs to pull a Long Message
1059 * %0: RDMA Read WQEs were successfully built
1075 * of two different RDMA segments.
1094 * @rdma: controlling RDMA transport
1098 * The RPC/RDMA protocol assumes that the upper layer's XDR decoders
1106 * RDMA Reads have completed.
1109 * %1: all needed RDMA Reads were posted successfully,
1115 int svc_rdma_process_read_list(struct svcxprt_rdma *rdma, in svc_rdma_process_read_list() argument
1140 ret = svc_rdma_post_chunk_ctxt(rdma, cc); in svc_rdma_process_read_list()
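
svc_rdma_process_read_list() is the top-level entry for pulling Read chunks. The fragments at lines 867-1059 suggest it dispatches to one of several builders depending on whether the message carries ordinary data-item Read chunks or a Read chunk holding the whole RPC call (a Long Message), then posts the accumulated chain with svc_rdma_post_chunk_ctxt(). The kdoc fragment at line 1109 notes the %1 return: the caller must not parse the message yet, because it becomes complete only when svc_rdma_wc_read_done() requeues it. A hedged sketch of that dispatch-and-post shape follows; rc_call_pcl, rc_read_pcl, rc_cc, and the builder call signatures are assumptions, and pcl_is_empty() is from the parsed-chunk-list API.

static int demo_process_read_list(struct svcxprt_rdma *rdma,
				  struct svc_rqst *rqstp,
				  struct svc_rdma_recv_ctxt *head)
{
	struct svc_rdma_chunk_ctxt *cc = &head->rc_cc;
	int ret;

	if (pcl_is_empty(&head->rc_call_pcl)) {
		/* Ordinary data-item payloads: pull each Read chunk into
		 * the right place in the request buffer.
		 */
		if (head->rc_read_pcl.cl_count == 1)
			ret = svc_rdma_read_data_item(rqstp, head);
		else
			ret = svc_rdma_read_multiple_chunks(rqstp, head);
	} else {
		/* A Long Message: the whole RPC call arrives as a Read
		 * chunk instead of inline.
		 */
		ret = svc_rdma_read_special(rqstp, head);
	}
	if (ret < 0)
		return ret;

	/* Post the accumulated Read WR chain. A positive return tells the
	 * caller to defer parsing until svc_rdma_wc_read_done() queues
	 * the receive context back onto sc_read_complete_q.
	 */
	ret = svc_rdma_post_chunk_ctxt(rdma, cc);
	return ret < 0 ? ret : 1;
}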