Lines matching the identifier "mr" (full-word matches)
49 struct rpcrdma_mr *mr) in frwr_cid_init() argument
51 struct rpc_rdma_cid *cid = &mr->mr_cid; in frwr_cid_init()
54 cid->ci_completion_id = mr->mr_ibmr->res.id; in frwr_cid_init()
57 static void frwr_mr_unmap(struct rpcrdma_mr *mr) in frwr_mr_unmap() argument
59 if (mr->mr_device) { in frwr_mr_unmap()
60 trace_xprtrdma_mr_unmap(mr); in frwr_mr_unmap()
61 ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents, in frwr_mr_unmap()
62 mr->mr_dir); in frwr_mr_unmap()
63 mr->mr_device = NULL; in frwr_mr_unmap()
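frwr_mr_unmap() treats mr->mr_device as both the DMA device handle and an "is mapped" flag: the scatterlist is unmapped only while the pointer is set, and the pointer is cleared afterwards, so the helper is safe to call from both frwr_mr_release() and frwr_mr_put() without double-unmapping. Below is a minimal userspace sketch of that idempotent-teardown idiom; the names (fake_mr, fake_unmap) are invented for illustration and are not kernel APIs.

#include <stdio.h>

/* Illustrative only: the device pointer doubles as an "is mapped" flag. */
struct fake_mr {
	void *device;   /* non-NULL only while the scatterlist is mapped */
	int   nents;    /* number of mapped entries */
};

static void fake_unmap(struct fake_mr *mr)
{
	if (!mr->device)
		return;                 /* already unmapped: harmless no-op */
	printf("unmapping %d entries\n", mr->nents);
	mr->device = NULL;              /* mark as unmapped for later callers */
}

int main(void)
{
	struct fake_mr mr = { .device = (void *)0x1, .nents = 4 };

	fake_unmap(&mr);        /* performs the unmap */
	fake_unmap(&mr);        /* second call does nothing */
	return 0;
}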
68 * frwr_mr_release - Destroy one MR
69 * @mr: MR allocated by frwr_mr_init
72 void frwr_mr_release(struct rpcrdma_mr *mr) in frwr_mr_release() argument
76 frwr_mr_unmap(mr); in frwr_mr_release()
78 rc = ib_dereg_mr(mr->mr_ibmr); in frwr_mr_release()
80 trace_xprtrdma_frwr_dereg(mr, rc); in frwr_mr_release()
81 kfree(mr->mr_sg); in frwr_mr_release()
82 kfree(mr); in frwr_mr_release()
85 static void frwr_mr_put(struct rpcrdma_mr *mr) in frwr_mr_put() argument
87 frwr_mr_unmap(mr); in frwr_mr_put()
89 /* The MR is returned to the req's MR free list instead in frwr_mr_put()
90 * of to the xprt's MR free list. No spinlock is needed. in frwr_mr_put()
92 rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs); in frwr_mr_put()
108 struct rpcrdma_mr *mr; in frwr_reset() local
110 while ((mr = rpcrdma_mr_pop(&req->rl_registered))) in frwr_reset()
111 frwr_mr_put(mr); in frwr_reset()
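The comment in frwr_mr_put() is the key point: each MR is pushed back onto its owning rpcrdma_req's rl_free_mrs list rather than the transport-wide free list, so only the task that owns the request ever touches the list and no spinlock is needed. A rough userspace sketch of such a single-owner free list follows; the kernel uses list_head and rpcrdma_mr_push()/rpcrdma_mr_pop(), and all names below are made up for illustration.

#include <stdio.h>

/* Illustrative single-owner free list: no locking, because only the
 * owner of the request ever pushes to or pops from it.
 */
struct fake_mr {
	struct fake_mr *next;
	int id;
};

static void mr_push(struct fake_mr *mr, struct fake_mr **list)
{
	mr->next = *list;
	*list = mr;
}

static struct fake_mr *mr_pop(struct fake_mr **list)
{
	struct fake_mr *mr = *list;

	if (mr)
		*list = mr->next;
	return mr;
}

int main(void)
{
	struct fake_mr a = { .id = 1 }, b = { .id = 2 };
	struct fake_mr *free_mrs = NULL, *mr;

	mr_push(&a, &free_mrs);
	mr_push(&b, &free_mrs);
	while ((mr = mr_pop(&free_mrs)))	/* like frwr_reset() draining rl_registered */
		printf("recycled MR %d\n", mr->id);
	return 0;
}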
115 * frwr_mr_init - Initialize one MR
117 * @mr: generic MR to prepare for FRWR
122 int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) in frwr_mr_init() argument
138 mr->mr_xprt = r_xprt; in frwr_mr_init()
139 mr->mr_ibmr = frmr; in frwr_mr_init()
140 mr->mr_device = NULL; in frwr_mr_init()
141 INIT_LIST_HEAD(&mr->mr_list); in frwr_mr_init()
142 init_completion(&mr->mr_linv_done); in frwr_mr_init()
143 frwr_cid_init(ep, mr); in frwr_mr_init()
146 mr->mr_sg = sg; in frwr_mr_init()
151 trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr)); in frwr_mr_init()
277 * @mr: MR to fill in
283 * On success, @mr is filled in.
288 struct rpcrdma_mr *mr) in frwr_map() argument
299 sg_set_page(&mr->mr_sg[i], seg->mr_page, in frwr_map()
310 mr->mr_dir = rpcrdma_data_dir(writing); in frwr_map()
311 mr->mr_nents = i; in frwr_map()
313 dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents, in frwr_map()
314 mr->mr_dir); in frwr_map()
317 mr->mr_device = ep->re_id->device; in frwr_map()
319 ibmr = mr->mr_ibmr; in frwr_map()
320 n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE); in frwr_map()
329 reg_wr = &mr->mr_regwr; in frwr_map()
330 reg_wr->mr = ibmr; in frwr_map()
336 mr->mr_handle = ibmr->rkey; in frwr_map()
337 mr->mr_length = ibmr->length; in frwr_map()
338 mr->mr_offset = ibmr->iova; in frwr_map()
339 trace_xprtrdma_mr_map(mr); in frwr_map()
344 trace_xprtrdma_frwr_sgerr(mr, i); in frwr_map()
348 trace_xprtrdma_frwr_maperr(mr, n); in frwr_map()
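frwr_map() works in stages: build the scatterlist with sg_set_page(), DMA-map it with ib_dma_map_sg(), hand the mapped entries to ib_map_mr_sg(), then fill in the REG_MR work request and copy the resulting rkey, length and iova into the MR (the handle/length/offset triple advertised in the RPC-over-RDMA chunk). Each stage that can fail branches to a dedicated error label, which is what the frwr_sgerr/frwr_maperr trace lines above correspond to. A compact userspace sketch of that staged, goto-unwound structure, with every function name invented for illustration:

#include <stdio.h>

/* All names below are illustrative stand-ins, not kernel APIs. */
static int fake_dma_map(int nents)    { return nents; }     /* 0 on failure */
static int fake_map_mr(int dma_nents) { return dma_nents; } /* < dma_nents on failure */

struct fake_seg { unsigned int handle, length, offset; };

static int fake_frwr_map(int nents, struct fake_seg *out)
{
	int dma_nents, n;

	dma_nents = fake_dma_map(nents);
	if (!dma_nents)
		goto out_dmamap_err;

	n = fake_map_mr(dma_nents);
	if (n != dma_nents)
		goto out_mapmr_err;

	/* Success: record the triple advertised to the peer. */
	out->handle = 0x1234;	/* stands in for ibmr->rkey */
	out->length = 4096;	/* stands in for ibmr->length */
	out->offset = 0;	/* stands in for ibmr->iova */
	return 0;

out_dmamap_err:
	fprintf(stderr, "DMA mapping failed\n");
	return -1;
out_mapmr_err:
	fprintf(stderr, "MR mapping failed (%d of %d)\n", n, dma_nents);
	return -1;
}

int main(void)
{
	struct fake_seg seg;

	return fake_frwr_map(4, &seg);
}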
357 * Each flushed MR gets destroyed after the QP has drained.
362 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe); in frwr_wc_fastreg() local
365 trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid); in frwr_wc_fastreg()
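frwr_wc_fastreg() and the other completion handlers further down (frwr_wc_localinv, frwr_wc_localinv_wake, frwr_wc_localinv_done) all recover their rpcrdma_mr from the ib_cqe embedded in it via container_of(); on a flushed completion the MR is not recycled here but destroyed once the QP has drained, as the comment above says. A small self-contained illustration of the container_of idiom; the macro mirrors the usual offsetof-based form, and the surrounding struct names are invented:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative stand-ins for ib_cqe / rpcrdma_mr. */
struct fake_cqe { void (*done)(struct fake_cqe *cqe); };

struct fake_mr {
	int id;
	struct fake_cqe cqe;	/* embedded completion hook */
};

static void fake_done(struct fake_cqe *cqe)
{
	/* Recover the containing MR from the embedded cqe pointer. */
	struct fake_mr *mr = container_of(cqe, struct fake_mr, cqe);

	printf("completion for MR %d\n", mr->id);
}

int main(void)
{
	struct fake_mr mr = { .id = 7, .cqe = { .done = fake_done } };

	mr.cqe.done(&mr.cqe);	/* the CQ handler only ever sees the cqe */
	return 0;
}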
388 struct rpcrdma_mr *mr; in frwr_send() local
394 list_for_each_entry(mr, &req->rl_registered, mr_list) { in frwr_send()
395 trace_xprtrdma_mr_fastreg(mr); in frwr_send()
397 mr->mr_cqe.done = frwr_wc_fastreg; in frwr_send()
398 mr->mr_regwr.wr.next = post_wr; in frwr_send()
399 mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe; in frwr_send()
400 mr->mr_regwr.wr.num_sge = 0; in frwr_send()
401 mr->mr_regwr.wr.opcode = IB_WR_REG_MR; in frwr_send()
402 mr->mr_regwr.wr.send_flags = 0; in frwr_send()
403 post_wr = &mr->mr_regwr.wr; in frwr_send()
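In frwr_send() each registered MR contributes one IB_WR_REG_MR work request, and the loop links it ahead of the chain head by pointing the new WR's next at the current post_wr and then advancing post_wr. The whole chain is handed to a single ib_post_send(), so every registration is ordered before the Send that references it. A minimal sketch of building such a chain by prepending, with invented types:

#include <stdio.h>

/* Illustrative stand-ins for ib_send_wr and the REG_MR/SEND opcodes. */
struct fake_wr {
	struct fake_wr *next;
	const char *opcode;
};

int main(void)
{
	struct fake_wr send   = { .next = NULL, .opcode = "SEND" };
	struct fake_wr reg[2] = {
		{ .opcode = "REG_MR" }, { .opcode = "REG_MR" }
	};
	struct fake_wr *post_wr = &send, *wr;
	int i;

	/* Prepend one REG_MR WR per registered MR, as frwr_send() does. */
	for (i = 0; i < 2; i++) {
		reg[i].next = post_wr;
		post_wr = &reg[i];
	}

	/* "Posting" the chain visits the registrations first, then the Send. */
	for (wr = post_wr; wr; wr = wr->next)
		printf("%s\n", wr->opcode);
	return 0;
}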
424 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
431 struct rpcrdma_mr *mr; in frwr_reminv() local
433 list_for_each_entry(mr, mrs, mr_list) in frwr_reminv()
434 if (mr->mr_handle == rep->rr_inv_rkey) { in frwr_reminv()
435 list_del_init(&mr->mr_list); in frwr_reminv()
436 trace_xprtrdma_mr_reminv(mr); in frwr_reminv()
437 frwr_mr_put(mr); in frwr_reminv()
438 break; /* only one invalidated MR per RPC */ in frwr_reminv()
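frwr_reminv() walks the req's registered MRs looking for the one whose handle matches the Receive's rr_inv_rkey; that MR was already invalidated by the server (Remote Invalidation), so it is unhooked from the list and recycled immediately, and the loop stops because at most one MR per RPC is invalidated this way. A rough array-based analogue of that match-and-recycle step, with invented names:

#include <stdio.h>

/* Illustrative: find the one MR whose handle matches the remotely
 * invalidated rkey, recycle it, and stop looking.
 */
struct fake_mr { unsigned int handle; int recycled; };

static void fake_reminv(struct fake_mr *mrs, int count, unsigned int inv_rkey)
{
	int i;

	for (i = 0; i < count; i++) {
		if (mrs[i].handle != inv_rkey)
			continue;
		mrs[i].recycled = 1;	/* stands in for frwr_mr_put() */
		break;			/* only one invalidated MR per RPC */
	}
}

int main(void)
{
	struct fake_mr mrs[] = { { .handle = 0x10 }, { .handle = 0x20 } };

	fake_reminv(mrs, 2, 0x20);
	printf("MR 0x20 recycled: %d\n", mrs[1].recycled);
	return 0;
}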
442 static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr) in frwr_mr_done() argument
445 frwr_mr_put(mr); in frwr_mr_done()
457 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe); in frwr_wc_localinv() local
460 trace_xprtrdma_wc_li(wc, &mr->mr_cid); in frwr_wc_localinv()
461 frwr_mr_done(wc, mr); in frwr_wc_localinv()
471 * Awaken anyone waiting for an MR to finish being fenced.
476 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe); in frwr_wc_localinv_wake() local
479 trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid); in frwr_wc_localinv_wake()
480 frwr_mr_done(wc, mr); in frwr_wc_localinv_wake()
481 complete(&mr->mr_linv_done); in frwr_wc_localinv_wake()
502 struct rpcrdma_mr *mr; in frwr_unmap_sync() local
511 mr = rpcrdma_mr_pop(&req->rl_registered); in frwr_unmap_sync()
513 trace_xprtrdma_mr_localinv(mr); in frwr_unmap_sync()
516 last = &mr->mr_invwr; in frwr_unmap_sync()
518 last->wr_cqe = &mr->mr_cqe; in frwr_unmap_sync()
523 last->ex.invalidate_rkey = mr->mr_handle; in frwr_unmap_sync()
529 } while ((mr = rpcrdma_mr_pop(&req->rl_registered))); in frwr_unmap_sync()
531 mr = container_of(last, struct rpcrdma_mr, mr_invwr); in frwr_unmap_sync()
538 reinit_completion(&mr->mr_linv_done); in frwr_unmap_sync()
552 wait_for_completion(&mr->mr_linv_done); in frwr_unmap_sync()
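frwr_unmap_sync() chains one IB_WR_LOCAL_INV work request per registered MR, arms only the last one with the frwr_wc_localinv_wake handler, and then blocks in wait_for_completion() until that handler calls complete() on mr_linv_done. Because the QP processes the chain in order, completion of the last LOCAL_INV implies all earlier ones have finished too. A userspace analogue of that wait/wake pairing built on pthreads; the kernel uses struct completion, and everything here is illustrative:

#include <pthread.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's struct completion. */
struct fake_completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void fake_complete(struct fake_completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void fake_wait_for_completion(struct fake_completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct fake_completion linv_done = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

/* Plays the role of frwr_wc_localinv_wake(): runs when the last
 * LOCAL_INV work request completes.
 */
static void *cq_handler(void *arg)
{
	(void)arg;
	fake_complete(&linv_done);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, cq_handler, NULL);
	fake_wait_for_completion(&linv_done);	/* like frwr_unmap_sync() */
	pthread_join(t, NULL);
	printf("all local invalidations fenced\n");
	return 0;
}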
573 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe); in frwr_wc_localinv_done() local
577 trace_xprtrdma_wc_li_done(wc, &mr->mr_cid); in frwr_wc_localinv_done()
579 /* Ensure that @rep is generated before the MR is released */ in frwr_wc_localinv_done()
580 rep = mr->mr_req->rl_reply; in frwr_wc_localinv_done()
589 frwr_mr_put(mr); in frwr_wc_localinv_done()
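The comment "Ensure that @rep is generated before the MR is released" is an ordering requirement: the handler must observe the reply pointer set by the Receive path before it recycles the MR, after which the request may be reused; in the kernel this is enforced with a memory barrier between the two accesses. The general flavour of such an ordering constraint can be shown in userspace with C11 acquire/release atomics. This is only a generic publish/observe analogue of the idea, not the kernel's barrier code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative message-passing pattern: the producer generates a reply,
 * then publishes a flag with release semantics; the consumer reads the
 * flag with acquire semantics, which guarantees it also sees the reply
 * that was written before the flag.
 */
static int reply;			/* the "generated" reply */
static atomic_int reply_ready;

static void *receive_path(void *arg)
{
	(void)arg;
	reply = 42;					/* generate the reply */
	atomic_store_explicit(&reply_ready, 1,
			      memory_order_release);	/* publish it */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, receive_path, NULL);
	while (!atomic_load_explicit(&reply_ready, memory_order_acquire))
		;				/* spin until published */
	printf("reply %d observed before release\n", reply);
	pthread_join(t, NULL);
	return 0;
}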
607 struct rpcrdma_mr *mr; in frwr_unmap_async() local
614 mr = rpcrdma_mr_pop(&req->rl_registered); in frwr_unmap_async()
616 trace_xprtrdma_mr_localinv(mr); in frwr_unmap_async()
619 last = &mr->mr_invwr; in frwr_unmap_async()
621 last->wr_cqe = &mr->mr_cqe; in frwr_unmap_async()
626 last->ex.invalidate_rkey = mr->mr_handle; in frwr_unmap_async()
632 } while ((mr = rpcrdma_mr_pop(&req->rl_registered))); in frwr_unmap_async()
665 * frwr_wp_create - Create an MR for padding Write chunks
674 struct rpcrdma_mr *mr; in frwr_wp_create() local
676 mr = rpcrdma_mr_get(r_xprt); in frwr_wp_create()
677 if (!mr) in frwr_wp_create()
679 mr->mr_req = NULL; in frwr_wp_create()
680 ep->re_write_pad_mr = mr; in frwr_wp_create()
685 if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr))) in frwr_wp_create()
687 trace_xprtrdma_mr_fastreg(mr); in frwr_wp_create()
689 mr->mr_cqe.done = frwr_wc_fastreg; in frwr_wp_create()
690 mr->mr_regwr.wr.next = NULL; in frwr_wp_create()
691 mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe; in frwr_wp_create()
692 mr->mr_regwr.wr.num_sge = 0; in frwr_wp_create()
693 mr->mr_regwr.wr.opcode = IB_WR_REG_MR; in frwr_wp_create()
694 mr->mr_regwr.wr.send_flags = 0; in frwr_wp_create()
696 return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL); in frwr_wp_create()