Lines matching full:mr (drivers/infiniband/sw/rdmavt/mr.c)
11 #include "mr.h"
15 * rvt_driver_mr_init - Init MR resources per driver
62 * rvt_mr_exit - clean up MR
70 rvt_pr_err(rdi, "DMA MR not null!\n"); in rvt_mr_exit()
75 static void rvt_deinit_mregion(struct rvt_mregion *mr) in rvt_deinit_mregion() argument
77 int i = mr->mapsz; in rvt_deinit_mregion()
79 mr->mapsz = 0; in rvt_deinit_mregion()
81 kfree(mr->map[--i]); in rvt_deinit_mregion()
82 percpu_ref_exit(&mr->refcount); in rvt_deinit_mregion()
87 struct rvt_mregion *mr = container_of(ref, struct rvt_mregion, in __rvt_mregion_complete() local
90 complete(&mr->comp); in __rvt_mregion_complete()
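The refcount/completion pair visible here is the standard percpu_ref teardown handshake: the region starts with one implicit reference, fast paths take and drop references, and deregistration kills the ref and waits for the release callback. A condensed, hedged sketch of that lifecycle (demo names, not the driver's exact code):

    #include <linux/percpu-refcount.h>
    #include <linux/completion.h>

    struct demo_region {
        struct percpu_ref refcount;
        struct completion comp;
    };

    /* runs once the count reaches zero after percpu_ref_kill() */
    static void demo_release(struct percpu_ref *ref)
    {
        struct demo_region *r = container_of(ref, struct demo_region, refcount);

        complete(&r->comp);             /* wake the waiter in teardown */
    }

    static int demo_init(struct demo_region *r)
    {
        init_completion(&r->comp);
        /* count starts at 1; percpu_ref_kill() drops that initial ref */
        return percpu_ref_init(&r->refcount, demo_release, 0, GFP_KERNEL);
    }

    static void demo_teardown(struct demo_region *r)
    {
        percpu_ref_kill(&r->refcount);  /* atomic mode, drop the initial ref */
        wait_for_completion(&r->comp);  /* blocks until the last put */
        percpu_ref_exit(&r->refcount);
    }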
93 static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd, in rvt_init_mregion() argument
99 mr->mapsz = 0; in rvt_init_mregion()
102 mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL, in rvt_init_mregion()
104 if (!mr->map[i]) in rvt_init_mregion()
106 mr->mapsz++; in rvt_init_mregion()
108 init_completion(&mr->comp); in rvt_init_mregion()
110 if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete, in rvt_init_mregion()
114 atomic_set(&mr->lkey_invalid, 0); in rvt_init_mregion()
115 mr->pd = pd; in rvt_init_mregion()
116 mr->max_segs = count; in rvt_init_mregion()
119 rvt_deinit_mregion(mr); in rvt_init_mregion()
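The map filled in rvt_init_mregion() is a two-level table: mr->map[] holds pointers to fixed-size chunks, each carrying RVT_SEGSZ (vaddr, length) segment slots, so the chunk count is a round-up division of the requested segment count. A standalone demo of that sizing arithmetic (the RVT_SEGSZ value is illustrative, not the driver's):

    #include <stdio.h>

    #define RVT_SEGSZ 256                   /* illustrative value only */

    int main(void)
    {
        int count = 1000;                   /* requested segments */
        int m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;    /* round up */

        printf("%d segments -> %d map chunk(s)\n", count, m);  /* 4 */
        return 0;
    }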
125 * @mr: memory region that this lkey protects
130 * Increments mr reference count as required.
132 * Sets the lkey field of mr for non-dma regions. in rvt_alloc_lkey()
135 static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region) in rvt_alloc_lkey() argument
141 struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device); in rvt_alloc_lkey()
144 rvt_get_mr(mr); in rvt_alloc_lkey()
153 mr->lkey_published = 1; in rvt_alloc_lkey()
155 rcu_assign_pointer(dev->dma_mr, mr); in rvt_alloc_lkey()
156 rvt_get_mr(mr); in rvt_alloc_lkey()
180 mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) | in rvt_alloc_lkey()
183 if (mr->lkey == 0) { in rvt_alloc_lkey()
184 mr->lkey |= 1 << 8; in rvt_alloc_lkey()
187 mr->lkey_published = 1; in rvt_alloc_lkey()
189 rcu_assign_pointer(rkt->table[r], mr); in rvt_alloc_lkey()
195 rvt_put_mr(mr); in rvt_alloc_lkey()
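The lkey built above packs the table slot into the top lkey_table_size bits, so the lookup side recovers the slot with a single shift (the rkt->shift seen in rvt_lkey_ok()/rvt_rkey_ok() below); the low 8 bits are the consumer-settable key byte, and the `|= 1 << 8` fallback keeps a computed value of zero from colliding with the reserved DMA lkey. A standalone demo of the layout (the generation counter in the middle bits is an assumption based on the visible masks):

    #include <stdio.h>

    int main(void)
    {
        unsigned lkey_table_size = 16;      /* bits of table index */
        unsigned shift = 32 - lkey_table_size;
        unsigned r = 42;                    /* table slot */
        unsigned gen = 7;                   /* generation counter (assumed) */

        unsigned lkey = (r << shift) | (gen << 8);
        if (lkey == 0)                      /* 0 is reserved for the DMA MR */
            lkey |= 1 << 8;

        printf("lkey=0x%08x index=%u keybyte=0x%02x\n",
               lkey, lkey >> shift, lkey & 0xff);   /* index == 42 */
        return 0;
    }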
203 * @mr: mr to free from tables
205 static void rvt_free_lkey(struct rvt_mregion *mr) in rvt_free_lkey() argument
208 u32 lkey = mr->lkey; in rvt_free_lkey()
210 struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device); in rvt_free_lkey()
216 if (mr->lkey_published) { in rvt_free_lkey()
217 mr->lkey_published = 0; in rvt_free_lkey()
220 rvt_put_mr(mr); in rvt_free_lkey()
223 if (!mr->lkey_published) in rvt_free_lkey()
226 mr->lkey_published = 0; in rvt_free_lkey()
234 percpu_ref_kill(&mr->refcount); in rvt_free_lkey()
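Publication order matters in rvt_free_lkey(): lkey_published is cleared and the RCU slot emptied before percpu_ref_kill(), so a reader still holding a stale pointer can detect the race. A condensed reader-side sketch of the matching lookup discipline, modeled on the rvt_lkey_ok()/rvt_rkey_ok() lines further down (demo function, not the driver's):

    #include <rdma/rdma_vt.h>
    #include <rdma/rdmavt_mr.h>     /* rvt_get_mr()/rvt_put_mr() */

    static struct rvt_mregion *demo_lookup(struct rvt_lkey_table *rkt, u32 lkey)
    {
        struct rvt_mregion *mr;

        rcu_read_lock();
        mr = rcu_dereference(rkt->table[lkey >> rkt->shift]);
        if (mr) {
            rvt_get_mr(mr);         /* pin before leaving the RCU section */
            if (!READ_ONCE(mr->lkey_published) || mr->lkey != lkey) {
                rvt_put_mr(mr);     /* raced with rvt_free_lkey() */
                mr = NULL;
            }
        }
        rcu_read_unlock();
        return mr;
    }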
239 struct rvt_mr *mr; in __rvt_alloc_mr() local
245 mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL); in __rvt_alloc_mr()
246 if (!mr) in __rvt_alloc_mr()
249 rval = rvt_init_mregion(&mr->mr, pd, count, 0); in __rvt_alloc_mr()
253 * ib_reg_phys_mr() will initialize mr->ibmr except for in __rvt_alloc_mr()
256 rval = rvt_alloc_lkey(&mr->mr, 0); in __rvt_alloc_mr()
259 mr->ibmr.lkey = mr->mr.lkey; in __rvt_alloc_mr()
260 mr->ibmr.rkey = mr->mr.lkey; in __rvt_alloc_mr()
262 return mr; in __rvt_alloc_mr()
265 rvt_deinit_mregion(&mr->mr); in __rvt_alloc_mr()
267 kfree(mr); in __rvt_alloc_mr()
268 mr = ERR_PTR(rval); in __rvt_alloc_mr()
272 static void __rvt_free_mr(struct rvt_mr *mr) in __rvt_free_mr() argument
274 rvt_free_lkey(&mr->mr); in __rvt_free_mr()
275 rvt_deinit_mregion(&mr->mr); in __rvt_free_mr()
276 kfree(mr); in __rvt_free_mr()
288 struct rvt_mr *mr; in rvt_get_dma_mr() local
295 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in rvt_get_dma_mr()
296 if (!mr) { in rvt_get_dma_mr()
301 rval = rvt_init_mregion(&mr->mr, pd, 0, 0); in rvt_get_dma_mr()
307 rval = rvt_alloc_lkey(&mr->mr, 1); in rvt_get_dma_mr()
313 mr->mr.access_flags = acc; in rvt_get_dma_mr()
314 ret = &mr->ibmr; in rvt_get_dma_mr()
319 rvt_deinit_mregion(&mr->mr); in rvt_get_dma_mr()
321 kfree(mr); in rvt_get_dma_mr()
340 struct rvt_mr *mr; in rvt_reg_user_mr() local
355 mr = __rvt_alloc_mr(n, pd); in rvt_reg_user_mr()
356 if (IS_ERR(mr)) { in rvt_reg_user_mr()
357 ret = ERR_CAST(mr); in rvt_reg_user_mr()
361 mr->mr.user_base = start; in rvt_reg_user_mr()
362 mr->mr.iova = virt_addr; in rvt_reg_user_mr()
363 mr->mr.length = length; in rvt_reg_user_mr()
364 mr->mr.offset = ib_umem_offset(umem); in rvt_reg_user_mr()
365 mr->mr.access_flags = mr_access_flags; in rvt_reg_user_mr()
366 mr->umem = umem; in rvt_reg_user_mr()
368 mr->mr.page_shift = PAGE_SHIFT; in rvt_reg_user_mr()
379 mr->mr.map[m]->segs[n].vaddr = vaddr; in rvt_reg_user_mr()
380 mr->mr.map[m]->segs[n].length = PAGE_SIZE; in rvt_reg_user_mr()
381 trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE); in rvt_reg_user_mr()
387 return &mr->ibmr; in rvt_reg_user_mr()
390 __rvt_free_mr(mr); in rvt_reg_user_mr()
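The registration loop above stores one page per segment into mr->mr.map[m]->segs[n]. The wrap step itself contains no "mr" and so is not matched here, but the apparent pattern is that n advances per page and rolls over into the next map chunk every RVT_SEGSZ entries. A standalone demo of that fill order (tiny illustrative RVT_SEGSZ):

    #include <stdio.h>

    #define RVT_SEGSZ 4                     /* tiny chunk size for the demo */

    int main(void)
    {
        int m = 0, n = 0;

        for (int page = 0; page < 10; page++) {
            printf("page %d -> map[%d].segs[%d]\n", page, m, n);
            if (++n == RVT_SEGSZ) {         /* chunk full: next map entry */
                m++;
                n = 0;
            }
        }
        return 0;
    }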
404 * for QPs in the same PD as the MR will call the
409 struct rvt_mregion *mr = (struct rvt_mregion *)v; in rvt_dereg_clean_qp_cb() local
412 if (mr->pd != qp->ibqp.pd) in rvt_dereg_clean_qp_cb()
414 rvt_qp_mr_clean(qp, mr->lkey); in rvt_dereg_clean_qp_cb()
419 * @mr: the MR that is being deregistered
422 * to the lkey noted in mr.
424 static void rvt_dereg_clean_qps(struct rvt_mregion *mr) in rvt_dereg_clean_qps() argument
426 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); in rvt_dereg_clean_qps()
428 rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb); in rvt_dereg_clean_qps()
433 * @mr: the mregion in rvt_check_refs()
442 static int rvt_check_refs(struct rvt_mregion *mr, const char *t) in rvt_check_refs() argument
445 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); in rvt_check_refs()
447 if (mr->lkey) { in rvt_check_refs()
448 /* avoid dma mr */ in rvt_check_refs()
449 rvt_dereg_clean_qps(mr); in rvt_check_refs()
450 /* @mr was indexed on rcu protected @lkey_table */ in rvt_check_refs()
454 timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); in rvt_check_refs()
457 "%s timeout mr %p pd %p lkey %x refcount %ld\n", in rvt_check_refs()
458 t, mr, mr->pd, mr->lkey, in rvt_check_refs()
459 atomic_long_read(&mr->refcount.data->count)); in rvt_check_refs()
460 rvt_get_mr(mr); in rvt_check_refs()
467 * rvt_mr_has_lkey - is MR keyed with this lkey in rvt_mr_has_lkey()
468 * @mr: the mregion
471 bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey) in rvt_mr_has_lkey() argument
473 return mr && lkey == mr->lkey; in rvt_mr_has_lkey()
477 * rvt_ss_has_lkey - is the lkey referenced by the SGE state in rvt_ss_has_lkey()
481 * This code tests for an MR in the indicated
492 rval = rvt_mr_has_lkey(ss->sge.mr, lkey); in rvt_ss_has_lkey()
495 rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey); in rvt_ss_has_lkey()
511 struct rvt_mr *mr = to_imr(ibmr); in rvt_dereg_mr() local
514 rvt_free_lkey(&mr->mr); in rvt_dereg_mr()
516 rvt_put_mr(&mr->mr); /* will set completion if last */ in rvt_dereg_mr()
517 ret = rvt_check_refs(&mr->mr, __func__); in rvt_dereg_mr()
520 rvt_deinit_mregion(&mr->mr); in rvt_dereg_mr()
521 ib_umem_release(mr->umem); in rvt_dereg_mr()
522 kfree(mr); in rvt_dereg_mr()
538 struct rvt_mr *mr; in rvt_alloc_mr() local
543 mr = __rvt_alloc_mr(max_num_sg, pd); in rvt_alloc_mr()
544 if (IS_ERR(mr)) in rvt_alloc_mr()
545 return ERR_CAST(mr); in rvt_alloc_mr()
547 return &mr->ibmr; in rvt_alloc_mr()
559 struct rvt_mr *mr = to_imr(ibmr); in rvt_set_page() local
560 u32 ps = 1 << mr->mr.page_shift; in rvt_set_page()
561 u32 mapped_segs = mr->mr.length >> mr->mr.page_shift; in rvt_set_page()
564 if (unlikely(mapped_segs == mr->mr.max_segs)) in rvt_set_page()
569 mr->mr.map[m]->segs[n].vaddr = (void *)addr; in rvt_set_page()
570 mr->mr.map[m]->segs[n].length = ps; in rvt_set_page()
571 mr->mr.length += ps; in rvt_set_page()
572 trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps); in rvt_set_page()
584 * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
591 struct rvt_mr *mr = to_imr(ibmr); in rvt_map_mr_sg() local
594 mr->mr.length = 0; in rvt_map_mr_sg()
595 mr->mr.page_shift = PAGE_SHIFT; in rvt_map_mr_sg()
597 mr->mr.user_base = ibmr->iova; in rvt_map_mr_sg()
598 mr->mr.iova = ibmr->iova; in rvt_map_mr_sg()
599 mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; in rvt_map_mr_sg()
600 mr->mr.length = (size_t)ibmr->length; in rvt_map_mr_sg()
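rvt_map_mr_sg() is the driver's map_mr_sg hook: a ULP calls ib_map_mr_sg(), the core walks the scatterlist into fixed-size pages via ib_sg_to_pages(), and each page lands in rvt_set_page() above. A hedged consumer-side sketch (demo function and names, not from the driver):

    #include <linux/scatterlist.h>
    #include <rdma/ib_verbs.h>

    static int demo_map(struct ib_mr *ibmr, struct scatterlist *sgl, int nents)
    {
        /* core calls back into the driver's map_mr_sg/set_page hooks */
        int n = ib_map_mr_sg(ibmr, sgl, nents, NULL, PAGE_SIZE);

        return n == nents ? 0 : -EINVAL;    /* treat a partial map as error */
    }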
606 * rvt_fast_reg_mr - fast register physical MR
617 struct rvt_mr *mr = to_imr(ibmr); in rvt_fast_reg_mr() local
619 if (qp->ibqp.pd != mr->mr.pd) in rvt_fast_reg_mr()
622 /* not applicable to dma MR or user MR */ in rvt_fast_reg_mr()
623 if (!mr->mr.lkey || mr->umem) in rvt_fast_reg_mr()
626 if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00)) in rvt_fast_reg_mr()
631 mr->mr.lkey = key; in rvt_fast_reg_mr()
632 mr->mr.access_flags = access; in rvt_fast_reg_mr()
633 mr->mr.iova = ibmr->iova; in rvt_fast_reg_mr()
634 atomic_set(&mr->mr.lkey_invalid, 0); in rvt_fast_reg_mr()
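The 0xFFFFFF00 comparison above enforces the rule that a fast-register work request may change only the low 8 "key" bits of an existing lkey/rkey; the core helper ib_update_fast_reg_key() composes new keys the same way. A standalone demo of the check (values illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned lkey = 0x002a0700;         /* allocated key, illustrative */
        unsigned newbyte = 0x5c;            /* consumer-chosen key byte */
        unsigned key = (lkey & 0xFFFFFF00u) | newbyte;

        /* same test as rvt_fast_reg_mr(): bits 31:8 must be unchanged */
        printf("valid=%d new key=0x%08x\n",
               (key & 0xFFFFFF00u) == (lkey & 0xFFFFFF00u), key);
        return 0;
    }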
641 * rvt_invalidate_rkey - invalidate an MR rkey
651 struct rvt_mregion *mr; in rvt_invalidate_rkey() local
657 mr = rcu_dereference( in rvt_invalidate_rkey()
659 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) in rvt_invalidate_rkey()
662 atomic_set(&mr->lkey_invalid, 1); in rvt_invalidate_rkey()
684 if (last_sge && sge->lkey == last_sge->mr->lkey && in rvt_sge_adjacent()
687 if (unlikely((sge->addr - last_sge->mr->user_base + in rvt_sge_adjacent()
688 sge->length > last_sge->mr->length))) in rvt_sge_adjacent()
720 struct rvt_mregion *mr; in rvt_lkey_ok() local
736 mr = rcu_dereference(dev->dma_mr); in rvt_lkey_ok()
737 if (!mr) in rvt_lkey_ok()
739 rvt_get_mr(mr); in rvt_lkey_ok()
742 isge->mr = mr; in rvt_lkey_ok()
753 mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]); in rvt_lkey_ok()
754 if (!mr) in rvt_lkey_ok()
756 rvt_get_mr(mr); in rvt_lkey_ok()
757 if (!READ_ONCE(mr->lkey_published)) in rvt_lkey_ok()
760 if (unlikely(atomic_read(&mr->lkey_invalid) || in rvt_lkey_ok()
761 mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) in rvt_lkey_ok()
764 off = sge->addr - mr->user_base; in rvt_lkey_ok()
765 if (unlikely(sge->addr < mr->user_base || in rvt_lkey_ok()
766 off + sge->length > mr->length || in rvt_lkey_ok()
767 (mr->access_flags & acc) != acc)) in rvt_lkey_ok()
771 off += mr->offset; in rvt_lkey_ok()
772 if (mr->page_shift) { in rvt_lkey_ok()
780 entries_spanned_by_off = off >> mr->page_shift; in rvt_lkey_ok()
781 off -= (entries_spanned_by_off << mr->page_shift); in rvt_lkey_ok()
787 while (off >= mr->map[m]->segs[n].length) { in rvt_lkey_ok()
788 off -= mr->map[m]->segs[n].length; in rvt_lkey_ok()
796 isge->mr = mr; in rvt_lkey_ok()
797 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in rvt_lkey_ok()
798 isge->length = mr->map[m]->segs[n].length - off; in rvt_lkey_ok()
806 rvt_put_mr(mr); in rvt_lkey_ok()
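The entries_spanned_by_off lines above are the O(1) shortcut for fixed page sizes: the byte offset converts into a whole-segment count plus residual, and dividing that count by the per-chunk capacity (those lines contain no "mr" and are not matched here) yields the (m, n) coordinates; the while loop is the fallback for variable-length segments. A standalone worked example (RVT_SEGSZ illustrative):

    #include <stdio.h>
    #include <stddef.h>

    #define RVT_SEGSZ 8                     /* illustrative */

    int main(void)
    {
        unsigned page_shift = 12;           /* 4 KiB pages */
        size_t off = 20480 + 100;           /* 5 pages + 100 bytes in */

        size_t entries = off >> page_shift;         /* 5 */
        off -= entries << page_shift;               /* 100 */
        size_t m = entries / RVT_SEGSZ;             /* 0 */
        size_t n = entries % RVT_SEGSZ;             /* 5 */

        printf("map[%zu].segs[%zu] + %zu bytes\n", m, n, off);
        return 0;
    }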
831 struct rvt_mregion *mr; in rvt_rkey_ok() local
846 mr = rcu_dereference(rdi->dma_mr); in rvt_rkey_ok()
847 if (!mr) in rvt_rkey_ok()
849 rvt_get_mr(mr); in rvt_rkey_ok()
852 sge->mr = mr; in rvt_rkey_ok()
861 mr = rcu_dereference(rkt->table[rkey >> rkt->shift]); in rvt_rkey_ok()
862 if (!mr) in rvt_rkey_ok()
864 rvt_get_mr(mr); in rvt_rkey_ok()
865 /* ensure mr read is before test */ in rvt_rkey_ok()
866 if (!READ_ONCE(mr->lkey_published)) in rvt_rkey_ok()
868 if (unlikely(atomic_read(&mr->lkey_invalid) || in rvt_rkey_ok()
869 mr->lkey != rkey || qp->ibqp.pd != mr->pd)) in rvt_rkey_ok()
872 off = vaddr - mr->iova; in rvt_rkey_ok()
873 if (unlikely(vaddr < mr->iova || off + len > mr->length || in rvt_rkey_ok()
874 (mr->access_flags & acc) == 0)) in rvt_rkey_ok()
878 off += mr->offset; in rvt_rkey_ok()
879 if (mr->page_shift) { in rvt_rkey_ok()
887 entries_spanned_by_off = off >> mr->page_shift; in rvt_rkey_ok()
888 off -= (entries_spanned_by_off << mr->page_shift); in rvt_rkey_ok()
894 while (off >= mr->map[m]->segs[n].length) { in rvt_rkey_ok()
895 off -= mr->map[m]->segs[n].length; in rvt_rkey_ok()
903 sge->mr = mr; in rvt_rkey_ok()
904 sge->vaddr = mr->map[m]->segs[n].vaddr + off; in rvt_rkey_ok()
905 sge->length = mr->map[m]->segs[n].length - off; in rvt_rkey_ok()
912 rvt_put_mr(mr); in rvt_rkey_ok()
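Stripped of the RCU and refcount plumbing, the validity test in both rvt_lkey_ok() and rvt_rkey_ok() is pure arithmetic: the request must fall inside [base, base + length], and the MR must grant the access bits. Note the asymmetry visible above: the lkey path requires all requested bits ((access_flags & acc) != acc fails), while the rkey path rejects only when none are granted ((access_flags & acc) == 0). A standalone demo of both forms:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t iova = 0x10000, length = 0x4000;   /* MR geometry */
        uint64_t vaddr = 0x12000, len = 0x1000;     /* request */
        unsigned granted = 0x3, acc = 0x1;          /* access flag bits */

        uint64_t off = vaddr - iova;
        int in_range = vaddr >= iova && off + len <= length;

        printf("in_range=%d lkey_style=%d rkey_style=%d\n",
               in_range,
               (granted & acc) == acc,  /* all requested bits required */
               (granted & acc) != 0);   /* any granted bit suffices */
        return 0;
    }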