Lines matching full:mr in drivers/infiniband/hw/mlx4/mr.c
60 struct mlx4_ib_mr *mr; in mlx4_ib_get_dma_mr() local
63 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_get_dma_mr()
64 if (!mr) in mlx4_ib_get_dma_mr()
68 ~0ull, convert_access(acc), 0, 0, &mr->mmr); in mlx4_ib_get_dma_mr()
72 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
76 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_get_dma_mr()
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
79 return &mr->ibmr; in mlx4_ib_get_dma_mr()
82 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
85 kfree(mr); in mlx4_ib_get_dma_mr()
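
Read together, the matches above outline the DMA MR path: allocate the wrapper, ask the core mlx4 driver for an MR covering the whole address space (iova 0, size ~0ull), enable it, and expose the hardware key as both lkey and rkey. A hedged reconstruction of how those fragments plausibly fit together (the error-label names and exact unwind order are assumptions, not shown by the matches):

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Register the full 64-bit address space so the MR can serve
	 * any DMA address the HCA produces. */
	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	/* A DMA MR uses one hardware key for both local and remote
	 * access and pins no user memory. */
	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}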
119 * Required to handle cases when the MR starts in the middle of an MTT in mlx4_ib_umem_write_mtt_block()
287 * MR. in mlx4_ib_umem_calc_optimal_mtt_size()
389 * entirely cover the MR to support RO mappings. in mlx4_get_umem_mr()
411 struct mlx4_ib_mr *mr; in mlx4_ib_reg_user_mr() local
416 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_reg_user_mr()
417 if (!mr) in mlx4_ib_reg_user_mr()
420 mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags); in mlx4_ib_reg_user_mr()
421 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
422 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
426 shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); in mlx4_ib_reg_user_mr()
429 convert_access(access_flags), n, shift, &mr->mmr); in mlx4_ib_reg_user_mr()
433 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); in mlx4_ib_reg_user_mr()
437 err = mlx4_mr_enable(dev->dev, &mr->mmr); in mlx4_ib_reg_user_mr()
441 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_reg_user_mr()
442 mr->ibmr.page_size = 1U << shift; in mlx4_ib_reg_user_mr()
444 return &mr->ibmr; in mlx4_ib_reg_user_mr()
447 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_reg_user_mr()
450 ib_umem_release(mr->umem); in mlx4_ib_reg_user_mr()
453 kfree(mr); in mlx4_ib_reg_user_mr()
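
mlx4_ib_reg_user_mr() is the driver hook behind ordinary user-space registration: the umem is pinned, an optimal MTT page shift is chosen, the page list is written, and the MR is enabled. A minimal user-space sketch of the call that reaches this path through uverbs, assuming libibverbs and at least one RDMA device (device selection and error handling are kept deliberately thin):

#include <stdio.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **devs;
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_mr *mr;
	size_t len = 1 << 20;
	void *buf;

	devs = ibv_get_device_list(NULL);
	if (!devs || !devs[0])
		return 1;

	ctx = ibv_open_device(devs[0]);
	if (!ctx)
		return 1;

	pd = ibv_alloc_pd(ctx);
	buf = malloc(len);
	if (!pd || !buf)
		return 1;

	/* Pins the buffer and builds the HCA translation (the driver's
	 * reg_user_mr hook); lkey/rkey come back in the ibv_mr. */
	mr = ibv_reg_mr(pd, buf, len,
			IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
	if (!mr) {
		perror("ibv_reg_mr");
		return 1;
	}
	printf("registered %zu bytes: lkey=0x%x rkey=0x%x\n",
	       len, mr->lkey, mr->rkey);

	ibv_dereg_mr(mr);	/* driver's dereg_mr hook */
	free(buf);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	return 0;
}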
458 struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, in mlx4_ib_rereg_user_mr() argument
463 struct mlx4_ib_dev *dev = to_mdev(mr->device); in mlx4_ib_rereg_user_mr()
464 struct mlx4_ib_mr *mmr = to_mmr(mr); in mlx4_ib_rereg_user_mr()
505 mmr->umem = mlx4_get_umem_mr(mr->device, start, length, in mlx4_ib_rereg_user_mr()
534 /* If we couldn't transfer the MR to the HCA, just remember to in mlx4_ib_rereg_user_mr()
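
The rereg path reuses the same umem pinning helper, and the comment at line 534 notes that an MR which could not be handed back to the HCA is only remembered for later freeing. A hedged user-space sketch of the call that triggers this hook; mr, pd, new_buf and new_len are assumed to already exist:

#include <stdio.h>
#include <infiniband/verbs.h>

/* Move an existing registration onto a new buffer; the uverbs path
 * lands in the driver's rereg_user_mr hook (mlx4_ib_rereg_user_mr()
 * for mlx4 devices). */
static int move_mr(struct ibv_mr *mr, struct ibv_pd *pd,
		   void *new_buf, size_t new_len)
{
	int ret = ibv_rereg_mr(mr,
			       IBV_REREG_MR_CHANGE_TRANSLATION |
			       IBV_REREG_MR_CHANGE_ACCESS,
			       pd, new_buf, new_len,
			       IBV_ACCESS_LOCAL_WRITE |
			       IBV_ACCESS_REMOTE_WRITE);
	if (ret)
		fprintf(stderr, "ibv_rereg_mr failed: %d\n", ret);
	return ret;
}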
550 struct mlx4_ib_mr *mr, in mlx4_alloc_priv_pages() argument
560 mr->page_map_size = roundup(max_pages * sizeof(u64), in mlx4_alloc_priv_pages()
564 mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL); in mlx4_alloc_priv_pages()
565 if (!mr->pages) in mlx4_alloc_priv_pages()
568 mr->page_map = dma_map_single(device->dev.parent, mr->pages, in mlx4_alloc_priv_pages()
569 mr->page_map_size, DMA_TO_DEVICE); in mlx4_alloc_priv_pages()
571 if (dma_mapping_error(device->dev.parent, mr->page_map)) { in mlx4_alloc_priv_pages()
579 free_page((unsigned long)mr->pages); in mlx4_alloc_priv_pages()
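
The private page list backs fast-registration MRs: one zeroed kernel page holds the __be64 page entries, its useful size is rounded up for DMA alignment, and the page is mapped once with dma_map_single() so ib_map_mr_sg() later needs only cache syncs. A hedged reconstruction from the matches (the MLX4_MR_PAGES_ALIGN rounding constant and the comments are assumptions):

static int
mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
{
	int ret;

	/* Round the page-list size up to the device's DMA alignment;
	 * max_pages is small enough that the list fits in one page. */
	mr->page_map_size = roundup(max_pages * sizeof(u64),
				    MLX4_MR_PAGES_ALIGN);

	/* A full zeroed page guarantees the list never crosses a page
	 * boundary. */
	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
	if (!mr->pages)
		return -ENOMEM;

	mr->page_map = dma_map_single(device->dev.parent, mr->pages,
				      mr->page_map_size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->page_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	free_page((unsigned long)mr->pages);
	return ret;
}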
584 mlx4_free_priv_pages(struct mlx4_ib_mr *mr) in mlx4_free_priv_pages() argument
586 if (mr->pages) { in mlx4_free_priv_pages()
587 struct ib_device *device = mr->ibmr.device; in mlx4_free_priv_pages()
589 dma_unmap_single(device->dev.parent, mr->page_map, in mlx4_free_priv_pages()
590 mr->page_map_size, DMA_TO_DEVICE); in mlx4_free_priv_pages()
591 free_page((unsigned long)mr->pages); in mlx4_free_priv_pages()
592 mr->pages = NULL; in mlx4_free_priv_pages()
598 struct mlx4_ib_mr *mr = to_mmr(ibmr); in mlx4_ib_dereg_mr() local
601 mlx4_free_priv_pages(mr); in mlx4_ib_dereg_mr()
603 ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); in mlx4_ib_dereg_mr()
606 if (mr->umem) in mlx4_ib_dereg_mr()
607 ib_umem_release(mr->umem); in mlx4_ib_dereg_mr()
608 kfree(mr); in mlx4_ib_dereg_mr()
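
Deregistration mirrors the setup in reverse: drop the private page list, free the hardware MR, release the pinned umem if this was a user MR, then free the wrapper. A hedged reconstruction of the flow shown by the matches (the exact prototype varies across kernel versions, e.g. whether an ib_udata argument is present):

int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	mlx4_free_priv_pages(mr);

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}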
648 struct mlx4_ib_mr *mr; in mlx4_ib_alloc_mr() local
655 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_alloc_mr()
656 if (!mr) in mlx4_ib_alloc_mr()
660 max_num_sg, 0, &mr->mmr); in mlx4_ib_alloc_mr()
664 err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg); in mlx4_ib_alloc_mr()
668 mr->max_pages = max_num_sg; in mlx4_ib_alloc_mr()
669 err = mlx4_mr_enable(dev->dev, &mr->mmr); in mlx4_ib_alloc_mr()
673 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_alloc_mr()
674 mr->umem = NULL; in mlx4_ib_alloc_mr()
676 return &mr->ibmr; in mlx4_ib_alloc_mr()
679 mr->ibmr.device = pd->device; in mlx4_ib_alloc_mr()
680 mlx4_free_priv_pages(mr); in mlx4_ib_alloc_mr()
682 (void) mlx4_mr_free(dev->dev, &mr->mmr); in mlx4_ib_alloc_mr()
684 kfree(mr); in mlx4_ib_alloc_mr()
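
Taken together, the matches for mlx4_ib_alloc_mr() show the fast-registration setup: a hardware MR sized for max_num_sg pages, the private page list from mlx4_alloc_priv_pages(), then mlx4_mr_enable(); on the enable-failure path mr->ibmr.device is set first so mlx4_free_priv_pages() can reach the device for dma_unmap_single(). A hedged reconstruction (the mr_type/max_num_sg validation and the label names are assumptions):

struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	/* Only plain fast-registration MRs within the device limit. */
	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_num_sg, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
	if (err)
		goto err_free_mr;

	mr->max_pages = max_num_sg;
	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_free_pl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free_pl:
	/* free_priv_pages() reads the device from ibmr, so set it first. */
	mr->ibmr.device = pd->device;
	mlx4_free_priv_pages(mr);
err_free_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}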
690 struct mlx4_ib_mr *mr = to_mmr(ibmr); in mlx4_set_page() local
692 if (unlikely(mr->npages == mr->max_pages)) in mlx4_set_page()
695 mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT); in mlx4_set_page()
703 struct mlx4_ib_mr *mr = to_mmr(ibmr); in mlx4_ib_map_mr_sg() local
706 mr->npages = 0; in mlx4_ib_map_mr_sg()
708 ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, in mlx4_ib_map_mr_sg()
709 mr->page_map_size, DMA_TO_DEVICE); in mlx4_ib_map_mr_sg()
713 ib_dma_sync_single_for_device(ibmr->device, mr->page_map, in mlx4_ib_map_mr_sg()
714 mr->page_map_size, DMA_TO_DEVICE); in mlx4_ib_map_mr_sg()
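
mlx4_ib_map_mr_sg() wraps the generic scatterlist walk with the two DMA syncs above, so the CPU can safely fill the page list that mlx4_alloc_priv_pages() handed to the device; each page visited ends up in mlx4_set_page(), which stores the address with MLX4_MTT_FLAG_PRESENT. A hedged kernel-side sketch of the consumer call that drives this path, assuming the MR came from ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, ...) and sg/nents describe an already DMA-mapped buffer:

#include <rdma/ib_verbs.h>
#include <linux/scatterlist.h>

/* Build the page translation for a previously allocated fast-reg MR.
 * ib_map_mr_sg() lands in the driver's map_mr_sg hook, which syncs the
 * page-list buffer for the CPU, calls the set_page callback once per
 * page, and syncs it back for the device. */
static int example_map_fast_reg_mr(struct ib_mr *mr,
				   struct scatterlist *sg, int nents)
{
	int mapped;

	mapped = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
	if (mapped < nents)
		return mapped < 0 ? mapped : -EINVAL;

	/* The translation only takes effect on the HCA after an
	 * IB_WR_REG_MR work request is posted for this MR. */
	return 0;
}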