/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>

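/* Promote an NVKM-managed VMM for use by GSP-RM: create an RM client/device
 * pair for it, allocate a FERMI_VASPACE_A object, reserve a chunk of its
 * address space for RM, and share the page-directory hierarchy backing that
 * range with RM.
 */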
static int
r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
{
	NV_VASPACE_ALLOCATION_PARAMETERS *args;
	int ret;

	ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp,
					  &vmm->rm.client, &vmm->rm.device);
	if (ret)
		return ret;

	args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
				     sizeof(*args), &vmm->rm.object);
	if (IS_ERR(args))
		return PTR_ERR(args);

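	/* NV_VASPACE_ALLOCATION_INDEX_GPU_NEW requests a fresh GPU VA space
	 * rather than reusing an existing one (reading based on the name).
	 */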
	args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;

	ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
	if (ret)
		return ret;

	{
		NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;

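		/* Reserve 512MiB of address space for GSP-RM's internal
		 * ("server reserved") mappings before describing it to RM.
		 */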
		mutex_lock(&vmm->mutex.vmm);
		ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
					  &vmm->rm.rsvd);
		mutex_unlock(&vmm->mutex.vmm);
		if (ret)
			return ret;

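		/* NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES tells RM
		 * where NVKM's page-directory levels live so RM can mirror
		 * them for its reserved range.
		 */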
		ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
					    NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
					    sizeof(*ctrl));
		if (IS_ERR(ctrl))
			return PTR_ERR(ctrl);

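		/* Each level entry gives the physical address and size of one
		 * page-directory level, its aperture, and the VA bits spanned
		 * by a single entry at that level (0x2f/0x26/0x1d = 47/38/29).
		 * A third level is described only if it already exists.
		 */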
		ctrl->pageSize = 0x20000000;
		ctrl->virtAddrLo = vmm->rm.rsvd->addr;
		ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
		ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
		ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
		ctrl->levels[0].size = 0x20;
		ctrl->levels[0].aperture = 1;
		ctrl->levels[0].pageShift = 0x2f;
		ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
		ctrl->levels[1].size = 0x1000;
		ctrl->levels[1].aperture = 1;
		ctrl->levels[1].pageShift = 0x26;
		if (vmm->pd->pde[0]->pde[0]) {
			ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
			ctrl->levels[2].size = 0x1000;
			ctrl->levels[2].aperture = 1;
			ctrl->levels[2].pageShift = 0x1d;
		}

		ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
	}

	return ret;
}

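/* The function table passed to nvkm_mmu_new_() is allocated in r535_mmu_new(),
 * so it needs freeing when the subdev is destroyed.
 */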
static void
r535_mmu_dtor(struct nvkm_mmu *mmu)
{
	kfree(mmu->func);
}

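/* Construct an MMU that routes VMM promotion through GSP-RM: clone the
 * relevant hardware function pointers and install the RM-specific hooks.
 */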
int
r535_mmu_new(const struct nvkm_mmu_func *hw,
	     struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	     struct nvkm_mmu **pmmu)
{
	struct nvkm_mmu_func *rm;
	int ret;

	if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
		return -ENOMEM;

	rm->dtor = r535_mmu_dtor;
	rm->dma_bits = hw->dma_bits;
	rm->mmu = hw->mmu;
	rm->mem = hw->mem;
	rm->vmm = hw->vmm;
	rm->kind = hw->kind;
	rm->kind_sys = hw->kind_sys;
	rm->promote_vmm = r535_mmu_promote_vmm;

	ret = nvkm_mmu_new_(rm, device, type, inst, pmmu);
	if (ret)
		kfree(rm);

	return ret;
}