/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
22 #include "gf100.h"
23 #include "ctxgf100.h"
24
25 #include <core/firmware.h>
26 #include <subdev/gsp.h>
27 #include <subdev/acr.h>
28 #include <subdev/timer.h>
29 #include <subdev/vfn.h>
30
31 #include <nvfw/flcn.h>
32
33 #include <nvif/class.h>
34
35 static void
ga102_gr_zbc_clear_color(struct gf100_gr * gr,int zbc)36 ga102_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
37 {
38 struct nvkm_device *device = gr->base.engine.subdev.device;
39 u32 invalid[] = { 0, 0, 0, 0 }, *color;
40
41 if (gr->zbc_color[zbc].format)
42 color = gr->zbc_color[zbc].l2;
43 else
44 color = invalid;
45
46 nvkm_mask(device, 0x41bcb4, 0x0000001f, zbc);
47 nvkm_wr32(device, 0x41bcec, color[0]);
48 nvkm_wr32(device, 0x41bcf0, color[1]);
49 nvkm_wr32(device, 0x41bcf4, color[2]);
50 nvkm_wr32(device, 0x41bcf8, color[3]);
51 }
52
/* ZBC (zero-bandwidth clear) hooks for GA102: colour clears use the local
 * 0x41bcxx programming above; depth/stencil handling is unchanged from
 * GP100/GP102. */
static const struct gf100_gr_func_zbc
ga102_gr_zbc = {
	.clear_color = ga102_gr_zbc_clear_color,
	.clear_depth = gp100_gr_zbc_clear_depth,
	.stencil_get = gp102_gr_zbc_stencil_get,
	.clear_stencil = gp102_gr_zbc_clear_stencil,
};
60
/* Reset the GPCCS falcon by pulsing its reset register: write 0, hold for
 * 1ms, then write 1.  NOTE(review): the polarity/bit meaning of 0x41a610 is
 * inferred from the function name and write order only -- confirm against
 * hardware documentation before changing. */
static void
ga102_gr_gpccs_reset(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x41a610, 0x00000000);
	nvkm_msec(device, 1, NVKM_DELAY);
	nvkm_wr32(device, 0x41a610, 0x00000001);
}
70
/* ACR light-secure falcon description for the GPCCS ucode.  The bootloader
 * descriptor format and patch/write helpers are shared with GP108
 * (flcn_bl_dmem_desc_v2); only the entry offset (0x3400) differs per falcon.
 * NOTE(review): FORCE_PRIV_LOAD semantics are defined by the ACR subdev, not
 * visible here. */
static const struct nvkm_acr_lsf_func
ga102_gr_gpccs_acr = {
	.flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
	.bl_entry = 0x3400,
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp108_gr_acr_bld_write,
	.bld_patch = gp108_gr_acr_bld_patch,
};
79
/* Staged reset of the FECS (0x409614) and GPCCS (0x41a614) falcons: two
 * write stages with a 10us settle delay after each, then read-backs to
 * flush the posted writes.  The exact values and ordering are part of the
 * hardware bring-up sequence -- do not reorder. */
static void
ga102_gr_fecs_reset(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x409614, 0x00000010);
	nvkm_wr32(device, 0x41a614, 0x00000020);
	nvkm_usec(device, 10, NVKM_DELAY);
	nvkm_wr32(device, 0x409614, 0x00000110);
	nvkm_wr32(device, 0x41a614, 0x00000a20);
	nvkm_usec(device, 10, NVKM_DELAY);
	nvkm_rd32(device, 0x409614);
	nvkm_rd32(device, 0x41a614);
}
94
/* ACR light-secure falcon description for the FECS ucode: GP108-format
 * bootloader descriptor (flcn_bl_dmem_desc_v2) with entry offset 0x7e00. */
static const struct nvkm_acr_lsf_func
ga102_gr_fecs_acr = {
	.bl_entry = 0x7e00,
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp108_gr_acr_bld_write,
	.bld_patch = gp108_gr_acr_bld_patch,
};
102
/* Enable ROP exception reporting.  Writes 0x40000000 to two 0x41bcxx
 * registers and mirrors the value read from 0x502c94 into 0x41ac94.
 * NOTE(review): individual bit meanings are not visible from this file. */
static void
ga102_gr_init_rop_exceptions(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x41bcbc, 0x40000000);
	nvkm_wr32(device, 0x41bc38, 0x40000000);
	nvkm_wr32(device, 0x41ac94, nvkm_rd32(device, 0x502c94));
}
112
113 static void
ga102_gr_init_40a790(struct gf100_gr * gr)114 ga102_gr_init_40a790(struct gf100_gr *gr)
115 {
116 nvkm_wr32(gr->base.engine.subdev.device, 0x40a790, 0xc0000000);
117 }
118
/* Program the GPC MMU (0x4188xx) from values read back from the 0x100cxx
 * registers (presumably the FB/MMU configuration -- TODO confirm), masking
 * 0x100c80 to bits 0-12 and 27-31 before mirroring it, and clearing
 * 0x418894. */
static void
ga102_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff);
	nvkm_wr32(device, 0x418894, 0x00000000);

	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
}
131
132 static struct nvkm_intr *
ga102_gr_oneinit_intr(struct gf100_gr * gr,enum nvkm_intr_type * pvector)133 ga102_gr_oneinit_intr(struct gf100_gr *gr, enum nvkm_intr_type *pvector)
134 {
135 struct nvkm_device *device = gr->base.engine.subdev.device;
136
137 *pvector = nvkm_rd32(device, 0x400154) & 0x00000fff;
138 return &device->vfn->intr;
139 }
140
141 static int
ga102_gr_nonstall(struct gf100_gr * gr)142 ga102_gr_nonstall(struct gf100_gr *gr)
143 {
144 return nvkm_rd32(gr->base.engine.subdev.device, 0x400160) & 0x00000fff;
145 }
146
/* GR engine description for GA102-family chips.  Most hooks are inherited
 * unchanged from earlier generations (gk104/gm200/gp10x/gv100/tu102); only
 * the GA102-specific ones are defined in this file. */
static const struct gf100_gr_func
ga102_gr = {
	.nonstall = ga102_gr_nonstall,
	.oneinit_intr = ga102_gr_oneinit_intr,
	.oneinit_tiles = gm200_gr_oneinit_tiles,
	.oneinit_sm_id = gv100_gr_oneinit_sm_id,
	.init = gf100_gr_init,
	.init_419bd8 = gv100_gr_init_419bd8,
	.init_gpc_mmu = ga102_gr_init_gpc_mmu,
	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
	.init_zcull = tu102_gr_init_zcull,
	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
	.init_fs = tu102_gr_init_fs,
	.init_fecs_exceptions = tu102_gr_init_fecs_exceptions,
	.init_40a790 = ga102_gr_init_40a790,
	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.init_504430 = gv100_gr_init_504430,
	.init_shader_exceptions = gv100_gr_init_shader_exceptions,
	.init_rop_exceptions = ga102_gr_init_rop_exceptions,
	.init_4188a4 = gv100_gr_init_4188a4,
	.trap_mp = gv100_gr_trap_mp,
	.fecs.reset = ga102_gr_fecs_reset,
	.gpccs.reset = ga102_gr_gpccs_reset,
	.rops = gm200_gr_rops,
	/* Unit counts for this family (GPC/TPC-per-GPC/PPC-per-GPC). */
	.gpc_nr = 7,
	.tpc_nr = 6,
	.ppc_nr = 3,
	.grctx = &ga102_grctx,
	.zbc = &ga102_gr_zbc,
	/* Object classes exposed to userspace for this engine. */
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, AMPERE_B, &gf100_fermi },
		{ -1, -1, AMPERE_COMPUTE_B },
		{}
	}
};
187
/* External firmware files for each GA10x variant: ACR bootloader and
 * signature blobs for the FECS/GPCCS falcons, plus the combined NET_img
 * netlist image parsed by ga102_gr_load(). */
MODULE_FIRMWARE("nvidia/ga102/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga103/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga104/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga106/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga107/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/NET_img.bin");
217
/* One region descriptor inside a NET_img netlist image. */
struct netlist_region {
	u32 region_id;		/* payload type; dispatched on in ga102_gr_load() */
	u32 data_size;		/* payload size in bytes */
	u32 data_offset;	/* payload offset from the start of the image */
};
223
/* Fixed header at the start of a NET_img image. */
struct netlist_image_header {
	u32 version;	/* image format version (logged only) */
	u32 regions;	/* number of netlist_region entries that follow */
};
228
/* Overall NET_img layout: header, then a flexible array of region
 * descriptors; region payloads live elsewhere in the image, located via
 * netlist_region.data_offset. */
struct netlist_image {
	struct netlist_image_header header;
	struct netlist_region regions[];
};
233
/* Address/value record with a 64-bit value split into high/low words;
 * recombined as (data_hi << 32) | data_lo by ga102_gr_av64_to_init(). */
struct netlist_av64 {
	u32 addr;
	u32 data_hi;
	u32 data_lo;
};
239
/* Convert a blob of netlist_av64 records into a gf100_gr_pack list.
 *
 * Everything lives in one vzalloc'd buffer (presumably released by the
 * caller with vfree -- TODO confirm):
 *   pack[0]          - live pack, pointing at init[]
 *   pack[1]          - zero-filled terminator pack
 *   init[0..nent-1]  - one gf100_gr_init entry per av64 record
 *   init[nent]       - zero-filled terminator entry
 *
 * Returns 0 on success with *ppack set, or -ENOMEM on allocation failure.
 */
static int
ga102_gr_av64_to_init(struct nvkm_blob *blob, struct gf100_gr_pack **ppack)
{
	struct gf100_gr_init *init;
	struct gf100_gr_pack *pack;
	int nent;
	int i;

	/* Any trailing partial record in the blob is silently ignored. */
	nent = (blob->size / sizeof(struct netlist_av64));
	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
	if (!pack)
		return -ENOMEM;

	init = (void *)(pack + 2);
	pack[0].init = init;
	pack[0].type = 64;	/* entries carry 64-bit data */

	for (i = 0; i < nent; i++) {
		struct gf100_gr_init *ent = &init[i];
		struct netlist_av64 *av = &((struct netlist_av64 *)blob->data)[i];

		ent->addr = av->addr;
		ent->data = ((u64)av->data_hi << 32) | av->data_lo;
		ent->count = 1;
		ent->pitch = 1;
	}

	*ppack = pack;
	return 0;
}
270
271 static int
ga102_gr_load(struct gf100_gr * gr,int ver,const struct gf100_gr_fwif * fwif)272 ga102_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
273 {
274 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
275 const struct firmware *fw;
276 const struct netlist_image *net;
277 const struct netlist_region *fecs_inst = NULL;
278 const struct netlist_region *fecs_data = NULL;
279 const struct netlist_region *gpccs_inst = NULL;
280 const struct netlist_region *gpccs_data = NULL;
281 int ret, i;
282
283 ret = nvkm_firmware_get(subdev, "gr/NET_img", 0, &fw);
284 if (ret)
285 return ret;
286
287 net = (const void *)fw->data;
288 nvkm_debug(subdev, "netlist version %d, %d regions\n",
289 net->header.version, net->header.regions);
290
291 for (i = 0; i < net->header.regions; i++) {
292 const struct netlist_region *reg = &net->regions[i];
293 struct nvkm_blob blob = {
294 .data = (void *)fw->data + reg->data_offset,
295 .size = reg->data_size,
296 };
297
298 nvkm_debug(subdev, "\t%2d: %08x %08x\n",
299 reg->region_id, reg->data_offset, reg->data_size);
300
301 switch (reg->region_id) {
302 case 0: fecs_data = reg; break;
303 case 1: fecs_inst = reg; break;
304 case 2: gpccs_data = reg; break;
305 case 3: gpccs_inst = reg; break;
306 case 4: gk20a_gr_av_to_init(&blob, &gr->bundle); break;
307 case 5: gk20a_gr_aiv_to_init(&blob, &gr->sw_ctx); break;
308 case 7: gk20a_gr_av_to_method(&blob, &gr->method); break;
309 case 28: tu102_gr_av_to_init_veid(&blob, &gr->bundle_veid); break;
310 case 34: ga102_gr_av64_to_init(&blob, &gr->bundle64); break;
311 case 48: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx1); break;
312 case 49: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx2); break;
313 case 50: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx3); break;
314 case 51: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx4); break;
315 default:
316 break;
317 }
318 }
319
320 ret = nvkm_acr_lsfw_load_bl_sig_net(subdev, &gr->fecs.falcon, NVKM_ACR_LSF_FECS,
321 "gr/fecs_", ver, fwif->fecs,
322 fw->data + fecs_inst->data_offset,
323 fecs_inst->data_size,
324 fw->data + fecs_data->data_offset,
325 fecs_data->data_size);
326 if (ret)
327 return ret;
328
329 ret = nvkm_acr_lsfw_load_bl_sig_net(subdev, &gr->gpccs.falcon, NVKM_ACR_LSF_GPCCS,
330 "gr/gpccs_", ver, fwif->gpccs,
331 fw->data + gpccs_inst->data_offset,
332 gpccs_inst->data_size,
333 fw->data + gpccs_data->data_offset,
334 gpccs_data->data_size);
335 if (ret)
336 return ret;
337
338 gr->firmware = true;
339
340 nvkm_firmware_put(fw);
341 return 0;
342 }
343
/* Firmware interface table: version 0 loads the NET_img-based firmware with
 * the ACR descriptions above; the -1 entry (gm200_gr_nofw) is presumably the
 * no-firmware fallback -- see gf100_gr_new_(). */
static const struct gf100_gr_fwif
ga102_gr_fwif[] = {
	{ 0, ga102_gr_load, &ga102_gr, &ga102_gr_fecs_acr, &ga102_gr_gpccs_acr },
	{ -1, gm200_gr_nofw },
	{}
};
350
351 int
ga102_gr_new(struct nvkm_device * device,enum nvkm_subdev_type type,int inst,struct nvkm_gr ** pgr)352 ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
353 {
354 if (nvkm_gsp_rm(device->gsp))
355 return r535_gr_new(&ga102_gr, device, type, inst, pgr);
356
357 return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
358 }
359