// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_vm.h"

#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_mmu.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_heap_config.h"

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

#include <linux/bug.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp_types.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/stddef.h>

/**
 * DOC: Memory context
 *
 * This is the "top level" datatype in the VM code. It's exposed in the public
 * API as an opaque handle.
 */

/**
 * struct pvr_vm_context - Context type used to represent a single VM.
 */
struct pvr_vm_context {
	/**
	 * @pvr_dev: The PowerVR device to which this context is bound.
	 * This binding is immutable for the life of the context.
	 */
	struct pvr_device *pvr_dev;

	/** @mmu_ctx: The context for binding to physical memory. */
	struct pvr_mmu_context *mmu_ctx;

	/** @gpuvm_mgr: GPUVM object associated with this context. */
	struct drm_gpuvm gpuvm_mgr;

	/** @lock: Global lock on this VM. */
	struct mutex lock;

	/**
	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
	 * context.
	 */
	struct pvr_fw_object *fw_mem_ctx_obj;

	/** @ref_count: Reference count of object. */
	struct kref ref_count;

	/**
	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
	 * should use the @dummy_gem.resv and not their own _resv field.
	 */
	struct drm_gem_object dummy_gem;
};

static inline
struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
}

struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		kref_get(&vm_ctx->ref_count);

	return vm_ctx;
}

/**
 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
 * page table structure behind a VM context.
 * @vm_ctx: Target VM context.
 */
dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
{
	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
}

/**
 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
 * @vm_ctx: Target VM context.
 *
 * This is used to allow private BOs to share a dma_resv for faster fence
 * updates.
 *
 * Returns: The dma_resv pointer.
 */
struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->dummy_gem.resv;
}

/**
 * DOC: Memory mappings
 */

/**
 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
 */
struct pvr_vm_gpuva {
	/** @base: The wrapped drm_gpuva object. */
	struct drm_gpuva base;
};

#define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)

enum pvr_vm_bind_type {
	PVR_VM_BIND_TYPE_MAP,
	PVR_VM_BIND_TYPE_UNMAP,
};

/**
 * struct pvr_vm_bind_op - Context of a map/unmap operation.
 */
struct pvr_vm_bind_op {
	/** @type: Map or unmap. */
	enum pvr_vm_bind_type type;

	/** @pvr_obj: Object associated with mapping (map only). */
	struct pvr_gem_object *pvr_obj;

	/**
	 * @vm_ctx: VM context where the mapping will be created or destroyed.
	 */
	struct pvr_vm_context *vm_ctx;

	/** @mmu_op_ctx: MMU op context. */
	struct pvr_mmu_op_context *mmu_op_ctx;

	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
	struct drm_gpuvm_bo *gpuvm_bo;

	/**
	 * @new_va: Prealloced VA mapping object (init in callback).
	 * Used when creating a mapping.
	 */
	struct pvr_vm_gpuva *new_va;

	/**
	 * @prev_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the beginning into a new mapping.
	 */
	struct pvr_vm_gpuva *prev_va;

	/**
	 * @next_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the end into a new mapping.
	 */
	struct pvr_vm_gpuva *next_va;

	/** @offset: Offset into @pvr_obj to begin mapping from. */
	u64 offset;

	/** @device_addr: Device-virtual address at the start of the mapping. */
	u64 device_addr;

	/** @size: Size of the desired mapping. */
	u64 size;
};

/**
 * pvr_vm_bind_op_exec() - Execute a single bind op.
 * @bind_op: Bind op context.
 *
 * Returns:
 *  * 0 on success,
 *  * Any error code returned by drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap(), or
 *    a callback function.
 */
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
	switch (bind_op->type) {
	case PVR_VM_BIND_TYPE_MAP:
		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
					bind_op, bind_op->device_addr,
					bind_op->size,
					gem_from_pvr_gem(bind_op->pvr_obj),
					bind_op->offset);

	case PVR_VM_BIND_TYPE_UNMAP:
		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
					  bind_op, bind_op->device_addr,
					  bind_op->size);
	}

	/*
	 * This shouldn't happen unless something went wrong
	 * in drm_sched.
	 */
	WARN_ON(1);
	return -EINVAL;
}

static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
{
	drm_gpuvm_bo_put(bind_op->gpuvm_bo);

	kfree(bind_op->new_va);
	kfree(bind_op->prev_va);
	kfree(bind_op->next_va);

	if (bind_op->pvr_obj)
		pvr_gem_object_put(bind_op->pvr_obj);

	if (bind_op->mmu_op_ctx)
		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
}

static int
pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
			struct pvr_vm_context *vm_ctx,
			struct pvr_gem_object *pvr_obj, u64 offset,
			u64 device_addr, u64 size)
{
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
	struct sg_table *sgt;
	u64 offset_plus_size;
	int err;

	if (check_add_overflow(offset, size, &offset_plus_size))
		return -EINVAL;

	if (is_user &&
	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
		return -EINVAL;
	}

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_MAP;

	dma_resv_lock(obj->resv, NULL);
	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
	dma_resv_unlock(obj->resv);
	if (IS_ERR(bind_op->gpuvm_bo))
		return PTR_ERR(bind_op->gpuvm_bo);

	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	/* Pin pages so they're ready for use. */
	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
	err = PTR_ERR_OR_ZERO(sgt);
	if (err)
		goto err_bind_op_fini;

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->pvr_obj = pvr_obj;
	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;
	bind_op->offset = offset;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
			  struct pvr_vm_context *vm_ctx, u64 device_addr,
			  u64 size)
{
	int err;

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;

	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

/**
 * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
 * @op: gpuva op containing the map details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_map following a successful mapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_map().
 */
static int
pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
	struct pvr_vm_bind_op *ctx = op_ctx;
	int err;

	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
		return -EINVAL;

	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
			  op->map.va.addr);
	if (err)
		return err;

	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
	ctx->new_va = NULL;

	return 0;
}

/**
 * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
 * @op: gpuva op containing the unmap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;

	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
				op->unmap.va->va.range);

	if (err)
		return err;

	drm_gpuva_unmap(&op->unmap);
	drm_gpuva_unlink(op->unmap.va);
	kfree(to_pvr_vm_gpuva(op->unmap.va));

	return 0;
}

/**
 * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
 * @op: gpuva op containing the remap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
 * mapping or unmapping operation causes a region to be split. The
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;
	u64 va_start = 0, va_range = 0;
	int err;

	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
	if (err)
		return err;

	/* No actual remap required: the page table tree depth is fixed to 3,
	 * and we use 4k page table entries only for now.
	 */
	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);

	if (op->remap.prev) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
		ctx->prev_va = NULL;
	}

	if (op->remap.next) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
		ctx->next_va = NULL;
	}

	drm_gpuva_unlink(op->remap.unmap->va);
	kfree(to_pvr_vm_gpuva(op->remap.unmap->va));

	return 0;
}

/*
 * Public API
 *
 * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
 */

/**
 * pvr_device_addr_is_valid() - Tests whether a device-virtual address
 *                              is valid.
 * @device_addr: Virtual device address to test.
 *
 * Return:
 *  * %true if @device_addr is within the valid range for a device page
 *    table and is aligned to the device page size, or
 *  * %false otherwise.
 */
bool
pvr_device_addr_is_valid(u64 device_addr)
{
	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
}

/**
 * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
 * address and associated size are both valid.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address to test.
 * @size: Size of the range based at @device_addr to test.
 *
 * Calling pvr_device_addr_is_valid() twice (once on @size, and again on
 * @device_addr + @size) to verify a device-virtual address range initially
 * seems intuitive, but it produces a false-negative when the address range
 * is right at the end of device-virtual address space.
 *
 * This function catches that corner case, as well as checking that
 * @size is non-zero.
 *
 * Return:
 *  * %true if @device_addr is device page aligned; @size is device page
 *    aligned; the range specified by @device_addr and @size is within the
 *    bounds of the device-virtual address space, and @size is non-zero, or
 *  * %false otherwise.
 */
bool
pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
				   u64 device_addr, u64 size)
{
	return pvr_device_addr_is_valid(device_addr) &&
	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
}
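
/*
 * Worked example of the corner case described above (the numbers are purely
 * illustrative, not the driver's real constants): if
 * PVR_PAGE_TABLE_ADDR_SPACE_SIZE were 0x1'0000'0000 with 4 KiB device pages,
 * a range with device_addr = 0xFFFF'F000 and size = 0x1000 covers the last
 * page of the address space. Validating its end with
 * pvr_device_addr_is_valid(device_addr + size) would wrongly reject it,
 * because 0x1'0000'0000 is not itself an addressable location even though
 * every byte in the range is. The explicit bound check
 * (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE) accepts it.
 */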

static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
{
	kfree(to_pvr_vm_context(gpuvm));
}

static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
	.vm_free = pvr_gpuvm_free,
	.sm_step_map = pvr_vm_gpuva_map,
	.sm_step_remap = pvr_vm_gpuva_remap,
	.sm_step_unmap = pvr_vm_gpuva_unmap,
};

static void
fw_mem_context_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
	struct pvr_vm_context *vm_ctx = priv;

	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
}

/**
 * pvr_vm_create_context() - Create a new VM context.
 * @pvr_dev: Target PowerVR device.
 * @is_userspace_context: %true if this context is for userspace. This will
 *                        create a firmware memory context for the VM context
 *                        and disable warnings when tearing down mappings.
 *
 * Return:
 *  * A handle to the newly-minted VM context on success,
 *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
 *    missing or has an unsupported value,
 *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
 *    or
 *  * Any error encountered while setting up internal structures.
 */
struct pvr_vm_context *
pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);

	struct pvr_vm_context *vm_ctx;
	u16 device_addr_bits;

	int err;

	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
				&device_addr_bits);
	if (err) {
		drm_err(drm_dev,
			"Failed to get device virtual address space bits\n");
		return ERR_PTR(err);
	}

	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
		drm_err(drm_dev,
			"Device has unsupported virtual address space size\n");
		return ERR_PTR(-EINVAL);
	}

	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
	if (!vm_ctx)
		return ERR_PTR(-ENOMEM);

	vm_ctx->pvr_dev = pvr_dev;

	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
	if (err)
		goto err_free;

	if (is_userspace_context) {
		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);

		if (err)
			goto err_page_table_destroy;
	}

	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);

	mutex_init(&vm_ctx->lock);
	kref_init(&vm_ctx->ref_count);

	return vm_ctx;

err_page_table_destroy:
	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);

err_free:
	kfree(vm_ctx);

	return ERR_PTR(err);
}

/**
 * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
 * @vm_ctx: Target VM context.
 *
 * This function ensures that no mappings are left dangling by unmapping them
 * all in order of ascending device-virtual address.
 */
void
pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
{
	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
			     vm_ctx->gpuvm_mgr.mm_range));
}

/**
 * pvr_vm_context_release() - Teardown a VM context.
 * @ref_count: Pointer to reference counter of the VM context.
 *
 * This function also ensures that no mappings are left dangling by calling
 * pvr_vm_unmap_all().
 */
static void
pvr_vm_context_release(struct kref *ref_count)
{
	struct pvr_vm_context *vm_ctx =
		container_of(ref_count, struct pvr_vm_context, ref_count);

	if (vm_ctx->fw_mem_ctx_obj)
		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);

	pvr_vm_unmap_all(vm_ctx);

	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
	mutex_destroy(&vm_ctx->lock);

	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
}

/**
 * pvr_vm_context_lookup() - Look up VM context from handle
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Object handle.
 *
 * Takes reference on VM context object. Call pvr_vm_context_put() to release.
 *
 * Returns:
 *  * The requested object on success, or
 *  * %NULL on failure (object does not exist in list, or is not a VM context).
 */
struct pvr_vm_context *
pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_vm_context *vm_ctx;

	xa_lock(&pvr_file->vm_ctx_handles);
	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
	if (vm_ctx)
		kref_get(&vm_ctx->ref_count);

	xa_unlock(&pvr_file->vm_ctx_handles);

	return vm_ctx;
}
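
/*
 * Illustrative usage sketch (an assumption, not a call site in this file):
 * a typical ioctl handler pairs pvr_vm_context_lookup() with
 * pvr_vm_context_put() once it is done with the context.
 *
 *	struct pvr_vm_context *vm_ctx;
 *
 *	vm_ctx = pvr_vm_context_lookup(pvr_file, handle);
 *	if (!vm_ctx)
 *		return -EINVAL;
 *	... use vm_ctx ...
 *	pvr_vm_context_put(vm_ctx);
 */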

/**
 * pvr_vm_context_put() - Release a reference on a VM context
 * @vm_ctx: Target VM context.
 *
 * Returns:
 *  * %true if the VM context was destroyed, or
 *  * %false if there are any references still remaining.
 */
bool
pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);

	return true;
}

/**
 * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated
 * with the given file.
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all vm_contexts associated with @pvr_file from the file's VM context
 * handle list and drops the initial references. vm_contexts will then be
 * destroyed once all outstanding references are dropped.
 */
void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_vm_context *vm_ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
		/* vm_ctx is not used here because that would create a race with xa_erase */
		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
	}
}

static int
pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
{
	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;

	/* Unmap operations don't have an object to lock. */
	if (!pvr_obj)
		return 0;

	/* Acquire lock on the GEM being mapped. */
	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}

/**
 * pvr_vm_map() - Map a section of physical memory into a section of
 * device-virtual memory.
 * @vm_ctx: Target VM context.
 * @pvr_obj: Target PowerVR memory object.
 * @pvr_obj_offset: Offset into @pvr_obj to map from.
 * @device_addr: Virtual device address at the start of the requested mapping.
 * @size: Size of the requested mapping.
 *
 * No handle is returned to represent the mapping. Instead, callers should
 * remember @device_addr and use that as a handle.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *    address; the region specified by @pvr_obj_offset and @size does not fall
 *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
 *    is not device-virtual page-aligned,
 *  * Any error encountered while performing internal operations required to
 *    create the mapping (returned from pvr_vm_gpuva_map or
 *    pvr_vm_gpuva_remap).
 */
int
pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
	   u64 pvr_obj_offset, u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
					  pvr_obj_offset, device_addr,
					  size);

	if (err)
		return err;

	pvr_gem_object_get(pvr_obj);

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}

/**
 * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address at the start of the target mapping.
 * @size: Size of the target mapping.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *    address,
 *  * Any error encountered while performing internal operations required to
 *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
 *    pvr_vm_gpuva_remap).
 */
int
pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
					    size);
	if (err)
		return err;

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}
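
/*
 * Illustrative usage sketch (an assumption, not a call site in this file):
 * because pvr_vm_map() returns no handle, the caller keeps the device-virtual
 * address and passes the same address and size back to pvr_vm_unmap().
 *
 *	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
 *	if (err)
 *		return err;
 *	... the GPU may use [device_addr, device_addr + size) ...
 *	err = pvr_vm_unmap(vm_ctx, device_addr, size);
 */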

/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 128,
		.size = 1024,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 128,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
		.offset = 0,
		.size = 128,
	},
};

#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)

/*
 * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
 * static data area for each heap.
 */
static const struct drm_pvr_heap pvr_heaps[] = {
	[DRM_PVR_HEAP_GENERAL] = {
		.base = ROGUE_GENERAL_HEAP_BASE,
		.size = ROGUE_GENERAL_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_USC_CODE] = {
		.base = ROGUE_USCCODE_HEAP_BASE,
		.size = ROGUE_USCCODE_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_RGNHDR] = {
		.base = ROGUE_RGNHDR_HEAP_BASE,
		.size = ROGUE_RGNHDR_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_VIS_TEST] = {
		.base = ROGUE_VISTEST_HEAP_BASE,
		.size = ROGUE_VISTEST_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
};
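
/*
 * Worked example of GET_RESERVED_SIZE() (illustrative arithmetic only): the
 * last static data area for DRM_PVR_HEAP_GENERAL above has offset 128 and
 * size 1024, so GET_RESERVED_SIZE(128, 1024) == round_up(1152, PAGE_SIZE),
 * which is 4096 with a 4 KiB PAGE_SIZE. The reserved size therefore always
 * covers the static data areas rounded up to a whole page.
 */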

int
pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
			  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_static_data_areas query = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.static_data_areas.array) {
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
		goto copy_out;
	}

	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);

	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
	if (err < 0)
		return err;

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}
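
/*
 * Note on the dev-query flow (inferred from the handlers here, not from
 * documented userspace code): a caller is expected to invoke the query with
 * args->pointer == NULL to learn the size of the query struct, then with a
 * NULL array pointer to learn count and stride, and finally with an array
 * buffer of at least count * stride bytes to receive the entries themselves.
 * pvr_heap_info_get() below follows the same three-step convention.
 */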

int
pvr_heap_info_get(const struct pvr_device *pvr_dev,
		  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_heap_info query = {0};
	u64 dest;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.heaps.array) {
		query.heaps.count = ARRAY_SIZE(pvr_heaps);
		query.heaps.stride = sizeof(struct drm_pvr_heap);
		goto copy_out;
	}

	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
		query.heaps.count = ARRAY_SIZE(pvr_heaps);

	/* Region header heap is only present if BRN63142 is present. */
	dest = query.heaps.array;
	for (size_t i = 0; i < query.heaps.count; i++) {
		struct drm_pvr_heap heap = pvr_heaps[i];

		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
			heap.size = 0;

		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
		if (err < 0)
			return err;

		dest += query.heaps.stride;
	}

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}

/**
 * pvr_heap_contains_range() - Determine if a given heap contains the specified
 * device-virtual address range.
 * @pvr_heap: Target heap.
 * @start: Inclusive start of the target range.
 * @end: Inclusive end of the target range.
 *
 * It is an error to call this function with values of @start and @end that do
 * not satisfy the condition @start <= @end.
 */
static __always_inline bool
pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
{
	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
}

/**
 * pvr_find_heap_containing() - Find a heap which contains the specified
 * device-virtual address range.
 * @pvr_dev: Target PowerVR device.
 * @start: Start of the target range.
 * @size: Size of the target range.
 *
 * Return:
 *  * A pointer to a constant instance of struct drm_pvr_heap representing the
 *    heap containing the entire range specified by @start and @size on
 *    success, or
 *  * %NULL if no such heap exists.
 */
const struct drm_pvr_heap *
pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
{
	u64 end;

	if (check_add_overflow(start, size - 1, &end))
		return NULL;

	/*
	 * There are no guarantees about the order of address ranges in
	 * &pvr_heaps, so iterate over the entire array for a heap whose
	 * range completely encompasses the given range.
	 */
	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
		/* Skip heaps that are only present with an associated quirk. */
		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
			continue;
		}

		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
			return &pvr_heaps[heap_id];
	}

	return NULL;
}

/**
 * pvr_vm_find_gem_object() - Look up a buffer object from a given
 * device-virtual address.
 * @vm_ctx: [IN] Target VM context.
 * @device_addr: [IN] Virtual device address at the start of the required
 *               object.
 * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
 *                     of the mapped region within the buffer object. May be
 *                     %NULL if this information is not required.
 * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
 *                   region. May be %NULL if this information is not required.
 *
 * If successful, a reference will be taken on the buffer object. The caller
 * must drop the reference with pvr_gem_object_put().
 *
 * Return:
 *  * The PowerVR buffer object mapped at @device_addr if one exists, or
 *  * %NULL otherwise.
 */
struct pvr_gem_object *
pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
		       u64 *mapped_offset_out, u64 *mapped_size_out)
{
	struct pvr_gem_object *pvr_obj;
	struct drm_gpuva *va;

	mutex_lock(&vm_ctx->lock);

	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
	if (!va)
		goto err_unlock;

	pvr_obj = gem_to_pvr_gem(va->gem.obj);
	pvr_gem_object_get(pvr_obj);

	if (mapped_offset_out)
		*mapped_offset_out = va->gem.offset;
	if (mapped_size_out)
		*mapped_size_out = va->va.range;

	mutex_unlock(&vm_ctx->lock);

	return pvr_obj;

err_unlock:
	mutex_unlock(&vm_ctx->lock);

	return NULL;
}
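
/*
 * Illustrative usage sketch (an assumption, not a call site in this file):
 *
 *	u64 mapped_offset, mapped_size;
 *	struct pvr_gem_object *pvr_obj;
 *
 *	pvr_obj = pvr_vm_find_gem_object(vm_ctx, device_addr,
 *					 &mapped_offset, &mapped_size);
 *	if (!pvr_obj)
 *		return -ENOENT;
 *	... inspect the object backing device_addr ...
 *	pvr_gem_object_put(pvr_obj);
 */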

/**
 * pvr_vm_get_fw_mem_context() - Get object representing firmware memory
 * context
 * @vm_ctx: Target VM context.
 *
 * Returns:
 *  * FW object representing firmware memory context, or
 *  * %NULL if this VM context does not have a firmware memory context.
 */
struct pvr_fw_object *
pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->fw_mem_ctx_obj;
}