// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_cccb.h"
#include "pvr_context.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_job.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_common.h"
#include "pvr_rogue_fwif_resetframework.h"
#include "pvr_stream.h"
#include "pvr_stream_defs.h"
#include "pvr_vm.h"

#include <drm/drm_auth.h>
#include <drm/drm_managed.h>

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/xarray.h>

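/*
 * Map a UAPI context priority onto the internal scheduler priority.
 *
 * DRM_PVR_CTX_PRIORITY_HIGH is privileged: it is only honoured for tasks
 * with CAP_SYS_NICE or for the current DRM master, so an unprivileged
 * client cannot starve other clients' contexts.
 */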
static int
remap_priority(struct pvr_file *pvr_file, s32 uapi_priority,
	       enum pvr_context_priority *priority_out)
{
	switch (uapi_priority) {
	case DRM_PVR_CTX_PRIORITY_LOW:
		*priority_out = PVR_CTX_PRIORITY_LOW;
		break;
	case DRM_PVR_CTX_PRIORITY_NORMAL:
		*priority_out = PVR_CTX_PRIORITY_MEDIUM;
		break;
	case DRM_PVR_CTX_PRIORITY_HIGH:
		if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file)))
			return -EACCES;
		*priority_out = PVR_CTX_PRIORITY_HIGH;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

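/*
 * Return the size of the FW context structure for the given context type,
 * or -EINVAL if the type is not recognised.
 */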
static int get_fw_obj_size(enum drm_pvr_ctx_type type)
{
	switch (type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return sizeof(struct rogue_fwif_fwrendercontext);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return sizeof(struct rogue_fwif_fwcomputecontext);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return sizeof(struct rogue_fwif_fwtransfercontext);
	}

	return -EINVAL;
}

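/*
 * Copy a userspace-supplied context state stream into a temporary kernel
 * buffer and feed it to the stream processor, which writes the decoded
 * state to @dest. The temporary buffer is freed on all paths.
 */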
static int
process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
			     u64 stream_user_ptr, u32 stream_size, void *dest)
{
	void *stream;
	int err;

	stream = kzalloc(stream_size, GFP_KERNEL);
	if (!stream)
		return -ENOMEM;

	if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) {
		err = -EFAULT;
		goto err_free;
	}

	err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);
	if (err)
		goto err_free;

	kfree(stream);

	return 0;

err_free:
	kfree(stream);

	return err;
}

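/*
 * Initialize the FW-visible portion of a render context. Render contexts
 * require static context switch register state from userspace, which is
 * decoded directly into the FW render context object.
 */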
static int init_render_fw_objs(struct pvr_context *ctx,
			       struct drm_pvr_ioctl_create_context_args *args,
			       void *fw_ctx_map)
{
	struct rogue_fwif_static_rendercontext_state *static_rendercontext_state;
	struct rogue_fwif_fwrendercontext *fw_render_context = fw_ctx_map;

	if (!args->static_context_state_len)
		return -EINVAL;

	static_rendercontext_state = &fw_render_context->static_render_context_state;

	/* Copy static render context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_render_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    &static_rendercontext_state->ctxswitch_regs[0]);
}

static int init_compute_fw_objs(struct pvr_context *ctx,
				struct drm_pvr_ioctl_create_context_args *args,
				void *fw_ctx_map)
{
	struct rogue_fwif_fwcomputecontext *fw_compute_context = fw_ctx_map;
	struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs;

	if (!args->static_context_state_len)
		return -EINVAL;

	ctxswitch_regs = &fw_compute_context->static_compute_context_state.ctxswitch_regs;

	/* Copy static compute context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_compute_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    ctxswitch_regs);
}

static int init_transfer_fw_objs(struct pvr_context *ctx,
				 struct drm_pvr_ioctl_create_context_args *args,
				 void *fw_ctx_map)
{
	/* Transfer contexts take no static context state. */
	if (args->static_context_state_len)
		return -EINVAL;

	return 0;
}

static int init_fw_objs(struct pvr_context *ctx,
			struct drm_pvr_ioctl_create_context_args *args,
			void *fw_ctx_map)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return init_render_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return init_compute_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return init_transfer_fw_objs(ctx, args, fw_ctx_map);
	}

	return -EINVAL;
}

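/*
 * pvr_fw_object_create() init callback: seed the FW object's CPU mapping
 * with the context image staged in ctx->data.
 */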
static void
ctx_fw_data_init(void *cpu_ptr, void *priv)
{
	struct pvr_context *ctx = priv;

	memcpy(cpu_ptr, ctx->data, ctx->data_size);
}

/**
 * pvr_context_destroy_queues() - Destroy all queues attached to a context.
 * @ctx: Context to destroy queues on.
 *
 * Should be called when the last reference to a context object is dropped.
 * It releases all resources attached to the queues bound to this context.
 */
static void pvr_context_destroy_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_destroy(ctx->queues.fragment);
		pvr_queue_destroy(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_destroy(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_destroy(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create_queues() - Create all queues attached to a context.
 * @ctx: Context to create queues on.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code otherwise.
 */
static int pvr_context_create_queues(struct pvr_context *ctx,
				     struct drm_pvr_ioctl_create_context_args *args,
				     void *fw_ctx_map)
{
	int err;

	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.geometry)) {
			err = PTR_ERR(ctx->queues.geometry);
			/* NULL the pointer so the unwind path skips this queue. */
			ctx->queues.geometry = NULL;
			goto err_destroy_queues;
		}

		ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.fragment)) {
			err = PTR_ERR(ctx->queues.fragment);
			ctx->queues.fragment = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_COMPUTE:
		ctx->queues.compute = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_COMPUTE,
						       args, fw_ctx_map);
		if (IS_ERR(ctx->queues.compute)) {
			err = PTR_ERR(ctx->queues.compute);
			ctx->queues.compute = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		ctx->queues.transfer = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.transfer)) {
			err = PTR_ERR(ctx->queues.transfer);
			ctx->queues.transfer = NULL;
			goto err_destroy_queues;
		}
		return 0;
	}

	return -EINVAL;

err_destroy_queues:
	pvr_context_destroy_queues(ctx);
	return err;
}

/**
 * pvr_context_kill_queues() - Kill queues attached to context.
 * @ctx: Context to kill queues on.
 *
 * Killing the queues makes them unusable for future jobs, while still giving
 * currently submitted jobs a chance to finish. Queue resources stay around
 * until pvr_context_destroy_queues() is called.
 */
static void pvr_context_kill_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_kill(ctx->queues.fragment);
		pvr_queue_kill(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_kill(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_kill(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create() - Create a context.
 * @pvr_file: File to attach the created context to.
 * @args: Context creation arguments.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code on failure.
 */
int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
	struct pvr_context *ctx;
	int ctx_size;
	int err;

	/* Context creation flags are currently unused and must be zero. */
	if (args->flags)
		return -EINVAL;

	ctx_size = get_fw_obj_size(args->type);
	if (ctx_size < 0)
		return ctx_size;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->data_size = ctx_size;
	ctx->type = args->type;
	ctx->flags = args->flags;
	ctx->pvr_dev = pvr_dev;
	kref_init(&ctx->ref_count);

	err = remap_priority(pvr_file, args->priority, &ctx->priority);
	if (err)
		goto err_free_ctx;

	ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (IS_ERR(ctx->vm_ctx)) {
		err = PTR_ERR(ctx->vm_ctx);
		goto err_free_ctx;
	}

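	/*
	 * Stage the FW context image in a kernel buffer first: the queues and
	 * static context state are written into ctx->data, and
	 * ctx_fw_data_init() later copies the finished image into the FW
	 * object created below.
	 */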
	ctx->data = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx->data) {
		err = -ENOMEM;
		goto err_put_vm;
	}

	err = pvr_context_create_queues(ctx, args, ctx->data);
	if (err)
		goto err_free_ctx_data;

	err = init_fw_objs(ctx, args, ctx->data);
	if (err)
		goto err_destroy_queues;

	err = pvr_fw_object_create(pvr_dev, ctx_size, PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   ctx_fw_data_init, ctx, &ctx->fw_obj);
	if (err)
		goto err_destroy_queues;

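	/*
	 * Publish the context in the device-wide ID space. Once this
	 * succeeds, other threads may look the context up and take
	 * references to it.
	 */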
	err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_destroy_fw_obj;

	err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL);
	if (err) {
		/*
		 * It's possible that another thread could have taken a reference on the context at
		 * this point as it is in the ctx_ids xarray. Therefore instead of directly
		 * destroying the context, drop a reference instead.
		 */
		pvr_context_put(ctx);
		return err;
	}

	spin_lock(&pvr_dev->ctx_list_lock);
	list_add_tail(&ctx->file_link, &pvr_file->contexts);
	spin_unlock(&pvr_dev->ctx_list_lock);

	return 0;

err_destroy_fw_obj:
	pvr_fw_object_destroy(ctx->fw_obj);

err_destroy_queues:
	pvr_context_destroy_queues(ctx);

err_free_ctx_data:
	kfree(ctx->data);

err_put_vm:
	pvr_vm_context_put(ctx->vm_ctx);

err_free_ctx:
	kfree(ctx);
	return err;
}

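/*
 * kref release callback: tears down everything pvr_context_create() set up.
 * The WARN_ON below guards against the final reference being dropped from
 * interrupt context, where this teardown must not run.
 */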
static void
pvr_context_release(struct kref *ref_count)
{
	struct pvr_context *ctx =
		container_of(ref_count, struct pvr_context, ref_count);
	struct pvr_device *pvr_dev = ctx->pvr_dev;

	WARN_ON(in_interrupt());
	spin_lock(&pvr_dev->ctx_list_lock);
	list_del(&ctx->file_link);
	spin_unlock(&pvr_dev->ctx_list_lock);

	xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
	pvr_context_destroy_queues(ctx);
	pvr_fw_object_destroy(ctx->fw_obj);
	kfree(ctx->data);
	pvr_vm_context_put(ctx->vm_ctx);
	kfree(ctx);
}

/**
 * pvr_context_put() - Release reference on context
 * @ctx: Target context.
 */
void
pvr_context_put(struct pvr_context *ctx)
{
	if (ctx)
		kref_put(&ctx->ref_count, pvr_context_release);
}

/**
 * pvr_context_destroy() - Destroy context
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Userspace context handle.
 *
 * Removes context from context list and drops initial reference. Context will
 * then be destroyed once all outstanding references are dropped.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if context not in context list.
 */
int
pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);

	if (!ctx)
		return -EINVAL;

	/* Make sure nothing can be queued to the queues after this point. */
	pvr_context_kill_queues(ctx);

	/* Release the reference held by the handle set. */
	pvr_context_put(ctx);

	return 0;
}

/**
 * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all contexts associated with @pvr_file from the device context list and drops initial
 * references. Contexts will then be destroyed once all outstanding references are dropped.
 */
void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
	struct pvr_context *ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->ctx_handles, handle, ctx)
		pvr_context_destroy(pvr_file, handle);

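	/*
	 * Contexts may still be on the file's list at this point if something
	 * else (e.g. an in-flight job) holds a reference on them. Unmap each
	 * remaining context's VM so that mappings cannot keep objects, and
	 * thereby the context itself, alive indefinitely.
	 */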
	spin_lock(&pvr_dev->ctx_list_lock);
	ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);

	while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) {
		list_del_init(&ctx->file_link);

		if (pvr_context_get_if_referenced(ctx)) {
			spin_unlock(&pvr_dev->ctx_list_lock);

			pvr_vm_unmap_all(ctx->vm_ctx);

			pvr_context_put(ctx);
			spin_lock(&pvr_dev->ctx_list_lock);
		}
		ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
	}
	spin_unlock(&pvr_dev->ctx_list_lock);
}

/**
 * pvr_context_device_init() - Device level initialization for queue related resources.
 * @pvr_dev: The device to initialize.
 */
void pvr_context_device_init(struct pvr_device *pvr_dev)
{
	xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
	spin_lock_init(&pvr_dev->ctx_list_lock);
}

/**
 * pvr_context_device_fini() - Device level cleanup for queue related resources.
 * @pvr_dev: The device to cleanup.
 */
void pvr_context_device_fini(struct pvr_device *pvr_dev)
{
	WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
	xa_destroy(&pvr_dev->ctx_ids);
}