/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __AMDGPU_JOB_H__
#define __AMDGPU_JOB_H__

#include <drm/gpu_scheduler.h>
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"

/* bit set means the command submission involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0)
/* bit set means the preamble IB is presented for the first time in its context */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1)
/* bit set means a context switch occurred */
#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2)
/* bit set means the IB is preempted */
#define AMDGPU_IB_PREEMPTED                 (1 << 3)
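
/*
 * Illustrative sketch (not part of the original header): these bits are
 * carried in amdgpu_job.preamble_status / preemption_status and in the
 * status/flags word handed down to the ring backend's ->emit_ib() and
 * ->emit_cntxcntl() callbacks, which typically test them roughly like:
 *
 *	if (flags & AMDGPU_HAVE_CTX_SWITCH)
 *		emit_context_switch_preamble(ring);	// placeholder helper
 */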

#define to_amdgpu_job(sched_job)		\
		container_of((sched_job), struct amdgpu_job, base)

#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)

struct amdgpu_fence;
enum amdgpu_ib_pool_type;

struct amdgpu_job {
	struct drm_sched_job    base;
	struct amdgpu_vm	*vm;
	struct amdgpu_sync	explicit_sync;
	struct dma_fence	hw_fence;
	struct dma_fence	*gang_submit;
	uint32_t		preamble_status;
	uint32_t                preemption_status;
	bool                    vm_needs_flush;
	bool			gds_switch_needed;
	bool			spm_update_needed;
	uint64_t		vm_pd_addr;
	unsigned		vmid;
	unsigned		pasid;
	uint32_t		gds_base, gds_size;
	uint32_t		gws_base, gws_size;
	uint32_t		oa_base, oa_size;
	uint64_t		generation;

	/* user fence handling */
	uint64_t		uf_addr;
	uint64_t		uf_sequence;

	/* virtual addresses for shadow/GDS/CSA */
	uint64_t		shadow_va;
	uint64_t		csa_va;
	uint64_t		gds_va;
	bool			init_shadow;

	/* job_run_counter >= 1 means a resubmit job */
	uint32_t		job_run_counter;

	/* enforce isolation */
	bool			enforce_isolation;

	uint32_t		num_ibs;
	struct amdgpu_ib	ibs[];
};

static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
{
	return to_amdgpu_ring(job->base.entity->rq->sched);
}

int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct drm_sched_entity *entity, void *owner,
		     unsigned int num_ibs, struct amdgpu_job **job);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity, void *owner,
			     size_t size, enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job);
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader);
void amdgpu_job_free(struct amdgpu_job *job);
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence);
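
/*
 * Minimal usage sketch (illustrative only, not taken from this file;
 * "adev", "entity" and the IB size are placeholders, error handling is
 * abbreviated):
 *
 *	struct amdgpu_job *job;
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
 *				     64, AMDGPU_IB_POOL_DELAYED, &job);
 *	if (r)
 *		return r;
 *
 *	// fill job->ibs[0] with packets and set job->ibs[0].length_dw
 *
 *	fence = amdgpu_job_submit(job);		// queues the job on "entity"
 *	dma_fence_put(fence);
 *
 * Submissions that bypass the scheduler (e.g. early init or reset paths)
 * can use amdgpu_job_submit_direct() with an explicit ring instead.
 */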

void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);

#endif