/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
#include "intel_wakeref.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE

DECLARE_EWMA(runtime, 3, 8);
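
/*
 * A minimal usage sketch for the helpers generated by
 * DECLARE_EWMA(runtime, 3, 8) above (3 bits of fractional precision,
 * each new sample weighted 1/8, per <linux/average.h>):
 *
 *	struct ewma_runtime avg;
 *
 *	ewma_runtime_init(&avg);
 *	ewma_runtime_add(&avg, sample);     // fold in a new sample
 *	smoothed = ewma_runtime_read(&avg); // read back the weighted average
 */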

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

struct intel_context_ops {
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

#define COPS_RUNTIME_CYCLES_BIT 1
#define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)

	int (*alloc)(struct intel_context *ce);

	void (*revoke)(struct intel_context *ce, struct i915_request *rq,
		       unsigned int preempt_timeout_ms);

	void (*close)(struct intel_context *ce);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*update_stats)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);

	/* virtual/parallel engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count,
						unsigned long flags);
	struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
						 unsigned int num_siblings,
						 unsigned int width);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};
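
/*
 * A hedged sketch of how a submission backend might fill in the ops
 * table; the example_* names below are illustrative placeholders, not
 * the actual execlists/GuC callbacks:
 *
 *	static const struct intel_context_ops example_context_ops = {
 *		.flags = COPS_HAS_INFLIGHT,
 *		.alloc = example_context_alloc,
 *		.pre_pin = example_context_pre_pin,
 *		.pin = example_context_pin,
 *		.unpin = example_context_unpin,
 *		.post_unpin = example_context_post_unpin,
 *		.enter = example_context_enter,
 *		.exit = example_context_exit,
 *		.reset = example_context_reset,
 *		.destroy = example_context_destroy,
 *	};
 */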

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};
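
	/*
	 * Sketch: references are taken and dropped through the
	 * intel_context_get()/intel_context_put() wrappers in
	 * intel_context.h. Because the rcu head above overlays the kref,
	 * a racing kref_get_unless_zero() could observe the RCU state,
	 * hence the warning:
	 *
	 *	struct intel_context *ce = intel_context_get(other_ce);
	 *	...
	 *	intel_context_put(ce);
	 */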

	struct intel_engine_cs *engine;
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
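
	/*
	 * Sketch: the inflight pointer is sufficiently aligned that its
	 * low three bits can double as a submission count; the helpers
	 * above separate the two:
	 *
	 *	struct intel_engine_cs *engine = intel_context_inflight(ce);
	 *	unsigned int count = intel_context_inflight_count(ce);
	 */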

	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	struct file *default_state;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state;
	u32 ring_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;
	intel_wakeref_t wakeref;

	unsigned long flags;
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1
#define CONTEXT_INIT_BIT		2
#define CONTEXT_VALID_BIT		3
#define CONTEXT_CLOSED_BIT		4
#define CONTEXT_USE_SEMAPHORES		5
#define CONTEXT_BANNED			6
#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
#define CONTEXT_NOPREEMPT		8
#define CONTEXT_LRCA_DIRTY		9
#define CONTEXT_GUC_INIT		10
#define CONTEXT_PERMA_PIN		11
#define CONTEXT_IS_PARKING		12
#define CONTEXT_EXITING			13
#define CONTEXT_LOW_LATENCY		14
#define CONTEXT_OWN_STATE		15

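	/*
	 * The CONTEXT_* values above are bit numbers within @flags and are
	 * manipulated with the standard bitops; a sketch of typical use
	 * (wrappers such as intel_context_is_banned() in intel_context.h
	 * follow this pattern):
	 *
	 *	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	 *	if (test_bit(CONTEXT_BANNED, &ce->flags))
	 *		return -EIO;
	 */
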
	struct {
		u64 timeout_us;
	} watchdog;

	u32 *lrc_reg_state;
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
	u32 tag; /* cookie passed to HW to track this context on submission */

	/** stats: Context GPU engine busyness tracking. */
	struct intel_context_stats {
		u64 active;

		/* Time on GPU as tracked by the hw. */
		struct {
			struct ewma_runtime avg;
			u64 total;
			u32 last;
			I915_SELFTEST_DECLARE(u32 num_underflow);
			I915_SELFTEST_DECLARE(u32 max_underflow);
		} runtime;
	} stats;

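	/*
	 * Sketch of a consumer of the runtime tracking above, via the
	 * runtime helpers declared in intel_context.h:
	 *
	 *	u64 total_ns = intel_context_get_total_runtime_ns(ce);
	 *	u64 avg_ns = intel_context_get_avg_runtime_ns(ce);
	 */
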
	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
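
	/*
	 * Sketch of the pin/unpin flow these fields guard, using
	 * intel_context_pin() and intel_context_unpin() from
	 * intel_context.h:
	 *
	 *	err = intel_context_pin(ce);
	 *	if (err)
	 *		return err;
	 *	... build and submit requests ...
	 *	intel_context_unpin(ce);
	 */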

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops;

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	/**
	 * pinned_contexts_link: List link for the engine's pinned contexts.
	 * This is only used if this is a perma-pinned kernel context; the
	 * list is assumed to be manipulated only during driver load or
	 * unload, so there is currently no mutex protection.
	 */
	struct list_head pinned_contexts_link;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */

	struct {
		/** @lock: protects everything in guc_state */
		spinlock_t lock;
		/**
		 * @sched_state: scheduling state of this context using GuC
		 * submission
		 */
		u32 sched_state;
		/**
		 * @fences: maintains a list of requests that are currently
		 * being fenced until a GuC operation completes
		 */
		struct list_head fences;
		/**
		 * @blocked: fence used to signal when the blocking of a
		 * context's submissions is complete.
		 */
		struct i915_sw_fence blocked;
		/** @requests: list of active requests on this context */
		struct list_head requests;
		/** @prio: the context's current guc priority */
		u8 prio;
		/**
		 * @prio_count: a counter of the number of requests in flight
		 * in each priority bucket
		 */
		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
		/**
		 * @sched_disable_delay_work: worker to disable scheduling on this
		 * context
		 */
		struct delayed_work sched_disable_delay_work;
	} guc_state;

	struct {
		/**
		 * @id: handle which is used to uniquely identify this context
		 * with the GuC, protected by guc->submission_state.lock
		 */
		u16 id;
		/**
		 * @ref: the number of references to the guc_id, when
		 * transitioning in and out of zero protected by
		 * guc->submission_state.lock
		 */
		atomic_t ref;
		/**
		 * @link: in guc->guc_id_list when the guc_id has no refs but is
		 * still valid, protected by guc->submission_state.lock
		 */
		struct list_head link;
	} guc_id;

	/**
	 * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
	 * list when context is pending to be destroyed (deregistered with the
	 * GuC), protected by guc->submission_state.lock
	 */
	struct list_head destroyed_link;

	/** @parallel: sub-structure for parallel submission members */
	struct {
		union {
			/**
			 * @child_list: parent's list of children
			 * contexts, no protection as immutable after context
			 * creation
			 */
			struct list_head child_list;
			/**
			 * @child_link: child's link into parent's list of
			 * children
			 */
			struct list_head child_link;
		};
		/** @parent: pointer to parent if child */
		struct intel_context *parent;
		/**
		 * @last_rq: last request submitted on a parallel context, used
		 * to insert submit fences between requests in the parallel
		 * context
		 */
		struct i915_request *last_rq;
		/**
		 * @fence_context: fence context of the composite fence used
		 * for parallel submission
		 */
		u64 fence_context;
		/**
		 * @seqno: seqno for composite fence when doing parallel
		 * submission
		 */
		u32 seqno;
		/** @number_children: number of children if parent */
		u8 number_children;
		/** @child_index: index into child_list if child */
		u8 child_index;
		/** @guc: GuC specific members for parallel submission */
		struct {
			/** @wqi_head: cached head pointer in work queue */
			u16 wqi_head;
			/** @wqi_tail: cached tail pointer in work queue */
			u16 wqi_tail;
			/** @wq_head: pointer to the actual head in work queue */
			u32 *wq_head;
			/** @wq_tail: pointer to the actual tail in work queue */
			u32 *wq_tail;
			/** @wq_status: pointer to the status in work queue */
			u32 *wq_status;

			/**
			 * @parent_page: page in context state (ce->state)
			 * used by the parent for the work queue and process
			 * descriptor
			 */
			u8 parent_page;
		} guc;
	} parallel;
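
	/*
	 * Sketch: child contexts hang off the parent through the union
	 * above and are typically walked with the for_each_child()
	 * iterator from intel_context.h:
	 *
	 *	for_each_child(parent, child)
	 *		...operate on each child...
	 */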

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
	 */
	bool drop_schedule_enable;

	/**
	 * @drop_schedule_disable: Force drop of schedule disable G2H for
	 * selftest
	 */
	bool drop_schedule_disable;

	/**
	 * @drop_deregister: Force drop of deregister G2H for selftest
	 */
	bool drop_deregister;
#endif
};

#endif /* __INTEL_CONTEXT_TYPES__ */