1  /* SPDX-License-Identifier: MIT */
2  /*
3   * Copyright © 2019 Intel Corporation
4   */
5  
6  #ifndef INTEL_RING_H
7  #define INTEL_RING_H
8  
9  #include "i915_gem.h" /* GEM_BUG_ON */
10  #include "i915_request.h"
11  #include "intel_ring_types.h"
12  
13  struct intel_engine_cs;
14  
15  struct intel_ring *
16  intel_engine_create_ring(struct intel_engine_cs *engine, int size);
17  
18  u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
19  int intel_ring_cacheline_align(struct i915_request *rq);
20  
21  unsigned int intel_ring_update_space(struct intel_ring *ring);
22  
23  void __intel_ring_pin(struct intel_ring *ring);
24  int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww);
25  void intel_ring_unpin(struct intel_ring *ring);
26  void intel_ring_reset(struct intel_ring *ring, u32 tail);
27  
28  void intel_ring_free(struct kref *ref);
29  
intel_ring_get(struct intel_ring * ring)30  static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
31  {
32  	kref_get(&ring->ref);
33  	return ring;
34  }
35  
intel_ring_put(struct intel_ring * ring)36  static inline void intel_ring_put(struct intel_ring *ring)
37  {
38  	kref_put(&ring->ref, intel_ring_free);
39  }
40  
static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/*
	 * A no-op at runtime (debug asserts aside); this exists purely as a
	 * textual marker so the reader can check that the number of dwords
	 * emitted since the matching intel_ring_begin() exactly fills the
	 * space that was reserved (i.e. the count passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON(cs != rq->ring->vaddr + rq->ring->emit);
	GEM_BUG_ON(!IS_ALIGNED(rq->ring->emit, 8)); /* RING_TAIL is qword aligned */
}
54  
static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	/* ring->size is a power of two, so masking performs the modulo. */
	const u32 mask = ring->size - 1;

	return pos & mask;
}
59  
static inline int intel_ring_direction(const struct intel_ring *ring,
				       u32 next, u32 prev)
{
	/* Both offsets must match the ring->size type to wrap consistently. */
	typecheck(typeof(ring->size), next);
	typecheck(typeof(ring->size), prev);

	/*
	 * Shift the unsigned difference up by ring->wrap so that, after
	 * conversion to int, the sign tells whether next is ahead of or
	 * behind prev within the ring.
	 */
	u32 delta = next - prev;

	return delta << ring->wrap;
}
67  
68  static inline bool
intel_ring_offset_valid(const struct intel_ring * ring,unsigned int pos)69  intel_ring_offset_valid(const struct intel_ring *ring,
70  			unsigned int pos)
71  {
72  	if (pos & -ring->size) /* must be strictly within the ring */
73  		return false;
74  
75  	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
76  		return false;
77  
78  	return true;
79  }
80  
intel_ring_offset(const struct i915_request * rq,void * addr)81  static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
82  {
83  	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
84  	u32 offset = addr - rq->ring->vaddr;
85  
86  	GEM_BUG_ON(offset > rq->ring->size);
87  	return intel_ring_wrap(rq->ring, offset);
88  }
89  
90  static inline void
assert_ring_tail_valid(const struct intel_ring * ring,unsigned int tail)91  assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
92  {
93  	unsigned int head = READ_ONCE(ring->head);
94  
95  	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
96  
97  	/*
98  	 * "Ring Buffer Use"
99  	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
100  	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
101  	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
102  	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
103  	 * same cacheline, the Head Pointer must not be greater than the Tail
104  	 * Pointer."
105  	 *
106  	 * We use ring->head as the last known location of the actual RING_HEAD,
107  	 * it may have advanced but in the worst case it is equally the same
108  	 * as ring->head and so we should never program RING_TAIL to advance
109  	 * into the same cacheline as ring->head.
110  	 */
111  #define cacheline(a) round_down(a, CACHELINE_BYTES)
112  	GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
113  #undef cacheline
114  }
115  
116  static inline unsigned int
intel_ring_set_tail(struct intel_ring * ring,unsigned int tail)117  intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
118  {
119  	/* Whilst writes to the tail are strictly order, there is no
120  	 * serialisation between readers and the writers. The tail may be
121  	 * read by i915_request_retire() just as it is being updated
122  	 * by execlists, as although the breadcrumb is complete, the context
123  	 * switch hasn't been seen.
124  	 */
125  	assert_ring_tail_valid(ring, tail);
126  	ring->tail = tail;
127  	return tail;
128  }
129  
130  static inline unsigned int
__intel_ring_space(unsigned int head,unsigned int tail,unsigned int size)131  __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
132  {
133  	/*
134  	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
135  	 * same cacheline, the Head Pointer must not be greater than the Tail
136  	 * Pointer."
137  	 */
138  	GEM_BUG_ON(!is_power_of_2(size));
139  	return (head - tail - CACHELINE_BYTES) & (size - 1);
140  }
141  
142  #endif /* INTEL_RING_H */
143