/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_mmio.h"

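/*
 * Compatibility helpers: callers of this i915-style API hand us a struct
 * intel_uncore that is embedded in struct xe_device; recover the xe_device
 * with container_of() and route all accesses through its root GT/tile.
 */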
static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_root_mmio_gt(xe);
}

static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_device_get_root_tile(xe);
}

static inline u32 intel_uncore_read(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}

static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
				    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
}

static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
				      i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
}

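/*
 * Read a 64-bit value that is exposed as two 32-bit registers.  The upper
 * half is re-read (up to a small retry limit) until it is stable, so the
 * combined value stays consistent if the lower half wraps between reads.
 */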
static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t i915_lower_reg, i915_reg_t i915_upper_reg)
{
	struct xe_reg lower_reg = XE_REG(i915_mmio_reg_offset(i915_lower_reg));
	struct xe_reg upper_reg = XE_REG(i915_mmio_reg_offset(i915_upper_reg));
	u32 upper, lower, old_upper;
	int loop = 0;

	upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
	do {
		old_upper = upper;
		lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
		upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
	} while (upper != old_upper && loop++ < 2);

	return (u64)upper << 32 | lower;
}

static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
					     i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}

static inline void intel_uncore_write(struct intel_uncore *uncore,
				      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
}

static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
				   i915_reg_t i915_reg, u32 clear, u32 set)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
}

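/*
 * i915 callers pass these wait timeouts in milliseconds; convert to
 * microseconds for xe_mmio_wait32().
 */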
static inline int intel_wait_for_register(struct intel_uncore *uncore,
					  i915_reg_t i915_reg, u32 mask,
					  u32 value, unsigned int timeout)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, NULL, false);
}

static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
					     i915_reg_t i915_reg, u32 mask,
					     u32 value, unsigned int timeout)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
			      timeout * USEC_PER_MSEC, NULL, false);
}

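/*
 * i915 splits this wait into a fast (microsecond) and a slow (millisecond)
 * phase; xe_mmio_wait32() takes a single deadline, so combine both into one
 * timeout in microseconds.
 */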
static inline int
__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
			  u32 mask, u32 value, unsigned int fast_timeout_us,
			  unsigned int slow_timeout_ms, u32 *out_value)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
			      fast_timeout_us + 1000 * slow_timeout_ms,
			      out_value, false);
}

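/*
 * The _fw ("forcewake") and _notrace variants below intentionally map to the
 * same plain xe_mmio accessors as the regular helpers; the distinctions i915
 * draws are handled (or not needed) on the xe side.
 */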
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
				       i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}

static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
					 i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
}

static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
					    i915_reg_t i915_reg)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
}

static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
					      i915_reg_t i915_reg, u32 val)
{
	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));

	xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
}

static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
{
	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);

	return xe_device_get_root_tile(xe)->mmio.regs;
}

/*
 * The raw_reg_{read,write} macros are intended as a micro-optimization for
 * interrupt handlers so that the pointer indirection on uncore->regs can
 * be computed once (and presumably cached in a register) instead of generating
 * extra load instructions for each MMIO access.
 *
 * Given that these macros are only intended for non-GSI interrupt registers
 * (and the goal is to avoid extra instructions generated by the compiler),
 * these macros do not account for uncore->gsi_offset.  Any caller that needs
 * to use these macros on a GSI register is responsible for adding the
 * appropriate GSI offset to the 'base' parameter.
 */
#define raw_reg_read(base, reg) \
	readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))
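/*
 * Forcewake and unclaimed-MMIO detection are handled (or not applicable) on
 * the xe side, so the i915-style entry points below compile away to no-ops
 * in this compat layer.
 */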
#define intel_uncore_forcewake_get(x, y) do { } while (0)
#define intel_uncore_forcewake_put(x, y) do { } while (0)

#define intel_uncore_arm_unclaimed_mmio_detection(x) do { } while (0)

#endif /* __INTEL_UNCORE_H__ */