/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_RESCTRL_H
#define _ASM_X86_RESCTRL_H

#ifdef CONFIG_X86_CPU_RESCTRL

#include <linux/sched.h>
#include <linux/jump_label.h>

/*
 * This value can never be a valid CLOSID, and is used when mapping a
 * (closid, rmid) pair to an index and back. On x86 only the RMID is
 * needed. The index is a software defined value.
 */
#define X86_RESCTRL_EMPTY_CLOSID         ((u32)~0)

/**
 * struct resctrl_pqr_state - State cache for the PQR MSR
 * @cur_rmid:		The cached Resource Monitoring ID
 * @cur_closid:	The cached Class Of Service ID
 * @default_rmid:	The user assigned Resource Monitoring ID
 * @default_closid:	The user assigned Class Of Service ID
 *
 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
 * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
 * contains both parts, so we need to cache them. This also
 * stores the user configured per cpu CLOSID and RMID.
 *
 * The cache also helps to avoid pointless updates if the value does
 * not change.
 */
struct resctrl_pqr_state {
	u32			cur_rmid;
	u32			cur_closid;
	u32			default_rmid;
	u32			default_closid;
};
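
/*
 * Illustrative sketch, not kernel code: the MSR update described above
 * combines both cached fields in a single write, with the RMID in the
 * low word and the CLOSID in the high word, e.g.:
 *
 *	wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
 *	// IA32_PQR_ASSOC[9:0]   <- rmid
 *	// IA32_PQR_ASSOC[63:32] <- closid
 */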

DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);

extern bool rdt_alloc_capable;
extern bool rdt_mon_capable;

DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);

static inline bool resctrl_arch_alloc_capable(void)
{
	return rdt_alloc_capable;
}

static inline void resctrl_arch_enable_alloc(void)
{
	static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_inc_cpuslocked(&rdt_enable_key);
}

static inline void resctrl_arch_disable_alloc(void)
{
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_dec_cpuslocked(&rdt_enable_key);
}

static inline bool resctrl_arch_mon_capable(void)
{
	return rdt_mon_capable;
}

static inline void resctrl_arch_enable_mon(void)
{
	static_branch_enable_cpuslocked(&rdt_mon_enable_key);
	static_branch_inc_cpuslocked(&rdt_enable_key);
}

static inline void resctrl_arch_disable_mon(void)
{
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_dec_cpuslocked(&rdt_enable_key);
}
/*
 * __resctrl_sched_in() - Writes the task's CLOSid/RMID to MSR_IA32_PQR_ASSOC
 *
 * The following considerations keep the impact on the scheduler hot
 * path minimal:
 * - This stays a no-op unless we are running on a CPU which supports
 *   resource control or monitoring, and the resctrl file system has
 *   been mounted to enable it.
 * - The per-CPU CLOSid/RMID values are cached and the MSR is written
 *   only when a task with a different CLOSid/RMID is scheduled in.
 * - RMIDs/CLOSids are allocated globally to keep this as simple as
 *   possible.
 *
 * Must be called with preemption disabled.
 */
static inline void __resctrl_sched_in(struct task_struct *tsk)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 closid = state->default_closid;
	u32 rmid = state->default_rmid;
	u32 tmp;

	/*
	 * If this task has a closid/rmid assigned, use it.
	 * Else use the closid/rmid assigned to this cpu.
	 */
	if (static_branch_likely(&rdt_alloc_enable_key)) {
		tmp = READ_ONCE(tsk->closid);
		if (tmp)
			closid = tmp;
	}

	if (static_branch_likely(&rdt_mon_enable_key)) {
		tmp = READ_ONCE(tsk->rmid);
		if (tmp)
			rmid = tmp;
	}

	if (closid != state->cur_closid || rmid != state->cur_rmid) {
		state->cur_closid = closid;
		state->cur_rmid = rmid;
		wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
	}
}

static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
{
	unsigned int scale = boot_cpu_data.x86_cache_occ_scale;

	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
	val /= scale;
	return val * scale;
}
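
/*
 * Example (illustrative, assuming a hardware scale factor of 64):
 * resctrl_arch_round_mon_val(1000) yields 1000 / 64 * 64 == 960,
 * the largest hardware-representable value not above the input.
 */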

static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk,
						u32 closid, u32 rmid)
{
	WRITE_ONCE(tsk->closid, closid);
	WRITE_ONCE(tsk->rmid, rmid);
}

static inline bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid)
{
	return READ_ONCE(tsk->closid) == closid;
}

static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored,
					   u32 rmid)
{
	return READ_ONCE(tsk->rmid) == rmid;
}

static inline void resctrl_sched_in(struct task_struct *tsk)
{
	if (static_branch_likely(&rdt_enable_key))
		__resctrl_sched_in(tsk);
}
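
/*
 * Illustrative call site (a sketch; on x86 the context switch path is
 * expected to invoke this for the incoming task, with preemption
 * already disabled):
 *
 *	resctrl_sched_in(next_p);
 */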

static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid)
{
	*rmid = idx;
	*closid = X86_RESCTRL_EMPTY_CLOSID;
}

static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid)
{
	return rmid;
}
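
/*
 * Example (illustrative): on x86 the index is simply the RMID, so the
 * encode/decode pair round-trips losslessly and the decoded CLOSID is
 * always X86_RESCTRL_EMPTY_CLOSID:
 *
 *	u32 closid, rmid;
 *
 *	resctrl_arch_rmid_idx_decode(resctrl_arch_rmid_idx_encode(0, 5),
 *				     &closid, &rmid);
 *	// rmid == 5, closid == X86_RESCTRL_EMPTY_CLOSID
 */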

/* x86 can always read an rmid, nothing needs allocating */
struct rdt_resource;
static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid)
{
	might_sleep();
	return NULL;
}

static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
					     void *ctx) { }
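
/*
 * Illustrative pairing (a sketch, not taken from kernel code): callers
 * treat the returned context as opaque and always pair alloc with free,
 * so an architecture that does need to allocate monitor state can plug
 * in behind the same interface:
 *
 *	void *ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
 *
 *	// ... read the counter ...
 *	resctrl_arch_mon_ctx_free(r, evtid, ctx);
 */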

void resctrl_cpu_detect(struct cpuinfo_x86 *c);

#else

static inline void resctrl_sched_in(struct task_struct *tsk) {}
static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}

#endif /* CONFIG_X86_CPU_RESCTRL */

#endif /* _ASM_X86_RESCTRL_H */