/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _ASM_X86_TOPOLOGY_H
#define _ASM_X86_TOPOLOGY_H

/*
 * The NUMA_NO_NODE definition was moved from here to <linux/numa.h> to
 * preserve its visibility; it may be used independently of CONFIG_NUMA.
 */
#include <linux/numa.h>
#include <linux/cpumask.h>

#ifdef CONFIG_NUMA

#include <asm/mpspec.h>
#include <asm/percpu.h>

/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
/*
 * Override the generic percpu implementation of cpu_to_node().
 */
extern int __cpu_to_node(int cpu);
#define cpu_to_node __cpu_to_node

extern int early_cpu_to_node(int cpu);

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Same function, but used if called before the per_cpu areas are set up */
static inline int early_cpu_to_node(int cpu)
{
	return early_per_cpu(x86_cpu_to_node_map, cpu);
}

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

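/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 * allocate memory on the node local to a given CPU. "buf" and "size" are
 * illustrative names; kzalloc_node() is the regular node-aware allocator.
 *
 *	void *buf = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
 *	if (!buf)
 *		return -ENOMEM;
 */
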
/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern const struct cpumask *cpumask_of_node(int node);
#else
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	return node_to_cpumask_map[node];
}
#endif
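
/*
 * A minimal sketch of walking the CPUs on one node (hypothetical caller;
 * "node" and "cpu" are illustrative). for_each_cpu() is the regular
 * cpumask iterator from <linux/cpumask.h>.
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, cpumask_of_node(node))
 *		pr_debug("cpu %d is on node %d\n", cpu, node);
 */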

extern void setup_node_to_cpumask_map(void);

#define pcibus_to_node(bus) __pcibus_to_node(bus)

extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)

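/*
 * A minimal sketch of using node_distance() to pick the closer of two
 * candidate nodes relative to "home" (hypothetical caller; all variable
 * names are illustrative). A smaller distance means closer.
 *
 *	int best = node_distance(home, a) <= node_distance(home, b) ? a : b;
 */
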
#else /* !CONFIG_NUMA */

static inline int numa_node_id(void)
{
	return 0;
}
/*
 * Indicate to the generic code that numa_node_id() is overridden:
 */
#define numa_node_id numa_node_id

static inline int early_cpu_to_node(int cpu)
{
	return 0;
}

static inline void setup_node_to_cpumask_map(void) { }

#endif /* !CONFIG_NUMA */

#include <asm-generic/topology.h>

/* Topology information */
enum x86_topology_domains {
	TOPO_SMT_DOMAIN,
	TOPO_CORE_DOMAIN,
	TOPO_MODULE_DOMAIN,
	TOPO_TILE_DOMAIN,
	TOPO_DIE_DOMAIN,
	TOPO_DIEGRP_DOMAIN,
	TOPO_PKG_DOMAIN,
	TOPO_MAX_DOMAIN,
};

struct x86_topology_system {
	unsigned int	dom_shifts[TOPO_MAX_DOMAIN];
	unsigned int	dom_size[TOPO_MAX_DOMAIN];
};

extern struct x86_topology_system x86_topo_system;

static inline unsigned int topology_get_domain_size(enum x86_topology_domains dom)
{
	return x86_topo_system.dom_size[dom];
}

static inline unsigned int topology_get_domain_shift(enum x86_topology_domains dom)
{
	return dom == TOPO_SMT_DOMAIN ? 0 : x86_topo_system.dom_shifts[dom - 1];
}
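
/*
 * A minimal sketch of how the shift/size pair decodes an APIC ID
 * (hypothetical helper, not part of this header): a CPU's ID relative to
 * its enclosing domain is the APIC ID shifted down to where that domain's
 * bits start, masked with the domain size, which is a power of two by
 * construction.
 *
 *	static inline u32 example_relative_id(u32 apicid,
 *					      enum x86_topology_domains dom)
 *	{
 *		return (apicid >> topology_get_domain_shift(dom)) &
 *		       (topology_get_domain_size(dom) - 1);
 *	}
 */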

extern const struct cpumask *cpu_coregroup_mask(int cpu);
extern const struct cpumask *cpu_clustergroup_mask(int cpu);

#define topology_logical_package_id(cpu)	(cpu_data(cpu).topo.logical_pkg_id)
#define topology_physical_package_id(cpu)	(cpu_data(cpu).topo.pkg_id)
#define topology_logical_die_id(cpu)		(cpu_data(cpu).topo.logical_die_id)
#define topology_die_id(cpu)			(cpu_data(cpu).topo.die_id)
#define topology_core_id(cpu)			(cpu_data(cpu).topo.core_id)
#define topology_ppin(cpu)			(cpu_data(cpu).ppin)

#define topology_amd_node_id(cpu)		(cpu_data(cpu).topo.amd_node_id)

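/*
 * A minimal sketch of reading a CPU's position with these accessors
 * (hypothetical caller; the pr_info() line is illustrative):
 *
 *	pr_info("cpu %d: pkg %d die %d core %d\n", cpu,
 *		topology_physical_package_id(cpu),
 *		topology_die_id(cpu), topology_core_id(cpu));
 */
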
extern unsigned int __max_dies_per_package;
extern unsigned int __max_logical_packages;
extern unsigned int __max_threads_per_core;
extern unsigned int __num_threads_per_package;
extern unsigned int __num_cores_per_package;

static inline unsigned int topology_max_packages(void)
{
	return __max_logical_packages;
}

static inline unsigned int topology_max_dies_per_package(void)
{
	return __max_dies_per_package;
}

static inline unsigned int topology_num_cores_per_package(void)
{
	return __num_cores_per_package;
}

static inline unsigned int topology_num_threads_per_package(void)
{
	return __num_threads_per_package;
}
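
/*
 * A minimal sketch of sizing per-package state (hypothetical driver
 * code; "pkg_data" is illustrative). Logical package IDs are dense, so
 * topology_max_packages() bounds an array indexed by
 * topology_logical_package_id().
 *
 *	pkg_data = kcalloc(topology_max_packages(), sizeof(*pkg_data),
 *			   GFP_KERNEL);
 */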

#ifdef CONFIG_X86_LOCAL_APIC
int topology_get_logical_id(u32 apicid, enum x86_topology_domains at_level);
#else
static inline int topology_get_logical_id(u32 apicid, enum x86_topology_domains at_level)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
#define topology_cluster_id(cpu)		(cpu_data(cpu).topo.l2c_id)
#define topology_die_cpumask(cpu)		(per_cpu(cpu_die_map, cpu))
#define topology_cluster_cpumask(cpu)		(cpu_clustergroup_mask(cpu))
#define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_sibling_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))

static inline int topology_phys_to_logical_pkg(unsigned int pkg)
{
	return topology_get_logical_id(pkg << x86_topo_system.dom_shifts[TOPO_PKG_DOMAIN],
				       TOPO_PKG_DOMAIN);
}
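
/*
 * A minimal usage sketch (hypothetical uncore-style driver code): map a
 * physical package ID, e.g. one decoded from an MSR, to the dense logical
 * numbering used for array indexing. "phys_pkg" is illustrative, and the
 * result may be a negative error code.
 *
 *	int idx = topology_phys_to_logical_pkg(phys_pkg);
 *	if (idx < 0)
 *		return idx;
 */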

extern int __max_smt_threads;

static inline int topology_max_smt_threads(void)
{
	return __max_smt_threads;
}

#include <linux/cpu_smt.h>

extern unsigned int __amd_nodes_per_pkg;

static inline unsigned int topology_amd_nodes_per_pkg(void)
{
	return __amd_nodes_per_pkg;
}

extern struct cpumask __cpu_primary_thread_mask;
#define cpu_primary_thread_mask ((const struct cpumask *)&__cpu_primary_thread_mask)

/**
 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 * @cpu:	CPU to check
 */
static inline bool topology_is_primary_thread(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_primary_thread_mask);
}
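
/*
 * A minimal sketch (hypothetical caller): count one thread per core by
 * skipping secondary SMT siblings. "nr_cores" and "cpu" are illustrative.
 *
 *	unsigned int cpu, nr_cores = 0;
 *
 *	for_each_online_cpu(cpu)
 *		if (topology_is_primary_thread(cpu))
 *			nr_cores++;
 */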

#else /* !CONFIG_SMP */
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
static inline int topology_max_smt_threads(void) { return 1; }
static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
static inline unsigned int topology_amd_nodes_per_pkg(void) { return 1; }
#endif /* !CONFIG_SMP */

static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}

struct pci_bus;
int x86_pci_root_bus_node(int bus);
void x86_pci_root_bus_resources(int bus, struct list_head *resources);

extern bool x86_topology_update;

#ifdef CONFIG_SCHED_MC_PRIO
#include <asm/percpu.h>

DECLARE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
extern unsigned int __read_mostly sysctl_sched_itmt_enabled;

/* Interface to set the priority of a CPU */
void sched_set_itmt_core_prio(int prio, int core_cpu);

/* Interface to notify the scheduler that the system supports ITMT */
int sched_set_itmt_support(void);

/* Interface to notify the scheduler that the system revokes ITMT support */
void sched_clear_itmt_support(void);
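
/*
 * A minimal sketch of the expected call sequence (hypothetical platform
 * driver; prio_of() is an illustrative helper, not a kernel API): assign
 * a priority to each CPU, then tell the scheduler that ITMT is usable.
 *
 *	for_each_online_cpu(cpu)
 *		sched_set_itmt_core_prio(prio_of(cpu), cpu);
 *	sched_set_itmt_support();
 */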

#else /* CONFIG_SCHED_MC_PRIO */

#define sysctl_sched_itmt_enabled	0
static inline void sched_set_itmt_core_prio(int prio, int core_cpu)
{
}
static inline int sched_set_itmt_support(void)
{
	return 0;
}
static inline void sched_clear_itmt_support(void)
{
}
#endif /* CONFIG_SCHED_MC_PRIO */

#if defined(CONFIG_SMP) && defined(CONFIG_X86_64)
#include <asm/cpufeature.h>

DECLARE_STATIC_KEY_FALSE(arch_scale_freq_key);

#define arch_scale_freq_invariant() static_branch_likely(&arch_scale_freq_key)

DECLARE_PER_CPU(unsigned long, arch_freq_scale);

static inline long arch_scale_freq_capacity(int cpu)
{
	return per_cpu(arch_freq_scale, cpu);
}
#define arch_scale_freq_capacity arch_scale_freq_capacity
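
/*
 * A minimal sketch of a scheduler-style consumer (hypothetical;
 * "delta_exec" is illustrative): the per-CPU value is a
 * SCHED_CAPACITY_SCALE fixed-point ratio of current to maximum frequency,
 * so a multiply-and-shift yields a frequency-invariant quantity.
 *
 *	unsigned long scaled = (delta_exec * arch_scale_freq_capacity(cpu))
 *				>> SCHED_CAPACITY_SHIFT;
 */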

bool arch_enable_hybrid_capacity_scale(void);
void arch_set_cpu_capacity(int cpu, unsigned long cap, unsigned long max_cap,
			   unsigned long cap_freq, unsigned long base_freq);

unsigned long arch_scale_cpu_capacity(int cpu);
#define arch_scale_cpu_capacity arch_scale_cpu_capacity

extern void arch_set_max_freq_ratio(bool turbo_disabled);
extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled);
#else
static inline bool arch_enable_hybrid_capacity_scale(void) { return false; }
static inline void arch_set_cpu_capacity(int cpu, unsigned long cap,
					 unsigned long max_cap,
					 unsigned long cap_freq,
					 unsigned long base_freq) { }

static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { }
#endif

extern void arch_scale_freq_tick(void);
#define arch_scale_freq_tick arch_scale_freq_tick

#endif /* _ASM_X86_TOPOLOGY_H */