/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * smp.h: PowerPC-specific SMP code.
 *
 * Original was a copy of sparc smp.h.  Now heavily modified
 * for PPC.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
 */

#ifndef _ASM_POWERPC_SMP_H
#define _ASM_POWERPC_SMP_H
#ifdef __KERNEL__

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/irqreturn.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/percpu.h>

extern int boot_cpuid;
extern int boot_cpu_hwid; /* PPC64 only */
extern int boot_core_hwid;
extern int spinning_secondaries;
extern u32 *cpu_to_phys_id;
extern bool coregroup_enabled;

extern int cpu_to_chip_id(int cpu);
extern int *chip_id_lookup_table;

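/*
 * Per-CPU masks of the sibling threads that share the given cache level
 * within a core, derived from the "ibm,thread-groups" device tree
 * property.
 */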
DECLARE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
DECLARE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
DECLARE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);

#ifdef CONFIG_SMP

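/*
 * Platform-specific SMP operations: IPI delivery, secondary CPU bring-up,
 * timebase synchronization and CPU hotplug hooks.  Each platform installs
 * its implementation via the global smp_ops pointer.
 */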
struct smp_ops_t {
	void  (*message_pass)(int cpu, int msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	void  (*cause_ipi)(int cpu);
#endif
	int   (*cause_nmi_ipi)(int cpu);
	void  (*probe)(void);
	int   (*kick_cpu)(int nr);
	int   (*prepare_cpu)(int nr);
	void  (*setup_cpu)(int nr);
	void  (*bringup_done)(void);
	void  (*take_timebase)(void);
	void  (*give_timebase)(void);
	int   (*cpu_disable)(void);
	void  (*cpu_die)(unsigned int nr);
	int   (*cpu_bootable)(unsigned int nr);
#ifdef CONFIG_HOTPLUG_CPU
	void  (*cpu_offline_self)(void);
#endif
};
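/*
 * For illustration only: a platform typically provides a statically
 * initialized smp_ops_t (the name "example_smp_ops" below is made up)
 * built from helpers declared later in this header, and points smp_ops
 * at it from its setup code, for example:
 *
 *	static struct smp_ops_t example_smp_ops = {
 *		.probe		= smp_mpic_probe,
 *		.message_pass	= smp_muxed_ipi_message_pass,
 *		.kick_cpu	= smp_generic_kick_cpu,
 *		.setup_cpu	= smp_mpic_setup_cpu,
 *		.cpu_bootable	= smp_generic_cpu_bootable,
 *	};
 *	smp_ops = &example_smp_ops;
 *
 * (Using smp_muxed_ipi_message_pass also requires a cause_ipi hook when
 * CONFIG_PPC_SMP_MUXED_IPI is enabled.)
 */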

extern struct task_struct *secondary_current;

void start_secondary(void *unused);
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
extern void __noreturn start_secondary_resume(void);
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);

DECLARE_PER_CPU(unsigned int, cpu_pvr);

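/*
 * Generic CPU hotplug helpers: the CPU going down marks itself with
 * generic_set_cpu_dead(), while generic_cpu_die() is polled from another
 * CPU to wait for the offline to complete.
 */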
#ifdef CONFIG_HOTPLUG_CPU
int generic_cpu_disable(void);
void generic_cpu_die(unsigned int cpu);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
int is_cpu_dead(unsigned int cpu);
#else
#define generic_set_cpu_up(i)	do { } while (0)
#endif

#ifdef CONFIG_PPC64
#define raw_smp_processor_id()	(local_paca->paca_index)
#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
#else
/* 32-bit */
extern int smp_hw_index[];

#define raw_smp_processor_id()		(current_thread_info()->cpu)
#define hard_smp_processor_id()		(smp_hw_index[smp_processor_id()])

static inline int get_hard_smp_processor_id(int cpu)
{
	return smp_hw_index[cpu];
}

static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	smp_hw_index[cpu] = phys;
}
#endif

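/*
 * Per-CPU topology masks:
 *   cpu_sibling_map   - threads of the same core
 *   cpu_l2_cache_map  - CPUs sharing an L2 cache
 *   cpu_core_map      - CPUs on the same chip
 *   cpu_smallcore_map - threads of the same SMT4 "small" core on
 *                       big-core (SMT8) systems
 */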
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_smallcore_map);

static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
	return per_cpu(cpu_core_map, cpu);
}

static inline struct cpumask *cpu_l2_cache_mask(int cpu)
{
	return per_cpu(cpu_l2_cache_map, cpu);
}

static inline struct cpumask *cpu_smallcore_mask(int cpu)
{
	return per_cpu(cpu_smallcore_map, cpu);
}

extern int cpu_to_core_id(int cpu);

extern bool has_big_cores;
extern bool thread_group_shares_l2;
extern bool thread_group_shares_l3;

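/*
 * The scheduler's SMT mask: on big-core (SMT8) systems the threads of a
 * core are split into two SMT4 groups with separate L1 caches, so the
 * SMT sched domain is built from the small-core siblings rather than the
 * full sibling mask.
 */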
#define cpu_smt_mask cpu_smt_mask
#ifdef CONFIG_SCHED_SMT
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
	if (has_big_cores)
		return per_cpu(cpu_smallcore_map, cpu);

	return per_cpu(cpu_sibling_map, cpu);
}
#endif /* CONFIG_SCHED_SMT */

/*
 * Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
 *
 * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
 * in /proc/interrupts will be wrong!!! --Troy
 */
#define PPC_MSG_CALL_FUNCTION	0
#define PPC_MSG_RESCHEDULE	1
#define PPC_MSG_TICK_BROADCAST	2
#define PPC_MSG_NMI_IPI		3

/* This is only used by the powernv kernel */
#define PPC_MSG_RM_HOST_ACTION	4

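/*
 * Special "cpu" argument to smp_send_nmi_ipi(): target every online CPU
 * except the caller.
 */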
#define NMI_IPI_ALL_OTHERS		-2

#ifdef CONFIG_NMI_IPI
extern int smp_handle_nmi_ipi(struct pt_regs *regs);
#else
static inline int smp_handle_nmi_ipi(struct pt_regs *regs) { return 0; }
#endif

/* for irq controllers that have dedicated ipis per message (4) */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];

/* for irq controllers with only a single ipi */
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
extern void smp_muxed_ipi_set_message(int cpu, int msg);
extern irqreturn_t smp_ipi_demux(void);
extern irqreturn_t smp_ipi_demux_relaxed(void);
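/*
 * Rough flow for the muxed case: smp_muxed_ipi_message_pass() sets the
 * message bit in the target CPU's message word and uses cause_ipi() to
 * raise the single hardware IPI; the receiving CPU's IPI handler then
 * calls smp_ipi_demux() to act on every pending message.
 */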

void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_setup_cpu_maps(void);

extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);

#else
/* for UP */
#define hard_smp_processor_id()		get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
#define thread_group_shares_l2	0
#define thread_group_shares_l3	0
static inline const struct cpumask *cpu_sibling_mask(int cpu)
{
	return cpumask_of(cpu);
}

static inline const struct cpumask *cpu_smallcore_mask(int cpu)
{
	return cpumask_of(cpu);
}

static inline const struct cpumask *cpu_l2_cache_mask(int cpu)
{
	return cpumask_of(cpu);
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC64
static inline int get_hard_smp_processor_id(int cpu)
{
	return paca_ptrs[cpu]->hw_cpu_id;
}

static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	paca_ptrs[cpu]->hw_cpu_id = phys;
}
#else
/* 32-bit */
#ifndef CONFIG_SMP
extern int boot_cpuid_phys;
static inline int get_hard_smp_processor_id(int cpu)
{
	return boot_cpuid_phys;
}

static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	boot_cpuid_phys = phys;
}
#endif /* !CONFIG_SMP */
#endif /* !CONFIG_PPC64 */

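/*
 * On 64-bit, smp_release_cpus() releases the secondary CPUs held in the
 * early boot spinloop (see __secondary_hold below) so they can enter
 * generic_secondary_smp_init().
 */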
#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
extern void smp_release_cpus(void);
#else
static inline void smp_release_cpus(void) { }
#endif

extern int smt_enabled_at_boot;

extern void smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
extern int smp_generic_kick_cpu(int nr);
extern int smp_generic_cpu_bootable(unsigned int nr);

extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);

extern struct smp_ops_t *smp_ops;

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

/*
 * Definitions related to the secondary CPU spin loop and entry point.
 * Not all of them exist on both 32-bit and 64-bit, but declaring them
 * all here is harmless.
 */
extern void generic_secondary_smp_init(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
extern unsigned int booting_thread_hwid;

extern void __early_start(void);
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SMP_H */