// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/panic_notifier.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/seq_buf.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/archrandom.h>
#include <asm/fadump.h>
#include <asm/udbg.h>
#include <asm/hugetlb.h>
#include <asm/livepatch.h>
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>
#include <asm/mce.h>

#include "setup.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
/* The main machine-dependent calls structure. */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
int __initdata boot_core_hwid = -1;

#ifdef CONFIG_PPC64
int boot_cpu_hwid = -1;
#endif

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each ELF executable being started.
 */
int dcache_bsize;
int icache_bsize;
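/*
 * Sketch for orientation (the authoritative definition is the powerpc
 * ARCH_DLINFO macro in <asm/elf.h>): the values above end up in the ELF
 * auxiliary vector roughly as
 *
 *	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);
 *	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);
 *
 * so userspace can query the cache block sizes via getauxval(3).
 */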

/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
int of_i8042_aux_irq;
EXPORT_SYMBOL_GPL(of_i8042_aux_irq);

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

#ifdef CONFIG_CRASH_DUMP
/* This keeps track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif

/* also used by kexec */
void machine_shutdown(void)
{
	/*
	 * If fadump is active, clean up the fadump registration before we
	 * shut down.
	 */
	fadump_cleanup();

	if (ppc_md.machine_shutdown)
		ppc_md.machine_shutdown();
}

static void machine_hang(void)
{
	pr_emerg("System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1)
		;
}

void machine_restart(char *cmd)
{
	machine_shutdown();
	if (ppc_md.restart)
		ppc_md.restart(cmd);

	smp_send_stop();

	do_kernel_restart(cmd);
	mdelay(1000);

	machine_hang();
}

void machine_power_off(void)
{
	machine_shutdown();
	do_kernel_power_off();
	smp_send_stop();
	machine_hang();
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
	if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
		return 1;
	return 0;
}
EXPORT_SYMBOL(arch_get_random_seed_longs);
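/*
 * Note on the contract (hedged; this hook is consumed by the generic
 * random core in drivers/char/random.c): the return value is the number
 * of longs actually filled, at most max_longs. This implementation
 * supplies at most one long per call, taken from the platform's
 * get_random_seed() hook.
 */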

void machine_halt(void)
{
	machine_shutdown();
	if (ppc_md.halt)
		ppc_md.halt();

	smp_send_stop();
	machine_hang();
}

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif
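/*
 * cpu_pvr is recorded per CPU as each one comes online (the SMP boot
 * path reads SPRN_PVR into it), so show_cpuinfo() below can report the
 * PVR of remote CPUs without cross-calling them.
 */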

static void show_cpuinfo_summary(struct seq_file *m)
{
	struct device_node *root;
	const char *model = NULL;
	unsigned long bogosum = 0;
	int i;

	if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_PPC32)) {
		for_each_online_cpu(i)
			bogosum += loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum / (500000 / HZ), bogosum / (5000 / HZ) % 100);
	}
	seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
	if (ppc_md.name)
		seq_printf(m, "platform\t: %s\n", ppc_md.name);
	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	if (model)
		seq_printf(m, "model\t\t: %s\n", model);
	of_node_put(root);

	if (ppc_md.show_cpuinfo != NULL)
		ppc_md.show_cpuinfo(m);

	/* Display the amount of memory */
	if (IS_ENABLED(CONFIG_PPC32))
		seq_printf(m, "Memory\t\t: %d MB\n",
			   (unsigned int)(total_memory / (1024 * 1024)));
}

static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	unsigned int pvr;
	unsigned long proc_freq;
	unsigned short maj;
	unsigned short min;

#ifdef CONFIG_SMP
	pvr = per_cpu(cpu_pvr, cpu_id);
#else
	pvr = mfspr(SPRN_PVR);
#endif
	maj = (pvr >> 8) & 0xFF;
	min = pvr & 0xFF;
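	/*
	 * PVR layout, for reference: the upper 16 bits hold the processor
	 * version (PVR_VER) and the lower 16 bits the revision (PVR_REV).
	 * The default maj.min decoding above takes the high and low bytes
	 * of the revision half; several cores override it further down.
	 */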

	seq_printf(m, "processor\t: %lu\ncpu\t\t: ", cpu_id);

	if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
		seq_puts(m, cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		seq_puts(m, ", altivec supported");

	seq_putc(m, '\n');

#ifdef CONFIG_TAU
	if (cpu_has_feature(CPU_FTR_TAU)) {
		if (IS_ENABLED(CONFIG_TAU_AVERAGE)) {
			/* more straightforward, but potentially misleading */
			seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
				   cpu_temp(cpu_id));
		} else {
			/* show the actual temp sensor range */
			u32 temp;
			temp = cpu_temp_both(cpu_id);
			seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
				   temp & 0xff, temp >> 16);
		}
	}
#endif /* CONFIG_TAU */

	/*
	 * Platforms that have variable clock rates should implement the
	 * ppc_md.get_proc_freq() method, which reports the clock rate of
	 * a given cpu. The rest can use ppc_proc_freq to report the clock
	 * rate that is the same across all cpus.
	 */
	if (ppc_md.get_proc_freq)
		proc_freq = ppc_md.get_proc_freq(cpu_id);
	else
		proc_freq = ppc_proc_freq;

	if (proc_freq)
		seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
			   proc_freq / 1000000, proc_freq % 1000000);

	/* If we are a Freescale core, do a simple check so we don't
	 * have to keep adding cases in the future */
	if (PVR_VER(pvr) & 0x8000) {
		switch (PVR_VER(pvr)) {
		case 0x8000:	/* 7441/7450/7451, Voyager */
		case 0x8001:	/* 7445/7455, Apollo 6 */
		case 0x8002:	/* 7447/7457, Apollo 7 */
		case 0x8003:	/* 7447A, Apollo 7 PM */
		case 0x8004:	/* 7448, Apollo 8 */
		case 0x800c:	/* 7410, Nitro */
			maj = ((pvr >> 8) & 0xF);
			min = PVR_MIN(pvr);
			break;
		default:	/* e500/book-e */
			maj = PVR_MAJ(pvr);
			min = PVR_MIN(pvr);
			break;
		}
	} else {
		switch (PVR_VER(pvr)) {
		case 0x1008:	/* 740P/750P ?? */
			maj = ((pvr >> 8) & 0xFF) - 1;
			min = pvr & 0xFF;
			break;
		case 0x004e:	/* POWER9 bits 12-15 give chip type */
		case 0x0080:	/* POWER10 bit 12 gives SMT8/4 */
			maj = (pvr >> 8) & 0x0F;
			min = pvr & 0xFF;
			break;
		default:
			maj = (pvr >> 8) & 0xFF;
			min = pvr & 0xFF;
			break;
		}
	}

	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
		   maj, min, PVR_VER(pvr), PVR_REV(pvr));

	if (IS_ENABLED(CONFIG_PPC32))
		seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
			   (loops_per_jiffy / (5000 / HZ)) % 100);

	seq_putc(m, '\n');

	/* If this is the last cpu, print the summary */
	if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
		show_cpuinfo_summary(m);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = cpumask_first(cpu_online_mask);
	else
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	if ((*pos) < nr_cpu_ids)
		return (void *)(unsigned long)(*pos + 1);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
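/*
 * How this is consumed, as a rough sketch (the generic side lives in
 * fs/proc/cpuinfo.c): a read of /proc/cpuinfo iterates approximately as
 *
 *	for (v = c_start(m, &pos); v; v = c_next(m, v, &pos))
 *		show_cpuinfo(m, v);
 *
 * c_start()/c_next() return cpu_id + 1 so that a valid CPU 0 is never
 * confused with the NULL end-of-sequence marker, which is why
 * show_cpuinfo() subtracts 1 from v.
 */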

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	DBG(" -> check_for_initrd()  initrd_start=0x%lx  initrd_end=0x%lx\n",
	    initrd_start, initrd_end);

	/* If we were passed an initrd, set ROOT_DEV properly if the values
	 * look sensible. If not, clear the initrd reference.
	 */
	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP

int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
cpumask_t threads_core_mask __read_mostly;
EXPORT_SYMBOL_GPL(threads_per_core);
EXPORT_SYMBOL_GPL(threads_per_subcore);
EXPORT_SYMBOL_GPL(threads_shift);
EXPORT_SYMBOL_GPL(threads_core_mask);

static void __init cpu_init_thread_core_maps(int tpc)
{
	int i;

	threads_per_core = tpc;
	threads_per_subcore = tpc;
	cpumask_clear(&threads_core_mask);

	/* This implementation only supports a power-of-2 number of
	 * threads for simplicity and performance
	 */
	threads_shift = ilog2(tpc);
	BUG_ON(tpc != (1 << threads_shift));

	for (i = 0; i < tpc; i++)
		cpumask_set_cpu(i, &threads_core_mask);

	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
	       tpc, str_plural(tpc));
	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}
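/*
 * For orientation (hedged sketch; the real helpers live in
 * <asm/cputhreads.h>): the maps set up above reduce thread queries to
 * simple shifts and masks, roughly
 *
 *	cpu_thread_in_core(cpu)       => cpu & (threads_per_core - 1)
 *	cpu_first_thread_sibling(cpu) => cpu & ~(threads_per_core - 1)
 *
 * which is why a power-of-2 thread count is enforced with BUG_ON()
 * above.
 */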


u32 *cpu_to_phys_id = NULL;

static int assign_threads(unsigned int cpu, unsigned int nthreads, bool present,
			  const __be32 *hw_ids)
{
	for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) {
		u32 hwid;

		hwid = be32_to_cpu(hw_ids[i]);

		DBG("    thread %d -> cpu %d (hard id %d)\n", i, cpu, hwid);

		set_cpu_present(cpu, present);
		set_cpu_possible(cpu, true);
		cpu_to_phys_id[cpu] = hwid;
		cpu++;
	}

	return cpu;
}

/**
 * smp_setup_cpu_maps - initialize the following cpu maps:
 *                      cpu_possible_mask
 *                      cpu_present_mask
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_mask as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
	struct device_node *dn;
	int cpu = 0;
	int nthreads = 1;

	DBG("smp_setup_cpu_maps()\n");

	cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
					__alignof__(u32));
	if (!cpu_to_phys_id)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));

	for_each_node_by_type(dn, "cpu") {
		const __be32 *intserv;
		__be32 cpu_be;
		int len;

		DBG("  * %pOF...\n", dn);

		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
				&len);
		if (intserv) {
			DBG("    ibm,ppc-interrupt-server#s -> %lu threads\n",
			    (len / sizeof(int)));
		} else {
			DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
			intserv = of_get_property(dn, "reg", &len);
			if (!intserv) {
				cpu_be = cpu_to_be32(cpu);
				intserv = &cpu_be;	/* assume logical == phys */
				len = 4;
			}
		}

		nthreads = len / sizeof(int);

		bool avail = of_device_is_available(dn);
		if (!avail)
			avail = !of_property_match_string(dn,
					"enable-method", "spin-table");

		if (boot_core_hwid >= 0) {
			if (cpu == 0) {
				pr_info("Skipping CPU node %pOF to allow for boot core.\n", dn);
				cpu = nthreads;
				continue;
			}

			if (be32_to_cpu(intserv[0]) == boot_core_hwid) {
				pr_info("Renumbered boot core %pOF to logical 0\n", dn);
				assign_threads(0, nthreads, avail, intserv);
				of_node_put(dn);
				break;
			}
		} else if (cpu >= nr_cpu_ids) {
			of_node_put(dn);
			break;
		}

		if (cpu < nr_cpu_ids)
			cpu = assign_threads(cpu, nthreads, avail, intserv);
	}

	/* If SMT is not supported, nthreads is forced to 1 */
	if (!cpu_has_feature(CPU_FTR_SMT)) {
		DBG("  SMT disabled ! nthreads forced to 1\n");
		nthreads = 1;
	}

#ifdef CONFIG_PPC64
	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		const __be32 *ireg;

		num_addr_cell = of_n_addr_cells(dn);
		num_size_cell = of_n_size_cells(dn);

		ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= nthreads;

		if (maxcpus > nr_cpu_ids) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %u.\n",
			       maxcpus, nr_cpu_ids);
			maxcpus = nr_cpu_ids;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			set_cpu_possible(cpu, true);
	out:
		of_node_put(dn);
	}
	vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */

	/* Initialize CPU <=> thread mapping.
	 *
	 * WARNING: We assume that the number of threads is the same for
	 * every CPU in the system. If that is not the case, then some code
	 * here will have to be reworked.
	 */
	cpu_init_thread_core_maps(nthreads);

	/* Now that possible cpus are set, set nr_cpu_ids for later use */
	setup_nr_cpu_ids();

	free_unused_pacas();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PCSPKR_PLATFORM
static __init int add_pcspkr(void)
{
	struct device_node *np;
	struct platform_device *pd;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
	of_node_put(np);
	if (!np)
		return -ENODEV;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif	/* CONFIG_PCSPKR_PLATFORM */

static char ppc_hw_desc_buf[128] __initdata;

struct seq_buf ppc_hw_desc __initdata = {
	.buffer = ppc_hw_desc_buf,
	.size = sizeof(ppc_hw_desc_buf),
	.len = 0,
};
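/*
 * Note (hedged): other early boot code may already have appended details
 * such as the machine model to ppc_hw_desc via seq_buf_printf();
 * probe_machine() below appends the platform name and then publishes
 * the combined string as the oops "Hardware name".
 */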

static __init void probe_machine(void)
{
	extern struct machdep_calls __machine_desc_start;
	extern struct machdep_calls __machine_desc_end;
	unsigned int i;

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type ...\n");

	/*
	 * Check that ppc_md is empty. If not we have a bug, i.e. we set up
	 * an entry before probe_machine(), and it will be overwritten here.
	 */
	for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
		if (((void **)&ppc_md)[i]) {
			printk(KERN_ERR "Entry %d in ppc_md non-empty before"
			       " machine probe!\n", i);
		}
	}

	for (machine_id = &__machine_desc_start;
	     machine_id < &__machine_desc_end;
	     machine_id++) {
		DBG("  %s ...\n", machine_id->name);
		if (machine_id->compatible && !of_machine_is_compatible(machine_id->compatible))
			continue;
		if (machine_id->compatibles && !of_machine_compatible_match(machine_id->compatibles))
			continue;
		memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
		if (ppc_md.probe && !ppc_md.probe())
			continue;
		DBG("   %s match!\n", machine_id->name);
		break;
	}
	/* What can we do if we didn't find one? */
	if (machine_id >= &__machine_desc_end) {
		pr_err("No suitable machine description found!\n");
		for (;;);
	}

	// Append the machine name to other info we've gathered
	seq_buf_puts(&ppc_hw_desc, ppc_md.name);

	// Set the generic hardware description shown in oopses
	dump_stack_set_arch_desc(ppc_hw_desc.buffer);

	pr_info("Hardware name: %s\n", ppc_hw_desc.buffer);
}
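/*
 * For orientation (hedged sketch; the real macro is in <asm/machdep.h>):
 * platforms populate the __machine_desc section scanned above with
 * define_machine(), roughly
 *
 *	define_machine(pseries) {
 *		.name  = "pSeries",
 *		.probe = pSeries_probe,
 *		...
 *	};
 *
 * The first descriptor whose compatible/probe checks pass is copied
 * wholesale into ppc_md.
 */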

/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
	struct device_node *parent, *np = NULL;
	int ret = -ENODEV;

	switch (base_port) {
	case I8042_DATA_REG:
		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
		if (np) {
			parent = of_get_parent(np);

			of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
			if (!of_i8042_kbd_irq)
				of_i8042_kbd_irq = 1;

			of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
			if (!of_i8042_aux_irq)
				of_i8042_aux_irq = 12;

			of_node_put(np);
			np = parent;
			break;
		}
		np = of_find_node_by_type(NULL, "8042");
		/* Pegasos has no device_type on its 8042 node, look for the
		 * name instead */
		if (!np)
			np = of_find_node_by_name(NULL, "8042");
		if (np) {
			of_i8042_kbd_irq = 1;
			of_i8042_aux_irq = 12;
		}
		break;
	case FDC_BASE: /* FDC1 */
		np = of_find_node_by_type(NULL, "fdc");
		break;
	default:
		/* ipmi is supposed to fail here */
		break;
	}
	if (!np)
		return ret;
	parent = of_get_parent(np);
	if (parent) {
		if (of_node_is_type(parent, "isa"))
			ret = 0;
		of_node_put(parent);
	}
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);
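/*
 * Typical use, as a hedged sketch (legacy-port drivers such as i8042
 * call this during probe): a non-zero return means "no such legacy
 * device behind an ISA bridge", e.g.
 *
 *	if (check_legacy_ioport(I8042_DATA_REG))
 *		return -ENODEV;
 *
 * so machines without the device skip legacy setup entirely.
 */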

/*
 * Panic notifiers setup
 *
 * We have 3 notifiers for powerpc, each one of a different "nature":
 *
 * - ppc_panic_fadump_handler() is a hypervisor notifier, which hard-disables
 *   IRQs and deals with the Firmware-Assisted dump, when it is configured;
 *   it should run early in the panic path.
 *
 * - dump_kernel_offset() is an informative notifier, just showing the KASLR
 *   offset if we have RANDOMIZE_BASE set.
 *
 * - ppc_panic_platform_handler() is a low-level handler that's registered
 *   only if the platform wishes to perform final actions in the panic path,
 *   hence it should run late and might not even return. Currently, only
 *   pseries and ps3 platforms register callbacks.
 */
static int ppc_panic_fadump_handler(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	/*
	 * panic does a local_irq_disable, but we really
	 * want interrupts to be hard disabled.
	 */
	hard_irq_disable();

	/*
	 * If a firmware-assisted dump has been registered, trigger
	 * its callback and let the firmware handle everything else.
	 */
	crash_fadump(NULL, ptr);

	return NOTIFY_DONE;
}

static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
			      void *p)
{
	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
		 kaslr_offset(), KERNELBASE);

	return NOTIFY_DONE;
}

static int ppc_panic_platform_handler(struct notifier_block *this,
				      unsigned long event, void *ptr)
{
	/*
	 * This handler is only registered if we have a panic callback
	 * on ppc_md, hence a NULL check is not needed.
	 * Also, it may not return, so it runs really late in the panic path.
	 */
	ppc_md.panic(ptr);

	return NOTIFY_DONE;
}

static struct notifier_block ppc_fadump_block = {
	.notifier_call = ppc_panic_fadump_handler,
	.priority = INT_MAX, /* run early, to notify the firmware ASAP */
};

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset,
};

static struct notifier_block ppc_panic_block = {
	.notifier_call = ppc_panic_platform_handler,
	.priority = INT_MIN, /* may not return; must be done last */
};

void __init setup_panic(void)
{
	/* Hard-disables IRQs + deals with the FW-assisted dump (fadump) */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &ppc_fadump_block);

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kernel_offset_notifier);

	/* Low-level platform-specific routines that should run on panic */
	if (ppc_md.panic)
		atomic_notifier_chain_register(&panic_notifier_list,
					       &ppc_panic_block);
}

#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency.  This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree.  Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#define KERNEL_COHERENCY	(!IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))

static int __init check_cache_coherency(void)
{
	struct device_node *np;
	const void *prop;
	bool devtree_coherency;

	np = of_find_node_by_path("/");
	prop = of_get_property(np, "coherency-off", NULL);
	of_node_put(np);

	devtree_coherency = prop ? false : true;

	if (devtree_coherency != KERNEL_COHERENCY) {
		printk(KERN_ERR
			"kernel coherency:%s != device tree coherency:%s\n",
			KERNEL_COHERENCY ? "on" : "off",
			devtree_coherency ? "on" : "off");
		BUG();
	}

	return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */

void ppc_printk_progress(char *s, unsigned short hex)
{
	pr_info("%s\n", s);
}

static __init void print_system_info(void)
{
	pr_info("-----------------------------------------------------\n");
	pr_info("phys_mem_size     = 0x%llx\n",
		(unsigned long long)memblock_phys_mem_size());

	pr_info("dcache_bsize      = 0x%x\n", dcache_bsize);
	pr_info("icache_bsize      = 0x%x\n", icache_bsize);

	pr_info("cpu_features      = 0x%016lx\n", cur_cpu_spec->cpu_features);
	pr_info("  possible        = 0x%016lx\n",
		(unsigned long)CPU_FTRS_POSSIBLE);
	pr_info("  always          = 0x%016lx\n",
		(unsigned long)CPU_FTRS_ALWAYS);
	pr_info("cpu_user_features = 0x%08x 0x%08x\n",
		cur_cpu_spec->cpu_user_features,
		cur_cpu_spec->cpu_user_features2);
	pr_info("mmu_features      = 0x%08x\n", cur_cpu_spec->mmu_features);
#ifdef CONFIG_PPC64
	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
#ifdef CONFIG_PPC_BOOK3S
	pr_info("vmalloc start     = 0x%lx\n", KERN_VIRT_START);
	pr_info("IO start          = 0x%lx\n", KERN_IO_START);
	pr_info("vmemmap start     = 0x%lx\n", (unsigned long)vmemmap);
#endif
#endif

	if (!early_radix_enabled())
		print_system_hash_info();

	if (PHYSICAL_START > 0)
		pr_info("physical_start    = 0x%llx\n",
			(unsigned long long)PHYSICAL_START);
	pr_info("-----------------------------------------------------\n");
}

#ifdef CONFIG_SMP
static void __init smp_setup_pacas(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		allocate_paca(cpu);
		set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
	}

	memblock_free(cpu_to_phys_id, nr_cpu_ids * sizeof(u32));
	cpu_to_phys_id = NULL;
}
#endif

/*
 * Called from start_kernel(), this initializes memblock, which is used
 * to manage page allocation until mem_init() is called.
 */
void __init setup_arch(char **cmdline_p)
{
	kasan_init();

	*cmdline_p = boot_command_line;

	/* Set a half-reasonable default so udelay does something sensible */
	loops_per_jiffy = 500000000 / HZ;

	/* Unflatten the device-tree passed by prom_init or kexec */
	unflatten_device_tree();

	/*
	 * Initialize cache line/block info from device-tree (on ppc64) or
	 * just cputable (on ppc32).
	 */
	initialize_cache_info();

	/* Initialize RTAS if available. */
	rtas_initialize();

	/* Check if we have an initrd provided via the device-tree. */
	check_for_initrd();

	/* Probe the machine type, establish ppc_md. */
	probe_machine();

	/* Setup panic notifier if requested by the platform. */
	setup_panic();

	/*
	 * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
	 * it from their respective probe() functions).
	 */
	setup_power_save();

	/* Discover standard serial ports. */
	find_legacy_serial_ports();

	/* Register early console with the printk subsystem. */
	register_early_udbg_console();

	/* Setup the various CPU maps based on the device-tree. */
	smp_setup_cpu_maps();

	/* Initialize xmon. */
	xmon_setup();

	/* Check the SMT related command line arguments (ppc64). */
	check_smt_enabled();

	/* Parse memory topology */
	mem_topology_setup();
	/* Set max_mapnr before paging_init() */
	set_max_mapnr(max_pfn);
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/*
	 * Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids.
	 *
	 * Freescale Book3e parts spin in a loop provided by firmware,
	 * so smp_release_cpus() does nothing for them.
	 */
#ifdef CONFIG_SMP
	smp_setup_pacas();

	/* On BookE, setup per-core TLB data structures. */
	setup_tlb_core_data();
#endif

	/* Print various info about the machine that has been gathered so far. */
	print_system_info();

	klp_init_thread_info(&init_task);

	setup_initial_init_mm(_stext, _etext, _edata, _end);
	/* sched_init() does the mmgrab(&init_mm) for the primary CPU */
	VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(&init_mm));
	inc_mm_active_cpus(&init_mm);
	mm_iommu_init(&init_mm);

	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();

	mce_init();
	smp_release_cpus();

	initmem_init();

	/*
	 * Reserve large chunks of memory for use by CMA for KVM and hugetlb.
	 * These must be called after initmem_init(), so that pageblock_order
	 * is initialised.
	 */
	kvm_cma_reserve();
	gigantic_hugetlb_cma_reserve();

	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);

	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

	setup_barrier_nospec();
	setup_spectre_v2();

	paging_init();

	/* Initialize the MMU context management stuff. */
	mmu_context_init();

	/* Interrupt code needs to be 64K-aligned. */
	if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);
}