// SPDX-License-Identifier: GPL-2.0-only
/*
 * machine_kexec.c for kexec
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/libfdt.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* 0x100000 ~ 0x200000 is safe */
#define KEXEC_CONTROL_CODE	TO_CACHE(0x100000UL)
#define KEXEC_CMDLINE_ADDR	TO_CACHE(0x108000UL)

static unsigned long reboot_code_buffer;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;

#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif

static unsigned long efi_boot;
static unsigned long cmdline_ptr;
static unsigned long systable_ptr;
static unsigned long start_addr;
static unsigned long first_ind_entry;

kexec_image_info(const struct kimage * kimage)42  static void kexec_image_info(const struct kimage *kimage)
43  {
44  	unsigned long i;
45  
46  	pr_debug("kexec kimage info:\n");
47  	pr_debug("\ttype:        %d\n", kimage->type);
48  	pr_debug("\tstart:       %lx\n", kimage->start);
49  	pr_debug("\thead:        %lx\n", kimage->head);
50  	pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
51  
52  	for (i = 0; i < kimage->nr_segments; i++) {
53  		pr_debug("\t    segment[%lu]: %016lx - %016lx", i,
54  			kimage->segment[i].mem,
55  			kimage->segment[i].mem + kimage->segment[i].memsz);
56  		pr_debug("\t\t0x%lx bytes, %lu pages\n",
57  			(unsigned long)kimage->segment[i].memsz,
58  			(unsigned long)kimage->segment[i].memsz /  PAGE_SIZE);
59  	}
60  }
machine_kexec_prepare(struct kimage * kimage)62  int machine_kexec_prepare(struct kimage *kimage)
63  {
64  	int i;
65  	char *bootloader = "kexec";
66  	void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
67  
68  	kexec_image_info(kimage);
69  
70  	kimage->arch.efi_boot = fw_arg0;
71  	kimage->arch.systable_ptr = fw_arg2;
72  
73  	/* Find the command line */
74  	for (i = 0; i < kimage->nr_segments; i++) {
75  		if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
76  			if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
77  				kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
78  			break;
79  		}
80  	}
81  
82  	if (!kimage->arch.cmdline_ptr) {
83  		pr_err("Command line not included in the provided image\n");
84  		return -EINVAL;
85  	}
86  
87  	/* kexec/kdump need a safe page to save reboot_code_buffer */
88  	kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE);
89  
90  	reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page);
91  	memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
92  
93  #ifdef CONFIG_SMP
94  	/* All secondary cpus now may jump to kexec_smp_wait cycle */
95  	relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel);
96  #endif
97  
98  	return 0;
99  }
/* Nothing to undo: machine_kexec_prepare() allocates no resources. */
void machine_kexec_cleanup(struct kimage *kimage)
{
}
/*
 * Jump into the relocated code.
 *
 * On CPU 0 this tail-calls the copy of relocate_new_kernel placed in
 * reboot_code_buffer, passing the arguments captured in machine_kexec().
 * On secondary CPUs (SMP) it parks in the relocated kexec_smp_wait loop
 * instead.  This function never returns.
 */
void kexec_reboot(void)
{
	do_kexec_t do_kexec = NULL;

	/*
	 * We know we were online, and there will be no incoming IPIs at
	 * this point. Mark online again before rebooting so that the crash
	 * analysis tool will see us correctly.
	 */
	set_cpu_online(smp_processor_id(), true);

	/* Ensure remote CPUs observe that we're online before rebooting. */
	smp_mb__after_atomic();

	/*
	 * Make sure we get correct instructions written by the
	 * machine_kexec_prepare() CPU.
	 */
	__asm__ __volatile__ ("\tibar 0\n"::);

#ifdef CONFIG_SMP
	/* All secondary cpus go to kexec_smp_wait */
	if (smp_processor_id() > 0) {
		relocated_kexec_smp_wait(NULL);
		unreachable();
	}
#endif

	/* Boot CPU: enter the relocated stub with the saved boot arguments. */
	do_kexec = (void *)reboot_code_buffer;
	do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);

	unreachable();
}


#ifdef CONFIG_SMP
/*
 * IPI handler for a normal (non-crash) kexec: take this CPU offline,
 * then spin until machine_kexec() signals that the relocated code is
 * ready, and finally park in it via kexec_reboot().
 */
static void kexec_shutdown_secondary(void *regs)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	/* Busy-wait for the go-ahead set at the end of machine_kexec(). */
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();
}
/*
 * IPI handler on the crash path: save this CPU's register state for the
 * crash dump, mark the CPU offline and checked-in via cpus_in_crash,
 * then park until the crashing CPU releases everyone into kexec_reboot().
 */
static void crash_shutdown_secondary(void *passed_regs)
{
	int cpu = smp_processor_id();
	struct pt_regs *regs = passed_regs;

	/*
	 * If we are passed registers, use those. Otherwise get the
	 * regs from the last interrupt, which should be correct, as
	 * we are in an interrupt. But if the regs are not there,
	 * pull them from the top of the stack. They are probably
	 * wrong, but we need something to keep from crashing again.
	 */
	if (!regs)
		regs = get_irq_regs();
	if (!regs)
		regs = task_pt_regs(current);

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	/* Save this CPU's state only once, then mark it as checked in. */
	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpumask_set_cpu(cpu, &cpus_in_crash);

	/* Busy-wait for machine_kexec() on the crashing CPU to give the go-ahead. */
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();
}
crash_smp_send_stop(void)192  void crash_smp_send_stop(void)
193  {
194  	unsigned int ncpus;
195  	unsigned long timeout;
196  	static int cpus_stopped;
197  
198  	/*
199  	 * This function can be called twice in panic path, but obviously
200  	 * we should execute this only once.
201  	 */
202  	if (cpus_stopped)
203  		return;
204  
205  	cpus_stopped = 1;
206  
207  	 /* Excluding the panic cpu */
208  	ncpus = num_online_cpus() - 1;
209  
210  	smp_call_function(crash_shutdown_secondary, NULL, 0);
211  	smp_wmb();
212  
213  	/*
214  	 * The crash CPU sends an IPI and wait for other CPUs to
215  	 * respond. Delay of at least 10 seconds.
216  	 */
217  	timeout = MSEC_PER_SEC * 10;
218  	pr_emerg("Sending IPI to other cpus...\n");
219  	while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) {
220  		mdelay(1);
221  		cpu_relax();
222  	}
223  }
#endif /* defined(CONFIG_SMP) */

/*
 * Normal-kexec shutdown: bring every possible-but-offline CPU online so
 * that all CPUs can be funneled into the relocated wait loop, then send
 * the secondaries to kexec_shutdown_secondary().
 */
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
	int cpu;

	/* All CPUs go to reboot_code_buffer */
	for_each_possible_cpu(cpu) {
		if (!cpu_online(cpu))
			cpu_device_up(get_cpu_device(cpu));
	}

	smp_call_function(kexec_shutdown_secondary, NULL, 0);
#endif
}
machine_crash_shutdown(struct pt_regs * regs)240  void machine_crash_shutdown(struct pt_regs *regs)
241  {
242  	int crashing_cpu;
243  
244  	local_irq_disable();
245  
246  	crashing_cpu = smp_processor_id();
247  	crash_save_cpu(regs, crashing_cpu);
248  
249  #ifdef CONFIG_SMP
250  	crash_smp_send_stop();
251  #endif
252  	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
253  
254  	pr_info("Starting crashdump kernel...\n");
255  }
/*
 * Arch entry point for kexec proper: capture the boot arguments for the
 * relocated stub, convert the kimage indirection page list from physical
 * to cached virtual addresses, take this CPU offline, and jump into the
 * relocated code.  Does not return.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long entry, *ptr;
	struct kimage_arch *internal = &image->arch;

	/* Arguments handed to the relocation stub by kexec_reboot(). */
	efi_boot = internal->efi_boot;
	cmdline_ptr = internal->cmdline_ptr;
	systable_ptr = internal->systable_ptr;

	start_addr = (unsigned long)phys_to_virt(image->start);

	/* Non-default (crash) images are pre-placed: no page list to walk. */
	first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ?
		(unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0;

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses. they are directly accessible through XKPRANGE
	 * hence the phys_to_virt() call.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/* Mark offline before disabling local irq. */
	set_cpu_online(smp_processor_id(), false);

	/* We do not want to be bothered. */
	local_irq_disable();

	pr_notice("EFI boot flag 0x%lx\n", efi_boot);
	pr_notice("Command line at 0x%lx\n", cmdline_ptr);
	pr_notice("System table at 0x%lx\n", systable_ptr);
	pr_notice("We will call new kernel at 0x%lx\n", start_addr);
	pr_notice("Bye ...\n");

	/* Make reboot code buffer available to the boot CPU. */
	flush_cache_all();

#ifdef CONFIG_SMP
	/* Release the parked secondary CPUs into kexec_reboot(). */
	atomic_set(&kexec_ready_to_reboot, 1);
#endif

	kexec_reboot();
}