// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "suspend: " fmt

#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/suspend.h>

void suspend_save_csrs(struct suspend_context *context)
{
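	/*
	 * The *envcfg CSR only exists when the kernel has detected the
	 * corresponding support (tracked as the Linux-internal XLINUXENVCFG
	 * pseudo-extension), so save it only in that case.
	 */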
	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
		context->envcfg = csr_read(CSR_ENVCFG);
	context->tvec = csr_read(CSR_TVEC);
	context->ie = csr_read(CSR_IE);

	/*
	 * No need to save/restore IP CSR (i.e. MIP or SIP) because:
	 *
	 * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
	 *    external devices (such as interrupt controller, timer, etc).
	 * 2. For MMU (S-mode) kernel, the bits in SIP are set by
	 *    M-mode firmware and external devices (such as interrupt
	 *    controller, etc).
	 */

#ifdef CONFIG_MMU
	context->satp = csr_read(CSR_SATP);
#endif
}

void suspend_restore_csrs(struct suspend_context *context)
{
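	/*
	 * Clear the scratch CSR: while executing in the kernel it must be
	 * zero so that the trap entry code can tell kernel-mode traps from
	 * user-mode traps.
	 */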
	csr_write(CSR_SCRATCH, 0);
	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
		csr_write(CSR_ENVCFG, context->envcfg);
	csr_write(CSR_TVEC, context->tvec);
	csr_write(CSR_IE, context->ie);

#ifdef CONFIG_MMU
	csr_write(CSR_SATP, context->satp);
#endif
}

int cpu_suspend(unsigned long arg,
		int (*finish)(unsigned long arg,
			      unsigned long entry,
			      unsigned long context))
{
	int rc = 0;
	struct suspend_context context = { 0 };

	/* Finisher should be non-NULL */
	if (!finish)
		return -EINVAL;

	/* Save additional CSRs */
	suspend_save_csrs(&context);

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka finishers), hence disable
	 * graph tracing during their execution.
	 */
	pause_graph_tracing();

	/* Save context on stack */
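	/*
	 * __cpu_suspend_enter() returns non-zero after stashing the CPU
	 * context; when the hart later resumes via __cpu_resume_enter(),
	 * the saved context is restored and control returns here with a
	 * zero value, so the finisher block below is skipped on resume.
	 */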
	if (__cpu_suspend_enter(&context)) {
		/* Call the finisher */
		rc = finish(arg, __pa_symbol(__cpu_resume_enter),
			    (ulong)&context);

		/*
		 * Should never reach here, unless the suspend finisher
		 * fails. Successful cpu_suspend() should return from
		 * __cpu_resume_enter()
		 */
		if (!rc)
			rc = -EOPNOTSUPP;
	}

	/* Enable function graph tracer */
	unpause_graph_tracing();

	/* Restore additional CSRs */
	suspend_restore_csrs(&context);

	return rc;
}

#ifdef CONFIG_RISCV_SBI
static int sbi_system_suspend(unsigned long sleep_type,
			      unsigned long resume_addr,
			      unsigned long opaque)
{
	struct sbiret ret;

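	/*
	 * Per the SBI SUSP extension, a successful SYSTEM_SUSPEND call does
	 * not return here: the hart resumes execution at resume_addr instead.
	 * Reaching the code below therefore means the request failed or was
	 * rejected by the firmware.
	 */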
	ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
			sleep_type, resume_addr, opaque, 0, 0, 0);
	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);

	return ret.value;
}

static int sbi_system_suspend_enter(suspend_state_t state)
{
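	/*
	 * Only suspend-to-RAM is advertised (see suspend_valid_only_mem
	 * below), so the state argument is always PM_SUSPEND_MEM and can be
	 * ignored here.
	 */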
	return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend);
}

static const struct platform_suspend_ops sbi_system_suspend_ops = {
	.valid = suspend_valid_only_mem,
	.enter = sbi_system_suspend_enter,
};

static int __init sbi_system_suspend_init(void)
{
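	/*
	 * The SUSP extension was introduced in SBI v2.0, so require both a
	 * recent enough SBI implementation and a successful extension probe.
	 */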
	if (sbi_spec_version >= sbi_mk_version(2, 0) &&
	    sbi_probe_extension(SBI_EXT_SUSP) > 0) {
		pr_info("SBI SUSP extension detected\n");
		if (IS_ENABLED(CONFIG_SUSPEND))
			suspend_set_ops(&sbi_system_suspend_ops);
	}

	return 0;
}

arch_initcall(sbi_system_suspend_init);

static int sbi_suspend_finisher(unsigned long suspend_type,
				unsigned long resume_addr,
				unsigned long opaque)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
			suspend_type, resume_addr, opaque, 0, 0, 0);

	return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}

int riscv_sbi_hart_suspend(u32 state)
{
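	/*
	 * Non-retentive suspend states lose hart register and CSR state and
	 * resume at the given resume address, so they must go through
	 * cpu_suspend() to save and restore the context. Retentive states
	 * preserve hart state, so the SBI call can be made directly.
	 */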
	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return cpu_suspend(state, sbi_suspend_finisher);
	else
		return sbi_suspend_finisher(state, 0, 0);
}

bool riscv_sbi_suspend_state_is_valid(u32 state)
{
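	/*
	 * Suspend types between the default values and the start of the
	 * platform-specific ranges are reserved by the SBI HSM extension,
	 * so reject them.
	 */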
	if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_RET_PLATFORM)
		return false;

	if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
		return false;

	return true;
}

bool riscv_sbi_hsm_is_supported(void)
{
	/*
	 * The SBI HSM suspend function is only available when:
	 * 1) SBI version is 0.3 or higher
	 * 2) SBI HSM extension is available
	 */
	if (sbi_spec_version < sbi_mk_version(0, 3) ||
	    !sbi_probe_extension(SBI_EXT_HSM)) {
		pr_info("HSM suspend not available\n");
		return false;
	}

	return true;
}
#endif /* CONFIG_RISCV_SBI */