// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

static u32 download_mode;

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS			= 0,
	QSEECOM_RESULT_INCOMPLETE		= 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP			= 2,
	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
	QSEECOM_TZ_OWNER_QSEE_OS		= 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
	QSEECOM_TZ_SVC_APP_MGR			= 1,
	QSEECOM_TZ_SVC_INFO			= 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND			= 1,
	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE		64
#define SHMBRIDGE_RESULT_NOTSUPP		4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1
#define QCOM_DLOAD_MINIDUMP	2
#define QCOM_DLOAD_BOTHDUMP	3

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP]	= "off",
	[QCOM_DLOAD_FULLDUMP]	= "full",
	[QCOM_DLOAD_MINIDUMP]	= "mini",
	[QCOM_DLOAD_BOTHDUMP]	= "full,mini",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	return __scm ? __scm->mempool : NULL;
}

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;
	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit; otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
	 * ARM_64 calling convention on these firmwares. Luckily we don't make
	 * any early calls into the firmware on these SoCs, so the device
	 * pointer will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed during cpuidle or
 * hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
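
/*
 * Usage sketch (illustrative, not part of this driver): a cpuidle or CPU
 * hotplug driver would typically point the warm boot vector at the kernel's
 * resume trampoline before allowing power collapse. cpu_resume_arm() is an
 * assumption here, used purely for illustration.
 *
 *	static int example_init_warm_boot(void)
 *	{
 *		// Point the secure monitor at the kernel resume path.
 *		return qcom_scm_set_warm_boot_addr(cpu_resume_arm);
 *	}
 */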

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is the end point for powering down the cpu. If there was a pending
 * interrupt, control returns from this function; otherwise, the cpu jumps to
 * the warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
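
/*
 * Usage sketch (illustrative): an idle driver sets the warm boot address
 * once, then terminates the power collapse from its idle callback. The flag
 * name below is taken from qcom_scm.h; treat the overall flow as an
 * assumption, not an excerpt from a real driver.
 *
 *	static void example_enter_idle(void)
 *	{
 *		// Request power collapse, keeping L2 on.
 *		qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 *		// Control only returns here if an interrupt was pending;
 *		// otherwise the cpu resumes at the warm boot entry point.
 *	}
 */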

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation; the caller must release it by invoking
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux, so we must not do it ourselves; hence
	 * we do not use the TZMem allocator here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
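
/*
 * Usage sketch (illustrative): a remoteproc driver drives the PAS calls in
 * sequence - initialize the image metadata, set up the carveout, then
 * authenticate and release the processor from reset. Error handling and the
 * firmware loading itself are elided; peripheral, metadata, mem_phys and
 * mem_size are assumed caller-provided values.
 *
 *	struct qcom_scm_pas_metadata ctx = {};
 *	int ret;
 *
 *	if (!qcom_scm_pas_supported(peripheral))
 *		return -EOPNOTSUPP;
 *
 *	ret = qcom_scm_pas_init_image(peripheral, metadata, size, &ctx);
 *	if (!ret)
 *		ret = qcom_scm_pas_mem_setup(peripheral, mem_phys, mem_size);
 *	if (!ret)
 *		ret = qcom_scm_pas_auth_and_reset(peripheral);
 *	qcom_scm_pas_metadata_release(&ctx);
 */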

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
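
/*
 * Usage sketch (illustrative): these accessors let Linux touch registers that
 * are only reachable through the secure monitor, e.g. the TCSR download-mode
 * cookie used by qcom_scm_set_download_mode() above. addr is an assumed
 * secure-world register address.
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = qcom_scm_io_readl(addr, &val);
 *	if (!ret)
 *		// Read-modify-write, as qcom_scm_io_rmw() does internally.
 *		ret = qcom_scm_io_writel(addr, val | BIT(0));
 */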

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for the current set of owners, each set bit in the flag
 *            indicates a unique owner
 * @newvm:    array having the new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
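
/*
 * Usage sketch (illustrative): handing a carveout from HLOS to a remote VM
 * while keeping read/write access for HLOS. The VMID and permission macros
 * come from qcom_scm.h; the concrete VM split, mem_phys and mem_size are
 * assumptions for the example.
 *
 *	struct qcom_scm_vmperm perms[] = {
 *		{ QCOM_SCM_VMID_HLOS, QCOM_SCM_PERM_RW },
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *	u64 src = BIT(QCOM_SCM_VMID_HLOS);
 *	int ret;
 *
 *	ret = qcom_scm_assign_mem(mem_phys, mem_size, &src, perms,
 *				  ARRAY_SIZE(perms));
 *	// On success, src now holds the new owner bitmap.
 */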

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
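
/*
 * Usage sketch (illustrative): programming a 64-byte AES-256-XTS key for 4K
 * crypto data units, as a UFS/eMMC host driver would. The cipher enum value
 * comes from qcom_scm.h; slot and key are assumed caller-provided values.
 *
 *	// data_unit_size is in 512-byte units: 8 * 512 = 4096 bytes.
 *	err = qcom_scm_ice_set_key(slot, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	// Later, wipe the slot when the key is evicted.
 *	err = qcom_scm_ice_invalidate_key(slot);
 */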

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
						QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
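
/*
 * Usage sketch (illustrative): note that the descriptor above reads five
 * register/value pairs from @req regardless of @req_cnt, so callers should
 * size the array to QCOM_SCM_HDCP_MAX_REQ_CNT. hdcp_reg0 is a placeholder
 * register address, assumed for the example.
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
 *		{ .addr = hdcp_reg0, .val = 0x1 },
 *	};
 *	u32 resp;
 *	int ret = qcom_scm_hdcp_req(req, QCOM_SCM_HDCP_MAX_REQ_CNT, &resp);
 */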

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_shm_bridge_enable(void)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (ret)
		return ret;

	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);

int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
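
/*
 * Usage sketch (illustrative): bridges are created once SHM bridge support
 * has been enabled, and every create must be paired with a delete. The packed
 * argument layout (PFNs, permission bits and sizes) is defined by the
 * firmware interface; the variable names below are schematic assumptions.
 *
 *	u64 handle;
 *	int ret;
 *
 *	ret = qcom_scm_shm_bridge_create(dev, pfn_and_ns_perm, ipfn_and_s_perm,
 *					 size_and_flags, ns_vmids, &handle);
 *	...
 *	ret = qcom_scm_shm_bridge_delete(dev, handle);
 */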

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	if (status)
		return status;

	return 0;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res:  SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed at the same
	 * time, so lock things here. This needs to be extended to
	 * callback/listener handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM call that queries the QSEECOM version currently
 * running in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id:   The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the
 * given name. The returned ID is the unique identifier of the app required
 * for subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not
 * been loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							     name_buf_size,
							     GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = qcom_tzmem_to_phys(name_buf);
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
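
/*
 * Usage sketch (illustrative): a QSEECOM client resolves an app name to an ID
 * once, then uses that ID for all subsequent app calls. The app name is an
 * assumption for the example.
 *
 *	u32 app_id;
 *	int ret;
 *
 *	ret = qcom_scm_qseecom_app_get_id("exampleapp", &app_id);
 *	if (ret)
 *		return ret;	// -ENOENT if the app is not loaded
 */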

/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id:   The ID of the target app.
 * @req:      Request buffer sent to the app (must be TZ memory).
 * @req_size: Size of the request buffer.
 * @rsp:      Response buffer, written to by the app (must be TZ memory).
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and reads back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
qcom_scm_qseecom_app_send(u32 app_id,void * req,size_t req_size,void * rsp,size_t rsp_size)1704  int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
1705  			      void *rsp, size_t rsp_size)
1706  {
1707  	struct qcom_scm_qseecom_resp res = {};
1708  	struct qcom_scm_desc desc = {};
1709  	phys_addr_t req_phys;
1710  	phys_addr_t rsp_phys;
1711  	int status;
1712  
1713  	req_phys = qcom_tzmem_to_phys(req);
1714  	rsp_phys = qcom_tzmem_to_phys(rsp);
1715  
1716  	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
1717  	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
1718  	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
1719  	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
1720  				     QCOM_SCM_RW, QCOM_SCM_VAL,
1721  				     QCOM_SCM_RW, QCOM_SCM_VAL);
1722  	desc.args[0] = app_id;
1723  	desc.args[1] = req_phys;
1724  	desc.args[2] = req_size;
1725  	desc.args[3] = rsp_phys;
1726  	desc.args[4] = rsp_size;
1727  
1728  	status = qcom_scm_qseecom_call(&desc, &res);
1729  
1730  	if (status)
1731  		return status;
1732  
1733  	if (res.result != QSEECOM_RESULT_SUCCESS)
1734  		return -EIO;
1735  
1736  	return 0;
1737  }
1738  EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
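
/*
 * Example (editor's illustrative sketch, not part of the original driver):
 * both buffers must come from TZ memory, e.g. a pool created with
 * devm_qcom_tzmem_pool_new() as done for the SCM pool in qcom_scm_probe()
 * below. The function my_app_transact() and the 4K buffer sizes are
 * hypothetical; real layouts are app-specific.
 *
 *	static int my_app_transact(struct qcom_tzmem_pool *pool, u32 app_id)
 *	{
 *		void *req __free(qcom_tzmem) = qcom_tzmem_alloc(pool, SZ_4K,
 *								GFP_KERNEL);
 *		void *rsp __free(qcom_tzmem) = qcom_tzmem_alloc(pool, SZ_4K,
 *								GFP_KERNEL);
 *		int ret;
 *
 *		if (!req || !rsp)
 *			return -ENOMEM;
 *
 *		... fill *req with the app-specific request data ...
 *
 *		ret = qcom_scm_qseecom_app_send(app_id, req, SZ_4K,
 *						rsp, SZ_4K);
 *		if (ret)
 *			return ret;
 *
 *		... parse the app-specific response in *rsp ...
 *		return 0;
 *	}
 */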

/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s" },
	{ .compatible = "microsoft,romulus13" },
	{ .compatible = "microsoft,romulus15" },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ }
};

static bool qcom_scm_qseecom_machine_is_allowed(void)
{
	struct device_node *np;
	bool match;

	np = of_find_node_by_path("/");
	if (!np)
		return false;

	match = of_match_node(qcom_scm_qseecom_allowlist, np);
	of_node_put(np);

	return match;
}

static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_is_available() - Checks if SCM is available.
 *
 * Return: true if the SCM driver has probed and SCM calls can be made,
 * false otherwise.
 */
bool qcom_scm_is_available(void)
{
	return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
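
/*
 * Usage note (editor's addition): consumers that may probe before the SCM
 * device does typically use this check to defer, e.g.:
 *
 *	if (!qcom_scm_is_available())
 *		return -EPROBE_DEFER;
 */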

static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/* FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}

static int get_download_mode(char *buffer, const struct kernel_param *kp)
{
	if (download_mode >= ARRAY_SIZE(download_mode_name))
		return sysfs_emit(buffer, "unknown mode\n");

	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
}

static int set_download_mode(const char *val, const struct kernel_param *kp)
{
	bool tmp;
	int ret;

	ret = sysfs_match_string(download_mode_name, val);
	if (ret < 0) {
		ret = kstrtobool(val, &tmp);
		if (ret < 0) {
			pr_err("qcom_scm: invalid download mode: %s\n", val);
			return ret;
		}

		ret = tmp ? 1 : 0;
	}

	download_mode = ret;
	if (__scm)
		qcom_scm_set_download_mode(download_mode);

	return 0;
}

static const struct kernel_param_ops download_mode_param_ops = {
	.get = get_download_mode,
	.set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode; \"full,mini\" enables full and minidump mode together");
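
/*
 * Usage note (editor's addition): module_param_cb() above exposes the mode as
 * "qcom_scm.download_mode=<mode>" on the kernel command line and as a
 * writable file at /sys/module/qcom_scm/parameters/download_mode.
 */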

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* Make all of the above stores visible before publishing __scm */
	smp_store_release(&__scm, scm);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If "download mode" is requested, from this point on a warm boot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	qcom_scm_set_download_mode(download_mode);

	/*
	 * Disable SDI if DT indicates that it is enabled by default, or if
	 * no download mode was requested.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
		qcom_scm_disable_sdi();

	ret = of_reserved_mem_device_init(__scm->dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to set up the reserved memory region for TZ mem\n");

	ret = qcom_tzmem_enable(__scm->dev);
	if (ret)
		return dev_err_probe(__scm->dev, ret,
				     "Failed to enable the TrustZone memory allocator\n");

	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
	if (IS_ERR(__scm->mempool))
		return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
				     "Failed to create the SCM memory pool\n");

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown: disable download mode to allow a normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");