// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Linaro Limited
 * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>

/*
 * This is the driver for the Qualcomm MPM (MSM Power Manager) interrupt
 * controller, which is commonly found on Qualcomm SoCs built on the RPM
 * architecture.  Sitting in the always-on domain, the MPM monitors wakeup
 * interrupts while the SoC is asleep and wakes up the AP when one of those
 * interrupts occurs.  This driver doesn't access the physical MPM registers
 * directly, though.  Instead, the access is bridged via a piece of internal
 * memory (SRAM) that is accessible to both the AP and RPM.  This piece of
 * memory is called 'vMPM' in the driver.
 *
 * While the SoC is awake, vMPM is owned by the AP, and all register setup
 * done by this driver happens on vMPM.  When the AP is about to be power
 * collapsed, the driver sends a mailbox notification to RPM, which takes
 * over vMPM ownership and dumps vMPM into the physical MPM registers.  On
 * wakeup, the AP is woken up by an MPM pin/interrupt, and RPM copies the
 * STATUS registers into vMPM.  The AP then owns vMPM again.
 *
 * vMPM register map:
 *
 *    31                              0
 *    +--------------------------------+
 *    |            TIMER0              | 0x00
 *    +--------------------------------+
 *    |            TIMER1              | 0x04
 *    +--------------------------------+
 *    |            ENABLE0             | 0x08
 *    +--------------------------------+
 *    |              ...               | ...
 *    +--------------------------------+
 *    |            ENABLEn             |
 *    +--------------------------------+
 *    |          FALLING_EDGE0         |
 *    +--------------------------------+
 *    |              ...               |
 *    +--------------------------------+
 *    |            STATUSn             |
 *    +--------------------------------+
 *
 *    n = DIV_ROUND_UP(pin_cnt, 32)
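 *
 *    e.g. with pin_cnt = 96 (n = 3), ENABLE0..ENABLE2 sit at 0x08..0x10,
 *    FALLING_EDGE0 at 0x14, RISING_EDGE0 at 0x20, POLARITY0 at 0x2C and
 *    STATUS0 at 0x38.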
 *
 */

#define MPM_REG_ENABLE		0
#define MPM_REG_FALLING_EDGE	1
#define MPM_REG_RISING_EDGE	2
#define MPM_REG_POLARITY	3
#define MPM_REG_STATUS		4

/* MPM pin map to GIC hwirq */
struct mpm_gic_map {
	int pin;
	irq_hw_number_t hwirq;
};

struct qcom_mpm_priv {
	void __iomem *base;
	raw_spinlock_t lock;
	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;
	struct mpm_gic_map *maps;
	unsigned int map_cnt;
	unsigned int reg_stride;
	struct irq_domain *domain;
	struct generic_pm_domain genpd;
};

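/*
 * vMPM registers are word-addressed behind the two TIMER words, hence the
 * '+ 2' term in the offset calculation below.
 */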
static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg,
			 unsigned int index)
{
	unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;

	return readl_relaxed(priv->base + offset);
}

static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg,
			   unsigned int index, u32 val)
{
	unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;

	writel_relaxed(val, priv->base + offset);

	/* Ensure the write is completed */
	wmb();
}

static void qcom_mpm_enable_irq(struct irq_data *d, bool en)
{
	struct qcom_mpm_priv *priv = d->chip_data;
	int pin = d->hwirq;
	unsigned int index = pin / 32;
	unsigned int shift = pin % 32;
	unsigned long flags, val;

	raw_spin_lock_irqsave(&priv->lock, flags);

	val = qcom_mpm_read(priv, MPM_REG_ENABLE, index);
	__assign_bit(shift, &val, en);
	qcom_mpm_write(priv, MPM_REG_ENABLE, index, val);

	raw_spin_unlock_irqrestore(&priv->lock, flags);
}

static void qcom_mpm_mask(struct irq_data *d)
{
	qcom_mpm_enable_irq(d, false);

	if (d->parent_data)
		irq_chip_mask_parent(d);
}

static void qcom_mpm_unmask(struct irq_data *d)
{
	qcom_mpm_enable_irq(d, true);

	if (d->parent_data)
		irq_chip_unmask_parent(d);
}

static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg,
			 unsigned int index, unsigned int shift)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&priv->lock, flags);

	val = qcom_mpm_read(priv, reg, index);
	__assign_bit(shift, &val, set);
	qcom_mpm_write(priv, reg, index, val);

	raw_spin_unlock_irqrestore(&priv->lock, flags);
}

static int qcom_mpm_set_type(struct irq_data *d, unsigned int type)
{
	struct qcom_mpm_priv *priv = d->chip_data;
	int pin = d->hwirq;
	unsigned int index = pin / 32;
	unsigned int shift = pin % 32;

	if (type & IRQ_TYPE_EDGE_RISING)
		mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift);

	if (type & IRQ_TYPE_EDGE_FALLING)
		mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift);

	if (type & IRQ_TYPE_LEVEL_HIGH)
		mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift);

	if (!d->parent_data)
		return 0;

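	/*
	 * Collapse the trigger to a single canonical type before handing it
	 * on to the parent domain.
	 */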
	if (type & IRQ_TYPE_EDGE_BOTH)
		type = IRQ_TYPE_EDGE_RISING;

	if (type & IRQ_TYPE_LEVEL_MASK)
		type = IRQ_TYPE_LEVEL_HIGH;

	return irq_chip_set_type_parent(d, type);
}

static struct irq_chip qcom_mpm_chip = {
	.name			= "mpm",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= qcom_mpm_mask,
	.irq_unmask		= qcom_mpm_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= qcom_mpm_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SKIP_SET_WAKE,
};

static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin)
{
	struct mpm_gic_map *maps = priv->maps;
	int i;

	for (i = 0; i < priv->map_cnt; i++) {
		if (maps[i].pin == pin)
			return &maps[i];
	}

	return NULL;
}

static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
			  unsigned int nr_irqs, void *data)
{
	struct qcom_mpm_priv *priv = domain->host_data;
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	struct mpm_gic_map *map;
	irq_hw_number_t pin;
	unsigned int type;
	int ret;

	ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type);
	if (ret)
		return ret;

	ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
					    &qcom_mpm_chip, priv);
	if (ret)
		return ret;

	map = get_mpm_gic_map(priv, pin);
	if (map == NULL)
		return irq_domain_disconnect_hierarchy(domain->parent, virq);

	if (type & IRQ_TYPE_EDGE_BOTH)
		type = IRQ_TYPE_EDGE_RISING;

	if (type & IRQ_TYPE_LEVEL_MASK)
		type = IRQ_TYPE_LEVEL_HIGH;

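	/* Forward the allocation to the parent GIC as an SPI (first cell 0). */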
	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 3;
	parent_fwspec.param[0] = 0;
	parent_fwspec.param[1] = map->hwirq;
	parent_fwspec.param[2] = type;

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops qcom_mpm_ops = {
	.alloc		= qcom_mpm_alloc,
	.free		= irq_domain_free_irqs_common,
	.translate	= irq_domain_translate_twocell,
};

/* Triggered by RPM when system resumes from deep sleep */
static irqreturn_t qcom_mpm_handler(int irq, void *dev_id)
{
	struct qcom_mpm_priv *priv = dev_id;
	unsigned long enable, pending;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	int i, j;

	for (i = 0; i < priv->reg_stride; i++) {
		raw_spin_lock_irqsave(&priv->lock, flags);
		enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i);
		pending = qcom_mpm_read(priv, MPM_REG_STATUS, i);
		pending &= enable;
		raw_spin_unlock_irqrestore(&priv->lock, flags);

		for_each_set_bit(j, &pending, 32) {
			unsigned int pin = 32 * i + j;
			struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin);
			struct irq_data *d = &desc->irq_data;

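			/*
			 * Level interrupts are still asserted and will be seen
			 * by the GIC on their own; edge interrupts latched by
			 * the MPM while asleep are replayed as pending.
			 */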
			if (!irqd_is_level_type(d))
				irq_set_irqchip_state(d->irq,
						IRQCHIP_STATE_PENDING, true);
			ret = IRQ_HANDLED;
		}
	}

	return ret;
}

static int mpm_pd_power_off(struct generic_pm_domain *genpd)
{
	struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv,
						  genpd);
	int i, ret;

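	/* Clear stale wakeup status before handing vMPM over to RPM. */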
	for (i = 0; i < priv->reg_stride; i++)
		qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);

	/* Notify RPM to write vMPM into HW */
	ret = mbox_send_message(priv->mbox_chan, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
{
	int i;

	for (i = 0; i < cnt; i++)
		if (maps[i].hwirq == hwirq)
			return true;

	return false;
}

static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	struct device *dev = &pdev->dev;
	struct irq_domain *parent_domain;
	struct generic_pm_domain *genpd;
	struct device_node *msgram_np;
	struct qcom_mpm_priv *priv;
	unsigned int pin_cnt;
	struct resource res;
	int i, irq;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt);
	if (ret) {
		dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret);
		return ret;
	}

	priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32);

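	/* "qcom,mpm-pin-map" is a flat list of <MPM pin, GIC SPI hwirq> pairs. */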
	ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map");
	if (ret < 0) {
		dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret);
		return ret;
	}

	if (ret % 2) {
		dev_err(dev, "invalid qcom,mpm-pin-map\n");
		return -EINVAL;
	}

	priv->map_cnt = ret / 2;
	priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps),
				  GFP_KERNEL);
	if (!priv->maps)
		return -ENOMEM;

	for (i = 0; i < priv->map_cnt; i++) {
		u32 pin, hwirq;

		of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin);
		of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq);

		if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) {
			dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n",
				 pin, hwirq);
			continue;
		}

		priv->maps[i].pin = pin;
		priv->maps[i].hwirq = hwirq;
	}

	raw_spin_lock_init(&priv->lock);

	/* If we have a handle to an RPM message ram partition, use it. */
	msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
	if (msgram_np) {
		ret = of_address_to_resource(msgram_np, 0, &res);
		if (ret) {
			of_node_put(msgram_np);
			return ret;
		}

		/* Don't use devm_ioremap_resource, as we're accessing a shared region. */
		priv->base = devm_ioremap(dev, res.start, resource_size(&res));
		of_node_put(msgram_np);
		if (!priv->base)
			return -ENOMEM;
	} else {
		/* Otherwise, fall back to simple MMIO. */
		priv->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(priv->base))
			return PTR_ERR(priv->base);
	}

	for (i = 0; i < priv->reg_stride; i++) {
		qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
		qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0);
		qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0);
		qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0);
		qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

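	/*
	 * Expose a genpd whose power_off hook notifies RPM, so the platform's
	 * CPU PM path can hand vMPM over right before power collapse.
	 */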
	genpd = &priv->genpd;
	genpd->flags = GENPD_FLAG_IRQ_SAFE;
	genpd->power_off = mpm_pd_power_off;

	genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev));
	if (!genpd->name)
		return -ENOMEM;

	ret = pm_genpd_init(genpd, NULL, false);
	if (ret) {
		dev_err(dev, "failed to init genpd: %d\n", ret);
		return ret;
	}

	ret = of_genpd_add_provider_simple(np, genpd);
	if (ret) {
		dev_err(dev, "failed to add genpd provider: %d\n", ret);
		goto remove_genpd;
	}

	priv->mbox_client.dev = dev;
	priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0);
	if (IS_ERR(priv->mbox_chan)) {
		ret = PTR_ERR(priv->mbox_chan);
		dev_err(dev, "failed to acquire IPC channel: %d\n", ret);
		return ret;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		dev_err(dev, "failed to find MPM parent domain\n");
		ret = -ENXIO;
		goto free_mbox;
	}

	priv->domain = irq_domain_create_hierarchy(parent_domain,
				IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
				of_node_to_fwnode(np), &qcom_mpm_ops, priv);
	if (!priv->domain) {
		dev_err(dev, "failed to create MPM domain\n");
		ret = -ENOMEM;
		goto free_mbox;
	}

	irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP);

	ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND,
			       "qcom_mpm", priv);
	if (ret) {
		dev_err(dev, "failed to request irq: %d\n", ret);
		goto remove_domain;
	}

	return 0;

remove_domain:
	irq_domain_remove(priv->domain);
free_mbox:
	mbox_free_channel(priv->mbox_chan);
remove_genpd:
	pm_genpd_remove(genpd);
	return ret;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
MODULE_LICENSE("GPL v2");