// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file implements an irqchip for OPAL events. Whenever there is
 * an interrupt that is handled by OPAL we get passed a list of events
 * that Linux needs to do something about. These basically look like
 * interrupts to Linux so we implement an irqchip to handle them.
 *
 * Copyright Alistair Popple, IBM Corporation 2014.
 */
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of_irq.h>

#include <asm/machdep.h>
#include <asm/opal.h>

#include "powernv.h"

/* Maximum number of events supported by OPAL firmware */
#define MAX_NUM_EVENTS 64

struct opal_event_irqchip {
	struct irq_chip irqchip;
	struct irq_domain *domain;
	unsigned long mask;
};
static struct opal_event_irqchip opal_event_irqchip;
static u64 last_outstanding_events;
static int opal_irq_count;
static struct resource *opal_irqs;

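/*
 * Replay any events OPAL has told us about. Each unmasked bit set in
 * last_outstanding_events is treated as a hwirq and dispatched through
 * the event domain; once the batch has been drained we poll OPAL again
 * in case further events were raised in the meantime.
 */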
void opal_handle_events(void)
{
	__be64 events = 0;
	u64 e;

	e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
again:
	while (e) {
		int hwirq;

		hwirq = fls64(e) - 1;
		e &= ~BIT_ULL(hwirq);

		local_irq_disable();
		irq_enter();
		generic_handle_domain_irq(opal_event_irqchip.domain, hwirq);
		irq_exit();
		local_irq_enable();

		cond_resched();
	}
	WRITE_ONCE(last_outstanding_events, 0);
	if (opal_poll_events(&events) != OPAL_SUCCESS)
		return;
	e = be64_to_cpu(events) & opal_event_irqchip.mask;
	if (e)
		goto again;
}

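/* True if any unmasked OPAL events are still waiting to be replayed. */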
bool opal_have_pending_events(void)
{
	if (READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask)
		return true;
	return false;
}

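/*
 * The mask here is purely a software one: masking an event simply
 * clears its bit so opal_handle_events() skips it, while unmasking
 * kicks the poller in case the event was already pending while masked.
 */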
static void opal_event_mask(struct irq_data *d)
{
	clear_bit(d->hwirq, &opal_event_irqchip.mask);
}

static void opal_event_unmask(struct irq_data *d)
{
	set_bit(d->hwirq, &opal_event_irqchip.mask);
	if (opal_have_pending_events())
		opal_wake_poller();
}

static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
{
	/*
	 * For now we only support level triggered events. The irq
	 * handler will be called continuously until the event has
	 * been cleared in OPAL.
	 */
	if (flow_type != IRQ_TYPE_LEVEL_HIGH)
		return -EINVAL;

	return 0;
}

static struct opal_event_irqchip opal_event_irqchip = {
	.irqchip = {
		.name = "OPAL EVT",
		.irq_mask = opal_event_mask,
		.irq_unmask = opal_event_unmask,
		.irq_set_type = opal_event_set_type,
	},
	.mask = 0,
};

static int opal_event_map(struct irq_domain *d, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, &opal_event_irqchip);
	irq_set_chip_and_handler(irq, &opal_event_irqchip.irqchip,
				handle_level_irq);

	return 0;
}

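/*
 * Handler for the firmware interrupts requested in opal_event_init().
 * It asks OPAL which events are outstanding, records them and wakes
 * the poller so they can be replayed by opal_handle_events().
 */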
static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);
	WRITE_ONCE(last_outstanding_events, be64_to_cpu(events));
	if (opal_have_pending_events())
		opal_wake_poller();

	return IRQ_HANDLED;
}

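/*
 * The domain is created against the "ibm,opal-event" node (when
 * present), so matching only has to compare nodes and translation just
 * passes the event number straight through as the hwirq.
 */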
static int opal_event_match(struct irq_domain *h, struct device_node *node,
			    enum irq_domain_bus_token bus_token)
{
	return irq_domain_get_of_node(h) == node;
}

static int opal_event_xlate(struct irq_domain *h, struct device_node *np,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_LEVEL_HIGH;

	return 0;
}

static const struct irq_domain_ops opal_event_domain_ops = {
	.match	= opal_event_match,
	.map	= opal_event_map,
	.xlate	= opal_event_xlate,
};

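/*
 * Free (or, if called from a context that cannot sleep, just disable)
 * every OPAL interrupt requested in opal_event_init().
 */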
void opal_event_shutdown(void)
{
	unsigned int i;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (!opal_irqs || !opal_irqs[i].start)
			continue;

		if (in_interrupt() || irqs_disabled())
			disable_irq_nosync(opal_irqs[i].start);
		else
			free_irq(opal_irqs[i].start, NULL);

		opal_irqs[i].start = 0;
	}
}

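/*
 * Create the OPAL event irq domain and request the firmware interrupts
 * described in the device tree, preferring the standard "interrupts"
 * property and falling back to the legacy "opal-interrupts" list.
 */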
int __init opal_event_init(void)
{
	struct device_node *dn, *opal_node;
	bool old_style = false;
	int i, rc = 0;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* If dn is NULL it means the domain won't be linked to a DT
	 * node so irq_of_parse_and_map(...) won't work. But that
	 * shouldn't be a problem because if we're running a version
	 * of skiboot that doesn't have the dn then the devices won't
	 * have the correct properties and will have to fall back to
	 * the legacy method (opal_event_request(...)) anyway. */
	dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event");
	opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS,
				&opal_event_domain_ops, &opal_event_irqchip);
	of_node_put(dn);
	if (!opal_event_irqchip.domain) {
		pr_warn("opal: Unable to create irq domain\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Look for new-style (standard) "interrupts" property */
	opal_irq_count = of_irq_count(opal_node);

	/* Absent ? Look for the old one */
	if (opal_irq_count < 1) {
		/* Get opal-interrupts property and names if present */
		rc = of_property_count_u32_elems(opal_node, "opal-interrupts");
		if (rc > 0)
			opal_irq_count = rc;
		old_style = true;
	}

	/* No interrupts ? Bail out */
	if (!opal_irq_count)
		goto out;

	pr_debug("OPAL: Found %d interrupts reserved for OPAL using %s scheme\n",
		 opal_irq_count, old_style ? "old" : "new");

	/* Allocate an IRQ resources array */
	opal_irqs = kcalloc(opal_irq_count, sizeof(struct resource), GFP_KERNEL);
	if (WARN_ON(!opal_irqs)) {
		rc = -ENOMEM;
		goto out;
	}

	/* Build the resources array */
	if (old_style) {
		/* Old style "opal-interrupts" property */
		for (i = 0; i < opal_irq_count; i++) {
			struct resource *r = &opal_irqs[i];
			const char *name = NULL;
			u32 hw_irq;
			int virq;

			rc = of_property_read_u32_index(opal_node, "opal-interrupts",
							i, &hw_irq);
			if (WARN_ON(rc < 0)) {
				opal_irq_count = i;
				break;
			}
			of_property_read_string_index(opal_node, "opal-interrupts-names",
						      i, &name);
			virq = irq_create_mapping(NULL, hw_irq);
			if (!virq) {
				pr_warn("Failed to map OPAL irq 0x%x\n", hw_irq);
				continue;
			}
			r->start = r->end = virq;
			r->flags = IORESOURCE_IRQ | IRQ_TYPE_LEVEL_LOW;
			r->name = name;
		}
	} else {
		/* new style standard "interrupts" property */
		rc = of_irq_to_resource_table(opal_node, opal_irqs, opal_irq_count);
		if (WARN_ON(rc < 0)) {
			opal_irq_count = 0;
			kfree(opal_irqs);
			goto out;
		}
		if (WARN_ON(rc < opal_irq_count))
			opal_irq_count = rc;
	}

	/* Install interrupt handlers */
	for (i = 0; i < opal_irq_count; i++) {
		struct resource *r = &opal_irqs[i];
		const char *name;

		/* Prefix name */
		if (r->name && strlen(r->name))
			name = kasprintf(GFP_KERNEL, "opal-%s", r->name);
		else
			name = kasprintf(GFP_KERNEL, "opal");

		if (!name)
			continue;
		/* Install interrupt handler */
		rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
				 name, NULL);
		if (rc) {
			pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start);
			kfree(name);
			continue;
		}
	}
	rc = 0;
 out:
	of_node_put(opal_node);
	return rc;
}
machine_arch_initcall(powernv, opal_event_init);

/**
 * opal_event_request(unsigned int opal_event_nr) - Request an event
 * @opal_event_nr: the opal event number to request
 *
 * This routine can be used to find the linux virq number which can
 * then be passed to request_irq to assign a handler for a particular
 * opal event. This should only be used by legacy devices which don't
 * have proper device tree bindings. Most devices should use
 * irq_of_parse_and_map() instead.
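 *
 * A minimal usage sketch (the event number, handler and name below are
 * illustrative only, not something defined in this file):
 *
 *	virq = opal_event_request(hw_event_nr);
 *	if (virq)
 *		rc = request_irq(virq, my_handler, IRQF_TRIGGER_HIGH,
 *				 "my-opal-evt", NULL);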
 */
int opal_event_request(unsigned int opal_event_nr)
{
	if (WARN_ON_ONCE(!opal_event_irqchip.domain))
		return 0;

	return irq_create_mapping(opal_event_irqchip.domain, opal_event_nr);
}
EXPORT_SYMBOL(opal_event_request);