// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM6345 style Level 1 interrupt controller driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 * Copyright 2015 Simon Arlott
 *
 * This is based on the BCM7038 (which supports SMP) but with a single
 * enable register instead of separate mask/set/clear registers.
 *
 * The BCM3380 has a similar mask/status register layout, but each pair
 * of words is at separate locations (and SMP is not supported).
 *
 * ENABLE/STATUS words are packed next to each other for each CPU:
 *
 * BCM6368:
 *   0x1000_0020: CPU0_W0_ENABLE
 *   0x1000_0024: CPU0_W1_ENABLE
 *   0x1000_0028: CPU0_W0_STATUS	IRQs 32-63
 *   0x1000_002c: CPU0_W1_STATUS	IRQs 0-31
 *   0x1000_0030: CPU1_W0_ENABLE
 *   0x1000_0034: CPU1_W1_ENABLE
 *   0x1000_0038: CPU1_W0_STATUS	IRQs 32-63
 *   0x1000_003c: CPU1_W1_STATUS	IRQs 0-31
 *
 * BCM63168:
 *   0x1000_0020: CPU0_W0_ENABLE
 *   0x1000_0024: CPU0_W1_ENABLE
 *   0x1000_0028: CPU0_W2_ENABLE
 *   0x1000_002c: CPU0_W3_ENABLE
 *   0x1000_0030: CPU0_W0_STATUS	IRQs 96-127
 *   0x1000_0034: CPU0_W1_STATUS	IRQs 64-95
 *   0x1000_0038: CPU0_W2_STATUS	IRQs 32-63
 *   0x1000_003c: CPU0_W3_STATUS	IRQs 0-31
 *   0x1000_0040: CPU1_W0_ENABLE
 *   0x1000_0044: CPU1_W1_ENABLE
 *   0x1000_0048: CPU1_W2_ENABLE
 *   0x1000_004c: CPU1_W3_ENABLE
 *   0x1000_0050: CPU1_W0_STATUS	IRQs 96-127
 *   0x1000_0054: CPU1_W1_STATUS	IRQs 64-95
 *   0x1000_0058: CPU1_W2_STATUS	IRQs 32-63
 *   0x1000_005c: CPU1_W3_STATUS	IRQs 0-31
 *
 * IRQs are numbered in CPU native endian order
 * (which is big-endian in these examples).
 */

#define pr_fmt(fmt)	KBUILD_MODNAME	": " fmt

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

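/*
 * Each group of 32 hwirqs (a "word") is backed by one ENABLE and one
 * STATUS register per CPU, i.e. 8 register bytes per IRQ word.
 */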
#define IRQS_PER_WORD		32
#define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 2)

struct bcm6345_l1_cpu;

struct bcm6345_l1_chip {
	raw_spinlock_t		lock;
	unsigned int		n_words;
	struct irq_domain	*domain;
	struct cpumask		cpumask;
	struct bcm6345_l1_cpu	*cpus[NR_CPUS];
};

struct bcm6345_l1_cpu {
	struct bcm6345_l1_chip	*intc;
	void __iomem		*map_base;
	unsigned int		parent_irq;
	u32			enable_cache[];
};

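/*
 * Register offsets within a CPU's block: n_words ENABLE registers followed
 * by n_words STATUS registers. On big-endian kernels the word order within
 * each bank is reversed so that hwirq numbering stays CPU native.
 */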
static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
					   unsigned int word)
{
#ifdef __BIG_ENDIAN
	return (1 * intc->n_words - word - 1) * sizeof(u32);
#else
	return (0 * intc->n_words + word) * sizeof(u32);
#endif
}

static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
				      unsigned int word)
{
#ifdef __BIG_ENDIAN
	return (2 * intc->n_words - word - 1) * sizeof(u32);
#else
	return (1 * intc->n_words + word) * sizeof(u32);
#endif
}

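/*
 * Pick the CPU that currently owns this IRQ: the first CPU that has a
 * register block and is in the IRQ's affinity mask.
 */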
static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
					struct irq_data *d)
{
	return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
}

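/*
 * Chained handler for the per-CPU parent interrupt: scan every word and
 * dispatch each pending-and-enabled source through the IRQ domain.
 */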
static void bcm6345_l1_irq_handle(struct irq_desc *desc)
{
	struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
	struct bcm6345_l1_chip *intc = cpu->intc;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int idx;

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < intc->n_words; idx++) {
		int base = idx * IRQS_PER_WORD;
		unsigned long pending;
		irq_hw_number_t hwirq;

		pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
		pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));

		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
			if (generic_handle_domain_irq(intc->domain, base + hwirq))
				spurious_interrupt();
		}
	}

	chained_irq_exit(chip, desc);
}

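/*
 * Lock-free helpers; callers must hold intc->lock. The enable words are
 * shadowed in enable_cache so mask/unmask never need to read the hardware.
 */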
static inline void __bcm6345_l1_unmask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
	unsigned int cpu_idx = cpu_for_irq(intc, d);

	intc->cpus[cpu_idx]->enable_cache[word] |= mask;
	__raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
		intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
}

static inline void __bcm6345_l1_mask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
	unsigned int cpu_idx = cpu_for_irq(intc, d);

	intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
	__raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
		intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
}

static void bcm6345_l1_unmask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm6345_l1_unmask(d);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}

static void bcm6345_l1_mask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm6345_l1_mask(d);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}

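/*
 * Route an IRQ to a single CPU. If the line is currently enabled it is
 * masked on the old CPU before the enable bit is set on the new one.
 */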
static int bcm6345_l1_set_affinity(struct irq_data *d,
				   const struct cpumask *dest,
				   bool force)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
	unsigned int old_cpu = cpu_for_irq(intc, d);
	unsigned int new_cpu;
	unsigned long flags;
	bool enabled;

	new_cpu = cpumask_first_and_and(&intc->cpumask, dest, cpu_online_mask);
	if (new_cpu >= nr_cpu_ids)
		return -EINVAL;

	dest = cpumask_of(new_cpu);

	raw_spin_lock_irqsave(&intc->lock, flags);
	if (old_cpu != new_cpu) {
		enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
		if (enabled)
			__bcm6345_l1_mask(d);
		irq_data_update_affinity(d, dest);
		if (enabled)
			__bcm6345_l1_unmask(d);
	} else {
		irq_data_update_affinity(d, dest);
	}
	raw_spin_unlock_irqrestore(&intc->lock, flags);

	irq_data_update_effective_affinity(d, cpumask_of(new_cpu));

	return IRQ_SET_MASK_OK_NOCOPY;
}

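/*
 * Set up one CPU's register block: map it, derive n_words from the resource
 * size, clear all enables and install the chained handler on the parent IRQ.
 */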
static int __init bcm6345_l1_init_one(struct device_node *dn,
				      unsigned int idx,
				      struct bcm6345_l1_chip *intc)
{
	struct resource res;
	resource_size_t sz;
	struct bcm6345_l1_cpu *cpu;
	unsigned int i, n_words;

	if (of_address_to_resource(dn, idx, &res))
		return -EINVAL;
	sz = resource_size(&res);
	n_words = sz / REG_BYTES_PER_IRQ_WORD;

	if (!intc->n_words)
		intc->n_words = n_words;
	else if (intc->n_words != n_words)
		return -EINVAL;

	cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, enable_cache, n_words),
					GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	cpu->intc = intc;
	cpu->map_base = ioremap(res.start, sz);
	if (!cpu->map_base)
		return -ENOMEM;

	if (!request_mem_region(res.start, sz, res.name))
		pr_err("failed to request intc memory");

	for (i = 0; i < n_words; i++) {
		cpu->enable_cache[i] = 0;
		__raw_writel(0, cpu->map_base + reg_enable(intc, i));
	}

	cpu->parent_irq = irq_of_parse_and_map(dn, idx);
	if (!cpu->parent_irq) {
		pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
		return -EINVAL;
	}
	irq_set_chained_handler_and_data(cpu->parent_irq,
						bcm6345_l1_irq_handle, cpu);

	return 0;
}

static struct irq_chip bcm6345_l1_irq_chip = {
	.name			= "bcm6345-l1",
	.irq_mask		= bcm6345_l1_mask,
	.irq_unmask		= bcm6345_l1_unmask,
	.irq_set_affinity	= bcm6345_l1_set_affinity,
};

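/* Each hwirq targets exactly one CPU at a time, hence the single-target flag. */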
static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(virq,
		&bcm6345_l1_irq_chip, handle_percpu_irq);
	irq_set_chip_data(virq, d->host_data);
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	return 0;
}

static const struct irq_domain_ops bcm6345_l1_domain_ops = {
	.xlate			= irq_domain_xlate_onecell,
	.map			= bcm6345_l1_map,
};

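/*
 * Probe one register block and one parent interrupt per possible CPU, then
 * register a linear IRQ domain covering every word.
 */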
static int __init bcm6345_l1_of_init(struct device_node *dn,
			      struct device_node *parent)
{
	struct bcm6345_l1_chip *intc;
	unsigned int idx;
	int ret;

	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
	if (!intc)
		return -ENOMEM;

	for_each_possible_cpu(idx) {
		ret = bcm6345_l1_init_one(dn, idx, intc);
		if (ret)
			pr_err("failed to init intc L1 for cpu %d: %d\n",
				idx, ret);
		else
			cpumask_set_cpu(idx, &intc->cpumask);
	}

	if (cpumask_empty(&intc->cpumask)) {
		ret = -ENODEV;
		goto out_free;
	}

	raw_spin_lock_init(&intc->lock);

	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
					     &bcm6345_l1_domain_ops,
					     intc);
	if (!intc->domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
			IRQS_PER_WORD * intc->n_words);
	for_each_cpu(idx, &intc->cpumask) {
		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

		pr_info("  CPU%u (irq = %d)\n", idx, cpu->parent_irq);
	}

	return 0;

out_unmap:
	for_each_possible_cpu(idx) {
		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

		if (cpu) {
			if (cpu->map_base)
				iounmap(cpu->map_base);
			kfree(cpu);
		}
	}
out_free:
	kfree(intc);
	return ret;
}

IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);