// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
 * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com>
 */

#include <linux/cleanup.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/irq_work.h>
#include <linux/slab.h>

struct irq_sim_work_ctx {
	struct irq_work		work;
	unsigned int		irq_count;
	unsigned long		*pending;
	struct irq_domain	*domain;
	struct irq_sim_ops	ops;
	void			*user_data;
};

struct irq_sim_irq_ctx {
	bool			enabled;
	struct irq_sim_work_ctx	*work_ctx;
};

static void irq_sim_irqmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = false;
}

static void irq_sim_irqunmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = true;
}

static int irq_sim_set_type(struct irq_data *data, unsigned int type)
{
	/* We only support rising and falling edge trigger types. */
	if (type & ~IRQ_TYPE_EDGE_BOTH)
		return -EINVAL;

	irqd_set_trigger_type(data, type);

	return 0;
}

static int irq_sim_get_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool *state)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		if (irq_ctx->enabled)
			*state = test_bit(hwirq, irq_ctx->work_ctx->pending);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int irq_sim_set_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool state)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		if (irq_ctx->enabled) {
			assign_bit(hwirq, irq_ctx->work_ctx->pending, state);
			if (state)
				irq_work_queue(&irq_ctx->work_ctx->work);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int irq_sim_request_resources(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	struct irq_sim_work_ctx *work_ctx = irq_ctx->work_ctx;
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	if (work_ctx->ops.irq_sim_irq_requested)
		return work_ctx->ops.irq_sim_irq_requested(work_ctx->domain,
							   hwirq,
							   work_ctx->user_data);

	return 0;
}

static void irq_sim_release_resources(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	struct irq_sim_work_ctx *work_ctx = irq_ctx->work_ctx;
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	if (work_ctx->ops.irq_sim_irq_released)
		work_ctx->ops.irq_sim_irq_released(work_ctx->domain, hwirq,
						   work_ctx->user_data);
}

static struct irq_chip irq_sim_irqchip = {
	.name			= "irq_sim",
	.irq_mask		= irq_sim_irqmask,
	.irq_unmask		= irq_sim_irqunmask,
	.irq_set_type		= irq_sim_set_type,
	.irq_get_irqchip_state	= irq_sim_get_irqchip_state,
	.irq_set_irqchip_state	= irq_sim_set_irqchip_state,
	.irq_request_resources	= irq_sim_request_resources,
	.irq_release_resources	= irq_sim_release_resources,
};

static void irq_sim_handle_irq(struct irq_work *work)
{
	struct irq_sim_work_ctx *work_ctx;
	unsigned int offset = 0;
	int irqnum;

	work_ctx = container_of(work, struct irq_sim_work_ctx, work);

	while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) {
		offset = find_next_bit(work_ctx->pending,
				       work_ctx->irq_count, offset);
		clear_bit(offset, work_ctx->pending);
		irqnum = irq_find_mapping(work_ctx->domain, offset);
		handle_simple_irq(irq_to_desc(irqnum));
	}
}

static int irq_sim_domain_map(struct irq_domain *domain,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;
	struct irq_sim_irq_ctx *irq_ctx;

	irq_ctx = kzalloc(sizeof(*irq_ctx), GFP_KERNEL);
	if (!irq_ctx)
		return -ENOMEM;

	irq_set_chip(virq, &irq_sim_irqchip);
	irq_set_chip_data(virq, irq_ctx);
	irq_set_handler(virq, handle_simple_irq);
	irq_modify_status(virq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	irq_ctx->work_ctx = work_ctx;

	return 0;
}

static void irq_sim_domain_unmap(struct irq_domain *domain, unsigned int virq)
{
	struct irq_sim_irq_ctx *irq_ctx;
	struct irq_data *irqd;

	irqd = irq_domain_get_irq_data(domain, virq);
	irq_ctx = irq_data_get_irq_chip_data(irqd);

	irq_set_handler(virq, NULL);
	irq_domain_reset_irq_data(irqd);
	kfree(irq_ctx);
}

static const struct irq_domain_ops irq_sim_domain_ops = {
	.map		= irq_sim_domain_map,
	.unmap		= irq_sim_domain_unmap,
};

/**
 * irq_domain_create_sim - Create a new interrupt simulator irq_domain and
 *                         allocate a range of dummy interrupts.
 *
 * @fwnode:     struct fwnode_handle to be associated with this domain.
 * @num_irqs:   Number of interrupts to allocate.
 *
 * On success: return a new irq_domain object.
 * On failure: a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
					 unsigned int num_irqs)
{
	return irq_domain_create_sim_full(fwnode, num_irqs, NULL, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_create_sim);
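
/**
 * irq_domain_create_sim_full - Create a new interrupt simulator irq_domain,
 *                              optionally with caller-supplied callbacks.
 *
 * @fwnode:     struct fwnode_handle to be associated with this domain.
 * @num_irqs:   Number of interrupts to allocate.
 * @ops:        Optional callbacks invoked when a simulated interrupt is
 *              requested or released. May be NULL.
 * @data:       Opaque pointer passed back to the callbacks in @ops.
 *
 * On success: return a new irq_domain object.
 * On failure: a negative errno wrapped with ERR_PTR().
 */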
struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode,
					      unsigned int num_irqs,
					      const struct irq_sim_ops *ops,
					      void *data)
{
	struct irq_sim_work_ctx *work_ctx __free(kfree) =
				kmalloc(sizeof(*work_ctx), GFP_KERNEL);

	if (!work_ctx)
		return ERR_PTR(-ENOMEM);

	unsigned long *pending __free(bitmap) = bitmap_zalloc(num_irqs, GFP_KERNEL);
	if (!pending)
		return ERR_PTR(-ENOMEM);

	work_ctx->domain = irq_domain_create_linear(fwnode, num_irqs,
						    &irq_sim_domain_ops,
						    work_ctx);
	if (!work_ctx->domain)
		return ERR_PTR(-ENOMEM);

	work_ctx->irq_count = num_irqs;
	work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq);
	work_ctx->pending = no_free_ptr(pending);
	work_ctx->user_data = data;

	/* work_ctx is not zero-initialized, so clear unused callbacks. */
	if (ops)
		memcpy(&work_ctx->ops, ops, sizeof(*ops));
	else
		memset(&work_ctx->ops, 0, sizeof(work_ctx->ops));

	return no_free_ptr(work_ctx)->domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_sim_full);
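
/*
 * Illustrative sketch (not compiled): a minimal consumer of the simulator
 * could look roughly like the snippet below. The names my_requested,
 * my_ops, my_handler, my_data, fwnode and the local variables are
 * hypothetical; the remaining calls are existing kernel interfaces.
 *
 *	static int my_requested(struct irq_domain *domain,
 *				irq_hw_number_t hwirq, void *data)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct irq_sim_ops my_ops = {
 *		.irq_sim_irq_requested = my_requested,
 *	};
 *
 *	domain = irq_domain_create_sim_full(fwnode, 4, &my_ops, my_data);
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *
 *	virq = irq_create_mapping(domain, 0);
 *	ret = request_irq(virq, my_handler, 0, "my-sim", NULL);
 *
 *	irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);
 *
 *	free_irq(virq, NULL);
 *	irq_domain_remove_sim(domain);
 *
 * irq_domain_create_sim() can be used instead when no callbacks are needed.
 */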

/**
 * irq_domain_remove_sim - Deinitialize the interrupt simulator domain: free
 *                         the interrupt descriptors and allocated memory.
 *
 * @domain:     The interrupt simulator domain to tear down.
 */
void irq_domain_remove_sim(struct irq_domain *domain)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;

	irq_work_sync(&work_ctx->work);
	bitmap_free(work_ctx->pending);
	kfree(work_ctx);

	irq_domain_remove(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_sim);

static void devm_irq_domain_remove_sim(void *data)
{
	struct irq_domain *domain = data;

	irq_domain_remove_sim(domain);
}

/**
 * devm_irq_domain_create_sim - Create a new interrupt simulator for
 *                              a managed device.
 *
 * @dev:        Device to initialize the simulator object for.
 * @fwnode:     struct fwnode_handle to be associated with this domain.
 * @num_irqs:   Number of interrupts to allocate.
 *
 * On success: return a new irq_domain object.
 * On failure: a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
					      struct fwnode_handle *fwnode,
					      unsigned int num_irqs)
{
	return devm_irq_domain_create_sim_full(dev, fwnode, num_irqs,
					       NULL, NULL);
}
EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
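
/*
 * Illustrative sketch (not compiled): in a driver's probe path the managed
 * variant ties the simulator's lifetime to the device. my_probe is a
 * hypothetical driver callback.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct irq_domain *domain;
 *
 *		domain = devm_irq_domain_create_sim(&pdev->dev,
 *						    dev_fwnode(&pdev->dev), 8);
 *		if (IS_ERR(domain))
 *			return PTR_ERR(domain);
 *
 *		return 0;
 *	}
 *
 * The simulator domain is then removed automatically via
 * devm_add_action_or_reset() when the device is detached.
 */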

struct irq_domain *
devm_irq_domain_create_sim_full(struct device *dev,
				struct fwnode_handle *fwnode,
				unsigned int num_irqs,
				const struct irq_sim_ops *ops,
				void *data)
{
	struct irq_domain *domain;
	int ret;

	domain = irq_domain_create_sim_full(fwnode, num_irqs, ops, data);
	if (IS_ERR(domain))
		return domain;

	ret = devm_add_action_or_reset(dev, devm_irq_domain_remove_sim, domain);
	if (ret)
		return ERR_PTR(ret);

	return domain;
}
EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim_full);