1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx Event Management Driver
4  *
5  *  Copyright (C) 2021 Xilinx, Inc.
6  *  Copyright (C) 2024 Advanced Micro Devices, Inc.
7  *
8  *  Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
9  */
10 
11 #include <linux/cpuhotplug.h>
12 #include <linux/firmware/xlnx-event-manager.h>
13 #include <linux/firmware/xlnx-zynqmp.h>
14 #include <linux/hashtable.h>
15 #include <linux/interrupt.h>
16 #include <linux/irq.h>
17 #include <linux/irqdomain.h>
18 #include <linux/module.h>
19 #include <linux/of_irq.h>
20 #include <linux/platform_device.h>
21 #include <linux/slab.h>
22 
23 static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number);
24 
25 static int virq_sgi;
26 static int event_manager_availability = -EACCES;
27 
28 /* SGI number used for Event management driver */
29 #define XLNX_EVENT_SGI_NUM	(15)
30 
31 /* Max number of driver can register for same event */
32 #define MAX_DRIVER_PER_EVENT	(10U)
33 
34 /* Max HashMap Order for PM API feature check (1<<7 = 128) */
35 #define REGISTERED_DRIVER_MAX_ORDER	(7)
36 
37 #define MAX_BITS	(32U) /* Number of bits available for error mask */
38 
39 #define REGISTER_NOTIFIER_FIRMWARE_VERSION	(2U)
40 
41 static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
42 static int sgi_num = XLNX_EVENT_SGI_NUM;
43 
44 static bool is_need_to_unregister;
45 
/**
 * struct agent_cb - Registered callback function and private data.
 * @agent_data:		Data passed back to handler function.
 * @eve_cb:		Function pointer to store the callback function.
 * @list:		member to create list.
 */
struct agent_cb {
	void *agent_data;
	event_cb_func_t eve_cb;
	/* Links this entry into a registered_event_data::cb_list_head list. */
	struct list_head list;
};
57 
/**
 * struct registered_event_data - Registered Event Data.
 * @key:		key is the combine id(Node-Id | Event-Id) of type u64
 *			where upper u32 for Node-Id and lower u32 for Event-Id,
 *			And this used as key to index into hashmap.
 * @cb_type:		Type of Api callback, like PM_NOTIFY_CB, etc.
 * @wake:		If this flag set, firmware will wake up processor if is
 *			in sleep or power down state.
 * @cb_list_head:	Head of call back data list which contain the information
 *			about registered handler and private data.
 * @hentry:		hlist_node that hooks this entry into hashtable.
 */
struct registered_event_data {
	u64 key;
	enum pm_api_cb_id cb_type;
	bool wake;
	/* One struct agent_cb node per registered handler for this key. */
	struct list_head cb_list_head;
	struct hlist_node hentry;
};
77 
xlnx_is_error_event(const u32 node_id)78 static bool xlnx_is_error_event(const u32 node_id)
79 {
80 	u32 pm_family_code, pm_sub_family_code;
81 
82 	zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
83 
84 	if (pm_sub_family_code == VERSAL_SUB_FAMILY_CODE) {
85 		if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 ||
86 		    node_id == VERSAL_EVENT_ERROR_PMC_ERR2 ||
87 		    node_id == VERSAL_EVENT_ERROR_PSM_ERR1 ||
88 		    node_id == VERSAL_EVENT_ERROR_PSM_ERR2)
89 			return true;
90 	} else {
91 		if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 ||
92 		    node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 ||
93 		    node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 ||
94 		    node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR1 ||
95 		    node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR2 ||
96 		    node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR3 ||
97 		    node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR4)
98 			return true;
99 	}
100 
101 	return false;
102 }
103 
/*
 * xlnx_add_cb_for_notify_event() - Register a callback for a Node-Id/Event key.
 * @node_id:	Node-Id related to the event (upper half of the hash key).
 * @event:	Event mask (lower half of the hash key).
 * @wake:	Wake-up flag stored in the entry; later passed to firmware.
 * @cb_fun:	Callback invoked when the event fires.
 * @data:	Agent private data handed back to @cb_fun.
 *
 * A hash entry is created per key on first registration; subsequent
 * registrations for the same key append to the entry's callback list.
 * Registering the identical @cb_fun/@data pair twice is a silent no-op.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
					event_cb_func_t cb_fun,	void *data)
{
	u64 key = 0;
	bool present_in_hash = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Key layout: upper 32 bits Node-Id, lower 32 bits Event mask. */
	key = ((u64)node_id << 32U) | (u64)event;
	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			present_in_hash = true;
			break;
		}
	}

	if (!present_in_hash) {
		/* Add new entry if not present in HASH table */
		eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
		if (!eve_data)
			return -ENOMEM;
		eve_data->key = key;
		eve_data->cb_type = PM_NOTIFY_CB;
		eve_data->wake = wake;
		INIT_LIST_HEAD(&eve_data->cb_list_head);

		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data) {
			/* Unwind: the hash entry was not published yet. */
			kfree(eve_data);
			return -ENOMEM;
		}
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		/* Add into callback list */
		list_add(&cb_data->list, &eve_data->cb_list_head);

		/* Add into HASH table */
		hash_add(reg_driver_map, &eve_data->hentry, key);
	} else {
		/* Search for callback function and private data in list */
		list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
			if (cb_pos->eve_cb == cb_fun &&
			    cb_pos->agent_data == data) {
				/* Same handler already registered: nothing to do. */
				return 0;
			}
		}

		/* Add multiple handler and private data in list */
		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data)
			return -ENOMEM;
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		list_add(&cb_data->list, &eve_data->cb_list_head);
	}

	return 0;
}
167 
/*
 * xlnx_add_cb_for_suspend() - Register the (single) suspend callback.
 * @cb_fun:	Callback invoked on a PM_INIT_SUSPEND_CB firmware callback.
 * @data:	Agent private data handed back to @cb_fun.
 *
 * Only one suspend callback may be registered at a time; a second
 * registration attempt is rejected with -EINVAL.
 *
 * Return: 0 on success, -EINVAL if already registered, -ENOMEM on
 * allocation failure.
 */
static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
{
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;

	/* Check for existing entry in hash table for given cb_type */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			pr_err("Found as already registered\n");
			return -EINVAL;
		}
	}

	/* Add new entry if not present */
	eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
	if (!eve_data)
		return -ENOMEM;

	eve_data->key = 0;
	eve_data->cb_type = PM_INIT_SUSPEND_CB;
	/* kmalloc() does not zero: initialize wake explicitly. */
	eve_data->wake = false;
	INIT_LIST_HEAD(&eve_data->cb_list_head);

	cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
	if (!cb_data) {
		/* Fix: eve_data was previously leaked on this error path. */
		kfree(eve_data);
		return -ENOMEM;
	}
	cb_data->eve_cb = cb_fun;
	cb_data->agent_data = data;

	/* Add into callback list */
	list_add(&cb_data->list, &eve_data->cb_list_head);

	hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);

	return 0;
}
203 
/*
 * xlnx_remove_cb_for_suspend() - Unregister the suspend callback.
 * @cb_fun:	Callback previously registered via xlnx_add_cb_for_suspend().
 *
 * Sets the module-level is_need_to_unregister flag when an entry was
 * removed, so the caller knows to drop the firmware registration.
 *
 * NOTE(review): the hash entry is deleted and freed whenever an entry
 * of type PM_INIT_SUSPEND_CB is found, even if @cb_fun did not match
 * any list element — any non-matching agent_cb nodes would then leak.
 * In practice registration only ever stores one callback per suspend
 * entry; confirm before relying on multiple suspend callbacks.
 *
 * Return: 0 on success, -EINVAL if no matching callback was registered.
 */
static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;
	struct hlist_node *tmp;

	is_need_to_unregister = false;

	/* Check for existing entry in hash table for given cb_type */
	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			/* Delete the list of callback */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}
			/* remove an object from a hashtable */
			hash_del(&eve_data->hentry);
			kfree(eve_data);
			is_need_to_unregister = true;
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for suspend event\n");
		return -EINVAL;
	}

	return 0;
}
238 
/*
 * xlnx_remove_cb_for_notify_event() - Unregister a callback for a
 * Node-Id/Event key.
 * @node_id:	Node-Id related to the event.
 * @event:	Event mask (single event bit for error events).
 * @cb_fun:	Callback to remove.
 * @data:	Agent private data that was registered with @cb_fun.
 *
 * Removes the matching @cb_fun/@data pair from the key's callback list;
 * when the list becomes empty the hash entry itself is freed and the
 * module-level is_need_to_unregister flag is set so the caller knows to
 * drop the firmware registration.
 *
 * Return: 0 on success, -EINVAL if no matching callback was registered.
 */
static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
					   event_cb_func_t cb_fun, void *data)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	/* Key layout: upper 32 bits Node-Id, lower 32 bits Event mask. */
	u64 key = ((u64)node_id << 32U) | (u64)event;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;
	struct hlist_node *tmp;

	is_need_to_unregister = false;

	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) {
		if (eve_data->key == key) {
			/* Delete the list of callback */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun &&
				    cb_pos->agent_data == data) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}

			/* Remove HASH table if callback list is empty */
			if (list_empty(&eve_data->cb_list_head)) {
				/* remove an object from a HASH table */
				hash_del(&eve_data->hentry);
				kfree(eve_data);
				is_need_to_unregister = true;
			}
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
			node_id, event);
		return -EINVAL;
	}

	return 0;
}
281 
/**
 * xlnx_register_event() - Register for the event.
 * @cb_type:	Type of callback from pm_api_cb_id,
 *			PM_NOTIFY_CB - for Error Events,
 *			PM_INIT_SUSPEND_CB - for suspend callback.
 * @node_id:	Node-Id related to event.
 * @event:	Event Mask for the Error Event.
 * @wake:	Flag specifying whether the subsystem should be woken upon
 *		event notification.
 * @cb_fun:	Function pointer to store the callback function.
 * @data:	Pointer for the driver instance.
 *
 * Return:	Returns 0 on successful registration else error code.
 */
int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
			const bool wake, event_cb_func_t cb_fun, void *data)
{
	int ret = 0;
	u32 eve;
	int pos;

	/* Non-zero means the event manager never probed successfully. */
	if (event_manager_availability)
		return event_manager_availability;

	if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
		pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
		return -EINVAL;
	}

	if (!cb_fun)
		return -EFAULT;

	if (cb_type == PM_INIT_SUSPEND_CB) {
		ret = xlnx_add_cb_for_suspend(cb_fun, data);
	} else {
		if (!xlnx_is_error_event(node_id)) {
			/* Add entry for Node-Id/Event in hash table */
			ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
		} else {
			/*
			 * Error events store one hash entry per error bit, so
			 * register each set bit of the mask individually.
			 * NOTE(review): `1 << pos` at pos == 31 left-shifts into
			 * the sign bit of a signed int — consider 1U << pos.
			 */
			for (pos = 0; pos < MAX_BITS; pos++) {
				eve = event & (1 << pos);
				if (!eve)
					continue;

				/* Add entry for Node-Id/Eve in hash table */
				ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
								   data);
				/* Break the loop if got error */
				if (ret)
					break;
			}
			if (ret) {
				/* Skip the Event for which got the error */
				pos--;
				/* Remove registered(during this call) event from hash table */
				for ( ; pos >= 0; pos--) {
					eve = event & (1 << pos);
					if (!eve)
						continue;
					xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
				}
			}
		}

		if (ret) {
			pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
			       event, ret);
			return ret;
		}

		/* Register for Node-Id/Event combination in firmware */
		ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
		if (ret) {
			pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
			       event, ret);
			/* Remove already registered event from hash table */
			if (xlnx_is_error_event(node_id)) {
				for (pos = 0; pos < MAX_BITS; pos++) {
					eve = event & (1 << pos);
					if (!eve)
						continue;
					xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
				}
			} else {
				xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
			}
			return ret;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(xlnx_register_event);
375 EXPORT_SYMBOL_GPL(xlnx_register_event);
376 
/**
 * xlnx_unregister_event() - Unregister for the event.
 * @cb_type:	Type of callback from pm_api_cb_id,
 *			PM_NOTIFY_CB - for Error Events,
 *			PM_INIT_SUSPEND_CB - for suspend callback.
 * @node_id:	Node-Id related to event.
 * @event:	Event Mask for the Error Event.
 * @cb_fun:	Function pointer of callback function.
 * @data:	Pointer of agent's private data.
 *
 * Return:	Returns 0 on successful unregistration else error code.
 */
int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
			  event_cb_func_t cb_fun, void *data)
{
	int ret = 0;
	u32 eve, pos;

	/* Set by the remove helpers when a hash entry was fully removed. */
	is_need_to_unregister = false;

	if (event_manager_availability)
		return event_manager_availability;

	if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
		pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
		return -EINVAL;
	}

	if (!cb_fun)
		return -EFAULT;

	if (cb_type == PM_INIT_SUSPEND_CB) {
		ret = xlnx_remove_cb_for_suspend(cb_fun);
	} else {
		/* Remove Node-Id/Event from hash table */
		if (!xlnx_is_error_event(node_id)) {
			xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
		} else {
			/* Error events: one hash entry per set bit of the mask. */
			for (pos = 0; pos < MAX_BITS; pos++) {
				eve = event & (1 << pos);
				if (!eve)
					continue;

				xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
			}
		}

		/* Un-register if list is empty */
		if (is_need_to_unregister) {
			/* Un-register for Node-Id/Event combination */
			ret = zynqmp_pm_register_notifier(node_id, event, false, false);
			if (ret) {
				pr_err("%s() failed for 0x%x and 0x%x: %d\n",
				       __func__, node_id, event, ret);
				return ret;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(xlnx_unregister_event);
438 EXPORT_SYMBOL_GPL(xlnx_unregister_event);
439 
xlnx_call_suspend_cb_handler(const u32 * payload)440 static void xlnx_call_suspend_cb_handler(const u32 *payload)
441 {
442 	bool is_callback_found = false;
443 	struct registered_event_data *eve_data;
444 	u32 cb_type = payload[0];
445 	struct agent_cb *cb_pos;
446 	struct agent_cb *cb_next;
447 
448 	/* Check for existing entry in hash table for given cb_type */
449 	hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
450 		if (eve_data->cb_type == cb_type) {
451 			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
452 				cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
453 				is_callback_found = true;
454 			}
455 		}
456 	}
457 	if (!is_callback_found)
458 		pr_warn("Didn't find any registered callback for suspend event\n");
459 }
460 
/*
 * xlnx_call_notify_cb_handler() - Dispatch a notify callback payload.
 * @payload: Firmware callback payload; payload[1] is the Node-Id and
 *	     payload[2] is the event mask (a single bit for error events).
 *
 * Invokes every handler registered for the payload's Node-Id/Event key,
 * then re-arms the notification with firmware (notifications appear to
 * be one-shot from the firmware side). If re-arming fails, all handlers
 * for the key are unregistered from the hash table.
 *
 * NOTE(review): the failure path calls xlnx_remove_cb_for_notify_event()
 * while iterating the same callback list; that helper frees list nodes,
 * including potentially the saved cb_next — confirm this cannot
 * use-after-free when more than one handler is registered.
 */
static void xlnx_call_notify_cb_handler(const u32 *payload)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	/* Key layout: upper 32 bits Node-Id, lower 32 bits Event mask. */
	u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
				is_callback_found = true;
			}

			/* re register with firmware to get future events */
			ret = zynqmp_pm_register_notifier(payload[1], payload[2],
							  eve_data->wake, true);
			if (ret) {
				pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
				       payload[1], payload[2], ret);
				list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
							 list) {
					/* Remove already registered event from hash table */
					xlnx_remove_cb_for_notify_event(payload[1], payload[2],
									cb_pos->eve_cb,
									cb_pos->agent_data);
				}
			}
		}
	}
	if (!is_callback_found)
		pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n",
			payload[1], payload[2]);
}
498 
/* Fetch the pending callback payload from firmware (GET_CALLBACK_DATA call). */
static void xlnx_get_event_callback_data(u32 *buf)
{
	zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
}
503 
xlnx_event_handler(int irq,void * dev_id)504 static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
505 {
506 	u32 cb_type, node_id, event, pos;
507 	u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
508 	u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};
509 
510 	/* Get event data */
511 	xlnx_get_event_callback_data(payload);
512 
513 	/* First element is callback type, others are callback arguments */
514 	cb_type = payload[0];
515 
516 	if (cb_type == PM_NOTIFY_CB) {
517 		node_id = payload[1];
518 		event = payload[2];
519 		if (!xlnx_is_error_event(node_id)) {
520 			xlnx_call_notify_cb_handler(payload);
521 		} else {
522 			/*
523 			 * Each call back function expecting payload as an input arguments.
524 			 * We can get multiple error events as in one call back through error
525 			 * mask. So payload[2] may can contain multiple error events.
526 			 * In reg_driver_map database we store data in the combination of single
527 			 * node_id-error combination.
528 			 * So coping the payload message into event_data and update the
529 			 * event_data[2] with Error Mask for single error event and use
530 			 * event_data as input argument for registered call back function.
531 			 *
532 			 */
533 			memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
534 			/* Support Multiple Error Event */
535 			for (pos = 0; pos < MAX_BITS; pos++) {
536 				if ((0 == (event & (1 << pos))))
537 					continue;
538 				event_data[2] = (event & (1 << pos));
539 				xlnx_call_notify_cb_handler(event_data);
540 			}
541 		}
542 	} else if (cb_type == PM_INIT_SUSPEND_CB) {
543 		xlnx_call_suspend_cb_handler(payload);
544 	} else {
545 		pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
546 	}
547 
548 	return IRQ_HANDLED;
549 }
550 
/* CPU hotplug "online" callback: enable the per-CPU SGI on the new CPU. */
static int xlnx_event_cpuhp_start(unsigned int cpu)
{
	enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);

	return 0;
}
557 
/* CPU hotplug "offline" callback: disable the per-CPU SGI on the departing CPU. */
static int xlnx_event_cpuhp_down(unsigned int cpu)
{
	disable_percpu_irq(virq_sgi);

	return 0;
}
564 
/* on_each_cpu() helper: disable the per-CPU SGI on the executing CPU. */
static void xlnx_disable_percpu_irq(void *data)
{
	disable_percpu_irq(virq_sgi);
}
569 
xlnx_event_init_sgi(struct platform_device * pdev)570 static int xlnx_event_init_sgi(struct platform_device *pdev)
571 {
572 	int ret = 0;
573 	/*
574 	 * IRQ related structures are used for the following:
575 	 * for each SGI interrupt ensure its mapped by GIC IRQ domain
576 	 * and that each corresponding linux IRQ for the HW IRQ has
577 	 * a handler for when receiving an interrupt from the remote
578 	 * processor.
579 	 */
580 	struct irq_domain *domain;
581 	struct irq_fwspec sgi_fwspec;
582 	struct device_node *interrupt_parent = NULL;
583 	struct device *parent = pdev->dev.parent;
584 
585 	/* Find GIC controller to map SGIs. */
586 	interrupt_parent = of_irq_find_parent(parent->of_node);
587 	if (!interrupt_parent) {
588 		dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
589 		return -EINVAL;
590 	}
591 
592 	/* Each SGI needs to be associated with GIC's IRQ domain. */
593 	domain = irq_find_host(interrupt_parent);
594 	of_node_put(interrupt_parent);
595 
596 	/* Each mapping needs GIC domain when finding IRQ mapping. */
597 	sgi_fwspec.fwnode = domain->fwnode;
598 
599 	/*
600 	 * When irq domain looks at mapping each arg is as follows:
601 	 * 3 args for: interrupt type (SGI), interrupt # (set later), type
602 	 */
603 	sgi_fwspec.param_count = 1;
604 
605 	/* Set SGI's hwirq */
606 	sgi_fwspec.param[0] = sgi_num;
607 	virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
608 
609 	ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
610 				 &dummy_cpu_number);
611 
612 	WARN_ON(ret);
613 	if (ret) {
614 		irq_dispose_mapping(virq_sgi);
615 		return ret;
616 	}
617 
618 	irq_to_desc(virq_sgi);
619 	irq_set_status_flags(virq_sgi, IRQ_PER_CPU);
620 
621 	return ret;
622 }
623 
/*
 * xlnx_event_cleanup_sgi() - Tear down the SGI mapping and handler.
 * @pdev:	Event manager platform device (unused).
 *
 * NOTE(review): passes CPUHP_AP_ONLINE_DYN directly to
 * cpuhp_remove_state() rather than the state value returned by
 * cpuhp_setup_state() in probe — confirm this matches the intended
 * dynamic-state usage.
 */
static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
{
	cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);

	/* Disable the per-CPU IRQ on every CPU before freeing it. */
	on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);

	irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
	free_percpu_irq(virq_sgi, &dummy_cpu_number);
	irq_dispose_mapping(virq_sgi);
}
634 
/*
 * xlnx_event_manager_probe() - Probe the event manager platform device.
 * @pdev:	Platform device.
 *
 * Verifies firmware support for PM_REGISTER_NOTIFIER, initializes the
 * SGI path, installs CPU hotplug callbacks and registers the SGI number
 * with firmware. On success, clears event_manager_availability so the
 * register/unregister API becomes usable.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int xlnx_event_manager_probe(struct platform_device *pdev)
{
	int ret;

	ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
	if (ret < 0) {
		dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
		return ret;
	}

	/* Non-negative feature result carries the firmware API version. */
	if ((ret & FIRMWARE_VERSION_MASK) <
	    REGISTER_NOTIFIER_FIRMWARE_VERSION) {
		dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
			REGISTER_NOTIFIER_FIRMWARE_VERSION,
			ret & FIRMWARE_VERSION_MASK);
		return -EOPNOTSUPP;
	}

	/* Initialize the SGI */
	ret = xlnx_event_init_sgi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "SGI Init has been failed with %d\n", ret);
		return ret;
	}

	/* Setup function for the CPU hot-plug cases */
	/* NOTE(review): return value is unchecked — a setup failure would go unnoticed. */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
			  xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);

	ret = zynqmp_pm_register_sgi(sgi_num, 0);
	if (ret) {
		if (ret == -EOPNOTSUPP)
			dev_err(&pdev->dev, "SGI registration not supported by TF-A or Xen\n");
		else
			dev_err(&pdev->dev, "SGI %d registration failed, err %d\n", sgi_num, ret);

		xlnx_event_cleanup_sgi(pdev);
		return ret;
	}

	/* Make xlnx_register_event()/xlnx_unregister_event() usable. */
	event_manager_availability = 0;

	dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
	dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");

	return ret;
}
682 
/*
 * xlnx_event_manager_remove() - Remove the event manager platform device.
 * @pdev:	Platform device.
 *
 * Frees every registered callback and hash entry, unregisters the SGI
 * with firmware, tears down the IRQ path and marks the event manager
 * unavailable again.
 */
static void xlnx_event_manager_remove(struct platform_device *pdev)
{
	int i;
	struct registered_event_data *eve_data;
	struct hlist_node *tmp;
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Drain all registrations: free each callback, then its hash entry. */
	hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
		list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
			list_del_init(&cb_pos->list);
			kfree(cb_pos);
		}
		hash_del(&eve_data->hentry);
		kfree(eve_data);
	}

	/* (0, 1) means "reset" the SGI registration in firmware. */
	ret = zynqmp_pm_register_sgi(0, 1);
	if (ret)
		dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);

	xlnx_event_cleanup_sgi(pdev);

	event_manager_availability = -EACCES;
}
709 
static struct platform_driver xlnx_event_manager_driver = {
	.probe = xlnx_event_manager_probe,
	.remove_new = xlnx_event_manager_remove,
	.driver = {
		.name = "xlnx_event_manager",
	},
};
/* Allow overriding the SGI number at module load (default XLNX_EVENT_SGI_NUM). */
module_param(sgi_num, uint, 0);
module_platform_driver(xlnx_event_manager_driver);
719