/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
5  #ifndef __XIVE_INTERNAL_H
6  #define __XIVE_INTERNAL_H
7  
8  /*
9   * A "disabled" interrupt should never fire, to catch problems
10   * we set its logical number to this
11   */
12  #define XIVE_BAD_IRQ		0x7fffffff
13  #define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)
14  
15  /* Each CPU carry one of these with various per-CPU state */
16  struct xive_cpu {
17  #ifdef CONFIG_SMP
18  	/* HW irq number and data of IPI */
19  	u32 hw_ipi;
20  	struct xive_irq_data ipi_data;
21  #endif /* CONFIG_SMP */
22  
23  	int chip_id;
24  
25  	/* Queue datas. Only one is populated */
26  #define XIVE_MAX_QUEUES	8
27  	struct xive_q queue[XIVE_MAX_QUEUES];
28  
29  	/*
30  	 * Pending mask. Each bit corresponds to a priority that
31  	 * potentially has pending interrupts.
32  	 */
33  	u8 pending_prio;
34  
35  	/* Cache of HW CPPR */
36  	u8 cppr;
37  };
38  
39  /* Backend ops */
40  struct xive_ops {
41  	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
42  	int 	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
43  	int	(*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
44  				  u32 *sw_irq);
45  	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
46  	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
47  	void	(*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
48  	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
49  	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
50  	bool	(*match)(struct device_node *np);
51  	void	(*shutdown)(void);
52  
53  	void	(*update_pending)(struct xive_cpu *xc);
54  	void	(*sync_source)(u32 hw_irq);
55  	u64	(*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
56  #ifdef CONFIG_SMP
57  	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
58  	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
59  #endif
60  	int	(*debug_show)(struct seq_file *m, void *private);
61  	int	(*debug_create)(struct dentry *xive_dir);
62  	const char *name;
63  };
64  
65  bool xive_core_init(struct device_node *np, const struct xive_ops *ops,
66  		    void __iomem *area, u32 offset, u8 max_prio);
67  __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
68  int xive_core_debug_init(void);
69  
/*
 * Page allocation order for a queue whose size is 2^queue_shift bytes:
 * order = queue_shift - PAGE_SHIFT, or 0 when the queue fits in one page.
 */
static inline u32 xive_alloc_order(u32 queue_shift)
{
	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
}
74  
75  extern bool xive_cmdline_disabled;
76  extern bool xive_has_save_restore;
77  
78  #endif /*  __XIVE_INTERNAL_H */
79