/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 */

#ifndef _ASM_IOMMU_H
#define _ASM_IOMMU_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>
#include <asm/pci-bridge.h>
#include <asm/asm-const.h>

#define IOMMU_PAGE_SHIFT_4K      12
#define IOMMU_PAGE_SIZE_4K       (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)

#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))
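
/*
 * Illustrative example (not part of the ABI): for a table that uses 64K
 * IOMMU pages, i.e. it_page_shift == 16, the helpers above evaluate
 * roughly as:
 *
 *	IOMMU_PAGE_SIZE(tbl)		-> 1UL << 16 == 0x10000
 *	IOMMU_PAGE_MASK(tbl)		-> ~0xffff, widened to an
 *					   unsigned long page mask
 *	IOMMU_PAGE_ALIGN(0x12345, tbl)	-> 0x20000
 */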

#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
#define DMA64_PROPNAME "linux,dma64-ddr-window-info"

#define	MIN_DDW_VPMEM_DMA_WINDOW	SZ_2G

/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;

struct iommu_table_ops {
	/*
	 * When called with direction==DMA_NONE, it is equal to clear().
	 * uaddr is a linear map address.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchanges the existing TCE with a new TCE plus direction bits;
	 * returns the old TCE and DMA direction mask.
	 * @hpa is a physical address.
	 */
	int (*xchg_no_kill)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);

	void (*tce_kill)(struct iommu_table *tbl,
			unsigned long index,
			unsigned long pages);

	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* get() returns a physical address */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	void (*flush)(struct iommu_table *tbl);
	void (*free)(struct iommu_table *tbl);
};
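
/*
 * Hedged usage sketch (illustrative, mirroring how the generic allocator
 * in arch/powerpc/kernel/iommu.c is expected to drive these hooks):
 *
 *	if (tbl->it_ops->set(tbl, entry, npages, uaddr, dir, attrs))
 *		goto fail;
 *	...
 *	if (tbl->it_ops->flush)
 *		tbl->it_ops->flush(tbl);
 *
 * set() installs TCEs, clear() tears them down, and flush(), when
 * provided, pushes the update out to the hardware.
 */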

/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;

/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
 * allows up to 2**12 pages of 4096 bytes each (4096 * 4096 = 16 MB).
 */
#define IOMAP_MAX_ORDER		13

#define IOMMU_POOL_HASHBITS	2
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)

struct iommu_pool {
	unsigned long start;
	unsigned long end;
	unsigned long hint;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct iommu_table {
	unsigned long  it_busno;     /* Bus number this table belongs to */
	unsigned long  it_size;      /* Size of iommu table in entries */
	unsigned long  it_indirect_levels;
	unsigned long  it_level_size;
	unsigned long  it_allocated_size;
	unsigned long  it_offset;    /* Offset into global table */
	unsigned long  it_base;      /* mapped address of tce table */
	unsigned long  it_index;     /* which iommu table this is */
	unsigned long  it_type;      /* type: PCI or Virtual Bus */
	unsigned long  it_blocksize; /* Entries in each block (cacheline) */
	unsigned long  poolsize;
	unsigned long  nr_pools;
	struct iommu_pool large_pool;
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;       /* A simple allocation bitmap for now */
	unsigned long  it_page_shift;/* table iommu page size */
	struct list_head it_group_list;/* List of iommu_table_group_link */
	__be64 *it_userspace; /* userspace view of the table */
	struct iommu_table_ops *it_ops;
	struct kref    it_kref;
	int it_nid;
	unsigned long it_reserved_start; /* Start of not-DMA-able (MMIO) area */
	unsigned long it_reserved_end;
};

#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), true))
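
/*
 * Sketch (assumed usage, modelled on the KVM/VFIO TCE paths): a caller
 * that only needs to read the userspace view, and must not allocate
 * backing pages, would use the _RO variant:
 *
 *	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 *	if (pua)
 *		ua = be64_to_cpu(*pua);
 */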

/* Pure 2^n version of get_order */
static inline __attribute_const__
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
{
	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
}
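
/*
 * Worked example: with it_page_shift == 12, a 16KB (0x4000) request gives
 * (0x4000 - 1) >> 12 == 3 and __ilog2(3) == 1, so get_iommu_order()
 * returns 2, i.e. 2^2 == 4 IOMMU pages cover the request.
 */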


struct scatterlist;

#ifdef CONFIG_PPC64

static inline void set_iommu_table_base(struct device *dev,
					struct iommu_table *base)
{
	dev->archdata.iommu_table_base = base;
}

static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.iommu_table_base;
}

extern int dma_iommu_dma_supported(struct device *dev, u64 mask);

extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
extern int iommu_tce_table_put(struct iommu_table *tbl);

/* Initializes an iommu_table based on values set in the passed-in
 * structure
 */
extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
		int nid, unsigned long res_start, unsigned long res_end);
bool iommu_table_in_use(struct iommu_table *tbl);
extern void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end);
extern void iommu_table_clear(struct iommu_table *tbl);
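
/*
 * Hedged example (hypothetical platform setup; window_size and dma_offset
 * are made-up locals): a typical owner fills in the table geometry and
 * then hands it to iommu_init_table(), roughly as
 *
 *	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
 *	tbl->it_size = window_size >> tbl->it_page_shift;
 *	tbl->it_offset = dma_offset >> tbl->it_page_shift;
 *	tbl->it_ops = &iommu_table_pseries_ops;
 *	iommu_init_table(tbl, nid, 0, 0);
 */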

#define IOMMU_TABLE_GROUP_MAX_TABLES	2

struct iommu_table_group;

struct iommu_table_group_ops {
	unsigned long (*get_table_size)(
			__u32 page_shift,
			__u64 window_size,
			__u32 levels);
	long (*create_table)(struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl);
	long (*set_window)(struct iommu_table_group *table_group,
			int num,
			struct iommu_table *tblnew);
	long (*unset_window)(struct iommu_table_group *table_group,
			int num);
	/* Switch ownership from platform code to external user (e.g. VFIO) */
	long (*take_ownership)(struct iommu_table_group *table_group, struct device *dev);
	/* Switch ownership from external user (e.g. VFIO) back to core */
	void (*release_ownership)(struct iommu_table_group *table_group, struct device *dev);
};
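
/*
 * Illustrative call sequence (assumed, in the spirit of a VFIO-style
 * user; the argument values are examples only):
 *
 *	table_group->ops->take_ownership(table_group, dev);
 *	table_group->ops->create_table(table_group, 0, 16, SZ_2G, 1, &tbl);
 *	table_group->ops->set_window(table_group, 0, tbl);
 *	...
 *	table_group->ops->unset_window(table_group, 0);
 *	table_group->ops->release_ownership(table_group, dev);
 */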

struct iommu_table_group_link {
	struct list_head next;
	struct rcu_head rcu;
	struct iommu_table_group *table_group;
};

struct iommu_table_group {
	/* IOMMU properties */
	__u32 tce32_start;
	__u32 tce32_size;
	__u64 pgsizes; /* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 max_levels;

	struct iommu_group *group;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct iommu_table_group_ops *ops;
};

#ifdef CONFIG_IOMMU_API

extern void iommu_register_group(struct iommu_table_group *table_group,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev);
extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
int dev_has_iommu_table(struct device *dev, void *data);

#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}

static inline int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev)
{
	return 0;
}

static inline int dev_has_iommu_table(struct device *dev, void *data)
{
	return 0;
}
#endif /* !CONFIG_IOMMU_API */

u64 dma_iommu_get_required_mask(struct device *dev);
#else

static inline void *get_iommu_table_base(struct device *dev)
{
	return NULL;
}

static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

#endif /* CONFIG_PPC64 */

extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			    struct scatterlist *sglist, int nelems,
			    unsigned long mask,
			    enum dma_data_direction direction,
			    unsigned long attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
			       struct scatterlist *sglist,
			       int nelems,
			       enum dma_data_direction direction,
			       unsigned long attrs);

extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
				  size_t size, dma_addr_t *dma_handle,
				  unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
				void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
				 struct page *page, unsigned long offset,
				 size_t size, unsigned long mask,
				 enum dma_data_direction direction,
				 unsigned long attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction,
			     unsigned long attrs);
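
/*
 * Hedged sketch (hypothetical caller; normally this happens behind
 * dma_iommu_ops rather than in drivers): mapping one buffer through a
 * table looks roughly like
 *
 *	dma_addr_t dma = iommu_map_page(dev, tbl, page, offset, size,
 *					dev->coherent_dma_mask,
 *					DMA_TO_DEVICE, 0);
 *	if (dma == DMA_MAPPING_ERROR)
 *		return -EIO;
 *	...
 *	iommu_unmap_page(tbl, dma, size, DMA_TO_DEVICE, 0);
 */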

void __init iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);

#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
#endif

/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) || (tce_value))
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
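
/*
 * Sketch (assumed VFIO-side usage): a map request is typically validated
 * with the checks above before the TCE is installed, roughly as
 *
 *	if (iommu_tce_put_param_check(tbl, ioba, gpa))
 *		return -EINVAL;
 *	ret = iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
 *	...
 *	iommu_tce_kill(tbl, entry, npages);
 */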

extern void iommu_flush_tce(struct iommu_table *tbl);

extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);

#ifdef CONFIG_PPC_CELL_NATIVE
extern bool iommu_fixed_is_weak;
#else
#define iommu_fixed_is_weak false
#endif

extern const struct dma_map_ops dma_iommu_ops;

#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */