1  /* SPDX-License-Identifier: GPL-2.0-only */
2  /*
3   * Copyright (C) 2012 ARM Ltd.
4   */
5  #ifndef __ASM_PGTABLE_HWDEF_H
6  #define __ASM_PGTABLE_HWDEF_H
7  
8  #include <asm/memory.h>
9  
/*
 * Number of page-table levels required to address 'va_bits' wide
 * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
 * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence:
 *
 *  levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3))
 *
 * where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
 *
 * We cannot include linux/kernel.h which defines DIV_ROUND_UP here
 * due to build issues. So we open code DIV_ROUND_UP here:
 *
 *	((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3))
 *
 * which gets simplified as :
 *
 * e.g. 48-bit VA with 4K pages (PAGE_SHIFT == 12): (48 - 4) / 9 == 4 levels.
 */
#define ARM64_HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))
27  
/*
 * Size mapped by an entry at level n ( -1 <= n <= 3)
 * We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
 * in the final page. The maximum number of translation levels supported by
 * the architecture is 5. Hence, starting at level n, we have further
 * ((4 - n) - 1) levels of translation excluding the offset within the page.
 * So, the total number of bits mapped by an entry at level n is :
 *
 *  ((4 - n) - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT
 *
 * Rearranging it a bit we get :
 *   (4 - n) * (PAGE_SHIFT - 3) + 3
 */
#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n)	((PAGE_SHIFT - 3) * (4 - (n)) + 3)

/* A full table holds one 8-byte descriptor per 8 bytes of a page. */
#define PTRS_PER_PTE		(1 << (PAGE_SHIFT - 3))
44  
/*
 * PMD_SHIFT determines the size a level 2 page table entry can map.
 * Only defined when a separate level 2 exists (CONFIG_PGTABLE_LEVELS > 2);
 * otherwise the PMD is folded into a higher level.
 */
#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define PMD_SIZE		(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PTRS_PER_PMD		(1 << (PAGE_SHIFT - 3))
#endif
54  
/*
 * PUD_SHIFT determines the size a level 1 page table entry can map.
 * Only defined when a separate level 1 exists (CONFIG_PGTABLE_LEVELS > 3).
 */
#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define PUD_SIZE		(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE-1))
#define PTRS_PER_PUD		(1 << (PAGE_SHIFT - 3))
#endif
64  
/*
 * P4D_SHIFT determines the size a level 0 page table entry can map.
 * Only defined when a separate level 0 exists (CONFIG_PGTABLE_LEVELS > 4).
 */
#if CONFIG_PGTABLE_LEVELS > 4
#define P4D_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(0)
#define P4D_SIZE		(_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK		(~(P4D_SIZE-1))
#define PTRS_PER_P4D		(1 << (PAGE_SHIFT - 3))
#endif
71  
/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * (depending on the configuration, this level can be -1, 0, 1 or 2).
 */
#define PGDIR_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
#define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
/*
 * Unlike the intermediate levels, the top-level table is sized by the
 * configured VA range, so it may hold fewer than PTRS_PER_PTE entries.
 */
#define PTRS_PER_PGD		(1 << (VA_BITS - PGDIR_SHIFT))
80  
/*
 * Contiguous page definitions. A contiguous range is a block of
 * CONT_PTES (or CONT_PMDS) adjacent, naturally aligned entries marked
 * with the contiguous hint bit, allowing the TLB to cache the whole
 * range in a single entry. The range size is configuration-dependent
 * via CONFIG_ARM64_CONT_{PTE,PMD}_SHIFT.
 */
#define CONT_PTE_SHIFT		(CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT)
#define CONT_PTES		(1 << (CONT_PTE_SHIFT - PAGE_SHIFT))
#define CONT_PTE_SIZE		(CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK		(~(CONT_PTE_SIZE - 1))

#define CONT_PMD_SHIFT		(CONFIG_ARM64_CONT_PMD_SHIFT + PMD_SHIFT)
#define CONT_PMDS		(1 << (CONT_PMD_SHIFT - PMD_SHIFT))
#define CONT_PMD_SIZE		(CONT_PMDS * PMD_SIZE)
#define CONT_PMD_MASK		(~(CONT_PMD_SIZE - 1))
93  
/*
 * Hardware page table definitions.
 *
 * Level -1 descriptor (PGD).
 */
#define PGD_TYPE_TABLE		(_AT(pgdval_t, 3) << 0)	/* bits[1:0] == 0b11: table */
#define PGD_TABLE_BIT		(_AT(pgdval_t, 1) << 1)
#define PGD_TYPE_MASK		(_AT(pgdval_t, 3) << 0)
#define PGD_TABLE_PXN		(_AT(pgdval_t, 1) << 59)	/* hierarchical PXNTable */
#define PGD_TABLE_UXN		(_AT(pgdval_t, 1) << 60)	/* hierarchical UXNTable */
104  
/*
 * Level 0 descriptor (P4D).
 */
#define P4D_TYPE_TABLE		(_AT(p4dval_t, 3) << 0)	/* bits[1:0] == 0b11: table */
#define P4D_TABLE_BIT		(_AT(p4dval_t, 1) << 1)
#define P4D_TYPE_MASK		(_AT(p4dval_t, 3) << 0)
#define P4D_TYPE_SECT		(_AT(p4dval_t, 1) << 0)	/* bits[1:0] == 0b01: block */
#define P4D_SECT_RDONLY		(_AT(p4dval_t, 1) << 7)		/* AP[2] */
#define P4D_TABLE_PXN		(_AT(p4dval_t, 1) << 59)	/* hierarchical PXNTable */
#define P4D_TABLE_UXN		(_AT(p4dval_t, 1) << 60)	/* hierarchical UXNTable */
115  
/*
 * Level 1 descriptor (PUD).
 */
#define PUD_TYPE_TABLE		(_AT(pudval_t, 3) << 0)	/* bits[1:0] == 0b11: table */
#define PUD_TABLE_BIT		(_AT(pudval_t, 1) << 1)
#define PUD_TYPE_MASK		(_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT		(_AT(pudval_t, 1) << 0)	/* bits[1:0] == 0b01: block */
#define PUD_SECT_RDONLY		(_AT(pudval_t, 1) << 7)		/* AP[2] */
#define PUD_TABLE_PXN		(_AT(pudval_t, 1) << 59)	/* hierarchical PXNTable */
#define PUD_TABLE_UXN		(_AT(pudval_t, 1) << 60)	/* hierarchical UXNTable */
126  
/*
 * Level 2 descriptor (PMD).
 */
#define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)	/* bits[1:0] == 0b11: table */
#define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)	/* bits[1:0] == 0b01: block */
#define PMD_TABLE_BIT		(_AT(pmdval_t, 1) << 1)

/*
 * Section
 */
#define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
#define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
#define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)	/* Access Flag */
#define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)	/* nG */
#define PMD_SECT_CONT		(_AT(pmdval_t, 1) << 52)	/* Contiguous range */
#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)	/* Privileged XN */
#define PMD_SECT_UXN		(_AT(pmdval_t, 1) << 54)	/* User XN */
#define PMD_TABLE_PXN		(_AT(pmdval_t, 1) << 59)	/* hierarchical PXNTable */
#define PMD_TABLE_UXN		(_AT(pmdval_t, 1) << 60)	/* hierarchical UXNTable */

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PMD_ATTRINDX(t)		(_AT(pmdval_t, (t)) << 2)
#define PMD_ATTRINDX_MASK	(_AT(pmdval_t, 7) << 2)
154  
/*
 * Level 3 descriptor (PTE).
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
#define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)	/* bits[1:0] == 0b11: page */
#define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
#define PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
#define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
#define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
#define PTE_GP			(_AT(pteval_t, 1) << 50)	/* BTI guarded */
#define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
#define PTE_CONT		(_AT(pteval_t, 1) << 52)	/* Contiguous range */
#define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
#define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
/* Bits ignored by hardware, available for software use */
#define PTE_SWBITS_MASK		_AT(pteval_t, (BIT(63) | GENMASK(58, 55)))

/* Output address bits [49:PAGE_SHIFT] held directly in the descriptor */
#define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52
#ifdef CONFIG_ARM64_64K_PAGES
/* 64K pages: PA bits [51:48] are carried in descriptor bits [15:12] */
#define PTE_ADDR_HIGH		(_AT(pteval_t, 0xf) << 12)
#define PTE_ADDR_HIGH_SHIFT	36	/* distance from bit 12 to PA bit 48 */
#define PHYS_TO_PTE_ADDR_MASK	(PTE_ADDR_LOW | PTE_ADDR_HIGH)
#else
/* 4K/16K pages (LPA2): PA bits [51:50] are carried in descriptor bits [9:8] */
#define PTE_ADDR_HIGH		(_AT(pteval_t, 0x3) << 8)
#define PTE_ADDR_HIGH_SHIFT	42	/* distance from bit 8 to PA bit 50 */
#define PHYS_TO_PTE_ADDR_MASK	GENMASK_ULL(49, 8)
#endif
#endif
186  
/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PTE_ATTRINDX(t)		(_AT(pteval_t, (t)) << 2)
#define PTE_ATTRINDX_MASK	(_AT(pteval_t, 7) << 2)

/*
 * PIIndex[3:0] encoding (Permission Indirection Extension)
 *
 * With FEAT_S1PIE these four bit positions form an index into the
 * PIR_ELx permission registers; the legacy role of each bit is noted.
 */
#define PTE_PI_IDX_0	6	/* AP[1], USER */
#define PTE_PI_IDX_1	51	/* DBM */
#define PTE_PI_IDX_2	53	/* PXN */
#define PTE_PI_IDX_3	54	/* UXN */

/*
 * POIndex[2:0] encoding (Permission Overlay Extension)
 */
#define PTE_PO_IDX_0	(_AT(pteval_t, 1) << 60)
#define PTE_PO_IDX_1	(_AT(pteval_t, 1) << 61)
#define PTE_PO_IDX_2	(_AT(pteval_t, 1) << 62)

#define PTE_PO_IDX_MASK		GENMASK_ULL(62, 60)
209  
210  
/*
 * Memory Attribute override for Stage-2 (MemAttr[3:0])
 */
#define PTE_S2_MEMATTR(t)	(_AT(pteval_t, (t)) << 2)

/*
 * Hierarchical permission for Stage-1 tables
 */
#define S1_TABLE_AP		(_AT(pmdval_t, 3) << 61)	/* APTable[1:0] */

/*
 * Highest possible physical address supported.
 */
#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)

/* Common-not-Private: bit 0 of TTBR_ELx */
#define TTBR_CNP_BIT		(UL(1) << 0)
228  
/*
 * TCR flags.
 *
 * Field positions follow the Arm ARM description of TCR_EL1; the *0
 * variants control translations via TTBR0, the *1 variants via TTBR1.
 */
#define TCR_T0SZ_OFFSET		0
#define TCR_T1SZ_OFFSET		16
/* TxSZ encodes the region size as (64 - va_bits) */
#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)
#define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_T1SZ_OFFSET)
#define TCR_TxSZ(x)		(TCR_T0SZ(x) | TCR_T1SZ(x))
#define TCR_TxSZ_WIDTH		6
#define TCR_T0SZ_MASK		(((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET)
#define TCR_T1SZ_MASK		(((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T1SZ_OFFSET)

/* EPDx: disable table walks using TTBRx */
#define TCR_EPD0_SHIFT		7
#define TCR_EPD0_MASK		(UL(1) << TCR_EPD0_SHIFT)
/* IRGNx: inner cacheability attribute for walks using TTBRx */
#define TCR_IRGN0_SHIFT		8
#define TCR_IRGN0_MASK		(UL(3) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NC		(UL(0) << TCR_IRGN0_SHIFT)	/* Non-cacheable */
#define TCR_IRGN0_WBWA		(UL(1) << TCR_IRGN0_SHIFT)	/* Write-Back Write-Allocate */
#define TCR_IRGN0_WT		(UL(2) << TCR_IRGN0_SHIFT)	/* Write-Through */
#define TCR_IRGN0_WBnWA		(UL(3) << TCR_IRGN0_SHIFT)	/* Write-Back no Write-Allocate */

#define TCR_EPD1_SHIFT		23
#define TCR_EPD1_MASK		(UL(1) << TCR_EPD1_SHIFT)
#define TCR_IRGN1_SHIFT		24
#define TCR_IRGN1_MASK		(UL(3) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_NC		(UL(0) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WBWA		(UL(1) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WT		(UL(2) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WBnWA		(UL(3) << TCR_IRGN1_SHIFT)

/* Convenience combinations applying the same attribute to both halves */
#define TCR_IRGN_NC		(TCR_IRGN0_NC | TCR_IRGN1_NC)
#define TCR_IRGN_WBWA		(TCR_IRGN0_WBWA | TCR_IRGN1_WBWA)
#define TCR_IRGN_WT		(TCR_IRGN0_WT | TCR_IRGN1_WT)
#define TCR_IRGN_WBnWA		(TCR_IRGN0_WBnWA | TCR_IRGN1_WBnWA)
#define TCR_IRGN_MASK		(TCR_IRGN0_MASK | TCR_IRGN1_MASK)


/* ORGNx: outer cacheability attribute for walks using TTBRx */
#define TCR_ORGN0_SHIFT		10
#define TCR_ORGN0_MASK		(UL(3) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NC		(UL(0) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBWA		(UL(1) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WT		(UL(2) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBnWA		(UL(3) << TCR_ORGN0_SHIFT)

#define TCR_ORGN1_SHIFT		26
#define TCR_ORGN1_MASK		(UL(3) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_NC		(UL(0) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WBWA		(UL(1) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WT		(UL(2) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WBnWA		(UL(3) << TCR_ORGN1_SHIFT)

#define TCR_ORGN_NC		(TCR_ORGN0_NC | TCR_ORGN1_NC)
#define TCR_ORGN_WBWA		(TCR_ORGN0_WBWA | TCR_ORGN1_WBWA)
#define TCR_ORGN_WT		(TCR_ORGN0_WT | TCR_ORGN1_WT)
#define TCR_ORGN_WBnWA		(TCR_ORGN0_WBnWA | TCR_ORGN1_WBnWA)
#define TCR_ORGN_MASK		(TCR_ORGN0_MASK | TCR_ORGN1_MASK)

/* SHx: shareability attribute for walks using TTBRx */
#define TCR_SH0_SHIFT		12
#define TCR_SH0_MASK		(UL(3) << TCR_SH0_SHIFT)
#define TCR_SH0_INNER		(UL(3) << TCR_SH0_SHIFT)	/* Inner Shareable */

#define TCR_SH1_SHIFT		28
#define TCR_SH1_MASK		(UL(3) << TCR_SH1_SHIFT)
#define TCR_SH1_INNER		(UL(3) << TCR_SH1_SHIFT)
#define TCR_SHARED		(TCR_SH0_INNER | TCR_SH1_INNER)

/*
 * TGx: translation granule size for TTBRx. Note the encodings differ
 * between TG0 and TG1.
 */
#define TCR_TG0_SHIFT		14
#define TCR_TG0_MASK		(UL(3) << TCR_TG0_SHIFT)
#define TCR_TG0_4K		(UL(0) << TCR_TG0_SHIFT)
#define TCR_TG0_64K		(UL(1) << TCR_TG0_SHIFT)
#define TCR_TG0_16K		(UL(2) << TCR_TG0_SHIFT)

#define TCR_TG1_SHIFT		30
#define TCR_TG1_MASK		(UL(3) << TCR_TG1_SHIFT)
#define TCR_TG1_16K		(UL(1) << TCR_TG1_SHIFT)
#define TCR_TG1_4K		(UL(2) << TCR_TG1_SHIFT)
#define TCR_TG1_64K		(UL(3) << TCR_TG1_SHIFT)

#define TCR_IPS_SHIFT		32	/* Intermediate Physical Address Size */
#define TCR_IPS_MASK		(UL(7) << TCR_IPS_SHIFT)
#define TCR_A1			(UL(1) << 22)	/* ASID taken from TTBR1 */
#define TCR_ASID16		(UL(1) << 36)	/* 16-bit ASIDs */
#define TCR_TBI0		(UL(1) << 37)	/* Top Byte Ignored, TTBR0 */
#define TCR_TBI1		(UL(1) << 38)	/* Top Byte Ignored, TTBR1 */
#define TCR_HA			(UL(1) << 39)	/* Hardware Access flag update */
#define TCR_HD			(UL(1) << 40)	/* Hardware Dirty state management */
#define TCR_HPD0_SHIFT		41	/* Hierarchical Permission Disable, TTBR0 */
#define TCR_HPD0		(UL(1) << TCR_HPD0_SHIFT)
#define TCR_HPD1_SHIFT		42	/* Hierarchical Permission Disable, TTBR1 */
#define TCR_HPD1		(UL(1) << TCR_HPD1_SHIFT)
#define TCR_TBID0		(UL(1) << 51)	/* TBI0 applies to data accesses only */
#define TCR_TBID1		(UL(1) << 52)	/* TBI1 applies to data accesses only */
#define TCR_NFD0		(UL(1) << 53)	/* Non-Fault translation table walk Disable */
#define TCR_NFD1		(UL(1) << 54)
#define TCR_E0PD0		(UL(1) << 55)	/* fault EL0 access to the TTBR0 half */
#define TCR_E0PD1		(UL(1) << 56)	/* fault EL0 access to the TTBR1 half */
#define TCR_TCMA0		(UL(1) << 57)	/* MTE: tag Check Match-All, TTBR0 */
#define TCR_TCMA1		(UL(1) << 58)	/* MTE: tag Check Match-All, TTBR1 */
#define TCR_DS			(UL(1) << 59)	/* LPA2: 52-bit addresses with 4K/16K granules */
328  
/*
 * TTBR.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
/*
 * TTBR_ELx[1] is RES0 in this configuration.
 */
#define TTBR_BADDR_MASK_52	GENMASK_ULL(47, 2)
#endif

#ifdef CONFIG_ARM64_VA_BITS_52
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
/*
 * Byte offset between the start of a PGD sized for 52-bit VAs and one
 * sized for 48-bit VAs: (2^(52 - PGDIR_SHIFT) - 2^(48 - PGDIR_SHIFT))
 * entries of 8 bytes each.
 */
#define TTBR1_BADDR_4852_OFFSET	(((UL(1) << (52 - PGDIR_SHIFT)) - \
				 (UL(1) << (48 - PGDIR_SHIFT))) * 8)
#endif

#endif	/* __ASM_PGTABLE_HWDEF_H */
346