/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MCF_PGTABLE_H
#define _MCF_PGTABLE_H

#include <asm/mcfmmu.h>
#include <asm/page.h>

/*
 * MMUDR bits, in proper place. We write these directly into the MMUDR
 * after masking from the pte.
 */
#define CF_PAGE_LOCKED		MMUDR_LK	/* 0x00000002 */
#define CF_PAGE_EXEC		MMUDR_X		/* 0x00000004 */
#define CF_PAGE_WRITABLE	MMUDR_W		/* 0x00000008 */
#define CF_PAGE_READABLE	MMUDR_R		/* 0x00000010 */
#define CF_PAGE_SYSTEM		MMUDR_SP	/* 0x00000020 */
#define CF_PAGE_COPYBACK	MMUDR_CM_CCB	/* 0x00000040 */
#define CF_PAGE_NOCACHE		MMUDR_CM_NCP	/* 0x00000080 */

#define CF_CACHEMASK		(~MMUDR_CM_CCB)
#define CF_PAGE_MMUDR_MASK	0x000000fe
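
/*
 * Illustrative sketch only (the real update is done by the TLB miss
 * handler in arch/m68k/mm/mcfmmu.c): the MMUDR image is built from the
 * pte's physical page address plus the protection bits above, with
 * everything else masked off, roughly:
 *
 *	mmudr = (pte_val(pte) & PAGE_MASK) |
 *		(pte_val(pte) & CF_PAGE_MMUDR_MASK);
 */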

#define _PAGE_NOCACHE030	CF_PAGE_NOCACHE

/*
 * MMUTR bits, need shifting down.
 */
#define CF_PAGE_MMUTR_MASK	0x00000c00
#define CF_PAGE_MMUTR_SHIFT	10

#define CF_PAGE_VALID		(MMUTR_V << CF_PAGE_MMUTR_SHIFT)
#define CF_PAGE_SHARED		(MMUTR_SG << CF_PAGE_MMUTR_SHIFT)
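
/*
 * Illustrative sketch only: unlike the MMUDR bits above, these sit at
 * bits 10-11 of the pte and must be shifted back down before being
 * written to the MMUTR, roughly:
 *
 *	mmutr_bits = (pte_val(pte) & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
 */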

/*
 * "Fake" software bits, not implemented by the ColdFire MMU; they are
 * masked out before ever reaching the hardware.
 */
#define CF_PAGE_DIRTY		0x00000001
#define CF_PAGE_ACCESSED	0x00001000
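
/*
 * Note that neither CF_PAGE_DIRTY (bit 0) nor CF_PAGE_ACCESSED (bit 12)
 * is covered by CF_PAGE_MMUDR_MASK or CF_PAGE_MMUTR_MASK above, so the
 * hardware never sees them; they are purely software bookkeeping.
 */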

#define _PAGE_CACHE040		0x020	/* 68040 cache mode, cachable, copyback */
#define _PAGE_NOCACHE_S		0x040	/* 68040 no-cache mode, serialized */
#define _PAGE_NOCACHE		0x060	/* 68040 cache mode, non-serialized */
#define _PAGE_CACHE040W		0x000	/* 68040 cache mode, cachable, write-through */
#define _DESCTYPE_MASK		0x003
#define _CACHEMASK040		(~0x060)
#define _PAGE_GLOBAL040		0x400	/* 68040 global bit, used for kva descs */

/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	CF_PAGE_NOCACHE
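
/*
 * Borrowing the NOCACHE bit is safe here because swap PTEs never have
 * CF_PAGE_VALID set, so they are never loaded into the MMU and the
 * hardware meaning of bit 7 does not apply to them.
 */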

/*
 * Externally used page protection values.
 */
#define _PAGE_PRESENT	(CF_PAGE_VALID)
#define _PAGE_ACCESSED	(CF_PAGE_ACCESSED)
#define _PAGE_DIRTY	(CF_PAGE_DIRTY)
#define _PAGE_READWRITE (CF_PAGE_READABLE \
			| CF_PAGE_WRITABLE \
			| CF_PAGE_SYSTEM \
			| CF_PAGE_SHARED)

/*
 * Compound page protection values.
 */
#define PAGE_NONE	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED \
				 | CF_PAGE_SHARED)

#define PAGE_INIT	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_READABLE \
				 | CF_PAGE_WRITABLE \
				 | CF_PAGE_EXEC \
				 | CF_PAGE_SYSTEM)

#define PAGE_KERNEL	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED \
				 | CF_PAGE_READABLE \
				 | CF_PAGE_WRITABLE \
				 | CF_PAGE_EXEC \
				 | CF_PAGE_SYSTEM \
				 | CF_PAGE_SHARED)

#define PAGE_COPY	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED \
				 | CF_PAGE_READABLE \
				 | CF_PAGE_DIRTY)

#define PTE_MASK	PAGE_MASK
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)

#ifndef __ASSEMBLY__

#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
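
/*
 * Illustrative use of pte_modify() (not taken from actual callers):
 * change a pte's protection while preserving its page frame number and
 * the software accessed/dirty bits, e.g. for an mprotect()-style update:
 *
 *	pte = pte_modify(pte, PAGE_SHARED);
 */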

#define pmd_set(pmdp, ptep) do {} while (0)

static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	pgd_val(*pgdp) = virt_to_phys(pmdp);
}

#define __pte_page(pte)		((void *) (pte_val(pte) & PAGE_MASK))
#define pmd_page_vaddr(pmd)	((unsigned long) (pmd_val(pmd)))

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_VALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
	pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

#define pte_page(pte)	virt_to_page(__pte_page(pte))

static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
#define pmd_none(pmd) pmd_none2(&(pmd))
static inline int pmd_bad2(pmd_t *pmd) { return 0; }
#define pmd_bad(pmd) pmd_bad2(&(pmd))
#define pmd_present(pmd) (!pmd_none2(&(pmd)))
static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %08lx.\n",	\
	__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n",	\
	__FILE__, __LINE__, pgd_val(e))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not...
 * [we have the full set here even if they don't change from m68k]
 */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_READABLE;
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_WRITABLE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_EXEC;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_WRITABLE;
	return pte;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_READABLE;
	return pte;
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_WRITABLE;
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_READABLE;
	return pte;
}

static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_ACCESSED;
	return pte;
}

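/*
 * Cache mode helpers. The ColdFire cache mode field is built from
 * MMUDR_CM_CCB (0x40, copyback) and MMUDR_CM_NCP (0x80, no-cache):
 * pte_mknocache() switches a pte to the non-cacheable encoding and
 * pte_mkcache() clears the no-cache bit again.
 */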
static inline pte_t pte_mknocache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & CF_CACHEMASK) | CF_PAGE_NOCACHE;
	return pte;
}

static inline pte_t pte_mkcache(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_NOCACHE;
	return pte;
}
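
/*
 * These helpers are typically chained; marking a page both referenced
 * and dirty after a write fault could look like (illustrative only):
 *
 *	pte = pte_mkdirty(pte_mkyoung(pte));
 */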

#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <------------------ offset -------------> 0 0 0 E <-- type --->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 */
#define __swp_type(x)		((x).val & 0x7f)
#define __swp_offset(x)		((x).val >> 11)
#define __swp_entry(typ, off)	((swp_entry_t) { ((typ) & 0x7f) | \
					(off << 11) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	(__pte((x).val))
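
/*
 * Worked example (values chosen for illustration): __swp_entry(3, 0x1234)
 * yields a pte value of (0x1234 << 11) | 3 = 0x0091a003; __swp_type()
 * recovers 3 from the low seven bits, __swp_offset() recovers 0x1234 by
 * shifting back down, and bit 7 stays free for the E (exclusive) marker.
 */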

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

#define PFN_PTE_SHIFT		PAGE_SHIFT
#define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)

#endif	/* !__ASSEMBLY__ */
#endif	/* _MCF_PGTABLE_H */