
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * arch/x86/include/asm/pgtable-3level.h (excerpts)
 *
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))
/*
 * Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(ptep->pte_high, pte.pte_high);
	smp_wmb();
	WRITE_ONCE(ptep->pte_low, pte.pte_low);
}
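
/*
 * Illustration, not kernel code: a standalone sketch of why the high
 * half is written first.  A PAE walker reads the entry as one 64-bit
 * quantity, so only the intermediate states matter: with high-first
 * ordering every state is either non-present (ignored) or fully valid.
 * The names below (check(), PRESENT, GOOD_HIGH, the sample bits) are
 * made up for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define PRESENT   0x1u
#define GOOD_HIGH 0x000abcd0u	/* the only valid high half in this demo */

/* Evaluate one intermediate state a 64-bit walker could observe. */
static void check(const char *what, uint32_t low, uint32_t high)
{
	if (!(low & PRESENT))
		printf("%s: safe, not present\n", what);
	else if (high == GOOD_HIGH)
		printf("%s: safe, fully valid\n", what);
	else
		printf("%s: DANGER, present with stale high half\n", what);
}

int main(void)
{
	uint32_t low = PRESENT | 0x040u, high = GOOD_HIGH;

	/* high first, as in native_set_pte(): every state is safe */
	check("high-first, step 1", 0, high);
	check("high-first, step 2", low, high);

	/* low first: step 1 is a present entry with garbage high bits */
	check("low-first, step 1", low, 0);
	check("low-first, step 2", low, high);
	return 0;
}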

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
#endif
	WRITE_ONCE(*pudp, pud);
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	WRITE_ONCE(ptep->pte_low, 0);
	smp_wmb();
	WRITE_ONCE(ptep->pte_high, 0);
}

static inline void native_pmd_clear(pmd_t *pmdp)
{
	WRITE_ONCE(pmdp->pmd_low, 0);
	smp_wmb();
	WRITE_ONCE(pmdp->pmd_high, 0);
}
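
/*
 * Worked through for the clear path (the mirror image of the sketch
 * after native_set_pte() above): zeroing the low half, and with it the
 * P-bit, first means a 64-bit walker can only observe
 *
 *   (low = old, high = old) - still fully valid
 *   (low = 0,   high = old) - not present, ignored
 *   (low = 0,   high = 0)   - not present, ignored
 *
 * Zeroing the high half first would expose (low = old, high = 0): a
 * present entry whose address bits point at the wrong page frame.
 */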

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called either have
	 * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or
	 * pud_clear_bad()), so we don't need TLB flush here.
	 */
}

static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	/*
	 * If pmd has present bit cleared we can get away without expensive
	 * cmpxchg64: we can update pmdp half-by-half without racing with
	 * anybody.
	 */
	if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
		/* xchg acts as a barrier before setting of the high bits */
		old.pmd_low = xchg(&pmdp->pmd_low, pmd.pmd_low);
		old.pmd_high = READ_ONCE(pmdp->pmd_high);
		WRITE_ONCE(pmdp->pmd_high, pmd.pmd_high);

		return old;
	}

	/* A present pmd must be replaced as one atomic 64-bit quantity. */
	return pxx_xchg64(pmd, pmdp, pmd.pmd);
}
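
/*
 * Illustration, not kernel code: the slow path's contract, sketched
 * with C11 atomics over a plain 64-bit slot.  establish() (a made-up
 * name) installs a new value and returns exactly the value it
 * displaced, even when concurrent updaters (for the kernel: hardware
 * setting Accessed/Dirty bits) race with it - the same guarantee the
 * 64-bit cmpxchg loop above provides for a live pmd.
 */
#include <stdatomic.h>
#include <stdint.h>

static uint64_t establish(_Atomic uint64_t *slot, uint64_t new)
{
	uint64_t old = atomic_load_explicit(slot, memory_order_relaxed);

	/* On failure, 'old' is refreshed with the current value. */
	while (!atomic_compare_exchange_weak(slot, &old, new))
		;
	return old;
}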

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   < type -> <---------------------- offset ----------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------------------------------------> 0 E 0 0 0 0 0 0 0
 *
 *   E is the exclusive marker that is not stored in swap entries.
 */
#define SWP_TYPE_BITS		5
#define _SWP_TYPE_MASK	((1U << SWP_TYPE_BITS) - 1)

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

/*
 * Normally, __swp_entry() converts from arch-independent swp_entry_t to
 * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
 * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
 * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
 * __swp_entry_to_pte() and __swp_entry_to_pmd() and the "fake" conversions
 * __swp_entry() and __swp_type()+__swp_offset() fold in the rest.
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })

/*
 * Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64bit to the 32bit
 * intermediate representation, using the following macros based on 64bit
 * __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((pteval_t)(~(x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
					     __pteval_swp_offset(pte)))
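
/*
 * Worked example, not kernel code: round-tripping a (type, offset) pair
 * through the encode/decode scheme above, with the constants folded in
 * as plain numbers (SWP_TYPE_BITS = 5 and, assuming _PAGE_BIT_PROTNONE
 * is bit 8 as on x86, SWP_OFFSET_SHIFT = 14).  The ~ inversion means
 * the offset is stored inverted in the pte (L1TF hardening), yet both
 * fields survive the round trip and the present bit stays clear.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS		5
#define SWP_OFFSET_FIRST_BIT	9	/* _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

typedef uint64_t pteval_t;

static pteval_t swp_pteval_entry(unsigned type, pteval_t offset)
{
	return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS)
		| ((pteval_t)type << (64 - SWP_TYPE_BITS));
}

static unsigned pteval_swp_type(pteval_t pte)
{
	return (unsigned)(pte >> (64 - SWP_TYPE_BITS));
}

static pteval_t pteval_swp_offset(pteval_t pte)
{
	return ~pte << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;
}

int main(void)
{
	pteval_t pte = swp_pteval_entry(3, 0x12345);

	printf("pte     = %#018" PRIx64 "\n", pte);
	printf("type    = %u\n", pteval_swp_type(pte));		/* 3 */
	printf("offset  = %#" PRIx64 "\n", pteval_swp_offset(pte));	/* 0x12345 */
	printf("present = %d\n", (int)(pte & 1));		/* 0 */
	return 0;
}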

#include <asm/pgtable-invert.h>