Lines Matching refs:pgd
Cross-reference hits for the identifier pgd; the leading number on each hit is its line number in the searched source file (from the function names, apparently the Xen PV MMU code, arch/x86/xen/mmu_pv.c in recent kernels), and the trailing "in foo()" note names the enclosing function.

92 pgdval_t xen_pgd_val(pgd_t pgd);
97 pgd_t xen_make_pgd(pgdval_t pgd);
381 __visible pgdval_t xen_pgd_val(pgd_t pgd) in xen_pgd_val() argument
383 return pte_mfn_to_pfn(pgd.pgd); in xen_pgd_val()
395 __visible pgd_t xen_make_pgd(pgdval_t pgd) in xen_make_pgd() argument
397 pgd = pte_pfn_to_mfn(pgd); in xen_make_pgd()
398 return native_make_pgd(pgd); in xen_make_pgd()
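
xen_pgd_val() (line 381) and xen_make_pgd() (line 395) are the two directions of the same value-level conversion: the frame number embedded in a pgd value is translated between the guest's pseudo-physical PFN and Xen's machine MFN while the flag bits are left untouched. A minimal user-space sketch of that round trip, assuming the usual x86-64 layout (frame number in bits 12..51) and with translate() as a hypothetical stand-in for the real p2m/m2p lookups:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PTE_PFN_MASK 0x000ffffffffff000ULL  /* frame number, bits 12..51 */

    /* Hypothetical stand-in for the pfn_to_mfn()/mfn_to_pfn() table lookups. */
    static uint64_t translate(uint64_t frame) { return frame + 0x1000; }

    /* Swap the frame field of a pgd value, keeping the flag bits intact. */
    static uint64_t swap_frame(uint64_t val)
    {
        uint64_t flags = val & ~PTE_PFN_MASK;
        uint64_t frame = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
        return (translate(frame) << PAGE_SHIFT) | flags;
    }

    int main(void)
    {
        uint64_t pgd = (0x1234ULL << PAGE_SHIFT) | 0x067;  /* frame | flags */
        printf("%#llx -> %#llx\n", (unsigned long long)pgd,
               (unsigned long long)swap_frame(pgd));
        return 0;
    }
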
461 static pgd_t *xen_get_user_pgd(pgd_t *pgd) in xen_get_user_pgd() argument
463 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); in xen_get_user_pgd()
464 unsigned offset = pgd - pgd_page; in xen_get_user_pgd()
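
The first two statements of xen_get_user_pgd() are pure pointer arithmetic: mask the entry pointer down to its containing page to find the start of the pgd page, then recover the entry's index as a pgd_t-sized pointer difference. A self-contained sketch of that arithmetic, with PAGE_MASK and pgd_t as local stand-ins for the kernel definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    typedef struct { uint64_t pgd; } pgd_t;

    int main(void)
    {
        pgd_t *table = aligned_alloc(PAGE_SIZE, PAGE_SIZE); /* one pgd page */
        if (!table)
            return 1;

        pgd_t *entry = &table[271];          /* pointer to an arbitrary entry */
        pgd_t *pgd_page = (pgd_t *)((uintptr_t)entry & PAGE_MASK);
        unsigned offset = entry - pgd_page;  /* index of the entry: 271 */

        printf("page %p, entry %p -> offset %u\n",
               (void *)pgd_page, (void *)entry, offset);
        free(table);
        return 0;
    }
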
519 pgd_val.pgd = p4d_val_ma(val); in xen_set_p4d()
616 static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, in __xen_pgd_walk() argument
642 if (pgd_none(pgd[i])) in __xen_pgd_walk()
645 p4d = p4d_offset(&pgd[i], 0); in __xen_pgd_walk()
651 (*func)(mm, virt_to_page(pgd), PT_PGD); in __xen_pgd_walk()
659 __xen_pgd_walk(mm, mm->pgd, func, limit); in xen_pgd_walk()
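
__xen_pgd_walk() (line 616) visits every populated pagetable page reachable from a pgd below a limit, skipping empty slots (line 642), descending through p4d/pud/pmd, and applying the callback to each table page bottom-up with the pgd page itself last (line 651); xen_pgd_walk() merely seeds it with mm->pgd. A generic sketch of that walk pattern, with hypothetical stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    #define ENTRIES 512

    struct table {
        struct table *child[ENTRIES];  /* NULL == empty slot (pgd_none) */
        int level;                     /* 4 == top level (pgd) */
    };

    typedef void (*walk_fn)(struct table *t);

    static void walk(struct table *t, walk_fn func, size_t limit)
    {
        for (size_t i = 0; i < limit && i < ENTRIES; i++)
            if (t->child[i])           /* pgd_none()-style skip */
                walk(t->child[i], func, ENTRIES);
        func(t);                       /* the table page itself, last */
    }

    static void show(struct table *t) { printf("visit level %d\n", t->level); }

    int main(void)
    {
        struct table pud = { .level = 3 };
        struct table pgd = { .level = 4 };
        pgd.child[0] = &pud;
        walk(&pgd, show, 256);         /* 256 ~ a USER_LIMIT-style cutoff */
        return 0;
    }
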
744 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_pin() argument
746 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_pin()
748 trace_xen_mmu_pgd_pin(mm, pgd); in __xen_pgd_pin()
752 __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT); in __xen_pgd_pin()
754 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_pin()
767 __xen_pgd_pin(mm, mm->pgd); in xen_pgd_pin()
856 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_unpin() argument
858 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_unpin()
860 trace_xen_mmu_pgd_unpin(mm, pgd); in __xen_pgd_unpin()
864 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_unpin()
872 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); in __xen_pgd_unpin()
879 __xen_pgd_unpin(mm, mm->pgd); in xen_pgd_unpin()
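
Lines 744..879 show a deliberate asymmetry: __xen_pgd_pin() walks the tree first (xen_pin_page makes every pagetable page read-only) and only then pins the root with the MMUEXT_PIN_L4_TABLE hypercall, whereas __xen_pgd_unpin() drops the root pin first and walks afterwards. A toy sketch of that ordering, with do_pin() and walk_tree() as stand-ins for xen_do_pin() and __xen_pgd_walk():

    #include <stdio.h>

    static void do_pin(const char *op, unsigned long pfn)
    {
        printf("%s pfn %#lx\n", op, pfn);  /* stand-in for the hypercall */
    }

    static void walk_tree(const char *action)
    {
        printf("walk: %s every pagetable page\n", action);
    }

    static void pgd_pin(unsigned long root_pfn)
    {
        walk_tree("make read-only and pin");       /* xen_pin_page */
        do_pin("MMUEXT_PIN_L4_TABLE", root_pfn);   /* root pinned last */
    }

    static void pgd_unpin(unsigned long root_pfn)
    {
        do_pin("MMUEXT_UNPIN_TABLE", root_pfn);    /* root unpinned first */
        walk_tree("unpin and make writable");      /* xen_unpin_page */
    }

    int main(void)
    {
        pgd_pin(0x1234);
        pgd_unpin(0x1234);
        return 0;
    }
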
921 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd)) in drop_mm_ref_this_cpu()
940 if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) in xen_drop_mm_ref()
956 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) in xen_drop_mm_ref()
993 if (xen_page_pinned(mm->pgd)) in xen_exit_mmap()
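
Lines 921..956 all apply the same test: a CPU still references this mm if its cached per-CPU xen_current_cr3 equals the physical address of mm->pgd, and xen_exit_mmap() (line 993) only bothers unpinning if the pgd was actually pinned. A toy sketch of the xen_drop_mm_ref()-style scan, with user-space stand-ins throughout:

    #include <stdio.h>

    #define NR_CPUS 4

    /* Stand-in for the per-CPU xen_current_cr3 variables. */
    static unsigned long xen_current_cr3[NR_CPUS] = {
        0x1000, 0x2000, 0x1000, 0x3000
    };

    int main(void)
    {
        unsigned long pgd_pa = 0x1000;  /* stand-in for __pa(mm->pgd) */

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if (xen_current_cr3[cpu] == pgd_pa)
                printf("cpu %d still references this pgd\n", cpu);
        return 0;
    }
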
1128 pgd_t *pgd; in xen_cleanmfnmap() local
1134 pgd = pgd_offset_k(vaddr); in xen_cleanmfnmap()
1135 p4d = p4d_offset(pgd, 0); in xen_cleanmfnmap()
1403 pgd_t *pgd = mm->pgd; in xen_pgd_alloc() local
1404 struct page *page = virt_to_page(pgd); in xen_pgd_alloc()
1408 BUG_ON(PagePinned(virt_to_page(pgd))); in xen_pgd_alloc()
1422 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); in xen_pgd_alloc()
1427 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) in xen_pgd_free() argument
1429 pgd_t *user_pgd = xen_get_user_pgd(pgd); in xen_pgd_free()
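
xen_pgd_alloc() and xen_pgd_free() (lines 1403..1429) manage the companion user-mode pgd that xen_get_user_pgd() later retrieves: on 64-bit Xen PV each kernel pgd page is paired with a second pgd page for userspace. A hedged sketch of that pairing, with a struct field standing in for wherever the kernel actually stashes the companion pointer:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    struct pgd_pair {
        void *kernel_pgd;
        void *user_pgd;   /* stand-in for the page's private pointer */
    };

    static int pgd_alloc(struct pgd_pair *p)
    {
        p->kernel_pgd = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        p->user_pgd   = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!p->kernel_pgd || !p->user_pgd) {
            free(p->kernel_pgd);
            free(p->user_pgd);
            return -1;
        }
        return 0;
    }

    static void pgd_free(struct pgd_pair *p)
    {
        free(p->user_pgd);    /* companion freed along with the kernel pgd */
        free(p->kernel_pgd);
    }

    int main(void)
    {
        struct pgd_pair p;
        if (pgd_alloc(&p) == 0) {
            printf("kernel %p, user %p\n", p.kernel_pgd, p.user_pgd);
            pgd_free(&p);
        }
        return 0;
    }
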
1539 bool pinned = xen_page_pinned(mm->pgd); in xen_alloc_ptpage()
1703 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) in xen_setup_kernel_pagetable() argument
1741 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); in xen_setup_kernel_pagetable()
1744 addr[0] = (unsigned long)pgd; in xen_setup_kernel_pagetable()
1792 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in xen_setup_kernel_pagetable()
1849 pgd_t pgd; in xen_early_virt_to_phys() local
1855 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) * in xen_early_virt_to_phys()
1856 sizeof(pgd))); in xen_early_virt_to_phys()
1857 if (!pgd_present(pgd)) in xen_early_virt_to_phys()
1860 pa = pgd_val(pgd) & PTE_PFN_MASK; in xen_early_virt_to_phys()
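
xen_early_virt_to_phys() runs before the normal mapping helpers are usable, so it walks the pagetable by hand: read the 8-byte entry for the address out of physical memory, check the present bit, and mask with PTE_PFN_MASK to obtain the next level's physical address (lines 1855..1860 show the pgd step). A self-contained sketch of one such step, with read_phys() as a hypothetical stand-in for xen_read_phys_ulong():

    #include <stdint.h>
    #include <stdio.h>

    #define PGDIR_SHIFT   39                     /* x86-64, 4-level paging */
    #define PTE_PFN_MASK  0x000ffffffffff000ULL
    #define _PAGE_PRESENT 0x1ULL

    static uint64_t fake_table[512];      /* toy "RAM" holding the pgd at pa 0 */

    static uint64_t read_phys(uint64_t pa)       /* hypothetical helper */
    {
        return fake_table[pa / sizeof(uint64_t) % 512];
    }

    int main(void)
    {
        uint64_t vaddr = 0xffff888000000000ULL;
        unsigned idx = (vaddr >> PGDIR_SHIFT) & 511;   /* pgd_index() */

        fake_table[idx] = 0x42000ULL | _PAGE_PRESENT;  /* seed one entry */

        uint64_t pgd = read_phys(idx * sizeof(uint64_t));
        if (!(pgd & _PAGE_PRESENT)) {
            puts("not mapped");
            return 1;
        }
        printf("next level at pa %#llx\n",
               (unsigned long long)(pgd & PTE_PFN_MASK));
        return 0;
    }
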
1898 pgd_t *pgd; in xen_relocate_p2m() local
1927 pgd = __va(read_cr3_pa()); in xen_relocate_p2m()
1967 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); in xen_relocate_p2m()
1984 set_pgd(pgd + 1, __pgd(0)); in xen_relocate_p2m()
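
The set_pgd() calls in xen_relocate_p2m() show how a top-level entry is composed: the physical address of the next-level table OR'ed with the _PAGE_TABLE flag set installs a mapping, and __pgd(0) clears a slot. A minimal sketch of that composition (the flag value assumes the standard x86 definition, _PAGE_TABLE == 0x067):

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_TABLE 0x067ULL  /* PRESENT|RW|USER|ACCESSED|DIRTY */

    typedef struct { uint64_t pgd; } pgd_t;

    static pgd_t __pgd(uint64_t v) { return (pgd_t){ v }; }
    static void set_pgd(pgd_t *slot, pgd_t v) { *slot = v; }

    int main(void)
    {
        pgd_t table[512] = {{ 0 }};
        uint64_t pud_phys = 0x7f000ULL;  /* page-aligned next-level table */

        set_pgd(&table[2], __pgd(_PAGE_TABLE | pud_phys));  /* install */
        set_pgd(&table[1], __pgd(0));                       /* clear */

        printf("entry 2 = %#llx\n", (unsigned long long)table[2].pgd);
        return 0;
    }
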