Lines Matching "no-unaligned-direct-access" (query tokens: no, unaligned, direct, access)

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
8 #define pr_fmt(fmt) "radix-mmu: " fmt
157 return -ENOMEM; in __map_kernel_page()
164 return -ENOMEM; in __map_kernel_page()
171 return -ENOMEM; in __map_kernel_page()
183 return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0); in radix__map_kernel_page()
200 pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n", in radix__change_memory_range()
267 pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf, in print_mapping()
278 // Relocatable kernel running at non-zero real address in next_boundary()
281 unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys; in next_boundary()
319 gap = next_boundary(addr, end) - addr; in create_physical_mapping()
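 
The next_boundary() fragments above (file lines 278-319) show that a relocatable kernel, which leaves a copy of its interrupt vectors at real address 0, derives a rodata boundary at __pa_symbol(__end_interrupts) - stext_phys, and that create_physical_mapping() sizes each mapping step as gap = next_boundary(addr, end) - addr so a single large page never straddles that boundary. A minimal userspace model of the clamp, assuming one hypothetical boundary value (the real function considers more boundaries than this):

#include <stdio.h>

static unsigned long next_boundary_model(unsigned long addr,
					 unsigned long end,
					 unsigned long end_intr)
{
	/* Clamp so one large mapping never crosses the rodata boundary. */
	if (addr < end_intr && end > end_intr)
		return end_intr;
	return end;
}

int main(void)
{
	unsigned long end_intr = 0x8000;	/* hypothetical boundary */

	/* Range crosses the boundary: clamp to it, map the rest later. */
	printf("%#lx\n", next_boundary_model(0x0, 0x200000, end_intr));
	/* Range already past the boundary: no clamp. */
	printf("%#lx\n", next_boundary_model(0x10000, 0x200000, end_intr));
	return 0;
}
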
414 -1, PAGE_KERNEL, PAGE_SIZE)) in map_kfence_pool()
448 * page tables will be allocated within the range. No in radix_init_pgtable()
458 -1, PAGE_KERNEL, ~0UL)); in radix_init_pgtable()
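 
Around file line 448, radix_init_pgtable() builds the linear mapping by walking every physical memory range; nid is -1 because NUMA nodes are not known this early, and ~0UL leaves the mapping page size uncapped. A standalone sketch of that loop, assuming a 64-bit build; the two-entry memory map, the constant, and create_physical_mapping_model() are stand-ins, not kernel API:

#include <stdio.h>

/* Illustrative stand-in for RADIX_VMALLOC_START. */
#define VMALLOC_START_MODEL 0xc008000000000000UL

struct mem_range { unsigned long start, end; };

static int create_physical_mapping_model(unsigned long start, unsigned long end)
{
	printf("map %#lx-%#lx (nid=-1, page size uncapped)\n", start, end);
	return 0;
}

int main(void)
{
	/* Invented memory map: 2 GiB of RAM plus one out-of-range entry. */
	struct mem_range mem[] = {
		{ 0x0, 0x80000000UL },
		{ 0xc008000000000000UL, 0xc008000000010000UL },
	};

	for (unsigned int i = 0; i < sizeof(mem) / sizeof(mem[0]); i++) {
		if (mem[i].end >= VMALLOC_START_MODEL) {
			fprintf(stderr, "Outside the supported range\n");
			continue;
		}
		create_physical_mapping_model(mem[i].start, mem[i].end);
	}
	return 0;
}
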
478 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0); in radix_init_pgtable()
483 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE); in radix_init_pgtable()
486 * The init_mm context is given the first available (non-zero) PID, in radix_init_pgtable()
487 * which is the "guard PID" and contains no page table. PIDR should in radix_init_pgtable()
509 dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR; in radix_init_partition_table()
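 
File lines 478-509 fill in the process-table entry for init_mm and the partition-table dw1 word by OR-ing a physical base address with size/index fields (plus PATB_GR in dw1 to select radix); the real table entries are stored big-endian via cpu_to_be64(). A userspace model of that encoding; every field value below is hypothetical, only the OR-together layout comes from the fragments:

#include <stdio.h>
#include <stdint.h>

#define PATB_GR_MODEL (1ULL << 63)	/* "partition uses radix" flag, modeled */

int main(void)
{
	uint64_t rts_field  = (0x2ULL << 61) | (0x5ULL << 5); /* hypothetical RTS split */
	uint64_t pgd_pa     = 0x1f0000;	/* stands in for __pa(init_mm.pgd) */
	uint64_t pgd_index  = 0xd;	/* stands in for RADIX_PGD_INDEX_SIZE */
	uint64_t prtb_pa    = 0x200000;	/* stands in for __pa(process_tb) */
	uint64_t prtb_shift = 16;	/* stands in for PRTB_SIZE_SHIFT */

	/* prtb0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE */
	uint64_t prtb0 = rts_field | pgd_pa | pgd_index;
	/* dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR */
	uint64_t dw1 = prtb_pa | (prtb_shift - 12) | PATB_GR_MODEL;

	printf("prtb0 = %#llx\n", (unsigned long long)prtb0);
	printf("dw1   = %#llx\n", (unsigned long long)dw1);
	return 0;
}
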
517 int idx = -1; in get_idx_from_shift()
551 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size); in radix_dt_scan_page_sizes()
555 pr_info("Page sizes from device-tree:\n"); in radix_dt_scan_page_sizes()
556 for (; size >= 4; size -= 4, ++prop) { in radix_dt_scan_page_sizes()
570 def->shift = shift; in radix_dt_scan_page_sizes()
571 def->ap = ap; in radix_dt_scan_page_sizes()
572 def->h_rpt_pgsize = psize_to_rpti_pgsize(idx); in radix_dt_scan_page_sizes()
576 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B; in radix_dt_scan_page_sizes()
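 
The scan at file lines 551-576 reads the ibm,processor-radix-AP-encodings property as an array of 32-bit big-endian cells, four bytes at a time, taking the AP encoding from the top 3 bits and the page-size shift from the rest. A userspace model of that parse, with ntohl() standing in for be32_to_cpu(); the two sample cells (AP 5 for a 64K page, AP 1 for 2M) are assumptions:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/htonl() as be32 stand-ins */

int main(void)
{
	/* Hypothetical property contents: shift 16 / AP 5, shift 21 / AP 1. */
	uint32_t prop[] = { htonl(5U << 29 | 16), htonl(1U << 29 | 21) };
	int size = sizeof(prop);
	uint32_t *p = prop;

	for (; size >= 4; size -= 4, ++p) {
		uint32_t cell = ntohl(*p);
		unsigned int ap = cell >> 29;		/* top 3 bits */
		unsigned int shift = cell & ~(0xeU << 28); /* remaining bits */

		printf("Page size shift = %u AP=0x%x\n", shift, ap);
	}
	return 0;
}
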
585 * Try to find the available page sizes in the device-tree in radix__early_init_devtree()
590 * No page size details found in device tree. in radix__early_init_devtree()
679 (PATB_SIZE_SHIFT - 12)); in radix__early_init_mmu_secondary()
781 alt_start = altmap->base_pfn; in free_vmemmap_pages()
782 alt_end = altmap->base_pfn + altmap->reserve + altmap->free; in free_vmemmap_pages()
792 while (nr_pages--) in free_vmemmap_pages()
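 
free_vmemmap_pages() (file lines 781-792) decides whether the backing pages belong to a device altmap by checking the pfn against [base_pfn, base_pfn + reserve + free); altmap pages are handed back to the altmap rather than the page allocator. A small model of that bounds check with made-up numbers:

#include <stdio.h>
#include <stdbool.h>

struct vmem_altmap_model {
	unsigned long base_pfn, reserve, free;
};

static bool pfn_in_altmap(unsigned long pfn, const struct vmem_altmap_model *a)
{
	unsigned long alt_start = a->base_pfn;
	unsigned long alt_end = a->base_pfn + a->reserve + a->free;

	return pfn >= alt_start && pfn < alt_end;
}

int main(void)
{
	struct vmem_altmap_model a = { .base_pfn = 0x1000, .reserve = 2, .free = 62 };

	printf("%d\n", pfn_in_altmap(0x1010, &a));	/* 1: altmap-backed */
	printf("%d\n", pfn_in_altmap(0x2000, &a));	/* 0: regular memory */
	return 0;
}
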
799 unsigned long end, bool direct, in remove_pte_table() argument
815 if (!direct) in remove_pte_table()
821 else if (!direct && vmemmap_page_is_unused(addr, next)) { in remove_pte_table()
827 if (direct) in remove_pte_table()
828 update_page_count(mmu_virtual_psize, -pages); in remove_pte_table()
832 unsigned long end, bool direct, in remove_pmd_table() argument
849 if (!direct) in remove_pmd_table()
855 else if (!direct && vmemmap_pmd_is_unused(addr, next)) { in remove_pmd_table()
864 remove_pte_table(pte_base, addr, next, direct, altmap); in remove_pmd_table()
867 if (direct) in remove_pmd_table()
868 update_page_count(MMU_PAGE_2M, -pages); in remove_pmd_table()
872 unsigned long end, bool direct, in remove_pud_table() argument
889 WARN_ONCE(1, "%s: unaligned range\n", __func__); in remove_pud_table()
898 remove_pmd_table(pmd_base, addr, next, direct, altmap); in remove_pud_table()
901 if (direct) in remove_pud_table()
902 update_page_count(MMU_PAGE_1G, -pages); in remove_pud_table()
906 remove_pagetable(unsigned long start, unsigned long end, bool direct, in remove_pagetable() argument
927 WARN_ONCE(1, "%s: unaligned range\n", __func__); in remove_pagetable()
936 remove_pud_table(pud_base, addr, next, direct, altmap); in remove_pagetable()
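 
remove_pte_table()/remove_pmd_table()/remove_pud_table()/remove_pagetable() (file lines 799-936) share one walker between the linear ("direct") map and the vmemmap: only direct teardown adjusts the mapped-page counters (update_page_count(..., -pages)), vmemmap teardown frees backing pages instead, and a huge entry that the range covers only partially is left alone behind a WARN_ONCE(). A toy single-level model of those two rules; the 2M entry size and layout are invented:

#include <stdio.h>
#include <stdbool.h>

#define HUGE_SIZE 0x200000UL	/* toy 2M entry */

static long direct_pages;

static void remove_huge_entry(unsigned long addr, unsigned long end, bool direct)
{
	/* Refuse to split a huge entry: mirror of the WARN_ONCE() guard. */
	if ((addr & (HUGE_SIZE - 1)) || (end & (HUGE_SIZE - 1))) {
		fprintf(stderr, "unaligned range %#lx-%#lx\n", addr, end);
		return;
	}
	if (direct)		/* linear map: fix the accounting */
		direct_pages -= (end - addr) / HUGE_SIZE;
	else			/* vmemmap: backing pages would be freed here */
		printf("free vmemmap backing for %#lx-%#lx\n", addr, end);
}

int main(void)
{
	remove_huge_entry(0x0, 0x400000, true);
	remove_huge_entry(0x0, 0x300000, false);	/* warns, does nothing */
	printf("direct 2M page delta = %ld\n", direct_pages);
	return 0;
}
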
950 return -1; in radix__create_section_mapping()
982 return -1; in radix__vmemmap_create_mapping()
1142 return -ENOMEM; in radix__vmemmap_populate()
1145 return -ENOMEM; in radix__vmemmap_populate()
1198 return -ENOMEM; in radix__vmemmap_populate()
1202 return -ENOMEM; in radix__vmemmap_populate()
1254 map_addr = addr - pfn_offset * sizeof(struct page) + PAGE_SIZE; in vmemmap_compound_tail_page()
1281 pte = radix__vmemmap_populate_address(map_addr - PAGE_SIZE, node, NULL, NULL); in vmemmap_compound_tail_page()
1321 return -ENOMEM; in vmemmap_populate_compound_pages()
1324 return -ENOMEM; in vmemmap_populate_compound_pages()
1334 return -ENOMEM; in vmemmap_populate_compound_pages()
1346 unsigned long pfn_offset = addr_pfn - ALIGN_DOWN(addr_pfn, nr_pages); in vmemmap_populate_compound_pages()
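 
For compound vmemmap (file lines 1254-1346), a tail pfn's offset into its compound block is its distance from the previous nr_pages-aligned pfn, and addr - pfn_offset * sizeof(struct page) + PAGE_SIZE points at the second vmemmap page of the block head, which the tail mappings can reuse per the fragments above. A worked example of that arithmetic; the 64-byte struct page, 64K PAGE_SIZE, vmemmap base, and pfn values are all assumptions, and a 64-bit build is assumed:

#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long page_size   = 0x10000;	/* assumed 64K PAGE_SIZE */
	unsigned long struct_page = 64;		/* assumed sizeof(struct page) */
	unsigned long nr_pages    = 512;	/* pages per compound block */
	unsigned long addr_pfn    = 0x21234;	/* some tail page's pfn */
	unsigned long addr = 0xc00000000000UL + addr_pfn * struct_page;

	unsigned long pfn_offset = addr_pfn - ALIGN_DOWN(addr_pfn, nr_pages);
	unsigned long map_addr = addr - pfn_offset * struct_page + page_size;

	printf("pfn_offset = %lu\n", pfn_offset);	/* 52 into the block */
	printf("map_addr   = %#lx\n", map_addr);
	return 0;
}
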
1357 return -ENOMEM; in vmemmap_populate_compound_pages()
1367 return -ENOMEM; in vmemmap_populate_compound_pages()
1382 return -ENOMEM; in vmemmap_populate_compound_pages()
1392 return -ENOMEM; in vmemmap_populate_compound_pages()
1469 radix__flush_tlb_collapsed_pmd(vma->vm_mm, address); in radix__pmdp_collapse_flush()
1509 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; in radix__pgtable_trans_huge_withdraw()
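 
radix__pgtable_trans_huge_withdraw() (file line 1509) pops a previously deposited page table off the per-PMD chain by storing lh->next back into pmd_huge_pte(). A plain linked-list model of that pop; the kernel chains real page tables through a list_head and keeps its own ordering, which this toy stack does not reproduce:

#include <stdio.h>
#include <stddef.h>

struct pgtable_model {
	struct pgtable_model *next;
	int id;
};

static struct pgtable_model *deposited;	/* stands in for pmd_huge_pte() */

static void deposit(struct pgtable_model *pt)
{
	pt->next = deposited;
	deposited = pt;
}

static struct pgtable_model *withdraw(void)
{
	struct pgtable_model *pt = deposited;

	if (pt)
		deposited = pt->next;	/* the "lh->next" head pop */
	return pt;
}

int main(void)
{
	struct pgtable_model a = { .id = 1 }, b = { .id = 2 };

	deposit(&a);
	deposit(&b);
	printf("withdrew %d\n", withdraw()->id);
	printf("withdrew %d\n", withdraw()->id);
	return 0;
}
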
1546 struct mm_struct *mm = vma->vm_mm; in radix__ptep_set_access_flags()
1552 * On POWER9, the NMMU is not able to relax PTE access permissions in radix__ptep_set_access_flags()
1563 atomic_read(&mm->context.copros) > 0) { in radix__ptep_set_access_flags()
1573 * Book3S does not require a TLB flush when relaxing access in radix__ptep_set_access_flags()
1576 * taking an access fault, as defined by the architecture. See in radix__ptep_set_access_flags()
1577 * "Setting a Reference or Change Bit or Upgrading Access in radix__ptep_set_access_flags()
1589 struct mm_struct *mm = vma->vm_mm; in radix__ptep_modify_prot_commit()
1593 * installing a PTE with more relaxed access permissions, see in radix__ptep_modify_prot_commit()
1598 (atomic_read(&mm->context.copros) > 0)) in radix__ptep_modify_prot_commit()
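 
Both radix__ptep_set_access_flags() and radix__ptep_modify_prot_commit() (file lines 1546-1598) apply the same rule: the core Book3S MMU rechecks the PTE when it takes an access fault, so relaxing permissions needs no TLB flush, but while mm->context.copros is non-zero a POWER9 nest MMU may hold the old, stricter translation and must be flushed explicitly. A standalone sketch of that guarded-flush pattern; the names and the flush stub are illustrative:

#include <stdio.h>
#include <stdatomic.h>

static atomic_int copros;	/* stands in for mm->context.copros */

static void flush_tlb_page_model(unsigned long addr)
{
	printf("flush %#lx for the nest MMU\n", addr);
}

static void relax_pte_permissions(unsigned long addr, int on_power9)
{
	/* ...the more permissive PTE bits would be installed here... */
	if (on_power9 && atomic_load(&copros) > 0)
		flush_tlb_page_model(addr);
	/* otherwise no flush: hardware refetches on the access fault */
}

int main(void)
{
	relax_pte_permissions(0x10000, 1);	/* no copros: silent */
	atomic_fetch_add(&copros, 1);
	relax_pte_permissions(0x10000, 1);	/* copros attached: flush */
	return 0;
}
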