Lines matching "nr", "-", and "outputs" (code-search listing, apparently from arch/sparc/mm/init_64.c; each entry gives the source line number, the matching line, and its enclosing function)
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
70 * All sun4v chips support 256MB pages. Only SPARC-T4 and later
102 if (x->phys_addr > y->phys_addr) in cmp_p64()
104 if (x->phys_addr < y->phys_addr) in cmp_p64()
105 return -1; in cmp_p64()
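
The cmp_p64() fragment above (lines 102-105) is a sort comparator over 64-bit physical addresses; the non-matching lines between the two tests (presumably a `return 1;` and a final `return 0;`) are elided by the search. A minimal userspace sketch of the same pattern, with an illustrative struct and qsort() standing in for the kernel's sort():

    #include <stdint.h>
    #include <stdlib.h>

    struct phys_range {
            uint64_t phys_addr;
            uint64_t reg_size;
    };

    /* Return the sign of a comparison instead of subtracting: the
     * difference of two u64 values does not fit in the int return
     * type that sort comparators must use. */
    static int cmp_p64(const void *a, const void *b)
    {
            const struct phys_range *x = a, *y = b;

            if (x->phys_addr > y->phys_addr)
                    return 1;
            if (x->phys_addr < y->phys_addr)
                    return -1;
            return 0;
    }

    /* usage: qsort(regs, ents, sizeof(regs[0]), cmp_p64); */
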
126 if (ret == -1) { in read_obp_memory()
145 size -= new_base - base; in read_obp_memory()
156 (ents - i - 1) * sizeof(regs[0])); in read_obp_memory()
157 i--; in read_obp_memory()
158 ents--; in read_obp_memory()
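
Lines 126-158 show read_obp_memory() trimming and deleting entries in place: memmove() closes the gap left by a removed element, and both the index and the count step back so the entry that just slid into slot i is re-examined. A self-contained sketch of that deletion idiom (the entry type and the zero-size predicate are illustrative):

    #include <string.h>

    struct reg { unsigned long base, size; };

    /* Delete every zero-size entry, compacting the array in place;
     * returns the new entry count. */
    static int compact(struct reg *regs, int ents)
    {
            int i;

            for (i = 0; i < ents; i++) {
                    if (regs[i].size != 0)
                            continue;
                    memmove(&regs[i], &regs[i + 1],
                            (ents - i - 1) * sizeof(regs[0]));
                    i--;    /* re-check the entry that moved into slot i */
                    ents--;
            }
            return ents;
    }
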
200 unsigned int i, nr = folio_nr_pages(folio); in flush_dcache_folio_impl() local
208 for (i = 0; i < nr; i++) in flush_dcache_folio_impl()
215 for (i = 0; i < nr; i++) in flush_dcache_folio_impl()
224 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
227 (((folio)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
245 : /* no outputs */ in set_dcache_dirty()
246 : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags) in set_dcache_dirty()
267 : /* no outputs */ in clear_dcache_dirty_cpu()
268 : "r" (cpu), "r" (mask), "r" (&folio->flags), in clear_dcache_dirty_cpu()
295 pg_flags = folio->flags; in flush_dcache()
316 /* mm->context.lock must be held */
321 struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; in __update_mmu_tsb_insert()
328 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); in __update_mmu_tsb_insert()
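
The index computation on line 328 relies on tsb_nentries being a power of two, so `& (nentries - 1)` is an exact, division-free `% nentries`. The trick in isolation:

    /* Valid only when nentries is a power of two; the mask then keeps
     * exactly the low log2(nentries) bits of the tag, i.e. tag % nentries. */
    static unsigned long tsb_slot(unsigned long tag, unsigned long nentries)
    {
            return tag & (nentries - 1UL);
    }
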
336 hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT); in hugetlbpage_init()
337 hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT); in hugetlbpage_init()
338 hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT); in hugetlbpage_init()
339 hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT); in hugetlbpage_init()
352 addr = p->addr; in pud_huge_patch()
353 *(unsigned int *)addr = p->insn; in pud_huge_patch()
398 unsigned long address, pte_t *ptep, unsigned int nr) in update_mmu_cache_range() argument
413 mm = vma->vm_mm; in update_mmu_cache_range()
415 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */ in update_mmu_cache_range()
419 spin_lock_irqsave(&mm->context.lock, flags); in update_mmu_cache_range()
423 if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) { in update_mmu_cache_range()
452 for (i = 0; i < nr; i++) { in update_mmu_cache_range()
460 spin_unlock_irqrestore(&mm->context.lock, flags); in update_mmu_cache_range()
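
update_mmu_cache_range() (lines 398-460) follows a fixed shape: take the context lock once with interrupts saved, refuse to insert invalid PTEs (the deadlock warning on line 415), then insert one TSB entry per base page. A userspace skeleton of that shape, with pthread locking standing in for spin_lock_irqsave() and hypothetical helpers tsb_insert_one() and pte_is_valid() (neither is a kernel API):

    #include <pthread.h>

    #define PAGE_SIZE 8192UL        /* sparc64 base page size */

    struct tsb_ctx {
            pthread_mutex_t lock;
            /* ... TSB state ... */
    };

    /* Hypothetical stand-ins for __update_mmu_tsb_insert() and the
     * PTE validity test. */
    static void tsb_insert_one(struct tsb_ctx *ctx, unsigned long vaddr,
                               unsigned long pte)
    {
            (void)ctx; (void)vaddr; (void)pte;
    }
    static int pte_is_valid(unsigned long pte) { return pte & 1; } /* illustrative */

    static void update_range(struct tsb_ctx *ctx, unsigned long address,
                             const unsigned long *ptes, unsigned int nr)
    {
            unsigned int i;

            pthread_mutex_lock(&ctx->lock);
            for (i = 0; i < nr; i++)
                    if (pte_is_valid(ptes[i]))      /* never insert invalid PTEs */
                            tsb_insert_one(ctx, address + i * PAGE_SIZE, ptes[i]);
            pthread_mutex_unlock(&ctx->lock);
    }
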
472 /* Do not bother with the expensive D-cache flush if it in flush_dcache_folio()
483 bool dirty = test_bit(PG_dcache_dirty, &folio->flags); in flush_dcache_folio()
508 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ in flush_icache_range()
577 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
590 if (x->virt > y->virt) in cmp_ptrans()
592 if (x->virt < y->virt) in cmp_ptrans()
593 return -1; in cmp_ptrans()
602 node = prom_finddevice("/virtual-memory"); in read_obp_translations()
604 if (unlikely(n == 0 || n == -1)) { in read_obp_translations()
615 sizeof(prom_trans))) == -1) { in read_obp_translations()
627 /* Now kick out all the non-OBP entries. */ in read_obp_translations()
639 for (i = 0; i < (last - first); i++) { in read_obp_translations()
647 dest->virt = dest->size = dest->data = 0x0UL; in read_obp_translations()
650 prom_trans_ents = last - first; in read_obp_translations()
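
read_obp_translations() (lines 590-650) sorts the PROM entries, keeps only the OBP window [first, last), slides it to the front of the array, and zeroes the leftover slots. A sketch of that compact-and-zero step (the struct fields follow the virt/size/data names on line 647):

    struct trans {
            unsigned long virt;
            unsigned long size;
            unsigned long data;
    };

    /* Keep entries [first, last), move them to the front, zero the rest;
     * returns the kept count (the new prom_trans_ents). */
    static int keep_window(struct trans *t, int total, int first, int last)
    {
            int i, kept = last - first;

            for (i = 0; i < kept; i++)
                    t[i] = t[first + i];
            for (; i < total; i++)
                    t[i].virt = t[i].size = t[i].data = 0UL;
            return kept;
    }
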
700 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); in remap_kernel()
701 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); in remap_kernel()
705 sparc64_highest_unlocked_tlb_ent = tlb_ent - i; in remap_kernel()
751 : /* no outputs */ in __flush_dcache_range()
804 old_ctx = mm->context.sparc64_ctx_val; in mmu_context_wrap()
808 mm->context.sparc64_ctx_val = new_ctx; in mmu_context_wrap()
814 * The caller also ensures that CTX_VALID(mm->context) is false.
819 * by version mis-match tests in mmu_context.h).
831 if (unlikely(CTX_VALID(mm->context))) in get_new_mmu_context()
833 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); in get_new_mmu_context()
843 if (mm->context.sparc64_ctx_val) in get_new_mmu_context()
848 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; in get_new_mmu_context()
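
get_new_mmu_context() (lines 804-848) allocates a fresh context number but must not disturb the page-size field packed into the same word, so it saves `val & CTX_PGSZ_MASK` first and ORs it back in at the end. The bit manipulation in isolation (mask placement illustrative):

    #define CTX_PGSZ_MASK 0x7UL     /* illustrative position of the pgsz bits */

    /* Install new_ctx while preserving the page-size bits of old_val. */
    static unsigned long replace_ctx(unsigned long old_val, unsigned long new_ctx)
    {
            unsigned long orig_pgsz_bits = old_val & CTX_PGSZ_MASK;

            return new_ctx | orig_pgsz_bits;
    }
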
883 * 32-bit physical address for the ramdisk image in find_ramdisk()
886 * provide a full 64-bit physical address at in find_ramdisk()
898 ramdisk_image -= KERNBASE; in find_ramdisk()
940 u64 offset; /* RA-to-PA */
953 if (addr >= m->base && in addr_to_mblock()
954 addr < (m->base + m->size)) { in addr_to_mblock()
971 if ((start & p->mask) == p->match) { in memblock_nid_range_sun4u()
1012 pa_start = start + mblock->offset; in memblock_nid_range()
1019 if ((pa_start & m->mask) == m->match) { in memblock_nid_range()
1020 m_match = m->match; in memblock_nid_range()
1021 m_mask = m->mask; in memblock_nid_range()
1036 if ((pa_start & m->mask) == m->match) { in memblock_nid_range()
1037 m_match = m->match; in memblock_nid_range()
1038 m_mask = m->mask; in memblock_nid_range()
1060 m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset; in memblock_nid_range()
1061 m_end += pa_start & ~((1ul << fls64(m_mask)) - 1); in memblock_nid_range()
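
The end-of-group computation on lines 1060-1061 uses __ffs() (index of the lowest set bit) for the span covered by one latency group and fls64() (one-based index of the highest set bit) to carry the untranslated high bits of the address through. A userspace mirror using GCC builtins (assumes m_mask is non-zero and its top set bit is below bit 63, as the kernel expression also requires):

    #include <stdint.h>

    /* __ffs(): bit index of the lowest set bit (x must be non-zero). */
    static inline unsigned int lowest_bit(uint64_t x)
    {
            return __builtin_ctzll(x);
    }

    /* fls64(): one-based position of the highest set bit, 0 for x == 0. */
    static inline unsigned int highest_bit(uint64_t x)
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    /* End of the latency group containing pa_start, per lines 1060-1061. */
    static uint64_t group_end(uint64_t m_match, uint64_t m_mask,
                              uint64_t pa_start, uint64_t offset)
    {
            uint64_t m_end = m_match + (1ULL << lowest_bit(m_mask)) - offset;

            m_end += pa_start & ~((1ULL << highest_bit(m_mask)) - 1);
            return m_end;
    }
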
1082 NODE_DATA(nid)->node_id = nid; in allocate_node_data()
1088 p->node_start_pfn = start_pfn; in allocate_node_data()
1089 p->node_spanned_pages = end_pfn - start_pfn; in allocate_node_data()
1098 numadbg("Initializing tables for non-numa.\n"); in init_node_masks_nonnuma()
1127 "cfg-handle", NULL); in scan_pio_for_cfg_handle()
1131 return -ENODEV; in scan_pio_for_cfg_handle()
1145 if (strcmp(name, "pio-latency-group")) in scan_arcs_for_cfg_handle()
1159 return -ENODEV; in scan_arcs_for_cfg_handle()
1177 return -1; in of_node_to_nid()
1181 return -1; in of_node_to_nid()
1183 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff; in of_node_to_nid()
1222 memblock_set_node(start, this_end - start, in add_node_ranges()
1237 mdesc_for_each_node_by_name(md, node, "memory-latency-group") in grab_mlgroups()
1240 return -ENOENT; in grab_mlgroups()
1245 return -ENOMEM; in grab_mlgroups()
1251 mdesc_for_each_node_by_name(md, node, "memory-latency-group") { in grab_mlgroups()
1255 m->node = node; in grab_mlgroups()
1258 m->latency = *val; in grab_mlgroups()
1259 val = mdesc_get_property(md, node, "address-match", NULL); in grab_mlgroups()
1260 m->match = *val; in grab_mlgroups()
1261 val = mdesc_get_property(md, node, "address-mask", NULL); in grab_mlgroups()
1262 m->mask = *val; in grab_mlgroups()
1266 count - 1, m->node, m->latency, m->match, m->mask); in grab_mlgroups()
1281 return -ENOENT; in grab_mblocks()
1286 return -ENOMEM; in grab_mblocks()
1297 m->base = *val; in grab_mblocks()
1299 m->size = *val; in grab_mblocks()
1301 "address-congruence-offset", NULL); in grab_mblocks()
1303 /* The address-congruence-offset property is optional. in grab_mblocks()
1307 m->offset = *val; in grab_mblocks()
1309 m->offset = 0UL; in grab_mblocks()
1312 count - 1, m->base, m->size, m->offset); in grab_mblocks()
1344 if (m->node == node) in find_mlgroup()
1353 pr_warn("Returning default NUMA distance value for %d->%d\n", in __node_distance()
1368 if ((grp->mask == n->mask) && (grp->match == n->match)) in find_best_numa_node_for_mlgroup()
1389 numa_latency[index][tnode] = m->latency; in find_numa_latencies_for_group()
1405 if (m->latency < best_latency) { in numa_attach_mlgroup()
1407 best_latency = m->latency; in numa_attach_mlgroup()
1411 return -ENOENT; in numa_attach_mlgroup()
1417 return -EINVAL; in numa_attach_mlgroup()
1422 n->mask = candidate->mask; in numa_attach_mlgroup()
1423 n->match = candidate->match; in numa_attach_mlgroup()
1426 index, n->mask, n->match, candidate->latency); in numa_attach_mlgroup()
1459 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); in numa_parse_mdesc()
1462 return -ENOENT; in numa_parse_mdesc()
1516 * a 1-to-1 mapping from CPU ID to NUMA node ID. in numa_parse_jbus()
1522 node_masks[index].mask = ~((1UL << 36UL) - 1UL); in numa_parse_jbus()
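
The JBUS case (lines 1516-1522) hard-codes a 1-to-1 CPU-to-node mapping keyed on the upper address bits: node i owns physical addresses whose bits above bit 35 equal i. The match/mask construction in isolation:

    #define JBUS_NODE_SHIFT 36UL

    /* Physical addresses with (paddr & jbus_mask()) == jbus_match(i)
     * belong to node i. */
    static unsigned long jbus_match(int index)
    {
            return (unsigned long)index << JBUS_NODE_SHIFT;
    }

    static unsigned long jbus_mask(void)
    {
            return ~((1UL << JBUS_NODE_SHIFT) - 1UL);
    }
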
1549 return -1; in numa_parse_sun4u()
1555 int err = -1; in bootmem_init_numa()
1579 return -1; in bootmem_init_numa()
1594 (top_of_ram - total_ram) >> 20); in bootmem_init_nonnuma()
1682 const unsigned long mask16gb = (1UL << 34) - 1UL; in kernel_map_hugepud()
1687 (vend - vstart <= mask16gb)) { in kernel_map_hugepud()
1711 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE) in kernel_can_map_hugepud()
1721 const unsigned long mask256mb = (1UL << 28) - 1UL; in kernel_map_hugepmd()
1722 const unsigned long mask2gb = (1UL << 31) - 1UL; in kernel_map_hugepmd()
1727 (vend - vstart <= mask256mb)) { in kernel_map_hugepmd()
1735 (vend - vstart <= mask2gb)) { in kernel_map_hugepmd()
1759 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE) in kernel_can_map_hugepmd()
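
kernel_can_map_hugepud() and kernel_can_map_hugepmd() (lines 1711 and 1759) gate huge mappings on the same two conditions: the start address sits on a huge-page boundary and the range still covers at least one whole huge page. Generalized over the page size:

    #include <stdbool.h>

    /* huge_size must be a power of two (PUD_SIZE, PMD_SIZE, ...). */
    static bool can_map_huge(unsigned long vstart, unsigned long vend,
                             unsigned long huge_size)
    {
            unsigned long huge_mask = ~(huge_size - 1UL);   /* PMD_MASK et al. */

            return !(vstart & ~huge_mask) && (vend - vstart) >= huge_size;
    }
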
1861 return -ENOMEM; in kernel_map_range()
1871 ent->tag = (1UL << TSB_TAG_INVALID_BIT); in flush_all_kernel_tsbs()
1877 ent->tag = (1UL << TSB_TAG_INVALID_BIT); in flush_all_kernel_tsbs()
1926 * but that can deadlock, so flush only the current cpu. in __kernel_map_pages()
1957 /* Cheetah/Panther support a full 64-bit virtual in setup_page_offset()
1969 /* T1 and T2 support 48-bit virtual addresses. */ in setup_page_offset()
1976 /* T3 supports 48-bit virtual addresses. */ in setup_page_offset()
1986 /* T4 and later support 52-bit virtual addresses. */ in setup_page_offset()
1993 /* M7 and later support 52-bit virtual addresses. */ in setup_page_offset()
2000 /* M8 and later support 54-bit virtual addresses. in setup_page_offset()
2002 * as 4-level page table cannot support more than in setup_page_offset()
2024 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n", in setup_page_offset()
2026 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n", in setup_page_offset()
2037 unsigned long addr = pquad->addr; in tsb_phys_patch()
2040 *(unsigned int *) addr = pquad->sun4v_insn; in tsb_phys_patch()
2042 *(unsigned int *) addr = pquad->sun4u_insn; in tsb_phys_patch()
2045 : /* no outputs */ in tsb_phys_patch()
2053 unsigned long addr = p->addr; in tsb_phys_patch()
2055 *(unsigned int *) addr = p->insn; in tsb_phys_patch()
2058 : /* no outputs */ in tsb_phys_patch()
2118 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); in ktsb_phys_patch()
2126 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); in ktsb_phys_patch()
2138 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); in sun4v_ktsb_init()
2172 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); in sun4v_ktsb_init()
2192 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); in sun4v_ktsb_register()
2290 * folio->flags usage will work. in paging_init()
2292 * When a page gets marked as dcache-dirty, we store the in paging_init()
2293 * cpu number starting at bit 32 in the folio->flags. Also, in paging_init()
2295 * in 13-bit signed-immediate instruction fields. in paging_init()
2315 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; in paging_init()
2389 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); in paging_init()
2392 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB); in paging_init()
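
Line 2392 sizes the locked 4MB TLB entries for the kernel image with DIV_ROUND_UP, so a partial final 4MB chunk still gets a mapping. The arithmetic in isolation (the macro body matches the kernel's definition; 4MB is 1 << 22):

    #define ILOG2_4MB 22UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* One locked 4MB mapping per (possibly partial) 4MB of kernel image. */
    static unsigned long image_mappings(unsigned long kernbase,
                                        unsigned long real_end)
    {
            return DIV_ROUND_UP(real_end - kernbase, 1UL << ILOG2_4MB);
    }
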
2502 if (NODE_DATA(i)->node_spanned_pages) in register_page_bootmem_info()
2558 ((unsigned long) __va(kern_base)) - in free_initmem()
2612 return -ENOMEM; in vmemmap_populate()
2616 return -ENOMEM; in vmemmap_populate()
2620 return -ENOMEM; in vmemmap_populate()
2628 return -ENOMEM; in vmemmap_populate()
2847 : /* No outputs */ in __flush_tlb_all()
2854 : /* no outputs */ in __flush_tlb_all()
2865 : /* No outputs */ in __flush_tlb_all()
2872 : /* no outputs */ in __flush_tlb_all()
2949 call_rcu(&page->rcu_head, pte_free_now); in pte_free_defer()
2964 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */ in update_mmu_cache_pmd()
2971 mm = vma->vm_mm; in update_mmu_cache_pmd()
2973 spin_lock_irqsave(&mm->context.lock, flags); in update_mmu_cache_pmd()
2975 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) in update_mmu_cache_pmd()
2979 spin_unlock_irqrestore(&mm->context.lock, flags); in update_mmu_cache_pmd()
2988 if (mm == current->mm) in context_reload()
2994 struct mm_struct *mm = current->mm; in hugetlb_setup()
3000 entry = search_exception_tables(regs->tpc); in hugetlb_setup()
3002 regs->tpc = entry->fixup; in hugetlb_setup()
3003 regs->tnpc = regs->tpc + 4; in hugetlb_setup()
3010 tp = &mm->context.tsb_block[MM_TSB_HUGE]; in hugetlb_setup()
3011 if (likely(tp->tsb == NULL)) in hugetlb_setup()
3017 /* On UltraSPARC-III+ and later, configure the second half of in hugetlb_setup()
3018 * the Data-TLB for huge pages. in hugetlb_setup()
3025 ctx = mm->context.sparc64_ctx_val; in hugetlb_setup()
3030 if (ctx != mm->context.sparc64_ctx_val) { in hugetlb_setup()
3042 mm->context.sparc64_ctx_val = ctx; in hugetlb_setup()
3070 return (resource_size_t) (addr - KERNBASE + kern_base); in compute_kern_paddr()
3076 code_resource.end = compute_kern_paddr(_etext - 1); in kernel_lds_init()
3078 data_resource.end = compute_kern_paddr(_edata - 1); in kernel_lds_init()
3080 bss_resource.end = compute_kern_paddr(_end - 1); in kernel_lds_init()
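
compute_kern_paddr() (line 3070) and the ktsb_pa computations on lines 2118-2192 are the same translation: subtract the link-time base KERNBASE, add the physical load address kern_base. A sketch of that constant-offset translation (parameter names are illustrative):

    /* The kernel image is mapped linearly from kern_base, so image
     * virtual-to-physical translation is a fixed offset. */
    static unsigned long kern_paddr(unsigned long vaddr,
                                    unsigned long kernbase_va,
                                    unsigned long kern_base_pa)
    {
            return vaddr - kernbase_va + kern_base_pa;
    }
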
3098 res->name = "System RAM"; in report_memory()
3099 res->start = pavail[i].phys_addr; in report_memory()
3100 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1; in report_memory()
3101 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM; in report_memory()
3154 if (vma->vm_flags & VM_SPARC_ADI) { in copy_user_highpage()