Lines Matching +full:long +full:- +full:ram +full:- +full:code

28 #include <asm/text-patching.h>
44 * WC and WT fall back to UC-. pat_init() updates these values to support
46 * for the details. Note, __early_ioremap() used during early boot-time
63 unsigned long cachemode2protval(enum page_cache_mode pcm) in cachemode2protval()
83 * Check that the write-protect PAT entry is set for write-protect.
100 unsigned long masked; in pgprot2cachemode()
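To make the WC/WT fallback concrete, here is a minimal userspace sketch of the translation-table idea. The table, bit values and mode list are illustrative stand-ins for the kernel's __cachemode2pte_tbl (which pat_init() later updates), not the runtime encodings; the only behaviour shown is the documented default, where WC and WT fall back to UC-.

#include <stdio.h>

/* Illustrative PTE cache-control bits (PWT is bit 3, PCD is bit 4 on x86). */
#define _PAGE_PWT (1UL << 3)
#define _PAGE_PCD (1UL << 4)

enum page_cache_mode { WB, WC, UC_MINUS, UC, WT, NR_MODES };

/* Default table before pat_init() reprograms PAT: WC and WT fall back to UC-. */
static const unsigned long cachemode2pte_tbl[NR_MODES] = {
    [WB]       = 0,
    [WC]       = _PAGE_PCD,               /* UC- fallback */
    [UC_MINUS] = _PAGE_PCD,
    [UC]       = _PAGE_PCD | _PAGE_PWT,
    [WT]       = _PAGE_PCD,               /* UC- fallback */
};

static unsigned long cachemode2protval(enum page_cache_mode pcm)
{
    return cachemode2pte_tbl[pcm];
}

int main(void)
{
    printf("WC -> %#lx (identical to UC- until pat_init() runs)\n",
           cachemode2protval(WC));
    return 0;
}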
108 static unsigned long __initdata pgt_buf_start;
109 static unsigned long __initdata pgt_buf_end;
110 static unsigned long __initdata pgt_buf_top;
112 static unsigned long min_pfn_mapped;
127 unsigned long pfn; in alloc_low_pages()
133 order = get_order((unsigned long)num << PAGE_SHIFT); in alloc_low_pages()
138 unsigned long ret = 0; in alloc_low_pages()
170 * the 0-ISA_END_ADDRESS range and secondly for the initial PMD_SIZE mapping.
192 unsigned long tables = INIT_PGT_BUF_SIZE; in early_alloc_pgt_buf()
207 unsigned long start;
208 unsigned long end;
219 static inline void cr4_set_bits_and_update_boot(unsigned long mask) in cr4_set_bits_and_update_boot()
252 /* Except with PTI, where the kernel is mostly non-Global: */ in probe_page_size_mask()
295 * This can't be cr4_set_bits_and_update_boot() -- the in setup_pcid()
296 * trampoline code can't handle CR4.PCIDE and it wouldn't in setup_pcid()
302 * Instead, we brute-force it and set CR4.PCIDE manually in in setup_pcid()
325 unsigned long start_pfn, unsigned long end_pfn, in save_mr()
326 unsigned long page_size_mask) in save_mr()
342 * big page size instead of a small one if the nearby ranges are RAM too.
352 unsigned long start = round_down(mr[i].start, PMD_SIZE); in adjust_range_page_size_mask()
353 unsigned long end = round_up(mr[i].end, PMD_SIZE); in adjust_range_page_size_mask()
360 if (memblock_is_region_memory(start, end - start)) in adjust_range_page_size_mask()
365 unsigned long start = round_down(mr[i].start, PUD_SIZE); in adjust_range_page_size_mask()
366 unsigned long end = round_up(mr[i].end, PUD_SIZE); in adjust_range_page_size_mask()
368 if (memblock_is_region_memory(start, end - start)) in adjust_range_page_size_mask()
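Those round_down()/round_up() calls widen a sub-PMD range out to full PMD_SIZE (and PUD_SIZE) boundaries, and the larger page size is kept only if the widened range is still entirely RAM. A minimal userspace sketch of the PMD-level check, assuming 2 MiB PMDs and a hypothetical is_region_memory() stub standing in for memblock_is_region_memory():

#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE (2UL << 20)            /* assume 2 MiB large pages */
#define PG_LEVEL_2M 1

#define round_down(x, a) ((x) & ~((a) - 1))
#define round_up(x, a)   (((x) + (a) - 1) & ~((a) - 1))

/* Hypothetical stand-in for memblock_is_region_memory(). */
static bool is_region_memory(unsigned long base, unsigned long size)
{
    return base >= 0x100000 && base + size <= 0x80000000UL; /* toy: 1 MiB..2 GiB is RAM */
}

struct map_range { unsigned long start, end, page_size_mask; };

static void adjust_range(struct map_range *mr)
{
    unsigned long start = round_down(mr->start, PMD_SIZE);
    unsigned long end   = round_up(mr->end, PMD_SIZE);

    /* Only allow 2M pages if the widened range is still all memory. */
    if (is_region_memory(start, end - start))
        mr->page_size_mask |= 1 << PG_LEVEL_2M;
}

int main(void)
{
    struct map_range mr = { .start = 0x1000000, .end = 0x1001000, .page_size_mask = 0 };

    adjust_range(&mr);
    printf("mask after adjust: %#lx\n", mr.page_size_mask); /* 2M bit set */
    return 0;
}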
381 if (mr->page_size_mask & (1<<PG_LEVEL_1G)) in page_size_string()
384 * 32-bit without PAE has a 4M large page size. in page_size_string()
390 mr->page_size_mask & (1<<PG_LEVEL_2M)) in page_size_string()
393 if (mr->page_size_mask & (1<<PG_LEVEL_2M)) in page_size_string()
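Those mask tests simply pick a human-readable label for the largest page size enabled in a range; a compact sketch of the selection, leaving out the 32-bit non-PAE 4M special case mentioned above:

#include <stdio.h>

#define PG_LEVEL_2M 1
#define PG_LEVEL_1G 2

static const char *page_size_string(unsigned long page_size_mask)
{
    if (page_size_mask & (1 << PG_LEVEL_1G))
        return "1G";
    if (page_size_mask & (1 << PG_LEVEL_2M))
        return "2M";
    return "4k";
}

int main(void)
{
    printf("%s\n", page_size_string(1 << PG_LEVEL_2M)); /* prints 2M */
    return 0;
}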
400 unsigned long start, in split_mem_range()
401 unsigned long end) in split_mem_range()
403 unsigned long start_pfn, end_pfn, limit_pfn; in split_mem_range()
404 unsigned long pfn; in split_mem_range()
478 for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { in split_mem_range()
479 unsigned long old_start; in split_mem_range()
486 (nr_range - 1 - i) * sizeof(struct map_range)); in split_mem_range()
487 mr[i--].start = old_start; in split_mem_range()
488 nr_range--; in split_mem_range()
492 pr_debug(" [mem %#010lx-%#010lx] page %s\n", in split_mem_range()
493 mr[i].start, mr[i].end - 1, in split_mem_range()
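The merge loop shown above collapses adjacent ranges that turned out to be contiguous with the same page_size_mask, sliding the remaining entries down one slot. A self-contained sketch of that step, using a plain map_range array:

#include <stdio.h>
#include <string.h>

struct map_range { unsigned long start, end, page_size_mask; };

/* Merge neighbours that are contiguous and share a page_size_mask. */
static int merge_ranges(struct map_range *mr, int nr_range)
{
    int i;

    for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
        unsigned long old_start;

        if (mr[i].end != mr[i + 1].start ||
            mr[i].page_size_mask != mr[i + 1].page_size_mask)
            continue;

        /* slide the rest of the array down over entry i */
        old_start = mr[i].start;
        memmove(&mr[i], &mr[i + 1],
                (nr_range - 1 - i) * sizeof(struct map_range));
        mr[i--].start = old_start;
        nr_range--;
    }
    return nr_range;
}

int main(void)
{
    struct map_range mr[3] = {
        { 0x000000, 0x200000, 1 },
        { 0x200000, 0x400000, 1 },
        { 0x400000, 0x500000, 0 },
    };
    int n = merge_ranges(mr, 3);

    printf("%d ranges left, first is [%#lx-%#lx)\n", n, mr[0].start, mr[0].end);
    return 0;
}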
502 static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn) in add_pfn_range_mapped()
510 if (start_pfn < (1UL<<(32-PAGE_SHIFT))) in add_pfn_range_mapped()
512 min(end_pfn, 1UL<<(32-PAGE_SHIFT))); in add_pfn_range_mapped()
515 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn) in pfn_range_is_mapped()
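The (1UL << (32 - PAGE_SHIFT)) term is just the PFN of the 4 GiB boundary, so the bookkeeping above records the highest mapped PFN overall and, separately, the highest mapped PFN below 4 GiB. A small userspace sketch of the clamp, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assume 4 KiB pages */

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

static unsigned long max_pfn_mapped;
static unsigned long max_low_pfn_mapped;

static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
    max_pfn_mapped = max(max_pfn_mapped, end_pfn);

    /* Only ranges starting below 4 GiB update the low-memory watermark. */
    if (start_pfn < (1UL << (32 - PAGE_SHIFT)))
        max_low_pfn_mapped = max(max_low_pfn_mapped,
                                 min(end_pfn, 1UL << (32 - PAGE_SHIFT)));
}

int main(void)
{
    add_pfn_range_mapped(0x80000, 0x180000);        /* 2 GiB .. 6 GiB */
    printf("max_pfn_mapped=%#lx max_low_pfn_mapped=%#lx\n",
           max_pfn_mapped, max_low_pfn_mapped);     /* low watermark clamps at 0x100000 */
    return 0;
}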
532 unsigned long __ref init_memory_mapping(unsigned long start, in init_memory_mapping()
533 unsigned long end, pgprot_t prot) in init_memory_mapping()
536 unsigned long ret = 0; in init_memory_mapping()
539 pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n", in init_memory_mapping()
540 start, end - 1); in init_memory_mapping()
565 * That range could have holes in the middle or at the ends, and only the RAM parts
568 static unsigned long __init init_range_memory_mapping( in init_range_memory_mapping()
569 unsigned long r_start, in init_range_memory_mapping()
570 unsigned long r_end) in init_range_memory_mapping()
572 unsigned long start_pfn, end_pfn; in init_range_memory_mapping()
573 unsigned long mapped_ram_size = 0; in init_range_memory_mapping()
589 mapped_ram_size += end - start; in init_range_memory_mapping()
596 static unsigned long __init get_new_step_size(unsigned long step_size) in get_new_step_size()
606 * Don't need to worry about overflow in the top-down case, on 32bit, in get_new_step_size()
609 * In the bottom-up case, round_up(x, 0) returns 0 though too, which in get_new_step_size()
610 * needs to be taken into consideration by the code below. in get_new_step_size()
612 return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); in get_new_step_size()
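With the usual x86-64 constants (PAGE_SHIFT = 12, PMD_SHIFT = 21) the shift above works out to << 8, so each round grows the step by a factor of 256: 2 MiB, then 512 MiB, and so on. A quick worked sketch, assuming those constants and a 64-bit unsigned long:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assume 4 KiB pages */
#define PMD_SHIFT  21   /* assume 2 MiB PMD entries */
#define PMD_SIZE   (1UL << PMD_SHIFT)

/* Grow the mapping step: << 8 with these constants, i.e. x256 per round. */
static unsigned long get_new_step_size(unsigned long step_size)
{
    return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

int main(void)
{
    unsigned long step = PMD_SIZE;

    for (int i = 0; i < 3; i++) {
        printf("step %d: %lu MiB\n", i, step >> 20);    /* 2, 512, 131072 */
        step = get_new_step_size(step);
    }
    return 0;
}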
616 * memory_map_top_down - Map [map_start, map_end) top down
621 * [map_start, map_end) in top-down. That said, the page tables
623 * memory in top-down.
625 static void __init memory_map_top_down(unsigned long map_start, in memory_map_top_down()
626 unsigned long map_end) in memory_map_top_down()
628 unsigned long real_end, last_start; in memory_map_top_down()
629 unsigned long step_size; in memory_map_top_down()
630 unsigned long addr; in memory_map_top_down()
631 unsigned long mapped_ram_size = 0; in memory_map_top_down()
635 * e.g. QEMU with less than 1G RAM and EFI enabled, or Xen, will in memory_map_top_down()
637 * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure in memory_map_top_down()
654 * The memblock_find_in_range() gets us a block of RAM from the in memory_map_top_down()
655 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages in memory_map_top_down()
659 unsigned long start; in memory_map_top_down()
662 start = round_down(last_start - 1, step_size); in memory_map_top_down()
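Putting the fragments together: the top-down walk starts just below map_end, maps one step_size-aligned block at a time going downward, and enlarges the step once enough RAM has been mapped to feed the page-table allocator. A userspace sketch of just the address arithmetic (no real mapping), assuming a 2 MiB initial step, the << 8 growth above, and a 64-bit unsigned long:

#include <stdio.h>

#define PMD_SIZE (2UL << 20)            /* assume 2 MiB initial step */
#define round_down(x, a) ((x) & ~((a) - 1))

static void memory_map_top_down(unsigned long map_start, unsigned long map_end)
{
    unsigned long step_size = PMD_SIZE;
    unsigned long last_start = map_end;
    unsigned long mapped = 0;

    while (last_start > map_start) {
        unsigned long start;

        if (last_start > step_size) {
            start = round_down(last_start - 1, step_size);
            if (start < map_start)
                start = map_start;
        } else {
            start = map_start;
        }
        printf("map [%#012lx-%#012lx)\n", start, last_start);
        mapped += last_start - start;
        last_start = start;

        if (mapped >= step_size)
            step_size <<= 8;            /* assume PMD_SHIFT=21, PAGE_SHIFT=12 */
    }
}

int main(void)
{
    memory_map_top_down(0x100000, 0x100000000UL);   /* 1 MiB .. 4 GiB */
    return 0;
}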
680 * memory_map_bottom_up - Map [map_start, map_end) bottom up
685 * [map_start, map_end) in bottom-up. Since we have limited the
686 * bottom-up allocation above the kernel, the page tables will
688 * in [map_start, map_end) in bottom-up.
690 static void __init memory_map_bottom_up(unsigned long map_start, in memory_map_bottom_up()
691 unsigned long map_end) in memory_map_bottom_up()
693 unsigned long next, start; in memory_map_bottom_up()
694 unsigned long mapped_ram_size = 0; in memory_map_bottom_up()
696 unsigned long step_size = PMD_SIZE; in memory_map_bottom_up()
703 * The memblock_find_in_range() gets us a block of RAM from the in memory_map_bottom_up()
704 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages in memory_map_bottom_up()
708 if (step_size && map_end - start > step_size) { in memory_map_bottom_up()
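The bottom-up variant is the mirror image: it starts right above the kernel and maps upward, again growing the step once enough RAM has been mapped below it. A compact sketch of its address arithmetic, with the same assumed step constants and a 64-bit unsigned long:

#include <stdio.h>

#define PMD_SIZE (2UL << 20)            /* assume 2 MiB initial step */
#define round_up(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define min(a, b) ((a) < (b) ? (a) : (b))

static void memory_map_bottom_up(unsigned long map_start, unsigned long map_end)
{
    unsigned long step_size = PMD_SIZE;
    unsigned long start = map_start;
    unsigned long mapped = 0;

    while (start < map_end) {
        unsigned long next;

        if (step_size && map_end - start > step_size) {
            next = round_up(start + 1, step_size);
            next = min(next, map_end);
        } else {
            next = map_end;
        }
        printf("map [%#012lx-%#012lx)\n", start, next);
        mapped += next - start;
        start = next;

        if (mapped >= step_size)
            step_size <<= 8;            /* assume PMD_SHIFT=21, PAGE_SHIFT=12 */
    }
}

int main(void)
{
    memory_map_bottom_up(0x1000000, 0x100000000UL); /* 16 MiB .. 4 GiB */
    return 0;
}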
733 * area. This limits the randomization granularity to 1GB for both 4-level
734 * and 5-level paging.
740 * The code below will alias kernel page-tables in the user-range of the in init_trampoline()
742 * be created when using the trampoline page-table. in init_trampoline()
753 unsigned long end; in init_mem_mapping()
772 * If the allocation is in bottom-up direction, we setup direct mapping in init_mem_mapping()
773 * in bottom-up, otherwise we setup direct mapping in top-down. in init_mem_mapping()
776 unsigned long kernel_end = __pa_symbol(_end); in init_mem_mapping()
831 (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE); in poking_init()
837 * We need to trigger the allocation of the page-tables that will be in poking_init()
850 * On x86, access has to be given to the first megabyte of RAM because that
851 * area traditionally contains BIOS code and data regions used by X, dosemu,
855 * Access has to be given to non-kernel-RAM areas as well, as these contain the
858 int devmem_is_allowed(unsigned long pagenr) in devmem_is_allowed()
874 * This must follow RAM test, since System RAM is considered a in devmem_is_allowed()
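Read together, these comments describe a three-way policy: pages that are not System RAM are always allowed, RAM in the legacy first megabyte (PFNs below 256) is allowed anyway, and other RAM is refused when exclusivity applies. A hedged userspace sketch of that decision, with toy page_is_system_ram()/range_is_exclusive() stubs (both names hypothetical) standing in for the kernel's resource-tree checks:

#include <stdbool.h>
#include <stdio.h>

/* Toy stubs; the real code consults the iomem resource tree here. */
static bool page_is_system_ram(unsigned long pagenr) { (void)pagenr; return true; }
static bool range_is_exclusive(unsigned long pagenr) { (void)pagenr; return true; }

/* 1 = /dev/mem access allowed for this page frame, 0 = refused. */
static int devmem_is_allowed(unsigned long pagenr)
{
    if (!page_is_system_ram(pagenr))
        return 1;                       /* MMIO, holes, option ROMs, ... */

    if (range_is_exclusive(pagenr)) {
        if (pagenr < 256)               /* legacy BIOS area below 1 MiB */
            return 1;
        return 0;                       /* restricted System RAM */
    }

    return 1;
}

int main(void)
{
    printf("pfn 0x10:  %d\n", devmem_is_allowed(0x10));  /* low 1 MiB: allowed */
    printf("pfn 0x200: %d\n", devmem_is_allowed(0x200)); /* RAM above 1 MiB: refused */
    return 0;
}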
888 void free_init_pages(const char *what, unsigned long begin, unsigned long end) in free_init_pages()
890 unsigned long begin_aligned, end_aligned; in free_init_pages()
906 * mark them not present - any buggy init-section access will in free_init_pages()
910 pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n", in free_init_pages()
911 begin, end - 1); in free_init_pages()
916 kmemleak_free_part((void *)begin, end - begin); in free_init_pages()
917 set_memory_np(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
922 * writeable and non-executable first. in free_init_pages()
924 set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
925 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
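Before the set_memory_*() calls and the actual freeing, begin is rounded up and end rounded down to page boundaries, and (end - begin) >> PAGE_SHIFT is the page count everything then operates on. A small sketch of that alignment arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long begin = 0x345678;     /* unaligned start of an init section */
    unsigned long end   = 0x400123;     /* unaligned end */

    unsigned long begin_aligned = PAGE_ALIGN(begin);   /* round up */
    unsigned long end_aligned   = end & PAGE_MASK;     /* round down */
    unsigned long nr_pages      = (end_aligned - begin_aligned) >> PAGE_SHIFT;

    printf("freeing %lu pages in [%#lx-%#lx)\n",
           nr_pages, begin_aligned, end_aligned);       /* 186 pages */
    return 0;
}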
939 unsigned long begin_ul = (unsigned long)begin; in free_kernel_image_pages()
940 unsigned long end_ul = (unsigned long)end; in free_kernel_image_pages()
941 unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT; in free_kernel_image_pages()
975 void __init free_initrd_mem(unsigned long start, unsigned long end) in free_initrd_mem()
981 * - i386_start_kernel() in free_initrd_mem()
982 * - x86_64_start_kernel() in free_initrd_mem()
983 * - relocate_initrd() in free_initrd_mem()
992 unsigned long max_zone_pfns[MAX_NR_ZONES]; in zone_sizes_init()
1031 unsigned long arch_max_swapfile_size(void) in arch_max_swapfile_size()
1033 unsigned long pages; in arch_max_swapfile_size()
1039 unsigned long long l1tf_limit = l1tf_pfn_limit(); in arch_max_swapfile_size()
1045 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; in arch_max_swapfile_size()
1047 pages = min_t(unsigned long long, l1tf_limit, pages); in arch_max_swapfile_size()
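The clamp above enforces the L1TF mitigation's limit on swap size: the PFN limit from l1tf_pfn_limit() is scaled up because swap offsets can use a few bits below the PFN bits, and the swap size is then capped at the smaller of that limit and the generic maximum. A worked sketch with purely illustrative numbers for the physical address width, the generic limit, and the assumed 3-bit PFN/swap-offset difference:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    /* Illustrative numbers only: half of a 46-bit physical address space in
     * pages for the L1TF workaround, plus an assumed 3-bit offset scaling. */
    unsigned long long l1tf_limit = 1ULL << (46 - 12 - 1);  /* max PFN / 2 */
    unsigned long long pages      = 1ULL << 40;             /* generic swap limit */
    unsigned int offset_extra_bits = 3;  /* assumed PAGE_SHIFT - SWP_OFFSET_FIRST_BIT */

    l1tf_limit <<= offset_extra_bits;
    pages = min(l1tf_limit, pages);

    printf("max swapfile size: %llu pages\n", pages);       /* 2^36 pages here */
    return 0;
}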
1058 unsigned long start, offset = 0; in execmem_arch_setup()