Lines matching full-text search for 0x11800000

61 	.start	= 0,
62 	.end	= 0x9ff,
92 if (memcmp(cp, "mem=", 4) == 0) { in mem_limit_func()
110 #define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
132 for (j = i; j > 0; j--) { in setup_bootmem()
166 for (i = 0; i < npmem_ranges; i++) { in setup_bootmem()
173 pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n", in setup_bootmem()
197 mem_max = 0; in setup_bootmem()
198 for (i = 0; i < npmem_ranges; i++) { in setup_bootmem()
225 npmem_holes = 0; in setup_bootmem()
226 end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages; in setup_bootmem()
238 pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn; in setup_bootmem()
247 max_pfn = 0; in setup_bootmem()
248 for (i = 0; i < npmem_ranges; i++) { in setup_bootmem()
285 memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free + in setup_bootmem()
294 for (i = 0; i < npmem_holes; i++) { in setup_bootmem()
327 for (i = 0; i < sysram_resource_count; i++) { in setup_bootmem()
332 request_resource(&sysram_resources[0], &pdcdata_resource); in setup_bootmem()
367 /* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */ in map_pages()
433 start_pte = 0; in map_pages()
438 start_pmd = 0; in map_pages()
448 PAGE_KERNEL_RWX, enable_read_write ? 1:0); in set_kernel_text_rw()
463 PAGE_KERNEL, 0); in free_initmem()
479 __flush_tlb_range(0, init_begin, kernel_end); in free_initmem()
502 map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0); in mark_rodata_ro()
565 BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000); in mem_init()
566 BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000); in mem_init()
582 #if 0 in mem_init()
588 " vmalloc : 0x%px - 0x%px (%4ld MB)\n" in mem_init()
589 " fixmap : 0x%px - 0x%px (%4ld kB)\n" in mem_init()
590 " memory : 0x%px - 0x%px (%4ld MB)\n" in mem_init()
591 " .init : 0x%px - 0x%px (%4ld kB)\n" in mem_init()
592 " .data : 0x%px - 0x%px (%4ld kB)\n" in mem_init()
593 " .text : 0x%px - 0x%px (%4ld kB)\n", in mem_init()
601 __va(0), high_memory, in mem_init()
602 ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, in mem_init()
621 * Note that gateway_init() places the Linux gateway page at page 0.
632 for (range = 0; range < npmem_ranges; range++) { in pagetable_init()
640 size, PAGE_KERNEL, 0); in pagetable_init()
647 initrd_end - initrd_start, PAGE_KERNEL, 0); in pagetable_init()
670 * page 0), so it doesn't need to be aliased into user space. in gateway_init()
712 unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, }; in parisc_bootmem_free()
714 max_zone_pfn[0] = memblock_end_of_DRAM(); in parisc_bootmem_free()
747 if (((start & (2 * size - 1)) == 0) && in alloc_btlb()
753 if ((start & (size - 1)) != 0) { in alloc_btlb()
778 else if (pdc_btlb_info(&btlb_info) < 0) { in btlb_init_per_cpu()
779 memset(&btlb_info, 0, sizeof btlb_info); in btlb_init_per_cpu()
789 slot = 0; in btlb_init_per_cpu()
790 alloc_btlb(s, e, &slot, 0x13800000); in btlb_init_per_cpu()
800 alloc_btlb(s, e, &slot, 0x11800000); in btlb_init_per_cpu()
829 static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
843 if (free_space_ids == 0) { in alloc_sid()
844 if (dirty_space_ids != 0) { in alloc_sid()
849 BUG_ON(free_space_ids == 0); in alloc_sid()
890 if (dirty_space_ids != 0) { in get_dirty_sids()
891 for (i = 0; i < SID_ARRAY_SIZE; i++) { in get_dirty_sids()
893 dirty_space_id[i] = 0; in get_dirty_sids()
895 dirty_space_ids = 0; in get_dirty_sids()
907 if (ndirty != 0) { in recycle_sids()
908 for (i = 0; i < SID_ARRAY_SIZE; i++) { in recycle_sids()
913 space_id_index = 0; in recycle_sids()
925 if (dirty_space_ids != 0) { in recycle_sids()
926 for (i = 0; i < SID_ARRAY_SIZE; i++) { in recycle_sids()
928 dirty_space_id[i] = 0; in recycle_sids()
932 dirty_space_ids = 0; in recycle_sids()
933 space_id_index = 0; in recycle_sids()
954 do_recycle = 0; in flush_tlb_all()
968 recycle_inuse = 0; in flush_tlb_all()
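
The two alloc_btlb() calls matched above (listing lines 790 and 800) pass different entry-info words (0x13800000 and 0x11800000, treated here as opaque) for the same kind of range walk, and the alignment tests at listing lines 747 and 753 suggest the range is carved into the largest naturally aligned power-of-two blocks that fit. Below is a minimal, self-contained sketch of that carving pattern only; carve_blocks() and its size limits are invented names for illustration, and the real alloc_btlb() additionally tracks BTLB slots (note the slot argument), which the sketch omits.

/*
 * Minimal sketch (not the kernel code): split [start, end) into the
 * largest naturally aligned power-of-two blocks, the pattern suggested
 * by the alignment tests at listing lines 747/753.  Assumes start and
 * end are multiples of min_size.
 */
#include <stdio.h>

static void carve_blocks(unsigned long start, unsigned long end,
			 unsigned long min_size, unsigned long max_size)
{
	while (start < end) {
		unsigned long size = min_size;

		/* Double the block while it stays aligned and still fits. */
		while (size < max_size &&
		       (start & (2 * size - 1)) == 0 &&
		       start + 2 * size <= end)
			size <<= 1;

		printf("block at 0x%08lx, size 0x%08lx\n", start, size);
		start += size;
	}
}

int main(void)
{
	/* Example: a 5 MB range starting at 1 MB, 4 kB minimum block size. */
	carve_blocks(0x100000, 0x600000, 0x1000, 0x400000);
	return 0;
}

With the example parameters this prints a 1 MB block at 1 MB, a 2 MB block at 2 MB and a 2 MB block at 4 MB, i.e. the fewest naturally aligned blocks that cover the range.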