Lines matching +full:data +full:-size (arch/x86/mm/ioremap.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Re-map IO memory to kernel address space so that we can access it.
5 * 640k-1MB IO memory area on PC's
45 int ioremap_change_attr(unsigned long vaddr, unsigned long size,
48 unsigned long nrpages = size >> PAGE_SHIFT;
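The page count is a plain shift of an already page-aligned size; for instance, with 4 KiB pages (PAGE_SHIFT == 12), a 64 KiB region comes out as follows (illustrative numbers, not from the matched lines):

	unsigned long nrpages = 0x10000 >> 12;	/* 64 KiB -> 16 pages */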
76 if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
79 start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
80 stop_pfn = (res->end + 1) >> PAGE_SHIFT;
82 for (i = 0; i < (stop_pfn - start_pfn); ++i)
100 switch (res->desc) {
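The two conversions above round in opposite directions, so the loop visits only pages that the resource covers completely. A worked example with a hypothetical resource and 4 KiB pages:

	/* res->start = 0x1800, res->end = 0x2fff (hypothetical) */
	start_pfn = (0x1800 + 0x1000 - 1) >> 12;	/* = 2, rounds up */
	stop_pfn  = (0x2fff + 1) >> 12;			/* = 3, rounds down */
	/* the loop visits only pfn 2; the page at pfn 1, which the
	   resource covers only partially, is skipped */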
112 * The EFI runtime services data area is not covered by walk_mem_res(), but must be mapped encrypted when SEV is active.
121 desc->flags |= IORES_MAP_ENCRYPTED;
131 desc->flags |= IORES_MAP_ENCRYPTED;
138 if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
139 desc->flags |= __ioremap_check_ram(res);
141 if (!(desc->flags & IORES_MAP_ENCRYPTED))
142 desc->flags |= __ioremap_check_encrypted(res);
144 return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
156 static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
162 end = start + size - 1;
173 * the physical address is aligned by a huge page size (1GB or 2MB) and
174 * the requested size is at least the huge page size.
178 * when a mapping range is covered by non-WB type of MTRRs.
180 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
181 * have to convert them into an offset in a page-aligned mapping, but the caller shouldn't need to know that small detail.
185 __ioremap_caller(resource_size_t phys_addr, unsigned long size,
191 const unsigned long unaligned_size = size;
199 /* Don't allow wraparound or zero size */
200 last_addr = phys_addr + size - 1;
201 if (!size || last_addr < phys_addr)
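The test above rejects both a zero size and a range that wraps around the top of the physical address space; a sketch of the wraparound case with made-up values:

	/* phys_addr = 0xfffffffffffff000, size = 0x2000 (hypothetical) */
	last_addr = 0xfffffffffffff000 + 0x2000 - 1;	/* wraps to 0xfff */
	/* last_addr < phys_addr, so the request fails */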
211 __ioremap_check_mem(phys_addr, size, &io_desc);
217 WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
223 * Mappings have to be page-aligned
227 size = PAGE_ALIGN(last_addr+1) - phys_addr;
235 retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
243 if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
245 "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
247 (unsigned long long)(phys_addr + size),
293 area = get_vm_area_caller(size, VM_IOREMAP, caller);
296 area->phys_addr = phys_addr;
297 vaddr = (unsigned long) area->addr;
299 if (memtype_kernel_map_sync(phys_addr, size, pcm))
302 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
319 memtype_free(phys_addr, phys_addr + size);
324 * ioremap - map bus memory into CPU space
326 * @size: size of the resource to map
344 void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
356 return __ioremap_caller(phys_addr, size, pcm,
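A minimal usage sketch for the interface documented above, assuming a PCI device whose BAR 0 holds MMIO registers; the BAR index and the 0x10 register offset are hypothetical:

	#include <linux/pci.h>
	#include <linux/io.h>

	static int demo_map_bar0(struct pci_dev *pdev)
	{
		void __iomem *regs;

		regs = ioremap(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + 0x10);	/* hypothetical control register */
		iounmap(regs);
		return 0;
	}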
362 * ioremap_uc - map bus memory into CPU space as strongly uncachable
364 * @size: size of the resource to map
373 * preference as completely uncachable on the CPU when possible. For non-PAT
374 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT systems this will set the PAT entry for the pages as strong UC.
385 void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
389 return __ioremap_caller(phys_addr, size, pcm,
395 * ioremap_wc - map memory into CPU space write combined
397 * @size: size of the resource to map
404 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
406 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
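Write-combining suits streaming apertures such as frame buffers; a sketch, where fb_phys and fb_len stand in for a real aperture (assumed values):

	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
	if (fb) {
		memset_io(fb, 0, fb_len);	/* bulk writes can be combined */
		iounmap(fb);
	}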
412 * ioremap_wt - map memory into CPU space write through
414 * @size: size of the resource to map
417 * Write through stores data into memory while keeping the cache up-to-date.
421 void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
423 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
428 void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
430 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
435 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
437 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
442 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
445 return __ioremap_caller(phys_addr, size,
452 * iounmap - Free an IO remapping
465 * The PCI/ISA range special-casing was removed from __ioremap()
497 memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));
544 unsigned long size)
552 is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
558 * Check if the non-volatile attribute is set for an EFI reserved area.
573 switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
593 * Examine the physical address to determine if it is EFI data. Check it against the boot params structure and EFI tables and memory types.
597 unsigned long size)
601 /* Check if the address is part of EFI boot/runtime data */
632 * Examine the physical address to determine if it is boot data by checking it against the boot params setup_data chain.
636 unsigned long size)
639 struct setup_data *data;
649 data = memremap(paddr, sizeof(*data),
651 if (!data) {
656 paddr_next = data->next;
657 len = data->len;
661 memunmap(data);
665 if (data->type == SETUP_INDIRECT) {
666 memunmap(data);
667 data = memremap(paddr, sizeof(*data) + len,
669 if (!data) {
674 indirect = (struct setup_indirect *)data->data;
676 if (indirect->type != SETUP_INDIRECT) {
677 paddr = indirect->addr;
678 len = indirect->len;
682 memunmap(data);
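For reference, the chain walked above links struct setup_data nodes, and a SETUP_INDIRECT node carries a struct setup_indirect in its payload. The layouts below follow arch/x86/include/uapi/asm/bootparam.h, reproduced from memory as a sketch rather than quoted from the matched source:

	struct setup_data {
		__u64 next;	/* physical address of the next node; 0 ends the chain */
		__u32 type;	/* e.g. SETUP_INDIRECT */
		__u32 len;	/* length of data[] */
		__u8  data[];
	};

	struct setup_indirect {
		__u32 type;	/* SETUP_INDIRECT ORed with the payload's real type */
		__u32 reserved;
		__u64 len;
		__u64 addr;	/* physical address of the actual payload */
	};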
694 * Examine the physical address to determine if it is boot data by checking it against the boot params setup_data chain (early boot version).
698 unsigned long size)
701 struct setup_data *data;
706 unsigned int len, size;
711 data = early_memremap_decrypted(paddr, sizeof(*data));
712 if (!data) {
717 size = sizeof(*data);
719 paddr_next = data->next;
720 len = data->len;
724 early_memunmap(data, sizeof(*data));
728 if (data->type == SETUP_INDIRECT) {
729 size += len;
730 early_memunmap(data, sizeof(*data));
731 data = early_memremap_decrypted(paddr, size);
732 if (!data) {
737 indirect = (struct setup_indirect *)data->data;
739 if (indirect->type != SETUP_INDIRECT) {
740 paddr = indirect->addr;
741 len = indirect->len;
745 early_memunmap(data, size);
758 * RAM remap will map the data as encrypted. Determine if a RAM remap should
759 * not be done so that the data will be mapped decrypted.
761 bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
774 if (memremap_is_setup_data(phys_addr, size) ||
775 memremap_is_efi_data(phys_addr, size))
779 return !memremap_should_map_decrypted(phys_addr, size);
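Read together, the matched fragments reduce to roughly this decision (a summary sketch; unmatched surrounding lines handle the cases where memory encryption is inactive or explicit MEMREMAP flags were passed):

	/*
	 * setup_data or EFI data -> false: must be mapped decrypted
	 * otherwise              -> an encrypted RAM remap is allowed unless
	 *                           memremap_should_map_decrypted() objects
	 */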
784 * used when remapping memory. By default, early_memremap() will map the data as encrypted.
789 unsigned long size,
800 if (early_memremap_is_setup_data(phys_addr, size) ||
801 memremap_is_efi_data(phys_addr, size))
805 if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
812 bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
814 return arch_memremap_can_ram_remap(phys_addr, size, 0);
819 unsigned long size)
821 return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
825 * Remap memory with encryption and write-protected - cannot be called before pat_init() is called.
829 unsigned long size)
833 return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
838 unsigned long size)
840 return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
844 * Remap memory without encryption and write-protected - cannot be called before pat_init() is called.
848 unsigned long size)
852 return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
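Taken together, the four early_memremap_*() variants above differ only in the fixed protection they pass to early_memremap_prot():

	early_memremap_encrypted()	->	__PAGE_KERNEL_ENC
	early_memremap_encrypted_wp()	->	__PAGE_KERNEL_ENC_WP
	early_memremap_decrypted()	->	__PAGE_KERNEL_NOENC
	early_memremap_decrypted_wp()	->	__PAGE_KERNEL_NOENC_WP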
885 BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
887 WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
897 * The boot-ioremap range spans multiple pmds, for which we are not prepared:
900 #define __FIXADDR_TOP (-PAGE_SIZE)
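On 32-bit kernels __FIXADDR_TOP is normally a runtime variable, so this temporary definition pins it to a compile-time constant just long enough for a build-time check. The surrounding lines in the source look roughly like this (reproduced from memory, so treat it as a sketch):

	#define __FIXADDR_TOP (-PAGE_SIZE)
	/* the whole boot-ioremap range must sit within one pmd */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
	#undef __FIXADDR_TOP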