Lines Matching +full:reserved +full:- +full:memory

1 // SPDX-License-Identifier: GPL-2.0+
3 * Contiguous Memory Allocator for DMA mapping framework
4 * Copyright (c) 2010-2011 by Samsung Electronics.
9 * Contiguous Memory Allocator
11 * The Contiguous Memory Allocator (CMA) makes it possible to
12 * allocate big contiguous chunks of memory after the system has
17 * Various devices on embedded systems have no scatter-gather and/or
18 * IO map support and require contiguous blocks of memory to
22 * Such devices often require big memory buffers (a full HD frame
24 * MB of memory), which makes mechanisms such as kmalloc() or
27 * At the same time, a solution where a big memory region is
28 * reserved for a device is suboptimal since often more memory is
29 * reserved than strictly required and, moreover, the memory is
32 * CMA tries to solve this issue by operating on memory regions
34 * can use the memory for pagecache and when device driver requests
45 #include <linux/dma-map-ops.h>
61 * The size can be set in bytes or as a percentage of the total memory
69 static phys_addr_t size_cmdline __initdata = -1;
77 return -EINVAL; in early_cma()
84 if (*p != '-') { in early_cma()
204 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
205 * @limit: End address of the reserved memory (optional, 0 for any).
207 * This function reserves memory from early allocator. It should be
209 * has been activated and all other subsystems have already allocated/reserved
210 * memory.
223 if (size_cmdline != -1) { in dma_contiguous_reserve()
258 * dma_contiguous_reserve_area() - reserve custom contiguous area
259 * @size: Size of the reserved area (in bytes),
260 * @base: Base address of the reserved area optional, use 0 for any
261 * @limit: End address of the reserved memory (optional, 0 for any).
263 * @fixed: hint about where to place the reserved area
265 * This function reserves memory from early allocator. It should be
267 * has been activated and all other subsystems have already allocated/reserved
268 * memory. This function allows creating custom reserved areas for specific in dma_contiguous_reserve_area()
281 "reserved", res_cma); in dma_contiguous_reserve_area()
285 /* Architecture specific contiguous memory fixup. */ in dma_contiguous_reserve_area()
293 * dma_alloc_from_contiguous() - allocate pages from contiguous area
299 * This function allocates memory buffer for specified device. It uses
300 * device specific contiguous memory area if available or the default
314 * dma_release_from_contiguous() - release allocated pages
319 * This function releases memory allocated by dma_alloc_from_contiguous().
337 * dma_alloc_contiguous() - allocate contiguous pages
342 * tries to use device specific contiguous memory area if available, or it
343 * tries to use per-numa cma, if the allocation fails, it will fallback to
346 * Note that it bypasses one-page size of allocations from the per-numa and
360 if (dev->cma_area) in dma_alloc_contiguous()
361 return cma_alloc_aligned(dev->cma_area, size, gfp); in dma_alloc_contiguous()
391 * dma_free_contiguous() - release allocated pages
396 * This function releases memory allocated by dma_alloc_contiguous(). As the
399 * upon a false-return.
406 if (dev->cma_area) { in dma_free_contiguous()
407 if (cma_release(dev->cma_area, page, count)) in dma_free_contiguous()
411 * otherwise, page is from either per-numa cma or default cma in dma_free_contiguous()
430 * Support for reserved memory regions defined in device tree
442 dev->cma_area = rmem->priv; in rmem_cma_device_init()
449 dev->cma_area = NULL; in rmem_cma_device_release()
459 unsigned long node = rmem->fdt_node; in rmem_cma_setup()
460 bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); in rmem_cma_setup()
464 if (size_cmdline != -1 && default_cma) { in rmem_cma_setup()
465 pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n", in rmem_cma_setup()
466 rmem->name); in rmem_cma_setup()
467 return -EBUSY; in rmem_cma_setup()
471 of_get_flat_dt_prop(node, "no-map", NULL)) in rmem_cma_setup()
472 return -EINVAL; in rmem_cma_setup()
474 if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) { in rmem_cma_setup()
475 pr_err("Reserved memory: incorrect alignment of CMA region\n"); in rmem_cma_setup()
476 return -EINVAL; in rmem_cma_setup()
479 err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); in rmem_cma_setup()
481 pr_err("Reserved memory: unable to setup CMA region\n"); in rmem_cma_setup()
484 /* Architecture specific contiguous memory fixup. */ in rmem_cma_setup()
485 dma_contiguous_early_fixup(rmem->base, rmem->size); in rmem_cma_setup()
490 rmem->ops = &rmem_cma_ops; in rmem_cma_setup()
491 rmem->priv = cma; in rmem_cma_setup()
493 pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n", in rmem_cma_setup()
494 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_cma_setup()
498 RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);