Lines matching the identifier "cma" in mm/cma.c (full-identifier search):
15 #define pr_fmt(fmt) "cma: " fmt
25 #include <linux/cma.h>
29 #include <trace/events/cma.h>
32 #include "cma.h"
34 struct cma cma_areas[MAX_CMA_AREAS];
38 phys_addr_t cma_get_base(const struct cma *cma) in cma_get_base() argument
40 return PFN_PHYS(cma->base_pfn); in cma_get_base()
43 unsigned long cma_get_size(const struct cma *cma) in cma_get_size() argument
45 return cma->count << PAGE_SHIFT; in cma_get_size()
48 const char *cma_get_name(const struct cma *cma) in cma_get_name() argument
50 return cma->name; in cma_get_name()
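
A minimal sketch (not part of the listing above) of how a caller might use the three accessors; "my_cma" is a hypothetical pointer obtained from one of the registration functions shown further down.

	#include <linux/cma.h>

	static void my_report_cma(struct cma *my_cma)
	{
		phys_addr_t base = cma_get_base(my_cma);	/* physical start of the area */
		unsigned long size = cma_get_size(my_cma);	/* bytes, i.e. count << PAGE_SHIFT */

		pr_info("CMA \"%s\": base %pa, %lu bytes\n",
			cma_get_name(my_cma), &base, size);
	}
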
53 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, in cma_bitmap_aligned_mask() argument
56 if (align_order <= cma->order_per_bit) in cma_bitmap_aligned_mask()
58 return (1UL << (align_order - cma->order_per_bit)) - 1; in cma_bitmap_aligned_mask()
65 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, in cma_bitmap_aligned_offset() argument
68 return (cma->base_pfn & ((1UL << align_order) - 1)) in cma_bitmap_aligned_offset()
69 >> cma->order_per_bit; in cma_bitmap_aligned_offset()
72 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, in cma_bitmap_pages_to_bits() argument
75 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
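
A worked example of the bitmap arithmetic above: with order_per_bit = 2, each bitmap bit covers 1 << 2 = 4 pages. A request for 9 pages is rounded up by cma_bitmap_pages_to_bits() to ALIGN(9, 4) = 12 pages, i.e. 3 bits, and an allocation with align_order = 3 (8 pages) may only start at a bit index that is a multiple of 1 << (3 - 2) = 2, which cma_bitmap_aligned_mask() expresses as the mask 0x1. cma_bitmap_aligned_offset() supplies the correction needed when base_pfn itself is not aligned to the requested order.
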
78 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, in cma_clear_bitmap() argument
84 bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; in cma_clear_bitmap()
85 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_clear_bitmap()
87 spin_lock_irqsave(&cma->lock, flags); in cma_clear_bitmap()
88 bitmap_clear(cma->bitmap, bitmap_no, bitmap_count); in cma_clear_bitmap()
89 spin_unlock_irqrestore(&cma->lock, flags); in cma_clear_bitmap()
92 static void __init cma_activate_area(struct cma *cma) in cma_activate_area() argument
94 unsigned long base_pfn = cma->base_pfn, pfn; in cma_activate_area()
97 cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL); in cma_activate_area()
98 if (!cma->bitmap) in cma_activate_area()
103 * same zone. Simplify by forcing the entire CMA resv range to be in the in cma_activate_area()
108 for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) { in cma_activate_area()
114 for (pfn = base_pfn; pfn < base_pfn + cma->count; in cma_activate_area()
118 spin_lock_init(&cma->lock); in cma_activate_area()
121 INIT_HLIST_HEAD(&cma->mem_head); in cma_activate_area()
122 spin_lock_init(&cma->mem_head_lock); in cma_activate_area()
128 bitmap_free(cma->bitmap); in cma_activate_area()
130 /* Expose all pages to the buddy, they are useless for CMA. */ in cma_activate_area()
131 if (!cma->reserve_pages_on_error) { in cma_activate_area()
132 for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++) in cma_activate_area()
135 totalcma_pages -= cma->count; in cma_activate_area()
136 cma->count = 0; in cma_activate_area()
137 pr_err("CMA area %s could not be activated\n", cma->name); in cma_activate_area()
152 void __init cma_reserve_pages_on_error(struct cma *cma) in cma_reserve_pages_on_error() argument
154 cma->reserve_pages_on_error = true; in cma_reserve_pages_on_error()
165 * @res_cma: Pointer to store the created cma region.
172 struct cma **res_cma) in cma_init_reserved_mem()
174 struct cma *cma; in cma_init_reserved_mem() local
178 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_init_reserved_mem()
193 cma = &cma_areas[cma_area_count]; in cma_init_reserved_mem()
196 snprintf(cma->name, CMA_MAX_NAME, name); in cma_init_reserved_mem()
198 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); in cma_init_reserved_mem()
200 cma->base_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
201 cma->count = size >> PAGE_SHIFT; in cma_init_reserved_mem()
202 cma->order_per_bit = order_per_bit; in cma_init_reserved_mem()
203 *res_cma = cma; in cma_init_reserved_mem()
205 totalcma_pages += cma->count; in cma_init_reserved_mem()
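
A minimal sketch (assumed names and sizes) of registering an already-reserved physical range through cma_init_reserved_mem(); the range must have been taken out of the free memblock pool beforehand and satisfy the function's alignment checks.

	static int __init my_wrap_reserved_range(phys_addr_t base, phys_addr_t size)
	{
		struct cma *my_cma;
		int ret;

		ret = cma_init_reserved_mem(base, size, 0 /* order_per_bit */,
					    "my-reserved", &my_cma);
		if (ret)
			pr_err("my-reserved: CMA registration failed: %d\n", ret);
		return ret;
	}
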
215 * @alignment: Alignment for the CMA area, should be power of 2 or zero
219 * @res_cma: Pointer to store the created cma region.
233 bool fixed, const char *name, struct cma **res_cma, in cma_declare_contiguous_nid()
251 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_declare_contiguous_nid()
321 * It will place the new cma area close to the start of the node in cma_declare_contiguous_nid()
323 * cma area and not into it. in cma_declare_contiguous_nid()
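
A minimal sketch of an early-boot reservation through cma_declare_contiguous_nid(); it assumes the call is made from architecture setup code while memblock is still usable, and the size, name and NUMA node are placeholders.

	#include <linux/numa.h>
	#include <linux/sizes.h>

	static struct cma *my_cma;

	void __init my_early_cma_reserve(void)
	{
		int ret;

		/* base = 0 and fixed = false let the allocator pick the placement. */
		ret = cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
						 "my-area", &my_cma, NUMA_NO_NODE);
		if (ret)
			pr_warn("my-area: CMA reservation failed: %d\n", ret);
	}
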
381 static void cma_debug_show_areas(struct cma *cma) in cma_debug_show_areas() argument
386 unsigned long nbits = cma_bitmap_maxno(cma); in cma_debug_show_areas()
388 spin_lock_irq(&cma->lock); in cma_debug_show_areas()
391 next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); in cma_debug_show_areas()
394 next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); in cma_debug_show_areas()
396 nr_part = nr_zero << cma->order_per_bit; in cma_debug_show_areas()
402 pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); in cma_debug_show_areas()
403 spin_unlock_irq(&cma->lock); in cma_debug_show_areas()
406 static struct page *__cma_alloc(struct cma *cma, unsigned long count, in __cma_alloc() argument
416 const char *name = cma ? cma->name : NULL; in __cma_alloc()
420 if (!cma || !cma->count || !cma->bitmap) in __cma_alloc()
423 pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, in __cma_alloc()
424 (void *)cma, cma->name, count, align); in __cma_alloc()
429 mask = cma_bitmap_aligned_mask(cma, align); in __cma_alloc()
430 offset = cma_bitmap_aligned_offset(cma, align); in __cma_alloc()
431 bitmap_maxno = cma_bitmap_maxno(cma); in __cma_alloc()
432 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in __cma_alloc()
438 spin_lock_irq(&cma->lock); in __cma_alloc()
439 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, in __cma_alloc()
443 spin_unlock_irq(&cma->lock); in __cma_alloc()
446 bitmap_set(cma->bitmap, bitmap_no, bitmap_count); in __cma_alloc()
452 spin_unlock_irq(&cma->lock); in __cma_alloc()
454 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); in __cma_alloc()
463 cma_clear_bitmap(cma, pfn, count); in __cma_alloc()
470 trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), in __cma_alloc()
477 * CMA can allocate multiple page blocks, which results in different in __cma_alloc()
488 __func__, cma->name, count, ret); in __cma_alloc()
489 cma_debug_show_areas(cma); in __cma_alloc()
496 cma_sysfs_account_success_pages(cma, count); in __cma_alloc()
499 cma_sysfs_account_fail_pages(cma, count); in __cma_alloc()
507 * @cma: Contiguous memory region for which the allocation is performed.
515 struct page *cma_alloc(struct cma *cma, unsigned long count, in cma_alloc() argument
518 return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0)); in cma_alloc()
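
A minimal sketch (hypothetical caller) of the page-based allocation path: cma_alloc() returns the first struct page of a physically contiguous run, and cma_release() gives it back.

	static int my_use_cma(struct cma *my_cma)
	{
		unsigned long nr = 16;		/* 16 contiguous pages */
		struct page *page;

		page = cma_alloc(my_cma, nr, 4 /* alignment order */, false);
		if (!page)
			return -ENOMEM;

		/* ... use the pages ... */

		cma_release(my_cma, page, nr);
		return 0;
	}
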
521 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp) in cma_alloc_folio() argument
528 page = __cma_alloc(cma, 1 << order, order, gfp); in cma_alloc_folio()
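
A minimal sketch of the folio interface; it assumes the folio path wants a compound allocation (__GFP_COMP) and a non-zero order.

	static struct folio *my_grab_folio(struct cma *my_cma, int order)
	{
		return cma_alloc_folio(my_cma, order, GFP_KERNEL | __GFP_COMP);
	}

	static void my_put_folio(struct cma *my_cma, struct folio *folio)
	{
		if (!cma_free_folio(my_cma, folio))
			pr_warn("folio was not allocated from this CMA area\n");
	}
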
533 bool cma_pages_valid(struct cma *cma, const struct page *pages, in cma_pages_valid() argument
538 if (!cma || !pages) in cma_pages_valid()
543 if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) { in cma_pages_valid()
554 * @cma: Contiguous memory region for which the allocation is performed.
562 bool cma_release(struct cma *cma, const struct page *pages, in cma_release() argument
567 if (!cma_pages_valid(cma, pages, count)) in cma_release()
574 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); in cma_release()
577 cma_clear_bitmap(cma, pfn, count); in cma_release()
578 cma_sysfs_account_release_pages(cma, count); in cma_release()
579 trace_cma_release(cma->name, pfn, pages, count); in cma_release()
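
A minimal sketch (hypothetical helper) of freeing pages that may or may not have come from a CMA area: cma_release() performs the cma_pages_valid() check internally and returns false for foreign pages, so the caller can fall back to the normal page allocator.

	static void my_free_maybe_cma(struct cma *my_cma, struct page *page,
				      unsigned long nr)
	{
		if (!cma_release(my_cma, page, nr))
			/* assumes the non-CMA path allocated via alloc_pages(get_order(...)) */
			__free_pages(page, get_order(nr << PAGE_SHIFT));
	}
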
584 bool cma_free_folio(struct cma *cma, const struct folio *folio) in cma_free_folio() argument
589 return cma_release(cma, &folio->page, folio_nr_pages(folio)); in cma_free_folio()
592 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) in cma_for_each_area() argument
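
A minimal sketch (hypothetical callback) of walking every registered area with cma_for_each_area(); a non-zero return value from the callback stops the walk.

	static int my_count_one(struct cma *cma, void *data)
	{
		unsigned long *total = data;

		*total += cma_get_size(cma) >> PAGE_SHIFT;
		return 0;
	}

	static unsigned long my_total_cma_pages(void)
	{
		unsigned long total = 0;

		cma_for_each_area(my_count_one, &total);
		return total;
	}
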