Lines Matching full:pages
23 * pcpu_get_pages - get temp pages array
30 * Pointer to temp pages array on success.
34 static struct page **pages; in pcpu_get_pages() local
35 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); in pcpu_get_pages()
39 if (!pages) in pcpu_get_pages()
40 pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL); in pcpu_get_pages()
41 return pages; in pcpu_get_pages()
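
The matches above come from the scratch-array helper. A minimal sketch of how those lines fit together (a reconstruction, not necessarily the verbatim source; pcpu_nr_units, pcpu_unit_pages, pcpu_mem_zalloc() and pcpu_alloc_mutex are percpu-internal symbols):

static struct page **pcpu_get_pages(void)
{
	/* One shared scratch array, sized to hold a page pointer for
	 * every page of every unit; allocated lazily and never freed. */
	static struct page **pages;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

	lockdep_assert_held(&pcpu_alloc_mutex);

	if (!pages)
		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
	return pages;
}
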
45 * pcpu_free_pages - free pages which were allocated for @chunk
46 * @chunk: chunk pages were allocated for
47 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
51 * Free pages [@page_start, @page_end) in @pages for all units.
52 * The pages were allocated for @chunk.
55 struct page **pages, int page_start, int page_end) in pcpu_free_pages() argument
62 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
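
A sketch of the free path the matches above belong to (same caveat: a reconstruction; pcpu_page_idx() maps a (cpu, page index) pair to a slot in the shared array):

static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	/* Release every page recorded for [page_start, page_end) in
	 * each unit's slice of the array; slots may be NULL. */
	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}
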
71 * pcpu_alloc_pages - allocates pages for @chunk
73 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
78 * Allocate pages [@page_start,@page_end) into @pages for all units.
80 * content of @pages and will pass it verbatim to pcpu_map_pages().
83 struct page **pages, int page_start, int page_end, in pcpu_alloc_pages() argument
93 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
104 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
110 __free_page(pages[pcpu_page_idx(tcpu, i)]); in pcpu_alloc_pages()
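
The allocation path with its unwind on failure, sketched from the matches above (reconstruction; label names and exact gfp handling may differ from the real file):

static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end,
			    gfp_t gfp)
{
	unsigned int cpu, tcpu;
	int i;

	gfp |= __GFP_HIGHMEM;

	/* Allocate node-local pages for every unit; on failure, undo
	 * the partially filled unit and every fully filled one. */
	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep)
				goto err;
		}
	}
	return 0;

err:
	while (--i >= page_start)
		__free_page(pages[pcpu_page_idx(cpu, i)]);

	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		for (i = page_start; i < page_end; i++)
			__free_page(pages[pcpu_page_idx(tcpu, i)]);
	}
	return -ENOMEM;
}
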
121 * Pages in [@page_start,@page_end) of @chunk are about to be
141 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
143 * @pages: pages array which can be used to pass information to free
147 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
148 * Corresponding elements in @pages were cleared by the caller and can
154 struct page **pages, int page_start, int page_end) in pcpu_unmap_pages() argument
165 pages[pcpu_page_idx(cpu, i)] = page; in pcpu_unmap_pages()
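
A sketch of the unmap path: each still-mapped page is looked up and parked in the shared array so a later pcpu_free_pages() can release it, then the unit's address range is torn down. This is a reconstruction; vmalloc_to_page(), vunmap_range_noflush() and pcpu_chunk_addr() are assumed from mm/percpu internals:

static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			/* Remember the mapped page so it can be freed
			 * after the unmap. */
			struct page *page = vmalloc_to_page(
				(void *)pcpu_chunk_addr(chunk, cpu, i));

			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		/* Tear down the unit's mapping; the TLB flush is left to
		 * the caller (pcpu_post_unmap_tlb_flush). */
		vunmap_range_noflush(pcpu_chunk_addr(chunk, cpu, page_start),
				     pcpu_chunk_addr(chunk, cpu, page_end));
	}
}
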
178 * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
193 static int __pcpu_map_pages(unsigned long addr, struct page **pages, in __pcpu_map_pages() argument
197 PAGE_KERNEL, pages, PAGE_SHIFT); in __pcpu_map_pages()
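
The single match above is essentially the whole body of the low-level mapping wrapper; reconstructed, it is roughly:

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	/* Install nr_pages base pages at addr in the kernel page table;
	 * cache/TLB flushing is handled by the callers. */
	return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT),
					PAGE_KERNEL, pages, PAGE_SHIFT);
}
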
201 * pcpu_map_pages - map pages into a pcpu_chunk
203 * @pages: pages array containing pages to be mapped
207 * For each cpu, map pages [@page_start,@page_end) into @chunk. The
215 struct page **pages, int page_start, int page_end) in pcpu_map_pages() argument
222 &pages[pcpu_page_idx(cpu, page_start)], in pcpu_map_pages()
228 pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], in pcpu_map_pages()
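
Sketch of the per-unit mapping loop the matches above belong to (reconstruction; pcpu_set_page_chunk() records the owning chunk on each page for reverse lookup):

static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;

		/* Set up reverse lookup (addr -> chunk) for each page. */
		for (i = page_start; i < page_end; i++)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
	}
	return 0;

err:
	/* Undo the units that were already mapped. */
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		vunmap_range_noflush(pcpu_chunk_addr(chunk, tcpu, page_start),
				     pcpu_chunk_addr(chunk, tcpu, page_end));
	}
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
	return err;
}
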
249 * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
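
The three flush comments above (pre-unmap cache flush, post-unmap TLB flush, post-map cache flush) each document a thin wrapper over the chunk's unit address range. A sketch, assuming pcpu_low_unit_cpu/pcpu_high_unit_cpu bound the lowest and highest unit addresses:

static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	/* Flush the data cache before the mapping disappears. */
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	/* Drop stale TLB entries once the pages are unmapped. */
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	/* Make the new mapping coherent for caches that need it
	 * (a no-op on most architectures). */
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
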
270 * For each cpu, populate and map pages [@page_start,@page_end) into
279 struct page **pages; in pcpu_populate_chunk() local
281 pages = pcpu_get_pages(); in pcpu_populate_chunk()
282 if (!pages) in pcpu_populate_chunk()
285 if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp)) in pcpu_populate_chunk()
288 if (pcpu_map_pages(chunk, pages, page_start, page_end)) { in pcpu_populate_chunk()
289 pcpu_free_pages(chunk, pages, page_start, page_end); in pcpu_populate_chunk()
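
The populate path stitches the pieces together: grab the scratch array, allocate, map, then flush. Sketched from the matches above (reconstruction):

static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	struct page **pages;

	pages = pcpu_get_pages();
	if (!pages)
		return -ENOMEM;

	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
		return -ENOMEM;

	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
		/* Mapping failed: give the freshly allocated pages back. */
		pcpu_free_pages(chunk, pages, page_start, page_end);
		return -ENOMEM;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	return 0;
}
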
303 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
315 struct page **pages; in pcpu_depopulate_chunk() local
319 * successful population attempt so the temp pages array must in pcpu_depopulate_chunk()
322 pages = pcpu_get_pages(); in pcpu_depopulate_chunk()
323 BUG_ON(!pages); in pcpu_depopulate_chunk()
328 pcpu_unmap_pages(chunk, pages, page_start, page_end); in pcpu_depopulate_chunk()
330 pcpu_free_pages(chunk, pages, page_start, page_end); in pcpu_depopulate_chunk()
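
And the reverse path, sketched from the matches above (reconstruction; the exact placement of the TLB flush varies across kernel versions, so it is shown inline here):

static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* Flush caches, unmap, flush TLBs, then free the pages. */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);

	pcpu_free_pages(chunk, pages, page_start, page_end);
}
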
402 * the to_depopulate list. If at least 1/4 of its pages are empty AND in pcpu_should_reclaim_chunk()
403 * there is no system-wide shortage of empty pages aside from this in pcpu_should_reclaim_chunk()
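
The two comment lines above describe the reclaim heuristic in pcpu_should_reclaim_chunk(); the check it documents looks roughly like the following (reconstruction; pcpu_nr_empty_pop_pages and PCPU_EMPTY_POP_PAGES_HIGH are percpu-internal counters/thresholds):

static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
{
	/* Never reclaim the first or the reserved chunk. */
	if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk)
		return false;

	/* Reclaim an already-isolated chunk that still holds empty
	 * populated pages, or a chunk with at least 1/4 of its pages
	 * empty, provided the rest of the system keeps enough empty
	 * populated pages without it. */
	return ((chunk->isolated && chunk->nr_empty_pop_pages) ||
		(pcpu_nr_empty_pop_pages >
		 (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) &&
		 chunk->nr_empty_pop_pages >= chunk->nr_pages / 4));
}
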