/linux-6.12.1/kernel/module/decompress.c
    19    struct page **new_pages;  in module_extend_max_pages() local
    21    new_pages = kvmalloc_array(info->max_pages + extent,  in module_extend_max_pages()
    23    if (!new_pages)  in module_extend_max_pages()
    26    memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages));  in module_extend_max_pages()
    28    info->pages = new_pages;  in module_extend_max_pages()

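The decompress.c hits (and the firmware_loader hits further down) show the usual grow-the-page-array shape: allocate a larger array, copy the existing page pointers across, leave the tail zeroed, then swap the pointer into place. A minimal userspace sketch of that pattern follows; struct paged_buf and grow_pages() are invented names, and plain calloc()/free() stand in for kvmalloc_array()/kvfree().

#include <stdlib.h>
#include <string.h>

/* Sketch only: a userspace analogue of the grow-and-copy pattern above;
 * the names and types here are made up. */
struct paged_buf {
    void **pages;      /* array of page pointers */
    size_t max_pages;  /* current capacity of the array */
};

static int grow_pages(struct paged_buf *buf, size_t extra)
{
    void **new_pages;

    /* Allocate a larger, zero-filled array of pointers. */
    new_pages = calloc(buf->max_pages + extra, sizeof(*new_pages));
    if (!new_pages)
        return -1;

    /* Carry the existing page pointers over; the new tail stays zeroed. */
    if (buf->pages)
        memcpy(new_pages, buf->pages, buf->max_pages * sizeof(*new_pages));

    /* Swap the arrays and record the new capacity. */
    free(buf->pages);
    buf->pages = new_pages;
    buf->max_pages += extra;
    return 0;
}
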
/linux-6.12.1/drivers/block/drbd/drbd_bitmap.c
    381   struct page **new_pages, *page;  in bm_realloc_pages() local
    397   new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);  in bm_realloc_pages()
    398   if (!new_pages) {  in bm_realloc_pages()
    399   new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);  in bm_realloc_pages()
    400   if (!new_pages)  in bm_realloc_pages()
    406   new_pages[i] = old_pages[i];  in bm_realloc_pages()
    410   bm_free_pages(new_pages + have, i - have);  in bm_realloc_pages()
    411   bm_vk_free(new_pages);  in bm_realloc_pages()
    417   new_pages[i] = page;  in bm_realloc_pages()
    421   new_pages[i] = old_pages[i];  in bm_realloc_pages()
    [all …]

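bm_realloc_pages() adds two wrinkles to the same idea: the new array falls back from kzalloc() to __vmalloc() for large allocations, and when growing, freshly allocated pages are appended behind the copied ones, with a rollback path that frees only what was just allocated. A rough userspace sketch of the grow/shrink/rollback logic, with invented names, plain calloc()/malloc() in place of the kernel allocators, and a fixed 4096-byte page:

#include <stdlib.h>

#define PAGE_BYTES 4096

/* Sketch only: the shape of bm_realloc_pages(), not the real code. */
static void **realloc_page_array(void **old_pages, size_t have, size_t want)
{
    void **new_pages;
    size_t i;

    new_pages = calloc(want, sizeof(*new_pages));
    if (!new_pages)
        return NULL;

    if (want >= have) {
        /* Growing: keep every old page, then allocate the extra ones. */
        for (i = 0; i < have; i++)
            new_pages[i] = old_pages[i];
        for (; i < want; i++) {
            void *page = malloc(PAGE_BYTES);

            if (!page) {
                /* Roll back: free only the pages added just now. */
                while (i-- > have)
                    free(new_pages[i]);
                free(new_pages);
                return NULL;
            }
            new_pages[i] = page;
        }
    } else {
        /* Shrinking: reuse the first 'want' old pages; the caller is
         * expected to release the surplus ones. */
        for (i = 0; i < want; i++)
            new_pages[i] = old_pages[i];
    }
    return new_pages;
}
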
/linux-6.12.1/arch/s390/kvm/pci.c
    207   unsigned long page_limit, cur_pages, new_pages;  in account_mem() local
    213   new_pages = cur_pages + nr_pages;  in account_mem()
    214   if (new_pages > page_limit)  in account_mem()
    217   new_pages) != cur_pages);  in account_mem()

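account_mem() here, like __io_account_mem() in io_uring and incr_user_locked_vm() in iommufd below, is the standard lock-free accounting loop: read the current page count, refuse if adding the new pages would exceed the limit, and retry the compare-and-exchange until no concurrent update slipped in between. A self-contained sketch of the same loop with C11 atomics (the kernel versions use atomic_long_try_cmpxchg() against the per-user locked_vm counter, with the limit derived from RLIMIT_MEMLOCK):

#include <stdatomic.h>
#include <stdbool.h>

/* Sketch only: limit-checked accounting via a compare-and-exchange loop. */
static bool account_pages(atomic_ulong *locked_vm, unsigned long page_limit,
                          unsigned long nr_pages)
{
    unsigned long cur_pages, new_pages;

    cur_pages = atomic_load(locked_vm);
    do {
        new_pages = cur_pages + nr_pages;
        if (new_pages > page_limit)
            return false;   /* would exceed the limit: reject */
        /* On failure, cur_pages is refreshed with the current value
         * and the loop retries against the new count. */
    } while (!atomic_compare_exchange_weak(locked_vm, &cur_pages, new_pages));

    return true;
}
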
/linux-6.12.1/drivers/base/firmware_loader/main.c
    264   struct page **new_pages;  in fw_grow_paged_buf() local
    266   new_pages = kvmalloc_array(new_array_size, sizeof(void *),  in fw_grow_paged_buf()
    268   if (!new_pages)  in fw_grow_paged_buf()
    270   memcpy(new_pages, fw_priv->pages,  in fw_grow_paged_buf()
    272   memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *  in fw_grow_paged_buf()
    275   fw_priv->pages = new_pages;  in fw_grow_paged_buf()

/linux-6.12.1/drivers/virtio/virtio_mem.c
    403   int new_pages = PFN_UP(new_bytes);  in virtio_mem_bbm_bb_states_prepare_next_bb() local
    406   if (vm->bbm.bb_states && old_pages == new_pages)  in virtio_mem_bbm_bb_states_prepare_next_bb()
    409   new_array = vzalloc(new_pages * PAGE_SIZE);  in virtio_mem_bbm_bb_states_prepare_next_bb()
    469   int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);  in virtio_mem_sbm_mb_states_prepare_next_mb() local
    472   if (vm->sbm.mb_states && old_pages == new_pages)  in virtio_mem_sbm_mb_states_prepare_next_mb()
    475   new_array = vzalloc(new_pages * PAGE_SIZE);  in virtio_mem_sbm_mb_states_prepare_next_mb()
    592   int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));  in virtio_mem_sbm_sb_states_prepare_next_mb() local
    595   if (vm->sbm.sb_states && old_pages == new_pages)  in virtio_mem_sbm_sb_states_prepare_next_mb()
    598   new_bitmap = vzalloc(new_pages * PAGE_SIZE);  in virtio_mem_sbm_sb_states_prepare_next_mb()

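All three virtio_mem hits use the same trick: the metadata array is sized in whole pages (PFN_UP of the byte count), so adding a few entries costs nothing while the rounded-up page count stays the same, and only a change in page count triggers a fresh zeroed allocation plus a copy of the old contents. A rough standalone sketch of that check; PAGE_SIZE, PFN_UP and the function name here are simplified assumptions, not the driver's code:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE  4096UL
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

/* Sketch only: reallocate a state array only when the number of backing
 * pages actually changes, mirroring the old_pages == new_pages checks. */
static int states_prepare(unsigned char **states, unsigned long old_bytes,
                          unsigned long new_bytes)
{
    unsigned long old_pages = PFN_UP(old_bytes);
    unsigned long new_pages = PFN_UP(new_bytes);
    unsigned char *new_array;

    /* Still fits in the pages already allocated: nothing to do. */
    if (*states && old_pages == new_pages)
        return 0;

    /* Otherwise allocate a zeroed array for the new page count and
     * carry the old state over. */
    new_array = calloc(new_pages, PAGE_SIZE);
    if (!new_array)
        return -1;
    if (*states)
        memcpy(new_array, *states, old_pages * PAGE_SIZE);

    free(*states);
    *states = new_array;
    return 0;
}
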
/linux-6.12.1/kernel/trace/ring_buffer.c
    522   struct list_head new_pages; /* new pages to add */  member
    2170  INIT_LIST_HEAD(&cpu_buffer->new_pages);  in rb_allocate_cpu_buffer()
    2589  struct list_head *pages = &cpu_buffer->new_pages;  in rb_insert_pages()
    2658  list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,  in rb_insert_pages()
    2756  INIT_LIST_HEAD(&cpu_buffer->new_pages);  in ring_buffer_resize()
    2758  &cpu_buffer->new_pages)) {  in ring_buffer_resize()
    2827  INIT_LIST_HEAD(&cpu_buffer->new_pages);  in ring_buffer_resize()
    2830  &cpu_buffer->new_pages)) {  in ring_buffer_resize()
    2897  if (list_empty(&cpu_buffer->new_pages))  in ring_buffer_resize()
    2900  list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,  in ring_buffer_resize()
    [all …]

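In the ring buffer, new_pages is not an array but a list head: pages for a resize are first allocated onto this staging list, and only once every allocation has succeeded are they spliced into the live per-CPU buffer (or walked with list_for_each_entry_safe and freed on failure). A much-simplified sketch of that staging idea, using a plain singly linked list instead of struct list_head; every name below is invented:

#include <stdlib.h>

#define PAGE_BYTES 4096

struct buffer_page {
    struct buffer_page *next;
    void *data;
};

struct cpu_buffer {
    struct buffer_page *pages;      /* live pages */
    struct buffer_page *new_pages;  /* staged pages waiting to be added */
};

/* Allocate the requested pages onto the staging list; the live buffer
 * is not touched until every allocation has succeeded. */
static int stage_pages(struct cpu_buffer *cpu_buffer, unsigned int nr)
{
    while (nr--) {
        struct buffer_page *bpage = calloc(1, sizeof(*bpage));

        if (!bpage || !(bpage->data = malloc(PAGE_BYTES))) {
            free(bpage);
            return -1;  /* caller frees whatever is already staged */
        }
        bpage->next = cpu_buffer->new_pages;
        cpu_buffer->new_pages = bpage;
    }
    return 0;
}

/* Splice the whole staging list onto the front of the live list. */
static void insert_pages(struct cpu_buffer *cpu_buffer)
{
    struct buffer_page *tail = cpu_buffer->new_pages;

    if (!tail)
        return;
    while (tail->next)
        tail = tail->next;
    tail->next = cpu_buffer->pages;
    cpu_buffer->pages = cpu_buffer->new_pages;
    cpu_buffer->new_pages = NULL;
}
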
/linux-6.12.1/drivers/md/dm-vdo/encodings.c
    450   block_count_t new_pages;  in vdo_compute_new_forest_pages() local
    454   new_pages = level_size;  in vdo_compute_new_forest_pages()
    456   new_pages -= old_sizes->levels[height];  in vdo_compute_new_forest_pages()
    457   total_pages += (new_pages * root_count);  in vdo_compute_new_forest_pages()

/linux-6.12.1/drivers/md/dm-vdo/block-map.c
    2337  static int make_segment(struct forest *old_forest, block_count_t new_pages,  in make_segment() argument
    2359  result = vdo_allocate(new_pages, struct tree_page,  in make_segment()
    2449  block_count_t new_pages;  in make_forest() local
    2455  new_pages = vdo_compute_new_forest_pages(map->root_count, old_boundary,  in make_forest()
    2457  if (new_pages == 0) {  in make_forest()
    2469  result = make_segment(old_forest, new_pages, &new_boundary, forest);  in make_forest()

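The two dm-vdo hits belong together: make_forest() asks vdo_compute_new_forest_pages() how many tree pages a grown forest needs, and the fragment visible in encodings.c counts, per level, only the pages beyond that level's old size, multiplied by the number of root trees. A rough sketch of that arithmetic; the boundary structure and the level-size inputs below are simplified assumptions rather than the real vdo types:

typedef unsigned long block_count_t;

#define FOREST_HEIGHT 5

/* Sketch only: per-level page counts for the old and new forest. */
struct boundary {
    block_count_t levels[FOREST_HEIGHT];
};

/* Count only the pages each level gains over its old size, once for
 * every root tree in the forest. */
static block_count_t compute_new_forest_pages(unsigned int root_count,
                                              const struct boundary *old_sizes,
                                              const struct boundary *new_sizes)
{
    block_count_t total_pages = 0;

    for (unsigned int height = 0; height < FOREST_HEIGHT; height++) {
        block_count_t new_pages = new_sizes->levels[height];

        new_pages -= old_sizes->levels[height];
        total_pages += new_pages * root_count;
    }
    return total_pages;
}
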
/linux-6.12.1/io_uring/rsrc.c
    46    unsigned long page_limit, cur_pages, new_pages;  in __io_account_mem() local
    56    new_pages = cur_pages + nr_pages;  in __io_account_mem()
    57    if (new_pages > page_limit)  in __io_account_mem()
    60    &cur_pages, new_pages));  in __io_account_mem()

/linux-6.12.1/arch/s390/kernel/debug.c
    1308  int rc, new_pages;  in debug_input_pages_fn() local
    1322  new_pages = debug_get_uint(str);  in debug_input_pages_fn()
    1323  if (new_pages < 0) {  in debug_input_pages_fn()
    1327  rc = debug_set_size(id, id->nr_areas, new_pages);  in debug_input_pages_fn()

/linux-6.12.1/drivers/iommu/iommufd/pages.c
    808   unsigned long new_pages;  in incr_user_locked_vm() local
    815   new_pages = cur_pages + npages;  in incr_user_locked_vm()
    816   if (new_pages > lock_limit)  in incr_user_locked_vm()
    819   &cur_pages, new_pages));  in incr_user_locked_vm()

/linux-6.12.1/drivers/gpu/drm/imagination/pvr_free_list.c
    387   resp->new_pages = free_list->current_pages + free_list->ready_pages;  in pvr_free_list_process_grow_req()

/linux-6.12.1/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
    264   OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, new_pages, 8);

/linux-6.12.1/drivers/gpu/drm/imagination/pvr_rogue_fwif.h
    1027  u32 new_pages;  member

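The imagination hits show a different use of the name: new_pages is a plain u32 in a firmware-shared structure (filled in pvr_free_list_process_grow_req() with current_pages + ready_pages), and pvr_rogue_fwif_check.h pins its byte offset with OFFSET_CHECK() so the layout cannot drift from what the firmware expects. The same kind of guard can be written in standard C with offsetof plus a static assertion; the struct contents below are invented, only the idea matches:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch only: a compile-time layout check in the spirit of OFFSET_CHECK().
 * The fields are made up; only new_pages at offset 8 mirrors the hit above. */
struct freelist_gs_data {
    uint32_t freelist_id;   /* offset 0 */
    uint32_t delta_pages;   /* offset 4 */
    uint32_t new_pages;     /* offset 8: part of the firmware ABI */
    uint32_t ready_pages;   /* offset 12 */
};

/* Fails the build if padding or reordering ever moves the field. */
static_assert(offsetof(struct freelist_gs_data, new_pages) == 8,
              "new_pages must stay at byte offset 8");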