/linux-6.12.1/arch/s390/mm/ |
D | vmem.c |
    187  void *new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);  in modify_pte_table() (local)
    189  if (!new_page)  in modify_pte_table()
    191  set_pte(pte, __pte(__pa(new_page) | prot));  in modify_pte_table()
    264  void *new_page;  in modify_pmd_table() (local)
    273  new_page = vmemmap_alloc_block_buf(PMD_SIZE, NUMA_NO_NODE, altmap);  in modify_pmd_table()
    274  if (new_page) {  in modify_pmd_table()
    275  set_pmd(pmd, __pmd(__pa(new_page) | prot));  in modify_pmd_table()
|
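The vmem.c hits above are the vmemmap population pattern: allocate a backing page (honouring any vmem_altmap), then install its physical address into the PTE/PMD being walked. A minimal hedged sketch of the PTE-level step; the function name is hypothetical, and pte, prot and altmap are assumed to come from the caller's page-table walk:

/* Hedged sketch of the modify_pte_table() hits above; not the actual
 * s390 code. 'prot' carries the raw protection bits, as in the excerpt. */
static int populate_one_vmemmap_pte(pte_t *pte, unsigned long prot,
				    struct vmem_altmap *altmap)
{
	void *new_page;

	new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);
	if (!new_page)
		return -ENOMEM;		/* caller unwinds the partial mapping */

	set_pte(pte, __pte(__pa(new_page) | prot));	/* map the backing page */
	return 0;
}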
/linux-6.12.1/fs/f2fs/ |
D | namei.c |
    894   struct page *old_page, *new_page = NULL;  in f2fs_rename() (local)
    977   &new_page);  in f2fs_rename()
    979   if (IS_ERR(new_page))  in f2fs_rename()
    980   err = PTR_ERR(new_page);  in f2fs_rename()
    992   f2fs_set_link(new_dir, new_entry, new_page, old_inode);  in f2fs_rename()
    993   new_page = NULL;  in f2fs_rename()
    1070  f2fs_put_page(new_page, 0);  in f2fs_rename()
    1088  struct page *old_page, *new_page;  in f2fs_cross_rename() (local)
    1123  new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);  in f2fs_cross_rename()
    1125  if (IS_ERR(new_page))  in f2fs_cross_rename()
    [all …]
|
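The namei.c hits show how the rename paths look up the destination entry and fetch the directory page that holds it. A minimal hedged sketch of that lookup pattern, assuming (as the excerpt suggests) that f2fs_find_entry() reports a failed lookup by storing an ERR_PTR value through the page pointer:

	struct page *new_page = NULL;
	struct f2fs_dir_entry *new_entry;

	/* Look up the destination name; the directory page containing the
	 * entry comes back through new_page. */
	new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
	if (!new_entry && IS_ERR(new_page))
		return PTR_ERR(new_page);	/* lookup error, not just "absent" */

	if (new_entry) {
		/* Destination exists: repoint it at old_inode (line 992).  The
		 * page is consumed here, hence the NULLing on line 993. */
		f2fs_set_link(new_dir, new_entry, new_page, old_inode);
		new_page = NULL;
	}

	/* ...later, f2fs_put_page(new_page, 0) releases any still-held page... */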
/linux-6.12.1/kernel/events/ |
D | uprobes.c |
    162  struct page *old_page, struct page *new_page)  in __replace_page() (argument)
    174  if (new_page) {  in __replace_page()
    175  new_folio = page_folio(new_page);  in __replace_page()
    190  if (new_page) {  in __replace_page()
    205  if (new_page)  in __replace_page()
    207  mk_pte(new_page, vma->vm_page_prot));  in __replace_page()
    471  struct page *old_page, *new_page;  in uprobe_write_opcode() (local)
    516  new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);  in uprobe_write_opcode()
    517  if (!new_page)  in uprobe_write_opcode()
    520  __SetPageUptodate(new_page);  in uprobe_write_opcode()
    [all …]
|
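uprobe_write_opcode() shows the standard setup for replacing an anonymous page in a probed task: allocate the replacement inside the target VMA, mark it up to date before it ever becomes visible, copy the original contents, then let __replace_page() swap the mappings. A minimal hedged sketch of the allocation step; vma, vaddr and old_page are assumed to be provided by the caller, and copy_highpage() is a generic helper, not taken from the excerpt:

	struct page *new_page;

	/* Allocate the replacement page in the probed task's address space. */
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		return -ENOMEM;

	__SetPageUptodate(new_page);		/* valid before it can be mapped */
	copy_highpage(new_page, old_page);	/* start from the original contents */

	/* ...patch the breakpoint opcode into new_page, then swap the pages
	 * with __replace_page(vma, vaddr, old_page, new_page)... */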
/linux-6.12.1/fs/ubifs/ |
D | budget.c |
    363  znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) +  in calc_idx_growth()
    380  if (req->new_page)  in calc_data_growth()
    426  ubifs_assert(c, req->new_page <= 1);  in ubifs_budget_space()
    513  ubifs_assert(c, req->new_page <= 1);  in ubifs_release_budget()
|
D | file.c |
    194   struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };  in release_new_page_budget()
    219   struct ubifs_budget_req req = { .new_page = 1 };  in write_begin_slow()
    359   req.new_page = 1;  in allocate_budget()
    1517  struct ubifs_budget_req req = { .new_page = 1 };  in ubifs_vm_page_mkwrite()
|
D | ubifs.h |
    890  unsigned int new_page:1;  (member)
    900  unsigned int new_page;  (member)
|
D | debug.c |
    591  req->new_page, req->dirtied_page);  in ubifs_dump_budget_req()
|
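Taken together, the ubifs hits show the budgeting convention for page writes: new_page is a single-bit flag in struct ubifs_budget_req (ubifs.h, line 890), the request is charged with ubifs_budget_space() before a page may be dirtied, and it is handed back with ubifs_release_budget() if the write never happens. A minimal hedged sketch of that pairing, following the file.c hits; c is the mounted filesystem's ubifs_info, and page_was_dirtied is a stand-in for the outcome of the actual write:

	struct ubifs_budget_req req = { .new_page = 1 };	/* one new data page */
	int err;

	err = ubifs_budget_space(c, &req);	/* reserve space; may force a commit */
	if (err)
		return err;			/* typically -ENOSPC */

	/* ...dirty the page under this budget... */

	if (!page_was_dirtied)
		ubifs_release_budget(c, &req);	/* nothing written, return the budget */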
/linux-6.12.1/drivers/net/ethernet/ti/ |
D | cpsw.c |
    346  struct page *new_page, *page = token;  in cpsw_rx_handler() (local)
    378  new_page = page;  in cpsw_rx_handler()
    387  new_page = page_pool_dev_alloc_pages(pool);  in cpsw_rx_handler()
    388  if (unlikely(!new_page)) {  in cpsw_rx_handler()
    389  new_page = page;  in cpsw_rx_handler()
    441  xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;  in cpsw_rx_handler()
    445  dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;  in cpsw_rx_handler()
    446  ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,  in cpsw_rx_handler()
    450  page_pool_recycle_direct(pool, new_page);  in cpsw_rx_handler()
|
D | cpsw_new.c |
    285  struct page *new_page, *page = token;  in cpsw_rx_handler() (local)
    323  new_page = page;  in cpsw_rx_handler()
    332  new_page = page_pool_dev_alloc_pages(pool);  in cpsw_rx_handler()
    333  if (unlikely(!new_page)) {  in cpsw_rx_handler()
    334  new_page = page;  in cpsw_rx_handler()
    386  xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;  in cpsw_rx_handler()
    390  dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;  in cpsw_rx_handler()
    391  ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,  in cpsw_rx_handler()
    395  page_pool_recycle_direct(pool, new_page);  in cpsw_rx_handler()
|
D | am65-cpsw-nuss.c |
    1149  struct page *page, *new_page;  in am65_cpsw_nuss_rx_packets() (local)
    1200  new_page = page;  in am65_cpsw_nuss_rx_packets()
    1236  new_page = page_pool_dev_alloc_pages(flow->page_pool);  in am65_cpsw_nuss_rx_packets()
    1237  if (unlikely(!new_page)) {  in am65_cpsw_nuss_rx_packets()
    1243  am65_cpsw_put_page(flow, new_page, true);  in am65_cpsw_nuss_rx_packets()
    1249  ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);  in am65_cpsw_nuss_rx_packets()
    1251  am65_cpsw_put_page(flow, new_page, true);  in am65_cpsw_nuss_rx_packets()
|
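All three TI drivers follow the same page_pool refill discipline on the RX path: allocate a replacement page before the received one is handed up the stack, fall back to reusing the old page (and dropping the frame) if allocation fails, and recycle the replacement if re-arming the DMA channel fails. A driver-agnostic hedged sketch of that pattern; pool, page and RX_HEADROOM are assumptions, and hw_queue_rx_buffer() is a hypothetical stand-in for cpdma_chan_submit_mapped()/am65_cpsw_nuss_rx_push():

	struct page *new_page;
	dma_addr_t dma;
	int ret;

	new_page = page_pool_dev_alloc_pages(pool);	/* replacement RX buffer */
	if (unlikely(!new_page)) {
		new_page = page;	/* out of pages: drop this frame, reuse the old buffer */
		/* the skb/XDP handling of 'page' is skipped in this case */
	}

	/* Re-arm the descriptor with the (new or reused) page. */
	dma = page_pool_get_dma_addr(new_page) + RX_HEADROOM;
	ret = hw_queue_rx_buffer(new_page, dma);
	if (ret < 0)
		page_pool_recycle_direct(pool, new_page);	/* hand it straight back */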
/linux-6.12.1/drivers/tty/serial/ |
D | icom.c |
    607  unsigned char *new_page = NULL;  in load_code() (local)
    680  new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL);  in load_code()
    682  if (!new_page) {  in load_code()
    702  new_page[index] = fw->data[index];  in load_code()
    759  if (new_page != NULL)  in load_code()
    760  dma_free_coherent(&dev->dev, 4096, new_page, temp_pci);  in load_code()
|
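The icom.c hits are the classic coherent-DMA firmware staging pattern: allocate one page of coherent memory, copy the firmware image into it byte by byte, and free it with the matching call once the adapter has loaded it. A small hedged sketch of that pairing (dev and fw as in the excerpt; error handling around the actual load is omitted):

	unsigned char *new_page;
	dma_addr_t temp_pci;
	int index;

	/* One page visible to both the CPU and the adapter. */
	new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL);
	if (!new_page)
		return -ENOMEM;

	for (index = 0; index < fw->size; index++)
		new_page[index] = fw->data[index];	/* stage the firmware bytes */

	/* ...point the adapter at temp_pci and trigger the load... */

	dma_free_coherent(&dev->dev, 4096, new_page, temp_pci);	/* matching free */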
/linux-6.12.1/Documentation/networking/ |
D | page_pool.rst |
    173  new_page = page_pool_dev_alloc_pages(page_pool);
|
/linux-6.12.1/drivers/net/wireless/intel/iwlwifi/fw/ |
D | dbg.c |
    568  struct page *new_page;  in alloc_sgtable() (local)
    579  new_page = alloc_page(GFP_KERNEL);  in alloc_sgtable()
    580  if (!new_page) {  in alloc_sgtable()
    584  new_page = sg_page(iter);  in alloc_sgtable()
    585  if (new_page)  in alloc_sgtable()
    586  __free_page(new_page);  in alloc_sgtable()
    593  sg_set_page(iter, new_page, alloc_size, 0);  in alloc_sgtable()
|
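alloc_sgtable() backs every entry of a freshly allocated scatterlist with a page and, if an allocation fails partway, walks the table again to free whatever was already attached. A hedged sketch of that fill-or-unwind loop with standard scatterlist helpers; table and the per-entry alloc_size are assumptions, and sg_alloc_table() is taken to have zeroed the entries, so sg_page() is NULL for the ones not yet filled:

	struct scatterlist *iter;
	struct page *new_page;
	int i;

	for_each_sg(table->sgl, iter, table->nents, i) {
		new_page = alloc_page(GFP_KERNEL);
		if (!new_page) {
			/* Unwind: free every page already attached. */
			for_each_sg(table->sgl, iter, table->nents, i) {
				new_page = sg_page(iter);
				if (new_page)
					__free_page(new_page);
			}
			return NULL;
		}
		sg_set_page(iter, new_page, alloc_size, 0);	/* attach to this entry */
	}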
/linux-6.12.1/drivers/staging/rts5208/ |
D | xd.c |
    1095  u32 old_page, new_page;  in xd_copy_page() (local)
    1109  new_page = (new_blk << xd_card->block_shift) + start_page;  in xd_copy_page()
    1178  xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);  in xd_copy_page()
    1201  new_page++;  in xd_copy_page()
|
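In xd_copy_page() the flash page address is plain arithmetic: the destination block number shifted left by the card's pages-per-block shift, plus the starting page offset within that block. A worked hedged example of the computation (the concrete numbers are illustrative, not taken from the driver):

	/* 32 pages per block => block_shift == 5; copy block 10 from page 3. */
	u32 new_blk = 10, start_page = 3, block_shift = 5;
	u32 new_page = (new_blk << block_shift) + start_page;	/* 320 + 3 = 323 */

	new_page++;	/* step to the next physical page, as line 1201 does */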
/linux-6.12.1/drivers/net/vmxnet3/ |
D | vmxnet3_drv.c |
    1588  struct page *new_page = NULL;  in vmxnet3_rq_rx_complete() (local)
    1793  new_page = alloc_page(GFP_ATOMIC);  in vmxnet3_rq_rx_complete()
    1799  if (unlikely(!new_page)) {  in vmxnet3_rq_rx_complete()
    1807  new_page,  in vmxnet3_rq_rx_complete()
    1812  put_page(new_page);  in vmxnet3_rq_rx_complete()
    1827  rbi->page = new_page;  in vmxnet3_rq_rx_complete()
|
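vmxnet3 refills its RX ring straight from the page allocator rather than a page pool: a replacement page is allocated with GFP_ATOMIC in the completion path, DMA-mapped, and only then swapped into the ring's buffer info; if either step fails the old buffer is kept and the frame is dropped. A hedged sketch of that replace-only-on-success pattern (pdev, rbi and the dma_addr field are assumptions about the surrounding driver state):

	struct page *new_page;
	dma_addr_t new_dma;

	new_page = alloc_page(GFP_ATOMIC);	/* completion path: must not sleep */
	if (unlikely(!new_page))
		return;				/* keep the old buffer, drop the frame */

	new_dma = dma_map_page(&pdev->dev, new_page, 0, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, new_dma)) {
		put_page(new_page);		/* mapping failed: release the new page */
		return;
	}

	/* Only now does the ring take ownership of the replacement page. */
	rbi->page = new_page;
	rbi->dma_addr = new_dma;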
/linux-6.12.1/drivers/net/ethernet/freescale/ |
D | fec_main.c |
    1597  struct page *new_page;  in fec_enet_update_cbd() (local)
    1600  new_page = page_pool_dev_alloc_pages(rxq->page_pool);  in fec_enet_update_cbd()
    1601  WARN_ON(!new_page);  in fec_enet_update_cbd()
    1602  rxq->rx_skb_info[index].page = new_page;  in fec_enet_update_cbd()
    1605  phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;  in fec_enet_update_cbd()
|