/linux/drivers/gpu/drm/xe/display/
  intel_bo.c
    104   unsigned int new_page;  in xe_panic_page_set_pixel() (local)
    112   new_page = offset >> PAGE_SHIFT;  in xe_panic_page_set_pixel()
    114   if (new_page != panic->page) {  in xe_panic_page_set_pixel()
    116   panic->page = new_page;  in xe_panic_page_set_pixel()

/linux/arch/s390/mm/
  vmem.c
    184   void *new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);  in modify_pte_table() (local)
    186   if (!new_page)  in modify_pte_table()
    188   set_pte(pte, __pte(__pa(new_page) | prot));  in modify_pte_table()
    258   void *new_page;  in modify_pmd_table() (local)
    267   new_page = vmemmap_alloc_block_buf(PMD_SIZE, NUMA_NO_NODE, altmap);  in modify_pmd_table()
    268   if (new_page) {  in modify_pmd_table()
    269   set_pmd(pmd, __pmd(__pa(new_page) | prot));  in modify_pmd_table()

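The s390 hits above share one motif: allocate a backing block for the vmemmap, then install its physical address into the page-table entry. A minimal sketch of the PTE-level step, assuming the caller loops over the range and unwinds on failure; the helper name and the pgprot_t parameter are illustrative, not the kernel's exact signatures:

#include <linux/mm.h>
#include <linux/memory_hotplug.h>

/* Hypothetical helper condensing vmem.c:184-188 above: back one
 * vmemmap PTE with a freshly allocated page. */
static int map_one_vmemmap_pte(pte_t *pte, pgprot_t prot,
			       struct vmem_altmap *altmap)
{
	/* One PAGE_SIZE block, drawn from the altmap if one is provided. */
	void *new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);

	if (!new_page)
		return -ENOMEM;		/* caller tears down partial mappings */

	/* Point the PTE at the new block's physical address. */
	set_pte(pte, __pte(__pa(new_page) | pgprot_val(prot)));
	return 0;
}

The PMD path (vmem.c:267-269) has the same shape, just with PMD_SIZE and set_pmd().
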
/linux/drivers/gpu/drm/
  drm_panic.c
    220   unsigned int new_page;  in drm_panic_blit_page() (local)
    224   new_page = offset >> PAGE_SHIFT;  in drm_panic_blit_page()
    226   if (new_page != page) {  in drm_panic_blit_page()
    227   if (!pages[new_page])  in drm_panic_blit_page()
    231   page = new_page;  in drm_panic_blit_page()
    312   unsigned int new_page;  in drm_panic_fill_page() (local)
    316   new_page = offset >> PAGE_SHIFT;  in drm_panic_fill_page()
    318   if (new_page != page) {  in drm_panic_fill_page()
    321   page = new_page;  in drm_panic_fill_page()

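The intel_bo.c and drm_panic.c hits are the same idiom: a linear framebuffer offset is shifted down to a page index, and the kernel mapping is switched only when that index changes. A condensed sketch, assuming a plain kmap_local_page() mapping (the real panic handlers go through their own mapping helpers, and fill_pages() is a hypothetical name):

#include <linux/highmem.h>
#include <linux/types.h>

static void fill_pages(struct page **pages, unsigned int npages,
		       size_t start, size_t len, u8 value)
{
	unsigned int page = ~0U;	/* no page mapped yet */
	u8 *vaddr = NULL;
	size_t offset;

	for (offset = start; offset < start + len; offset++) {
		unsigned int new_page = offset >> PAGE_SHIFT;

		if (new_page != page) {		/* crossed a page boundary */
			if (vaddr)
				kunmap_local(vaddr);
			if (new_page >= npages || !pages[new_page])
				return;		/* hole in the backing store */
			page = new_page;
			vaddr = kmap_local_page(pages[page]);
		}
		vaddr[offset & ~PAGE_MASK] = value;	/* in-page offset */
	}
	if (vaddr)
		kunmap_local(vaddr);
}
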
/linux/fs/ubifs/
  budget.c
    363   znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) +  in calc_idx_growth()
    380   if (req->new_page)  in calc_data_growth()
    426   ubifs_assert(c, req->new_page <= 1);  in ubifs_budget_space()
    513   ubifs_assert(c, req->new_page <= 1);  in ubifs_release_budget()
  file.c
    186   struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };  in release_new_page_budget()
    211   struct ubifs_budget_req req = { .new_page = 1 };  in write_begin_slow()
    351   req.new_page = 1;  in allocate_budget()
    1499  struct ubifs_budget_req req = { .new_page = 1 };  in ubifs_vm_page_mkwrite()
  ubifs.h
    850   * @new_page: non-zero if the operation adds a new page
    881   unsigned int new_page:1;  (member)
    891   unsigned int new_page;  (member)
  debug.c
    591   req->new_page, req->dirtied_page);  in ubifs_dump_budget_req()

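The budget.c/file.c/ubifs.h hits trace one handshake: a writer declares up front that it will add a new page (the 1-bit new_page flag at ubifs.h:881), budget.c folds that into index and data growth, and the budget is handed back if the write never happens. A minimal sketch, assuming c is the mounted ubifs_info and do_the_write() is a hypothetical stand-in for the actual page-dirtying path:

#include "ubifs.h"

static int do_the_write(struct ubifs_info *c);	/* hypothetical payload */

static int budget_one_new_page(struct ubifs_info *c)
{
	/* Declare intent: this operation adds exactly one new page. */
	struct ubifs_budget_req req = { .new_page = 1 };
	int err;

	err = ubifs_budget_space(c, &req);	/* -ENOSPC if it cannot fit */
	if (err)
		return err;

	err = do_the_write(c);
	if (err)
		ubifs_release_budget(c, &req);	/* give the space back */
	return err;
}
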
/linux/drivers/net/ethernet/ti/
  cpsw.c
    346   struct page *new_page, *page = token;  in cpsw_rx_handler() (local)
    379   new_page = page;  in cpsw_rx_handler()
    388   new_page = page_pool_dev_alloc_pages(pool);  in cpsw_rx_handler()
    389   if (unlikely(!new_page)) {  in cpsw_rx_handler()
    390   new_page = page;  in cpsw_rx_handler()
    445   xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;  in cpsw_rx_handler()
    449   dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;  in cpsw_rx_handler()
    450   ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,  in cpsw_rx_handler()
    454   page_pool_recycle_direct(pool, new_page);  in cpsw_rx_handler()
  cpsw_new.c
    285   struct page *new_page, *page = token;  in cpsw_rx_handler() (local)
    324   new_page = page;  in cpsw_rx_handler()
    333   new_page = page_pool_dev_alloc_pages(pool);  in cpsw_rx_handler()
    334   if (unlikely(!new_page)) {  in cpsw_rx_handler()
    335   new_page = page;  in cpsw_rx_handler()
    390   xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;  in cpsw_rx_handler()
    394   dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;  in cpsw_rx_handler()
    395   ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,  in cpsw_rx_handler()
    399   page_pool_recycle_direct(pool, new_page);  in cpsw_rx_handler()
  am65-cpsw-nuss.c
    1278  struct page *page, *new_page;  in am65_cpsw_nuss_rx_packets() (local)
    1347  new_page = page;  in am65_cpsw_nuss_rx_packets()
    1364  new_page = page_pool_dev_alloc_pages(flow->page_pool);  in am65_cpsw_nuss_rx_packets()
    1365  if (unlikely(!new_page)) {  in am65_cpsw_nuss_rx_packets()
    1371  am65_cpsw_put_page(flow, new_page, true);  in am65_cpsw_nuss_rx_packets()
    1377  ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);  in am65_cpsw_nuss_rx_packets()
    1379  am65_cpsw_put_page(flow, new_page, true);  in am65_cpsw_nuss_rx_packets()

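cpsw.c, cpsw_new.c, and am65-cpsw-nuss.c (and the icssg hits below) all implement the same refill discipline: the received page is handed to the stack only once a replacement has been allocated; if allocation fails, the old page is requeued and the frame is dropped. A condensed sketch, where pass_page_to_stack() and queue_rx_desc() are hypothetical stand-ins for the skb-build and descriptor-push steps (cpdma_chan_submit_mapped() etc.):

#include <net/page_pool/helpers.h>

void pass_page_to_stack(struct page *page);		/* hypothetical */
int queue_rx_desc(struct page *page, dma_addr_t dma);	/* hypothetical */

static void rx_refill(struct page_pool *pool, struct page *page)
{
	struct page *new_page = page_pool_dev_alloc_pages(pool);

	if (unlikely(!new_page)) {
		new_page = page;	/* drop the frame, requeue its page */
	} else {
		pass_page_to_stack(page);	/* old page now owned by the skb */
	}

	/* Requeue either the fresh page or the reused one. */
	if (queue_rx_desc(new_page, page_pool_get_dma_addr(new_page)) < 0)
		page_pool_recycle_direct(pool, new_page);
}
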
/linux/drivers/net/ethernet/ti/icssg/
  icssg_prueth_sr1.c
    278   struct page *page, *new_page;  in prueth_process_rx_mgm() (local)
    309   new_page = page_pool_dev_alloc_pages(rx_chn->pg_pool);  in prueth_process_rx_mgm()
    313   if (!new_page) {  in prueth_process_rx_mgm()
    317   new_page = page;  in prueth_process_rx_mgm()
    322   ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,  in prueth_process_rx_mgm()
    325   page_pool_recycle_direct(rx_chn->pg_pool, new_page);  in prueth_process_rx_mgm()
  icssg_common.c
    705   struct page *page, *new_page;  in emac_rx_packet() (local)
    747   new_page = page_pool_dev_alloc_pages(pool);  in emac_rx_packet()
    748   if (unlikely(!new_page)) {  in emac_rx_packet()
    749   new_page = page;  in emac_rx_packet()
    796   ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,  in emac_rx_packet()
    799   page_pool_recycle_direct(pool, new_page);  in emac_rx_packet()

/linux/Documentation/networking/
  page_pool.rst
    173   new_page = page_pool_dev_alloc_pages(page_pool);

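For context, the documented allocation call sits on top of a pool created with page_pool_create(); a minimal sketch with illustrative parameter values (pool size and DMA direction are driver-specific):

#include <net/page_pool/helpers.h>
#include <linux/dma-mapping.h>

static struct page_pool *create_rx_pool(struct device *dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP,	/* pool maps pages for DMA */
		.order		= 0,			/* single pages */
		.pool_size	= 256,			/* roughly the rx ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}

Pages drawn with page_pool_dev_alloc_pages() from such a pool come back pre-mapped, which is why the drivers above can call page_pool_get_dma_addr() directly.
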
/linux/drivers/net/vmxnet3/
  vmxnet3_drv.c
    1624  struct page *new_page = NULL;  in vmxnet3_rq_rx_complete() (local)
    1829  new_page = alloc_page(GFP_ATOMIC);  in vmxnet3_rq_rx_complete()
    1835  if (unlikely(!new_page)) {  in vmxnet3_rq_rx_complete()
    1843  new_page,  in vmxnet3_rq_rx_complete()
    1848  put_page(new_page);  in vmxnet3_rq_rx_complete()
    1863  rbi->page = new_page;  in vmxnet3_rq_rx_complete()

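The vmxnet3 hits show the non-page-pool variant of the same swap: allocate and DMA-map the replacement before surrendering the old page, so any failure just drops the packet and leaves the ring entry usable. A simplified sketch, where rx_buf_info is a stand-in for the driver's vmxnet3_rx_buf_info bookkeeping:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct rx_buf_info {		/* stand-in for vmxnet3_rx_buf_info */
	struct page *page;
	dma_addr_t dma_addr;
};

static bool replace_rx_page(struct device *dev, struct rx_buf_info *rbi)
{
	struct page *new_page = alloc_page(GFP_ATOMIC);
	dma_addr_t dma;

	if (unlikely(!new_page))
		return false;			/* drop packet, keep old page */

	dma = dma_map_page(dev, new_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		put_page(new_page);		/* undo the allocation */
		return false;
	}

	rbi->page = new_page;			/* old page already went to the skb */
	rbi->dma_addr = dma;
	return true;
}
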
/linux/drivers/net/ethernet/freescale/
  fec_main.c
    1637  struct page *new_page;  in fec_enet_update_cbd() (local)
    1640  new_page = page_pool_dev_alloc_pages(rxq->page_pool);  in fec_enet_update_cbd()
    1641  if (unlikely(!new_page))  in fec_enet_update_cbd()
    1644  rxq->rx_skb_info[index].page = new_page;  in fec_enet_update_cbd()
    1646  phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;  in fec_enet_update_cbd()