Searched refs:__GFP_IO (Results 1 – 13 of 13) sorted by relevance
72 #define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ macro
105 #define GFP_NOFS (__GFP_WAIT | __GFP_IO)
106 #define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
107 #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
109 #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
110 #define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
112 #define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
115 #define GFP_IOFS (__GFP_IO | __GFP_FS)
130 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
135 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
24 {(unsigned long)__GFP_IO, "GFP_IO"}, \
68 #define NILFS_MDT_GFP (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
4 __GFP_IO allocations.
11 __GFP_IO allocation requests are made to prevent file system deadlocks.
207 gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO); in mempool_alloc()
645 int may_perform_io = gfp_mask & __GFP_IO; in try_to_compact_pages()
1492 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) in throttle_vm_writeout()
812 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
847 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list()
1312 if (!(gfp & __GFP_IO)) { in __cleanup_old_buffer()
1351 if (sc->gfp_mask & __GFP_IO) in shrink()
609 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in do_loop_switch()
877 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_set_fd()
210 gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); in bvec_alloc_bs()
1428 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; in qib_setup_eagerbufs()
903 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; in ipath_create_user_egr()