/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool/helpers.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for recycling pages or page fragments
 * used by skb packets and xdp frames.
 *
 * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(),
 * which allocates memory with or without page splitting depending on the
 * requested memory size; a usage sketch follows this comment block.
 *
 * If the driver knows that it always requires full pages or its allocations are
 * always smaller than half a page, it can use one of the more specific API
 * calls:
 *
 * 1. page_pool_alloc_pages(): allocate memory without page splitting when the
 * driver knows that the memory it needs is always larger than half of the page
 * allocated from the page pool. There is no cache line dirtying for
 * 'struct page' when a page is recycled back to the page pool.
 *
 * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
 * driver knows that the memory it needs is always smaller than or equal to
 * half of the page allocated from the page pool. Page splitting enables memory
 * saving and thus avoids TLB/cache misses for data access, but there is also
 * some cost to implementing page splitting, mainly some cache line
 * dirtying/bouncing for 'struct page' and atomic operations on
 * page->pp_ref_count.
 *
 * The API keeps track of in-flight pages. In order to let API users know when
 * it is safe to free a page_pool object, the API users must call
 * page_pool_put_page() or page_pool_free_va() to release each page back to the
 * page_pool, or attach the page to a page_pool-aware object like an skb marked
 * with skb_mark_for_recycle().
 *
 * page_pool_put_page() may be called multiple times on the same page if a page
 * is split into multiple fragments. For the last fragment, it will either
 * recycle the page, or in case of page->_refcount > 1, it will release the DMA
 * mapping and in-flight state accounting.
 *
 * dma_sync_single_range_for_device() is only called for the last fragment when
 * page_pool is created with the PP_FLAG_DMA_SYNC_DEV flag, so it depends on the
 * last freed fragment to do the sync_for_device operation for all fragments in
 * the same page when a page is split. The API user must set up pool->p.max_len
 * and pool->p.offset correctly and ensure that page_pool_put_page() is called
 * with dma_sync_size being -1 for the fragment API.
 */
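
/* Usage sketch (illustrative only, not part of the API; the "rxq" structure
 * and "rx_buf_len" names below are hypothetical driver-side examples):
 *
 *	unsigned int offset, size = rx_buf_len;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc(rxq->page_pool, &offset, &size);
 *	if (unlikely(!page))
 *		return -ENOMEM;
 *
 * The driver then posts page_pool_get_dma_addr(page) + offset to its RX ring.
 * When the buffer is consumed and has not been attached to an skb marked for
 * recycling, the reference is released back to the pool:
 *
 *	page_pool_put_full_page(rxq->page_pool, page, false);
 */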
#ifndef _NET_PAGE_POOL_HELPERS_H
#define _NET_PAGE_POOL_HELPERS_H

#include <linux/dma-mapping.h>

#include <net/page_pool/types.h>
#include <net/net_debug.h>
#include <net/netmem.h>

#ifdef CONFIG_PAGE_POOL_STATS
/* Deprecated driver-facing API, use netlink instead */
int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats);

bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats);
#else
static inline int page_pool_ethtool_stats_get_count(void)
{
	return 0;
}

static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	return data;
}

static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
	return data;
}
#endif

/**
 * page_pool_dev_alloc_pages() - allocate a page.
 * @pool:	pool from which to allocate
 *
 * Get a page from the page allocator or page_pool caches.
 */
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

/**
 * page_pool_dev_alloc_frag() - allocate a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: requested size
 *
 * Get a page fragment from the page allocator or page_pool caches.
 *
 * Return: allocated page fragment, otherwise return NULL.
 */
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}
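
/* Illustrative sketch of the fragment API, assuming hypothetical driver-side
 * names "rxq" and "buf_len", where buf_len is known to be at most half a page:
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_frag(rxq->page_pool, &offset, buf_len);
 *	if (unlikely(!page))
 *		return -ENOMEM;
 *
 * The usable buffer starts at @offset within @page; each fragment must later
 * be released with its own page_pool_put_page()/page_pool_put_full_page()
 * call.
 */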

static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool,
						unsigned int *offset,
						unsigned int *size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	netmem_ref netmem;

	if ((*size << 1) > max_size) {
		*size = max_size;
		*offset = 0;
		return page_pool_alloc_netmems(pool, gfp);
	}

	netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp);
	if (unlikely(!netmem))
		return 0;

	/* There is very likely not enough space for another fragment, so append
	 * the remaining size to the current fragment to avoid the truesize
	 * underestimation problem.
	 */
	if (pool->frag_offset + *size > max_size) {
		*size = max_size - *offset;
		pool->frag_offset = max_size;
	}

	return netmem;
}

static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int *size)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;

	return page_pool_alloc_netmem(pool, offset, size, gfp);
}

static inline netmem_ref page_pool_dev_alloc_netmems(struct page_pool *pool)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;

	return page_pool_alloc_netmems(pool, gfp);
}

static inline struct page *page_pool_alloc(struct page_pool *pool,
					   unsigned int *offset,
					   unsigned int *size, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp));
}

/**
 * page_pool_dev_alloc() - allocate a page or a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: in as the requested size, out as the allocated size
 *
 * Get a page or a page fragment from the page allocator or page_pool caches
 * depending on the requested size, in order to minimize both memory
 * utilization and the performance penalty.
 *
 * Return: allocated page or page fragment, otherwise return NULL.
 */
static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
					       unsigned int *offset,
					       unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc(pool, offset, size, gfp);
}
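
/* Illustrative sketch: @size is in/out here, so the caller learns how much
 * memory was actually handed out (hypothetical names, requesting 2KB):
 *
 *	unsigned int offset, size = 2048;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc(pool, &offset, &size);
 *	if (!page)
 *		return NULL;
 *
 * On return, @size may be larger than requested (e.g. rounded up to the rest
 * of the page when another fragment would not fit), which helps avoid skb
 * truesize underestimation.
 */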

static inline void *page_pool_alloc_va(struct page_pool *pool,
				       unsigned int *size, gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	/* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
	page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
	if (unlikely(!page))
		return NULL;

	return page_address(page) + offset;
}

/**
 * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its
 *			      va.
 * @pool: pool from which to allocate
 * @size: in as the requested size, out as the allocated size
 *
 * This is just a thin wrapper around the page_pool_alloc() API, and it returns
 * the va of the allocated page or page fragment.
 *
 * Return: the va for the allocated page or page fragment, otherwise return NULL.
 */
static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
					   unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_va(pool, size, gfp);
}

/**
 * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
 * @pool:	pool from which page was allocated
 *
 * Get the stored dma direction. A driver might decide to store this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static inline enum dma_data_direction
page_pool_get_dma_dir(const struct page_pool *pool)
{
	return pool->p.dma_dir;
}
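
/* As suggested above, a driver may cache the direction once at ring setup
 * time (sketch with a hypothetical per-ring structure):
 *
 *	ring->dma_dir = page_pool_get_dma_dir(ring->page_pool);
 *
 * and then use ring->dma_dir in its own dma_sync_*() calls instead of
 * re-reading it from the pool for every packet.
 */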

static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
{
	atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr);
}

/**
 * page_pool_fragment_page() - split a fresh page into fragments
 * @page:	page to split
 * @nr:		references to set
 *
 * pp_ref_count represents the number of outstanding references to the page,
 * which will be freed using page_pool APIs (rather than page allocator APIs
 * like put_page()). Such references are usually held by page_pool-aware
 * objects like skbs marked for page pool recycling.
 *
 * This helper allows the caller to take (set) multiple references to a
 * freshly allocated page. The page must be freshly allocated (have a
 * pp_ref_count of 1). This is commonly done by drivers and
 * "fragment allocators" to save atomic operations - either when they know
 * upfront how many references they will need; or to take MAX references and
 * return the unused ones with a single atomic dec(), instead of performing
 * multiple atomic inc() operations.
 */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
	page_pool_fragment_netmem(page_to_netmem(page), nr);
}
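
/* Sketch of the "take MAX references up front" pattern described above
 * (hypothetical MAX_FRAGS, 'used' and fill_rx_descriptors() names;
 * page_pool_unref_page() is declared further down in this header):
 *
 *	page_pool_fragment_page(page, MAX_FRAGS);
 *	used = fill_rx_descriptors(page);	// hands out 'used' >= 1 refs
 *	if (used < MAX_FRAGS)
 *		page_pool_unref_page(page, MAX_FRAGS - used);
 *
 * The unused references are returned with a single atomic operation instead
 * of bumping the count once per fragment.
 */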

static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
{
	atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem);
	long ret;

	/* If nr == pp_ref_count then we have cleared all remaining
	 * references to the page:
	 * 1. 'nr == 1': no need to actually overwrite it.
	 * 2. 'nr != 1': overwrite it with one, which is the rare case
	 *               for pp_ref_count draining.
	 *
	 * The main advantage of doing this is that we avoid an atomic update,
	 * as an atomic_read is generally a much cheaper operation than an
	 * atomic update, especially when dealing with a page that may be
	 * referenced by only 2 or 3 users; it also unifies the pp_ref_count
	 * handling by ensuring all pages start out partitioned into only
	 * 1 piece, and only overwriting the count when the page is
	 * partitioned into more than one piece.
	 */
	if (atomic_long_read(pp_ref_count) == nr) {
		/* As we have ensured nr is always one for the constant case
		 * using the BUILD_BUG_ON(), we only need to handle the
		 * non-constant case here for pp_ref_count draining, which is
		 * a rare case.
		 */
		BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
		if (!__builtin_constant_p(nr))
			atomic_long_set(pp_ref_count, 1);

		return 0;
	}

	ret = atomic_long_sub_return(nr, pp_ref_count);
	WARN_ON(ret < 0);

	/* We are the last user here too, so reset pp_ref_count back to 1 to
	 * ensure all pages have been partitioned into 1 piece initially;
	 * this should be the rare case where the last two fragment users call
	 * page_pool_unref_page() concurrently.
	 */
	if (unlikely(!ret))
		atomic_long_set(pp_ref_count, 1);

	return ret;
}

static inline long page_pool_unref_page(struct page *page, long nr)
{
	return page_pool_unref_netmem(page_to_netmem(page), nr);
}

static inline void page_pool_ref_netmem(netmem_ref netmem)
{
	atomic_long_inc(netmem_get_pp_ref_count_ref(netmem));
}

static inline void page_pool_ref_page(struct page *page)
{
	page_pool_ref_netmem(page_to_netmem(page));
}

static inline bool page_pool_unref_and_test(netmem_ref netmem)
{
	/* If page_pool_unref_page() returns 0, we were the last user */
	return page_pool_unref_netmem(netmem, 1) == 0;
}

static inline void page_pool_put_netmem(struct page_pool *pool,
					netmem_ref netmem,
					unsigned int dma_sync_size,
					bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_unref_and_test(netmem))
		return;

	page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct);
#endif
}

/**
 * page_pool_put_page() - release a reference to a page pool page
 * @pool:	pool from which page was allocated
 * @page:	page to release a reference on
 * @dma_sync_size: how much of the page may have been touched by the device
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * The outcome of this depends on the page refcnt. If the driver bumps
 * the refcnt > 1 this will unmap the page. If the page refcnt is 1
 * the allocator owns the page and will try to recycle it in one of the pool
 * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
 * using dma_sync_single_range_for_device().
 */
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
			     allow_direct);
}
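
/* Sketch: returning a buffer where the device wrote at most "pkt_len" bytes,
 * so only that much needs to be synced for the device on recycle
 * (hypothetical names; allow_direct is false outside of NAPI context):
 *
 *	page_pool_put_page(rxq->page_pool, page, pkt_len, false);
 *
 * Passing -1 as dma_sync_size (or using page_pool_put_full_page() below)
 * instead syncs the whole area configured in max_len.
 */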

static inline void page_pool_put_full_netmem(struct page_pool *pool,
					     netmem_ref netmem,
					     bool allow_direct)
{
	page_pool_put_netmem(pool, netmem, -1, allow_direct);
}

/**
 * page_pool_put_full_page() - release a reference on a page pool page
 * @pool:	pool from which page was allocated
 * @page:	page to release a reference on
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * Similar to page_pool_put_page(), but will DMA sync the entire memory area
 * as configured in &page_pool_params.max_len.
 */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
}

/**
 * page_pool_recycle_direct() - release a reference on a page pool page
 * @pool:	pool from which page was allocated
 * @page:	page to release a reference on
 *
 * Similar to page_pool_put_full_page() but the caller must guarantee a safe
 * context (e.g. NAPI), since it will recycle the page directly into the pool
 * fast cache.
 */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
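
/* Sketch: dropping a bad descriptor from within the driver's NAPI poll loop,
 * where direct recycling into the pool's lockless cache is safe (hypothetical
 * rxq and rx_desc_error() names):
 *
 *	if (unlikely(rx_desc_error(desc))) {
 *		page_pool_recycle_direct(rxq->page_pool, page);
 *		continue;
 *	}
 */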

static inline void page_pool_recycle_direct_netmem(struct page_pool *pool,
						   netmem_ref netmem)
{
	page_pool_put_full_netmem(pool, netmem, true);
}

#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

/**
 * page_pool_free_va() - free a va into the page_pool
 * @pool: pool from which va was allocated
 * @va: va to be freed
 * @allow_direct: freed by the consumer, allow lockless caching
 *
 * Free a va allocated from page_pool_alloc_va().
 */
static inline void page_pool_free_va(struct page_pool *pool, void *va,
				     bool allow_direct)
{
	page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}
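
/* Sketch pairing the va-based helpers, e.g. for a buffer the driver only ever
 * touches by virtual address (hypothetical "hdr_len" size):
 *
 *	unsigned int size = hdr_len;
 *	void *va;
 *
 *	va = page_pool_dev_alloc_va(pool, &size);
 *	if (unlikely(!va))
 *		return -ENOMEM;
 *	...
 *	page_pool_free_va(pool, va, true);	// true only in a safe context
 */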

static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
{
	dma_addr_t ret = netmem_get_dma_addr(netmem);

	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
		ret <<= PAGE_SHIFT;

	return ret;
}

/**
 * page_pool_get_dma_addr() - Retrieve the stored DMA address.
 * @page:	page allocated from a page pool
 *
 * Fetch the DMA address of the page. The page pool to which the page belongs
 * must have been created with PP_FLAG_DMA_MAP.
 */
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
	return page_pool_get_dma_addr_netmem(page_to_netmem(page));
}
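
/* Sketch: a pool created with PP_FLAG_DMA_MAP lets the driver fill RX
 * descriptors without doing its own dma_map_page() (hypothetical descriptor
 * field name):
 *
 *	desc->buf_addr = page_pool_get_dma_addr(page) + offset;
 */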

static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool,
						const dma_addr_t dma_addr,
						u32 offset, u32 dma_sync_size)
{
	dma_sync_single_range_for_cpu(pool->p.dev, dma_addr,
				      offset + pool->p.offset, dma_sync_size,
				      page_pool_get_dma_dir(pool));
}

/**
 * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
 * @pool: &page_pool the @page belongs to
 * @page: page to sync
 * @offset: offset from page start to "hard" start if using PP frags
 * @dma_sync_size: size of the data written to the page
 *
 * Can be used as a shorthand to sync Rx pages before accessing them in the
 * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``.
 * Note that this version performs DMA sync unconditionally, even if the
 * associated PP doesn't perform sync-for-device.
 */
static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
					      const struct page *page,
					      u32 offset, u32 dma_sync_size)
{
	__page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset,
				     dma_sync_size);
}
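
/* Sketch: making the received data visible to the CPU before parsing it in
 * the RX path (hypothetical rxq and pkt_len names; @offset is 0 here because
 * the helper already adds pool->p.offset internally):
 *
 *	page_pool_dma_sync_for_cpu(rxq->page_pool, page, 0, pkt_len);
 */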

static inline void
page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
				  const netmem_ref netmem, u32 offset,
				  u32 dma_sync_size)
{
	if (!pool->dma_sync_for_cpu)
		return;

	__page_pool_dma_sync_for_cpu(pool,
				     page_pool_get_dma_addr_netmem(netmem),
				     offset, dma_sync_size);
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}

static inline bool page_pool_is_unreadable(struct page_pool *pool)
{
	return !!pool->mp_ops;
}

#endif /* _NET_PAGE_POOL_HELPERS_H */