1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* zpdesc.h: zswap.zpool memory descriptor
3 *
4 * Written by Alex Shi <alexs@kernel.org>
5 * Hyeonggon Yoo <42.hyeyoo@gmail.com>
6 */
7 #ifndef __MM_ZPDESC_H__
8 #define __MM_ZPDESC_H__
9
10 #include <linux/migrate.h>
11 #include <linux/pagemap.h>
12
13 /*
14 * struct zpdesc - Memory descriptor for zpool memory.
15 * @flags: Page flags, mostly unused by zsmalloc.
16 * @lru: Indirectly used by page migration.
17 * @movable_ops: Used by page migration.
18 * @next: Next zpdesc in a zspage in zsmalloc zpool.
19 * @handle: For huge zspage in zsmalloc zpool.
20 * @zspage: Points to the zspage this zpdesc is a part of.
21 * @first_obj_offset: First object offset in zsmalloc zpool.
22 * @_refcount: The number of references to this zpdesc.
23 *
24 * This struct overlays struct page for now. Do not modify without a good
25 * understanding of the issues. In particular, do not expand into the overlap
26 * with memcg_data.
27 *
28 * Page flags used:
29 * * PG_private identifies the first component page.
30 * * PG_locked is used by page migration code.
31 */
struct zpdesc {
	unsigned long flags;
	struct list_head lru;
	unsigned long movable_ops;	/* overlays page->mapping (see ZPDESC_MATCH below) */
	union {				/* overlays page->__folio_index */
		struct zpdesc *next;
		unsigned long handle;
	};
	struct zspage *zspage;		/* overlays page->private */
	/*
	 * Only the lower 24 bits are available for offset, limiting a page
	 * to 16 MiB. The upper 8 bits are reserved for PGTY_zsmalloc.
	 *
	 * Do not access this field directly.
	 * Instead, use {get,set}_first_obj_offset() helpers.
	 */
	unsigned int first_obj_offset;	/* overlays page->page_type */
	atomic_t _refcount;
};
/*
 * Compile-time layout checks: each zpdesc field must sit at exactly the
 * same offset as the struct page field it overlays, and struct zpdesc as
 * a whole must not outgrow struct page.
 */
#define ZPDESC_MATCH(pg, zp) \
	static_assert(offsetof(struct page, pg) == offsetof(struct zpdesc, zp))

ZPDESC_MATCH(flags, flags);
ZPDESC_MATCH(lru, lru);
ZPDESC_MATCH(mapping, movable_ops);
ZPDESC_MATCH(__folio_index, next);
ZPDESC_MATCH(__folio_index, handle);
ZPDESC_MATCH(private, zspage);
ZPDESC_MATCH(page_type, first_obj_offset);
ZPDESC_MATCH(_refcount, _refcount);
#undef ZPDESC_MATCH
static_assert(sizeof(struct zpdesc) <= sizeof(struct page));
64
/**
 * zpdesc_page - The first struct page allocated for a zpdesc
 * @zp: The zpdesc.
 *
 * A convenience wrapper for converting zpdesc to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct zpdesc.
 */
#define zpdesc_page(zp) (_Generic((zp),				\
	const struct zpdesc *:	(const struct page *)(zp),	\
	struct zpdesc *:	(struct page *)(zp)))
77
/**
 * zpdesc_folio - The folio allocated for a zpdesc
 * @zp: The zpdesc.
 *
 * Zpdescs are descriptors for zpool memory. The zpool memory itself is
 * allocated as folios that contain the zpool objects, and zpdesc uses specific
 * fields in the first struct page of the folio - those fields are now accessed
 * by struct zpdesc.
 *
 * It is occasionally necessary to convert back to a folio in order to
 * communicate with the rest of the mm. Please use this helper function
 * instead of casting yourself, as the implementation may change in the future.
 */
#define zpdesc_folio(zp) (_Generic((zp),			\
	const struct zpdesc *:	(const struct folio *)(zp),	\
	struct zpdesc *:	(struct folio *)(zp)))
/**
 * page_zpdesc - Converts from first struct page to zpdesc.
 * @p: The first (either head of compound or single) page of zpdesc.
 *
 * A temporary wrapper to convert struct page to struct zpdesc in situations
 * where we know the page is the compound head, or a single order-0 page.
 *
 * Long-term ideally everything would work with struct zpdesc directly or go
 * through folio to struct zpdesc.
 *
 * Return: The zpdesc which contains this page.
 */
#define page_zpdesc(p) (_Generic((p),				\
	const struct page *:	(const struct zpdesc *)(p),	\
	struct page *:		(struct zpdesc *)(p)))
109
/* Take the lock on the folio backing @zpdesc. */
static inline void zpdesc_lock(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_lock(folio);
}
114
zpdesc_trylock(struct zpdesc * zpdesc)115 static inline bool zpdesc_trylock(struct zpdesc *zpdesc)
116 {
117 return folio_trylock(zpdesc_folio(zpdesc));
118 }
119
/* Release the lock on the folio backing @zpdesc. */
static inline void zpdesc_unlock(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_unlock(folio);
}
124
/* Wait until the folio backing @zpdesc is no longer locked. */
static inline void zpdesc_wait_locked(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_wait_locked(folio);
}
129
/* Acquire a reference on the folio backing @zpdesc. */
static inline void zpdesc_get(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_get(folio);
}
134
/* Drop a reference on the folio backing @zpdesc. */
static inline void zpdesc_put(struct zpdesc *zpdesc)
{
	struct folio *folio = zpdesc_folio(zpdesc);

	folio_put(folio);
}
139
/* Map the first page of @zpdesc via kmap_local_page() and return the address. */
static inline void *kmap_local_zpdesc(struct zpdesc *zpdesc)
{
	struct page *page = zpdesc_page(zpdesc);

	return kmap_local_page(page);
}
144
/* Return the page frame number of the first page of @zpdesc. */
static inline unsigned long zpdesc_pfn(struct zpdesc *zpdesc)
{
	struct page *page = zpdesc_page(zpdesc);

	return page_to_pfn(page);
}
149
/* Return the zpdesc for the page at frame number @pfn. */
static inline struct zpdesc *pfn_zpdesc(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	return page_zpdesc(page);
}
154
/* Mark the page backing @zpdesc as having movable_ops (page migration). */
static inline void __zpdesc_set_movable(struct zpdesc *zpdesc)
{
	struct page *page = zpdesc_page(zpdesc);

	SetPageMovableOps(page);
}
159
/* Tag the page backing @zpdesc with the zsmalloc page type (non-atomic). */
static inline void __zpdesc_set_zsmalloc(struct zpdesc *zpdesc)
{
	struct page *page = zpdesc_page(zpdesc);

	__SetPageZsmalloc(page);
}
164
/* Return the memory zone containing the page backing @zpdesc. */
static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
{
	struct page *page = zpdesc_page(zpdesc);

	return page_zone(page);
}
169
zpdesc_is_locked(struct zpdesc * zpdesc)170 static inline bool zpdesc_is_locked(struct zpdesc *zpdesc)
171 {
172 return folio_test_locked(zpdesc_folio(zpdesc));
173 }
#endif /* __MM_ZPDESC_H__ */
175