/* xref: /linux/fs/netfs/internal.h (revision 9ebff83e648148b9ece97d4e4890dd84ca54d6ce) */
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* Internal definitions for network filesystem support
3  *
4  * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #include <linux/slab.h>
9 #include <linux/seq_file.h>
10 #include <linux/netfs.h>
11 #include <linux/fscache.h>
12 #include <linux/fscache-cache.h>
13 #include <trace/events/netfs.h>
14 #include <trace/events/fscache.h>
15 
16 #ifdef pr_fmt
17 #undef pr_fmt
18 #endif
19 
20 #define pr_fmt(fmt) "netfs: " fmt
21 
22 /*
23  * buffered_read.c
24  */
25 void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
26 
27 /*
28  * io.c
29  */
30 int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
31 
32 /*
33  * main.c
34  */
35 extern unsigned int netfs_debug;
36 extern struct list_head netfs_io_requests;
37 extern spinlock_t netfs_proc_lock;
38 
#ifdef CONFIG_PROC_FS
/*
 * Add a request to the global list exported through procfs.  The list is
 * RCU-protected for readers; writers serialise on netfs_proc_lock.
 */
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
/*
 * Remove a request from the procfs list if it is on it.
 *
 * NOTE(review): list_empty() is checked without holding netfs_proc_lock —
 * presumably safe because nothing can re-add the request concurrently at
 * teardown time; confirm against the callers.
 */
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
/* No procfs: the bookkeeping compiles away to nothing. */
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif
58 
59 /*
60  * misc.c
61  */
62 #define NETFS_FLAG_PUT_MARK		BIT(0)
63 #define NETFS_FLAG_PAGECACHE_MARK	BIT(1)
64 int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
65 			    struct folio *folio, unsigned int flags,
66 			    gfp_t gfp_mask);
67 int netfs_add_folios_to_buffer(struct xarray *buffer,
68 			       struct address_space *mapping,
69 			       pgoff_t index, pgoff_t to, gfp_t gfp_mask);
70 void netfs_clear_buffer(struct xarray *buffer);
71 
72 /*
73  * objects.c
74  */
75 struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
76 					     struct file *file,
77 					     loff_t start, size_t len,
78 					     enum netfs_io_origin origin);
79 void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
80 void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
81 void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
82 		       enum netfs_rreq_ref_trace what);
83 struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
84 
/*
 * Note the current refcount of a request in the tracelog without taking or
 * dropping a reference.
 */
static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}
90 
91 /*
92  * stats.c
93  */
94 #ifdef CONFIG_NETFS_STATS
95 extern atomic_t netfs_n_rh_readahead;
96 extern atomic_t netfs_n_rh_readpage;
97 extern atomic_t netfs_n_rh_rreq;
98 extern atomic_t netfs_n_rh_sreq;
99 extern atomic_t netfs_n_rh_download;
100 extern atomic_t netfs_n_rh_download_done;
101 extern atomic_t netfs_n_rh_download_failed;
102 extern atomic_t netfs_n_rh_download_instead;
103 extern atomic_t netfs_n_rh_read;
104 extern atomic_t netfs_n_rh_read_done;
105 extern atomic_t netfs_n_rh_read_failed;
106 extern atomic_t netfs_n_rh_zero;
107 extern atomic_t netfs_n_rh_short_read;
108 extern atomic_t netfs_n_rh_write;
109 extern atomic_t netfs_n_rh_write_begin;
110 extern atomic_t netfs_n_rh_write_done;
111 extern atomic_t netfs_n_rh_write_failed;
112 extern atomic_t netfs_n_rh_write_zskip;
113 extern atomic_t netfs_n_wh_upload;
114 extern atomic_t netfs_n_wh_upload_done;
115 extern atomic_t netfs_n_wh_upload_failed;
116 extern atomic_t netfs_n_wh_write;
117 extern atomic_t netfs_n_wh_write_done;
118 extern atomic_t netfs_n_wh_write_failed;
119 
120 int netfs_stats_show(struct seq_file *m, void *v);
121 
/* Increment a netfs statistics counter (only when CONFIG_NETFS_STATS=y). */
static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}
126 
/* Decrement a netfs statistics counter (only when CONFIG_NETFS_STATS=y). */
static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}
131 
132 #else
133 #define netfs_stat(x) do {} while(0)
134 #define netfs_stat_d(x) do {} while(0)
135 #endif
136 
137 /*
138  * Miscellaneous functions.
139  */
140 static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
141 {
142 #if IS_ENABLED(CONFIG_FSCACHE)
143 	struct fscache_cookie *cookie = ctx->cache;
144 
145 	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
146 		fscache_cookie_enabled(cookie);
147 #else
148 	return false;
149 #endif
150 }
151 
152 /*
153  * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
154  */
155 static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
156 {
157 	if (netfs_group)
158 		refcount_inc(&netfs_group->ref);
159 	return netfs_group;
160 }
161 
162 /*
163  * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
164  */
165 static inline void netfs_put_group(struct netfs_group *netfs_group)
166 {
167 	if (netfs_group && refcount_dec_and_test(&netfs_group->ref))
168 		netfs_group->free(netfs_group);
169 }
170 
/*
 * Drop nr refs on a netfs group attached to dirty pages (e.g. a ceph snap),
 * freeing it if the count falls to zero.
 */
174 static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
175 {
176 	if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref))
177 		netfs_group->free(netfs_group);
178 }
179 
180 /*
181  * fscache-cache.c
182  */
183 #ifdef CONFIG_PROC_FS
184 extern const struct seq_operations fscache_caches_seq_ops;
185 #endif
186 bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
187 void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
188 struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
189 void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
190 
/*
 * Read the cache state with acquire ordering, pairing with the release in
 * fscache_set_cache_state() so that data written before a state change is
 * visible to anyone who observes the new state.
 */
static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}
195 
/* Return true if the cache is in the active (usable) state. */
static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}
200 
201 static inline void fscache_set_cache_state(struct fscache_cache *cache,
202 					   enum fscache_cache_state new_state)
203 {
204 	smp_store_release(&cache->state, new_state);
205 
206 }
207 
/*
 * Attempt to switch the cache state from old_state to new_state with release
 * ordering.  Returns true if the transition was made and false if the state
 * was no longer old_state.
 */
static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
214 
215 /*
216  * fscache-cookie.c
217  */
218 extern struct kmem_cache *fscache_cookie_jar;
219 #ifdef CONFIG_PROC_FS
220 extern const struct seq_operations fscache_cookies_seq_ops;
221 #endif
222 extern struct timer_list fscache_cookie_lru_timer;
223 
224 extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
225 extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
226 					enum fscache_access_trace why);
227 
/*
 * Note the current refcount of a cookie in the tracelog without taking or
 * dropping a reference.
 */
static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}
234 
235 /*
236  * fscache-main.c
237  */
238 extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
239 #ifdef CONFIG_FSCACHE
240 int __init fscache_init(void);
241 void __exit fscache_exit(void);
242 #else
243 static inline int fscache_init(void) { return 0; }
244 static inline void fscache_exit(void) {}
245 #endif
246 
247 /*
248  * fscache-proc.c
249  */
250 #ifdef CONFIG_PROC_FS
251 extern int __init fscache_proc_init(void);
252 extern void fscache_proc_cleanup(void);
253 #else
254 #define fscache_proc_init()	(0)
255 #define fscache_proc_cleanup()	do {} while (0)
256 #endif
257 
258 /*
259  * fscache-stats.c
260  */
261 #ifdef CONFIG_FSCACHE_STATS
262 extern atomic_t fscache_n_volumes;
263 extern atomic_t fscache_n_volumes_collision;
264 extern atomic_t fscache_n_volumes_nomem;
265 extern atomic_t fscache_n_cookies;
266 extern atomic_t fscache_n_cookies_lru;
267 extern atomic_t fscache_n_cookies_lru_expired;
268 extern atomic_t fscache_n_cookies_lru_removed;
269 extern atomic_t fscache_n_cookies_lru_dropped;
270 
271 extern atomic_t fscache_n_acquires;
272 extern atomic_t fscache_n_acquires_ok;
273 extern atomic_t fscache_n_acquires_oom;
274 
275 extern atomic_t fscache_n_invalidates;
276 
277 extern atomic_t fscache_n_relinquishes;
278 extern atomic_t fscache_n_relinquishes_retire;
279 extern atomic_t fscache_n_relinquishes_dropped;
280 
281 extern atomic_t fscache_n_resizes;
282 extern atomic_t fscache_n_resizes_null;
283 
/* Increment an fscache statistics counter (only when CONFIG_FSCACHE_STATS=y). */
static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}
288 
/* Decrement an fscache statistics counter (only when CONFIG_FSCACHE_STATS=y). */
static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}
293 
294 #define __fscache_stat(stat) (stat)
295 
296 int fscache_stats_show(struct seq_file *m);
297 #else
298 
299 #define __fscache_stat(stat) (NULL)
300 #define fscache_stat(stat) do {} while (0)
301 #define fscache_stat_d(stat) do {} while (0)
302 
303 static inline int fscache_stats_show(struct seq_file *m) { return 0; }
304 #endif
305 
306 /*
307  * fscache-volume.c
308  */
309 #ifdef CONFIG_PROC_FS
310 extern const struct seq_operations fscache_volumes_seq_ops;
311 #endif
312 
313 struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
314 					  enum fscache_volume_trace where);
315 void fscache_put_volume(struct fscache_volume *volume,
316 			enum fscache_volume_trace where);
317 bool fscache_begin_volume_access(struct fscache_volume *volume,
318 				 struct fscache_cookie *cookie,
319 				 enum fscache_access_trace why);
320 void fscache_create_volume(struct fscache_volume *volume, bool wait);
321 
322 /*****************************************************************************/
323 /*
324  * debug tracing
325  */
/* Emit a debug line tagged with the current task's comm. */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

/* Unconditional function entry/exit/general tracing helpers. */
#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
332 
#ifdef __KDEBUG
/* Hard-compiled debugging: always emit trace lines. */
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
/* Runtime-switchable debugging, gated on the netfs_debug module flag. */
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
/* Debugging compiled out; no_printk() still type-checks the format args. */
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
362 
363 /*
364  * assertions
365  */
#if 1 /* defined(__KDEBUGALL) */

/* BUG() if condition X is false. */
#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");					\
		pr_err("Assertion failed\n");	\
		BUG();							\
	}								\
} while (0)

/* BUG() unless (X OP Y) holds; logs both values (as unsigned long). */
#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");					\
		pr_err("Assertion failed\n");	\
		pr_err("%lx " #OP " %lx is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

/* BUG() if condition C holds but X is false. */
#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");					\
		pr_err("Assertion failed\n");	\
		BUG();							\
	}								\
} while (0)

/* BUG() if condition C holds but (X OP Y) does not; logs both values. */
#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");					\
		pr_err("Assertion failed\n");	\
		pr_err("%lx " #OP " %lx is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

/* Assertions compiled out: each expands to an empty statement. */
#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */
416