/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/folio_queue.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt

/*
 * buffered_read.c
 */
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * main.c
 */
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif

/*
 * misc.c
 */
struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
					    enum netfs_folioq_trace trace);
void netfs_reset_iter(struct netfs_io_subrequest *subreq);
void netfs_wake_collector(struct netfs_io_request *rreq);
void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
				       struct netfs_io_stream *stream);
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
void netfs_wait_for_paused_write(struct netfs_io_request *rreq);

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq);
void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}

static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
					enum netfs_sreq_ref_trace what)
{
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
			     refcount_read(&subreq->ref), what);
}

/*
 * read_collect.c
 */
bool netfs_read_collection(struct netfs_io_request *rreq);
void netfs_read_collection_worker(struct work_struct *work);
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
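/*
 * Note: netfs_cache_read_terminated() is a netfs_io_terminated_t and is
 * meant to be passed as the termination callback when an asynchronous read
 * is kicked off against the cache.  An illustrative sketch only, not lifted
 * from a specific caller:
 *
 *	cres->ops->read(cres, subreq->start, &subreq->io_iter,
 *			NETFS_READ_HOLE_IGNORE,
 *			netfs_cache_read_terminated, subreq);
 */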

/*
 * read_pgpriv2.c
 */
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);

/*
 * read_retry.c
 */
void netfs_retry_reads(struct netfs_io_request *rreq);
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_read_single;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_rh_retry_read_req;
extern atomic_t netfs_n_rh_retry_read_subreq;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_copy_to_cache;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
extern atomic_t netfs_n_wh_retry_write_req;
extern atomic_t netfs_n_wh_retry_write_subreq;
extern atomic_t netfs_n_wb_lock_skip;
extern atomic_t netfs_n_wb_lock_wait;
extern atomic_t netfs_n_folioq;

int netfs_stats_show(struct seq_file *m, void *v);

static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while (0)
#define netfs_stat_d(x) do {} while (0)
#endif
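/*
 * Usage note (illustrative): with CONFIG_NETFS_STATS=y, a counter is bumped
 * with e.g. netfs_stat(&netfs_n_rh_readahead) and the totals are emitted by
 * netfs_stats_show(); without it, netfs_stat()/netfs_stat_d() compile away
 * entirely, so callers never need #ifdefs of their own.
 */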

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
bool netfs_write_collection(struct netfs_io_request *wreq);
void netfs_write_collection_worker(struct work_struct *work);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq,
			 struct iov_iter *source);
void netfs_issue_write(struct netfs_io_request *wreq,
		       struct netfs_io_stream *stream);
size_t netfs_advance_write(struct netfs_io_request *wreq,
			   struct netfs_io_stream *stream,
			   loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);

/*
 * write_retry.c
 */
void netfs_retry_writes(struct netfs_io_request *wreq);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}

/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Dispose of multiple refs on a netfs group attached to a dirty page (e.g. a
 * ceph snap).
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}
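/*
 * Pairing sketch (illustrative, not taken from a specific caller): every ref
 * taken with netfs_get_group() must be balanced by netfs_put_group(), or be
 * counted into one batched netfs_put_group_many() call:
 *
 *	group = netfs_get_group(group);	// e.g. when tagging a dirty folio
 *	...
 *	netfs_put_group(group);		// e.g. when the folio is cleaned
 *
 * NETFS_FOLIO_COPY_TO_CACHE is a sentinel value, not a real group, which is
 * why all three helpers skip it.
 */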

/*
 * Clear and wake up a NETFS_RREQ_* flag bit on a request.
 */
static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
					unsigned int rreq_flag,
					enum netfs_rreq_trace trace)
{
	if (test_bit(rreq_flag, &rreq->flags)) {
		trace_netfs_rreq(rreq, trace);
		clear_bit_unlock(rreq_flag, &rreq->flags);
		smp_mb__after_atomic(); /* Clear the flag before checking for waiters */
		wake_up(&rreq->waitq);
	}
}

/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
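/*
 * Memory-ordering note (illustrative): the acquire in fscache_cache_state()
 * pairs with the release in fscache_set_cache_state(), so a reader that
 * observes FSCACHE_CACHE_IS_ACTIVE also observes the stores that brought the
 * cache up, e.g.:
 *
 *	if (fscache_cache_is_live(cache)) {
 *		// the cache's setup is visible here
 *	}
 */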

/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);
#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
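/*
 * Typical usage (illustrative): callers supply their own format string, e.g.:
 *
 *	_enter("R=%x %llx-%llx", rreq->debug_id, start, end);
 *	...
 *	_leave(" = %zd", ret);
 *
 * With neither __KDEBUG nor CONFIG_NETFS_DEBUG set, these fold into
 * no_printk(), so the format arguments are still type-checked but no code is
 * emitted.
 */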

/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)					\
do {								\
	if (unlikely(!((X) OP (Y)))) {				\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		pr_err("%lx " #OP " %lx is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));	\
		BUG();						\
	}							\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)				\
do {								\
	if (unlikely((C) && !((X) OP (Y)))) {			\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		pr_err("%lx " #OP " %lx is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));	\
		BUG();						\
	}							\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */
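/*
 * Example (illustrative, not from a specific call site): ASSERTCMP() prints
 * both operands before BUG(), e.g.:
 *
 *	ASSERTCMP(subreq->transferred, <=, subreq->len);
 *
 * Note that the assertions compile in unconditionally while the "#if 1"
 * above stands.
 */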