// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

#include "xfs_extent_busy.h"	/* for struct xfs_busy_extents */

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;

struct xfs_log_iovec {
	void			*i_addr;	/* beginning address of region */
	int			i_len;		/* length in bytes of region */
	uint			i_type;		/* type of region */
};

struct xfs_log_vec {
	struct list_head	lv_list;	/* CIL lv chain ptrs */
	uint32_t		lv_order_id;	/* chain ordering info */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_used;	/* buffer space used so far */
	int			lv_alloc_size;	/* size of allocated lv */
};

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}

/*
 * In core log state
 */
enum xlog_iclog_state {
	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
	XLOG_STATE_SYNCING,	/* This IC log is syncing */
	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
	XLOG_STATE_CALLBACK,	/* Callback functions now */
	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
};

#define XLOG_STATE_STRINGS \
	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }

/*
 * In core log flags
 */
#define XLOG_ICL_NEED_FLUSH	(1u << 0)	/* iclog needs REQ_PREFLUSH */
#define XLOG_ICL_NEED_FUA	(1u << 1)	/* iclog needs REQ_FUA */

#define XLOG_ICL_STRINGS \
	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }


/*
 * Log ticket flags
 */
#define XLOG_TIC_PERM_RESERV	(1u << 0)	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity, after an allocation
 * transaction, and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 * IDLE -- no logging has been done on the file system or
 *		we are done covering previous transactions.
 * NEED -- logging has occurred and we need a dummy transaction
 *		when the log becomes idle.
 * DONE -- we were in the NEED state and have committed a dummy
 *		transaction.
 * NEED2 -- we detected that a dummy transaction has gone to the
 *		on disk log with no other transactions.
 * DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *	dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *	dummy transaction, we move to IDLE.
 *
 *
 * Writing only one dummy transaction can get appended to
 * one file space allocation. When this happens, the log recovery
 * code replays the space allocation and a file could be truncated.
 * This is why we have the NEED2 and DONE2 states before going idle.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5

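/*
 * Illustrative sketch only (not a definition used by the code): assuming the
 * log goes idle and stays idle, the covering state machine described above
 * walks the states roughly as follows:
 *
 *	IDLE  --(any transaction logged)--->  NEED
 *	NEED  --(idle, dummy committed)---->  DONE
 *	DONE  --(dummy record hits disk)--->  NEED2
 *	NEED2 --(idle, dummy committed)---->  DONE2
 *	DONE2 --(dummy record hits disk)--->  IDLE
 *
 * Any non-dummy transaction written out in the meantime drops the state back
 * to NEED.
 */
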
struct xlog_ticket {
	struct list_head	t_queue;	/* reserve/write queue */
	struct task_struct	*t_task;	/* task that owns this ticket */
	xlog_tid_t		t_tid;		/* transaction identifier */
	atomic_t		t_ref;		/* ticket reference count */
	int			t_curr_res;	/* current reservation */
	int			t_unit_res;	/* unit reservation */
	char			t_ocnt;		/* original unit count */
	char			t_cnt;		/* current unit count */
	uint8_t			t_flags;	/* properties of reservation */
	int			t_iclog_hdrs;	/* iclog hdrs in t_curr_res */
};

/*
 * In-core log structure.
 *
 * - ic_force_wait is used to implement synchronous forcing of the iclog to
 *   disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_log is a pointer back to the global log structure.
 * - ic_size is the full size of the log buffer, minus the cycle headers.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callbacks
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xlog		*ic_log;
	u32			ic_size;
	u32			ic_offset;
	enum xlog_iclog_state	ic_state;
	unsigned int		ic_flags;
	void			*ic_datap;	/* pointer to iclog data */
	struct list_head	ic_callbacks;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	struct xlog_rec_header	*ic_header;
#ifdef DEBUG
	bool			ic_fail_crc : 1;
#endif
	struct semaphore	ic_sema;
	struct work_struct	ic_end_io_work;
	struct bio		ic_bio;
	struct bio_vec		ic_bvec[];
};

/*
 * The CIL context is used to aggregate per-transaction details as well as be
 * passed to the iclog for checkpoint post-commit processing. After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_csn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	atomic_t		space_used;	/* aggregate size of regions */
	struct xfs_busy_extents	busy_extents;
	struct list_head	log_items;	/* log items in chkpt */
	struct list_head	lv_chain;	/* logvecs being pushed */
	struct list_head	iclog_entry;
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	push_work;
	atomic_t		order_id;

	/*
	 * CPUs that could have added items to the percpu CIL data. Access is
	 * coordinated with xc_ctx_lock.
	 */
	struct cpumask		cil_pcpmask;
};

/*
 * Per-cpu CIL tracking items
 */
struct xlog_cil_pcp {
	int32_t			space_used;
	uint32_t		space_reserved;
	struct list_head	busy_extents;
	struct list_head	log_items;
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	unsigned long		xc_flags;
	atomic_t		xc_iclog_hdrs;
	struct workqueue_struct	*xc_push_wq;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_csn_t		xc_push_seq;
	bool			xc_push_commit_stable;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	wait_queue_head_t	xc_start_wait;
	xfs_csn_t		xc_current_sequence;
	wait_queue_head_t	xc_push_wait;	/* background push throttle */

	void __percpu		*xc_pcp;	/* percpu CIL structures */
} ____cacheline_aligned_in_smp;

/* xc_flags bit values */
#define XLOG_CIL_EMPTY		1
#define XLOG_CIL_PCP_SPACE	2

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but it is not too large for
 * the log or induces too much latency when writing out through the iclogs. We
 * track both space consumed and the number of vectors in the checkpoint
 * context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers. The number of headers will vary based on the number
 * of io vectors, so limiting on a specific number of vectors is going to
 * result in transactions of varying size. IOWs, it is more consistent to
 * track and limit space consumed in the log rather than by the number of
 * objects being logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that. Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we only need to ensure the
 * CIL is large enough to maintain sufficient in-memory relogging to avoid
 * repeated physical writes of frequently modified metadata. If we allow the
 * CIL to grow to a substantial fraction of the log, then we may be pinning
 * hundreds of megabytes of metadata in memory until the CIL flushes. This can
 * cause issues when we are running low on memory - pinned memory cannot be
 * reclaimed, and the CIL consumes a lot of memory. Hence we need to set an
 * upper physical size limit for the CIL that limits the maximum amount of
 * memory pinned by the CIL but does not limit performance by reducing
 * relogging efficiency significantly.
 *
 * As such, the CIL push threshold ends up being the smaller of two thresholds:
 * - a threshold large enough that it allows CIL to be pushed and progress to
 *   be made without excessive blocking of incoming transaction commits. This
 *   is defined to be 12.5% of the log space - half the 25% push threshold of
 *   the AIL.
 * - small enough that it doesn't pin excessive amounts of memory but maintains
 *   close to peak relogging efficiency. This is defined to be 16x the iclog
 *   buffer window (32MB) as measurements have shown this to be roughly the
 *   point of diminishing performance increases under highly concurrent
 *   modification workloads.
 *
 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
 * new threshold at which we block committing transactions until the background
 * CIL commit commences and switches to a new context. While this is not a hard
 * limit, it forces the process committing a transaction to the CIL to block
 * and yield the CPU, giving the CIL push work a chance to be scheduled and
 * start work. This prevents a process running lots of transactions from
 * overfilling the CIL because it is not yielding the CPU. We set the blocking
 * limit at twice the background push space threshold so we keep in line with
 * the AIL push thresholds.
 *
 * Note: this is not a -hard- limit as blocking is applied after the
 * transaction is inserted into the CIL and the push has been triggered. It is
 * largely a throttling mechanism that allows the CIL push to be scheduled and
 * run. A hard limit will be difficult to implement without introducing global
 * serialisation in the CIL commit fast path, and it's not at all clear that we
 * actually need such hard limits given the ~7 years we've run without a hard
 * limit before finding the first situation where a checkpoint size overflow
 * actually occurred. Hence the simple throttle, and an ASSERT check to tell
 * us that we've overrun the max size.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	\
	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)

#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
	(XLOG_CIL_SPACE_LIMIT(log) * 2)

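/*
 * Worked example (illustrative numbers only): for a 2GB log, l_logsize >> 3
 * is 256MB, while the second term is the 16x iclog buffer window the comment
 * above puts at roughly 32MB for v2 logs. The background push threshold is
 * therefore ~32MB and the blocking threshold ~64MB; for small logs the 12.5%
 * term is the smaller of the two and wins instead.
 */
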
/*
 * ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number. Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;		/* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buftarg	*l_targ;	/* buftarg of log */
	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
	struct delayed_work	l_work;		/* background flush work */
	long			l_opstate;	/* operational state */
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	struct list_head	r_dfops;	/* recovered log intent items */
	int			l_iclog_hsize;	/* size of iclog header */
	uint			l_sectBBsize;	/* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of log in bytes */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;	/* start block of log */
	int			l_logsize;	/* size of log in bytes */
	int			l_logBBsize;	/* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	struct xlog_in_core	*l_iclog;	/* head log queue */
	spinlock_t		l_icloglock;	/* grab to change iclog state */
	int			l_curr_cycle;	/* Cycle number of log writes */
	int			l_prev_cycle;	/* Cycle number before last
						 * block increment */
	int			l_curr_block;	/* current logical log block */
	int			l_prev_block;	/* previous logical log block */

	/*
	 * l_tail_lsn is atomic so it can be set and read without needing to
	 * hold specific locks. To avoid operations contending with other hot
	 * objects, it is on a separate cacheline.
	 */
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;
	uint64_t		l_tail_space;

	struct xfs_kobj		l_kobj;

	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;

	uint32_t		l_iclog_roundoff; /* padding roundoff */
};

/*
 * Bits for operational state
 */
#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
#define XLOG_RECOVERY_NEEDED	1	/* log was recovered */
#define XLOG_IO_ERROR		2	/* log hit an I/O error, and being
					   shutdown */
#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */
#define XLOG_SHUTDOWN_STARTED	4	/* xlog_force_shutdown() exclusion */

static inline bool
xlog_recovery_needed(struct xlog *log)
{
	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
}

static inline bool
xlog_in_recovery(struct xlog *log)
{
	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
}

static inline bool
xlog_is_shutdown(struct xlog *log)
{
	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
}

/*
 * Wait until the xlog_force_shutdown() has marked the log as shut down
 * so xlog_is_shutdown() will always return true.
 */
static inline void
xlog_shutdown_wait(
	struct xlog	*log)
{
	wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
}

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern void
xlog_recover_cancel(struct xlog *);

__le32	xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
		char *dp, unsigned int hdrsize, unsigned int size);

extern struct kmem_cache *xfs_log_ticket_cache;
struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
		int count, bool permanent);

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void	xlog_print_trans(struct xfs_trans *);
int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
		struct list_head *lv_chain, struct xlog_ticket *tic,
		uint32_t len);
int	xlog_write_one_vec(struct xlog *log, struct xfs_cil_ctx *ctx,
		struct xfs_log_iovec *reg, struct xlog_ticket *ticket);
void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);

void	xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
		int eventual_size);
int	xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
		struct xlog_ticket *ticket);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component
 * pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

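/*
 * Illustrative usage (a sketch only, not a definition used elsewhere):
 * sampling the tail LSN from the atomic variable as a consistent pair,
 *
 *	uint	cycle, block;
 *
 *	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
 *
 * after which cycle/block are a self-consistent snapshot even if the tail
 * moves concurrently.
 */
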
/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);
void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
		xfs_csn_t *commit_seq, bool regrant);
void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
		struct xlog_in_core *iclog);


/*
 * CIL force routines
 */
void	xlog_cil_flush(struct xlog *log);
xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
}

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void
xlog_wait(
	struct wait_queue_head	*wq,
	struct spinlock		*lock)
		__releases(lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}

int xlog_wait_on_iclog(struct xlog_in_core *iclog)
		__releases(iclog->ic_log->l_icloglock);

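/*
 * Illustrative call pattern (a sketch only; "condition" is a placeholder):
 * the caller must hold "lock", re-check its wait condition under that lock,
 * and then let xlog_wait() drop the lock and sleep:
 *
 *	spin_lock(&log->l_icloglock);
 *	if (!condition)
 *		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 *
 * Either way the lock is released exactly once, which is what the
 * __releases() annotation documents.
 */
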
/* Calculate the distance between two LSNs in bytes */
static inline uint64_t
xlog_lsn_sub(
	struct xlog	*log,
	xfs_lsn_t	high,
	xfs_lsn_t	low)
{
	uint32_t	hi_cycle = CYCLE_LSN(high);
	uint32_t	hi_block = BLOCK_LSN(high);
	uint32_t	lo_cycle = CYCLE_LSN(low);
	uint32_t	lo_block = BLOCK_LSN(low);

	if (hi_cycle == lo_cycle)
		return BBTOB(hi_block - lo_block);
	ASSERT((hi_cycle == lo_cycle + 1) || xlog_is_shutdown(log));
	return (uint64_t)log->l_logsize - BBTOB(lo_block - hi_block);
}
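
/*
 * Worked example (illustrative numbers only): for a 64MB log (131072 basic
 * blocks), low = cycle 7/block 130000 and high = cycle 8/block 1000 differ by
 * one cycle, so the distance is l_logsize - BBTOB(130000 - 1000): the 1072
 * blocks from "low" to the end of the log plus the 1000 blocks already
 * written in cycle 8, roughly 1MB in total.
 */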

void	xlog_grant_return_space(struct xlog *log, xfs_lsn_t old_head,
		xfs_lsn_t new_head);

/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are
	 * updated (in xlog_state_switch_iclogs()) and read here in a
	 * particular order to avoid false negatives (e.g., thinking the
	 * metadata LSN is valid when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen
	 * in a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = READ_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = READ_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * to check for sure.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

/*
 * Log vector and shadow buffers can be large, so we need to use kvmalloc()
 * here to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL
 * contexts to fall back to vmalloc, so we can't actually do anything useful
 * with gfp flags to control the kmalloc() behaviour within kvmalloc(). Hence
 * kmalloc() will do direct reclaim and compaction in the slow path, both of
 * which are horrendously expensive. We just want kmalloc to fail fast and
 * fall back to vmalloc if it can't get something straight away from the free
 * lists or buddy allocator. Hence we have to open code kvmalloc ourselves
 * here.
 *
 * This assumes that the caller uses memalloc_nofs_save task context here, so
 * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS
 * allocations. This is actually the only way to make vmalloc() do GFP_NOFS
 * allocations, so let's just all pretend this is a GFP_KERNEL context
 * operation....
 */
static inline void *
xlog_kvmalloc(
	size_t		buf_size)
{
	gfp_t		flags = GFP_KERNEL;
	void		*p;

	flags &= ~__GFP_DIRECT_RECLAIM;
	flags |= __GFP_NOWARN | __GFP_NORETRY;
	do {
		p = kmalloc(buf_size, flags);
		if (!p)
			p = vmalloc(buf_size);
	} while (!p);

	return p;
}

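/*
 * Illustrative caller pattern (a sketch only, not the actual call sites): the
 * comment above assumes the allocation runs in a memalloc_nofs_save() task
 * context, e.g.:
 *
 *	unsigned int	nofs_flags = memalloc_nofs_save();
 *	void		*buf = xlog_kvmalloc(len);
 *
 *	...
 *	memalloc_nofs_restore(nofs_flags);
 */
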
/*
 * Given a count of iovecs and space for a log item, compute the space we need
 * in the log to store that data plus the log headers.
 */
static inline unsigned int
xlog_item_space(
	unsigned int	niovecs,
	unsigned int	nbytes)
{
	nbytes += niovecs * (sizeof(uint64_t) + sizeof(struct xlog_op_header));
	return round_up(nbytes, sizeof(uint64_t));
}

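/*
 * Worked example (illustrative only, assuming the usual 12-byte
 * struct xlog_op_header): an item with 2 iovecs and 100 bytes of payload
 * needs 100 + 2 * (8 + 12) = 140 bytes, rounded up to 144 bytes of log space.
 */
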
/*
 * Cycles over XLOG_CYCLE_DATA_SIZE overflow into the extended header that was
 * added for v2 logs. Addressing for the cycles array there is off by one,
 * because the first batch of cycles is in the original header.
 */
static inline __be32 *xlog_cycle_data(struct xlog_rec_header *rhead, unsigned i)
{
	if (i >= XLOG_CYCLE_DATA_SIZE) {
		unsigned	j = i / XLOG_CYCLE_DATA_SIZE;
		unsigned	k = i % XLOG_CYCLE_DATA_SIZE;

		return &rhead->h_ext[j - 1].xh_cycle_data[k];
	}

	return &rhead->h_cycle_data[i];
}

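/*
 * Worked example (index arithmetic only): for i == XLOG_CYCLE_DATA_SIZE + 5,
 * j is 1 and k is 5, so the cycle word comes from h_ext[0].xh_cycle_data[5];
 * indices below XLOG_CYCLE_DATA_SIZE come straight from h_cycle_data[].
 */
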
#endif	/* __XFS_LOG_PRIV_H__ */