Lines matching "double buffering" — selected lines from block/blk-flush.c, shown with their file line numbers and, where applicable, the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0
9 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
15 * REQ_FUA means that the data must be on non-volatile media on request
28 * The actual execution of flush is double buffered. Whenever a request
30 * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
40 * double buffering sufficient.
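
The double buffering described in the comment above can be illustrated outside the kernel. Below is a minimal userspace sketch assuming only the mechanism the comment states (two pending lists indexed by a bit that flips when a flush is issued); the toy_* names and counters are invented for illustration and are not the kernel's data structures.

#include <stdio.h>

/*
 * Two pending queues indexed by a single bit: requests wait in
 * queue[pending_idx]; issuing a flush toggles the index so new arrivals
 * collect on the other queue while the in-flight flush covers the first
 * batch. Completion toggles running_idx to catch up.
 */
struct toy_flush_queue {
	int pending_idx;	/* where new waiters are parked */
	int running_idx;	/* batch covered by the current flush */
	int count[2];		/* number of waiters per queue */
};

static void toy_queue_waiter(struct toy_flush_queue *fq)
{
	fq->count[fq->pending_idx]++;
}

static void toy_kick_flush(struct toy_flush_queue *fq)
{
	/* only one flush in flight, and only if someone is waiting */
	if (fq->pending_idx != fq->running_idx ||
	    fq->count[fq->pending_idx] == 0)
		return;
	fq->pending_idx ^= 1;	/* new waiters now use the other list */
	printf("flush issued for %d waiter(s)\n", fq->count[fq->running_idx]);
}

static void toy_flush_done(struct toy_flush_queue *fq)
{
	fq->count[fq->running_idx] = 0;	/* batch completed */
	fq->running_idx ^= 1;		/* catch up with pending_idx */
}

int main(void)
{
	struct toy_flush_queue fq = { 0 };

	toy_queue_waiter(&fq);
	toy_queue_waiter(&fq);
	toy_kick_flush(&fq);	/* covers the first two waiters */
	toy_queue_waiter(&fq);	/* parks on the other queue meanwhile */
	toy_flush_done(&fq);
	toy_kick_flush(&fq);	/* covers the third waiter */
	return 0;
}
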
74 #include "blk-mq.h"
75 #include "blk-mq-sched.h"
79 REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
81 REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
100 return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq; in blk_get_flush_queue()
111 if (rq->cmd_flags & REQ_PREFLUSH) in blk_flush_policy()
114 (rq->cmd_flags & REQ_FUA)) in blk_flush_policy()
122 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
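
The REQ_FSEQ_* step bits above are consumed by taking the lowest clear bit of the completed-step mask, which is what blk_flush_cur_seq() does with ffz(). A small userspace sketch of that sequencing follows; the TOY_FSEQ_* constants and helpers are stand-ins, not the kernel's.

#include <stdio.h>

/*
 * Completed steps accumulate in a bit mask; the lowest clear bit is the
 * next step to run.
 */
#define TOY_FSEQ_PREFLUSH	(1u << 0)
#define TOY_FSEQ_DATA		(1u << 1)
#define TOY_FSEQ_POSTFLUSH	(1u << 2)
#define TOY_FSEQ_DONE		(1u << 3)

static unsigned int toy_ffz(unsigned int x)
{
	unsigned int bit = 0;

	while (x & (1u << bit))
		bit++;
	return bit;
}

static unsigned int toy_next_step(unsigned int done_mask)
{
	return 1u << toy_ffz(done_mask);
}

int main(void)
{
	/*
	 * A FUA write to a write-back-cache device without native FUA needs
	 * all three steps; steps a request does not need would be marked
	 * completed up front, so toy_ffz() skips straight over them.
	 */
	unsigned int all = TOY_FSEQ_PREFLUSH | TOY_FSEQ_DATA | TOY_FSEQ_POSTFLUSH;
	unsigned int policy = all;
	unsigned int seq = all & ~policy;	/* pre-mark unneeded steps */

	while (toy_next_step(seq) != TOY_FSEQ_DONE) {
		unsigned int step = toy_next_step(seq);

		printf("run step 0x%x\n", step);
		seq |= step;			/* step completed */
	}
	printf("request done\n");
	return 0;
}
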
128 * After flush data completion, @rq->bio is %NULL but we need to in blk_flush_restore_request()
129 * complete the bio again. @rq->biotail is guaranteed to equal the in blk_flush_restore_request()
130 * original @rq->bio. Restore it. in blk_flush_restore_request()
132 rq->bio = rq->biotail; in blk_flush_restore_request()
135 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
136 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
141 struct block_device *part = rq->q->disk->part0; in blk_account_io_flush()
146 ktime_get_ns() - rq->start_time_ns); in blk_account_io_flush()
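
The accounting fragment above computes the flush latency as the completion time minus rq->start_time_ns; the surrounding, unshown lines presumably charge it to the whole-disk device's flush statistics. A toy userspace restatement of that bookkeeping, with invented names:

#include <stdint.h>
#include <stdio.h>

/*
 * The completed flush is charged as one I/O and its latency is added to a
 * nanosecond accumulator.
 */
struct toy_flush_stats {
	uint64_t ios;
	uint64_t nsecs;
};

static void toy_account_flush(struct toy_flush_stats *st,
			      uint64_t start_time_ns, uint64_t now_ns)
{
	st->ios++;
	st->nsecs += now_ns - start_time_ns;
}

int main(void)
{
	struct toy_flush_stats st = { 0 };

	toy_account_flush(&st, 1000, 4500);	/* a 3500 ns flush */
	printf("%llu flush(es), %llu ns total\n",
	       (unsigned long long)st.ios, (unsigned long long)st.nsecs);
	return 0;
}
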
151 * blk_flush_complete_seq - complete flush sequence
161 * spin_lock_irq(fq->mq_flush_lock)
167 struct request_queue *q = rq->q; in blk_flush_complete_seq()
168 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_flush_complete_seq()
171 BUG_ON(rq->flush.seq & seq); in blk_flush_complete_seq()
172 rq->flush.seq |= seq; in blk_flush_complete_seq()
173 cmd_flags = rq->cmd_flags; in blk_flush_complete_seq()
185 fq->flush_pending_since = jiffies; in blk_flush_complete_seq()
186 list_move_tail(&rq->queuelist, pending); in blk_flush_complete_seq()
190 fq->flush_data_in_flight++; in blk_flush_complete_seq()
191 spin_lock(&q->requeue_lock); in blk_flush_complete_seq()
192 list_move(&rq->queuelist, &q->requeue_list); in blk_flush_complete_seq()
193 spin_unlock(&q->requeue_lock); in blk_flush_complete_seq()
204 list_del_init(&rq->queuelist); in blk_flush_complete_seq()
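
Taken together, the blk_flush_complete_seq() fragments above form a small state machine: pre/post flush steps park the request on the pending list, the data step sends the write back to the dispatch path, and the final step completes the original request. A toy dispatch in the same spirit follows; constants and action strings are invented, and the real code moves the request between lists rather than returning strings.

#include <stdio.h>

#define TOY_FSEQ_PREFLUSH	(1u << 0)
#define TOY_FSEQ_DATA		(1u << 1)
#define TOY_FSEQ_POSTFLUSH	(1u << 2)
#define TOY_FSEQ_DONE		(1u << 3)

static const char *toy_next_action(unsigned int next_step)
{
	switch (next_step) {
	case TOY_FSEQ_PREFLUSH:
	case TOY_FSEQ_POSTFLUSH:
		/* wait on the pending list until a flush is kicked */
		return "park on the pending flush list";
	case TOY_FSEQ_DATA:
		/* the data write itself goes back to the dispatch path */
		return "requeue the data write";
	case TOY_FSEQ_DONE:
		/* restore bio/end_io and finish the original request */
		return "restore and complete the request";
	default:
		return "bug";
	}
}

int main(void)
{
	printf("%s\n", toy_next_action(TOY_FSEQ_DATA));
	printf("%s\n", toy_next_action(TOY_FSEQ_DONE));
	return 0;
}
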
219 struct request_queue *q = flush_rq->q; in flush_end_io()
223 struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); in flush_end_io()
226 spin_lock_irqsave(&fq->mq_flush_lock, flags); in flush_end_io()
229 fq->rq_status = error; in flush_end_io()
230 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in flush_end_io()
238 * avoiding use-after-free. in flush_end_io()
240 WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE); in flush_end_io()
241 if (fq->rq_status != BLK_STS_OK) { in flush_end_io()
242 error = fq->rq_status; in flush_end_io()
243 fq->rq_status = BLK_STS_OK; in flush_end_io()
246 if (!q->elevator) { in flush_end_io()
247 flush_rq->tag = BLK_MQ_NO_TAG; in flush_end_io()
250 flush_rq->internal_tag = BLK_MQ_NO_TAG; in flush_end_io()
253 running = &fq->flush_queue[fq->flush_running_idx]; in flush_end_io()
254 BUG_ON(fq->flush_pending_idx == fq->flush_running_idx); in flush_end_io()
257 fq->flush_running_idx ^= 1; in flush_end_io()
267 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in flush_end_io()
273 return rq->end_io == flush_end_io; in is_flush_rq()
277 * blk_kick_flush - consider issuing flush request
286 * spin_lock_irq(fq->mq_flush_lock)
292 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_kick_flush()
295 struct request *flush_rq = fq->flush_rq; in blk_kick_flush()
298 if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending)) in blk_kick_flush()
302 if (fq->flush_data_in_flight && in blk_kick_flush()
304 fq->flush_pending_since + FLUSH_PENDING_TIMEOUT)) in blk_kick_flush()
311 fq->flush_pending_idx ^= 1; in blk_kick_flush()
323 flush_rq->mq_ctx = first_rq->mq_ctx; in blk_kick_flush()
324 flush_rq->mq_hctx = first_rq->mq_hctx; in blk_kick_flush()
326 if (!q->elevator) in blk_kick_flush()
327 flush_rq->tag = first_rq->tag; in blk_kick_flush()
329 flush_rq->internal_tag = first_rq->internal_tag; in blk_kick_flush()
331 flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH; in blk_kick_flush()
332 flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK); in blk_kick_flush()
333 flush_rq->rq_flags |= RQF_FLUSH_SEQ; in blk_kick_flush()
334 flush_rq->end_io = flush_end_io; in blk_kick_flush()
336 * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one in blk_kick_flush()
338 * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref in blk_kick_flush()
339 * and READ flush_rq->end_io in blk_kick_flush()
344 spin_lock(&q->requeue_lock); in blk_kick_flush()
345 list_add_tail(&flush_rq->queuelist, &q->flush_list); in blk_kick_flush()
346 spin_unlock(&q->requeue_lock); in blk_kick_flush()
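
The two guards at the top of blk_kick_flush() above decide when a flush may be issued: only when no flush is already in flight and someone is waiting, and not while sequenced data writes are still in flight unless the oldest waiter has been pending longer than the timeout. A userspace sketch of that test, with a toy clock and counters instead of jiffies and request lists:

#include <stdbool.h>
#include <stdio.h>

#define TOY_FLUSH_PENDING_TIMEOUT	5	/* toy ticks; the kernel uses jiffies */

struct toy_state {
	int pending_idx, running_idx;
	int pending_count;	/* waiters in the pending list */
	int data_in_flight;	/* sequenced data writes in flight */
	long pending_since, now;
};

static bool toy_should_kick_flush(const struct toy_state *s)
{
	/* only one flush in flight, and only if someone is waiting */
	if (s->pending_idx != s->running_idx || s->pending_count == 0)
		return false;
	/* defer while data writes are in flight, but not past the timeout */
	if (s->data_in_flight &&
	    s->now - s->pending_since < TOY_FLUSH_PENDING_TIMEOUT)
		return false;
	return true;
}

int main(void)
{
	struct toy_state s = {
		.pending_count = 1, .data_in_flight = 2,
		.pending_since = 0, .now = 3,
	};

	printf("%d\n", toy_should_kick_flush(&s));	/* 0: data still in flight */
	s.now = 6;
	printf("%d\n", toy_should_kick_flush(&s));	/* 1: waited past the timeout */
	return 0;
}
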
354 struct request_queue *q = rq->q; in mq_flush_data_end_io()
355 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io()
356 struct blk_mq_ctx *ctx = rq->mq_ctx; in mq_flush_data_end_io()
360 if (q->elevator) { in mq_flush_data_end_io()
361 WARN_ON(rq->tag < 0); in mq_flush_data_end_io()
369 spin_lock_irqsave(&fq->mq_flush_lock, flags); in mq_flush_data_end_io()
370 fq->flush_data_in_flight--; in mq_flush_data_end_io()
372 * May have been corrupted by rq->rq_next reuse, we need to in mq_flush_data_end_io()
373 * re-initialize rq->queuelist before reusing it here. in mq_flush_data_end_io()
375 INIT_LIST_HEAD(&rq->queuelist); in mq_flush_data_end_io()
377 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in mq_flush_data_end_io()
385 rq->flush.seq = 0; in blk_rq_init_flush()
386 rq->rq_flags |= RQF_FLUSH_SEQ; in blk_rq_init_flush()
387 rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ in blk_rq_init_flush()
388 rq->end_io = mq_flush_data_end_io; in blk_rq_init_flush()
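
blk_rq_init_flush() above stashes the request's original ->end_io and substitutes the flush-sequencing handler; blk_flush_restore_request(), shown earlier, puts the original back before final completion. The save/override/restore pattern in isolation, as a runnable toy with invented names:

#include <stdio.h>

struct toy_rq {
	void (*end_io)(struct toy_rq *rq);
	void (*saved_end_io)(struct toy_rq *rq);
};

static void toy_original_end_io(struct toy_rq *rq)
{
	(void)rq;
	printf("original completion runs last\n");
}

static void toy_flush_data_end_io(struct toy_rq *rq)
{
	printf("sequencing-step completion\n");
	/* ...advance the flush sequence; on the final step: */
	rq->end_io = rq->saved_end_io;	/* cf. blk_flush_restore_request() */
	if (rq->end_io)
		rq->end_io(rq);
}

int main(void)
{
	struct toy_rq rq = { .end_io = toy_original_end_io };

	/* cf. blk_rq_init_flush(): remember the original, install our own */
	rq.saved_end_io = rq.end_io;
	rq.end_io = toy_flush_data_end_io;

	rq.end_io(&rq);			/* a data step completes */
	return 0;
}
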
398 struct request_queue *q = rq->q; in blk_insert_flush()
399 unsigned long fflags = q->queue_flags; /* may change, cache */ in blk_insert_flush()
401 struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); in blk_insert_flush()
404 WARN_ON_ONCE(rq->bio != rq->biotail); in blk_insert_flush()
410 rq->cmd_flags &= ~REQ_PREFLUSH; in blk_insert_flush()
412 rq->cmd_flags &= ~REQ_FUA; in blk_insert_flush()
419 rq->cmd_flags |= REQ_SYNC; in blk_insert_flush()
426 * advertise a write-back cache. In this case, simply in blk_insert_flush()
444 rq->flush.seq |= REQ_FSEQ_PREFLUSH; in blk_insert_flush()
445 spin_lock_irq(&fq->mq_flush_lock); in blk_insert_flush()
446 fq->flush_data_in_flight++; in blk_insert_flush()
447 spin_unlock_irq(&fq->mq_flush_lock); in blk_insert_flush()
455 spin_lock_irq(&fq->mq_flush_lock); in blk_insert_flush()
457 spin_unlock_irq(&fq->mq_flush_lock); in blk_insert_flush()
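
The blk_insert_flush() fragments above adjust REQ_PREFLUSH/REQ_FUA to what the device actually needs and then branch on the computed policy. A toy dispatch that mirrors that branching, with invented constants and action strings; the real code ends the request, queues it normally, or hands it to the flush state machine.

#include <stdio.h>

#define TOY_FSEQ_PREFLUSH	(1u << 0)
#define TOY_FSEQ_DATA		(1u << 1)
#define TOY_FSEQ_POSTFLUSH	(1u << 2)

static const char *toy_insert_flush(unsigned int policy)
{
	switch (policy) {
	case 0:
		/* e.g. an empty flush to a device with no write-back cache */
		return "complete immediately";
	case TOY_FSEQ_DATA:
		/* plain data write, no flush needed */
		return "dispatch as a normal request";
	case TOY_FSEQ_DATA | TOY_FSEQ_POSTFLUSH:
		/* issue the data now, sequence the post-flush on completion */
		return "dispatch data, post-flush on completion";
	default:
		/* anything needing a pre-flush enters the state machine */
		return "hand to the flush state machine";
	}
}

int main(void)
{
	printf("%s\n", toy_insert_flush(TOY_FSEQ_DATA));
	printf("%s\n", toy_insert_flush(TOY_FSEQ_PREFLUSH | TOY_FSEQ_DATA));
	return 0;
}
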
463 * blkdev_issue_flush - queue a flush
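
blkdev_issue_flush() is the exported helper behind this kernel-doc line. A hedged usage sketch: my_sync_example() and my_bdev are invented names, and the only real API used is blkdev_issue_flush(), which submits an empty preflush bio and waits for it to complete.

#include <linux/blkdev.h>

/*
 * The caller is assumed to already hold a reference on the block device,
 * e.g. at the end of an fsync-like path that wants the volatile write
 * cache flushed.
 */
static int my_sync_example(struct block_device *my_bdev)
{
	return blkdev_issue_flush(my_bdev);
}
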
488 spin_lock_init(&fq->mq_flush_lock); in blk_alloc_flush_queue()
491 fq->flush_rq = kzalloc_node(rq_sz, flags, node); in blk_alloc_flush_queue()
492 if (!fq->flush_rq) in blk_alloc_flush_queue()
495 INIT_LIST_HEAD(&fq->flush_queue[0]); in blk_alloc_flush_queue()
496 INIT_LIST_HEAD(&fq->flush_queue[1]); in blk_alloc_flush_queue()
512 kfree(fq->flush_rq); in blk_free_flush_queue()
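
The allocation and teardown fragments above follow a common kernel shape: a zeroed per-node allocation, then spinlock and list-head initialization before first use, with a matching kfree() on the free side. A generic, illustrative sketch; my_flush_ctx is an invented type, not struct blk_flush_queue.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct my_flush_ctx {
	spinlock_t lock;
	struct list_head queue[2];
};

static struct my_flush_ctx *my_flush_ctx_alloc(int node, gfp_t flags)
{
	struct my_flush_ctx *ctx = kzalloc_node(sizeof(*ctx), flags, node);

	if (!ctx)
		return NULL;
	spin_lock_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->queue[0]);
	INIT_LIST_HEAD(&ctx->queue[1]);
	return ctx;
}

static void my_flush_ctx_free(struct my_flush_ctx *ctx)
{
	kfree(ctx);
}
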
517 * Allow driver to set its own lock class to fq->mq_flush_lock for
521 * nvme-loop, so lockdep may complain 'possible recursive locking' because
524 * fq->mq_flush_lock for avoiding the lockdep warning.
527 * instance is overkill, and worse, it introduces a horrible boot delay
530 * destroy lots of MQ request_queues for non-existent devices, and some robot
532 * an hour is taken during SCSI MQ probe with a per-fq lock class.
537 lockdep_set_class(&hctx->fq->mq_flush_lock, key); in blk_mq_hctx_set_fq_lock_class()
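
A driver that needs this (for example one whose completion path can recurse into flush_end_io(), as the comment above describes) would call blk_mq_hctx_set_fq_lock_class() from its .init_hctx callback with a static lock class key. A hedged sketch; the my_drv_* names are invented for illustration.

#include <linux/blk-mq.h>
#include <linux/lockdep.h>

/*
 * The key is static so every hctx of this driver shares one extra lock
 * class rather than allocating a key per flush queue.
 */
static struct lock_class_key my_drv_hctx_fq_lock_key;

static int my_drv_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			    unsigned int hctx_idx)
{
	blk_mq_hctx_set_fq_lock_class(hctx, &my_drv_hctx_fq_lock_key);
	return 0;
}

/* wired up from the driver's struct blk_mq_ops: .init_hctx = my_drv_init_hctx */
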