/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if
 * there's no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of a flush is double buffered.  Whenever a request
 * needs to execute a PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
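
/*
 * Illustrative decomposition examples (commentary only, derived from the
 * rules above).  For a write carrying REQ_FLUSH|REQ_FUA:
 *
 *	device cache	FUA support	resulting sequence
 *	------------	-----------	------------------------------
 *	none		-		DATA only
 *	writeback	yes		PREFLUSH -> DATA (REQ_FUA kept)
 *	writeback	no		PREFLUSH -> DATA -> POSTFLUSH
 *
 * A data-less REQ_FLUSH request collapses to a single PREFLUSH step, or
 * completes immediately if the device has no writeback cache.
 */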

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
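
/*
 * A note on the sequencing trick above (commentary only): rq->flush.seq
 * accumulates the REQ_FSEQ_* bits of completed steps, so ffz() (find
 * first zero bit) always yields the next step still to run.  For example,
 * a request whose PREFLUSH just completed has seq == REQ_FSEQ_PREFLUSH,
 * and 1 << ffz(seq) == REQ_FSEQ_DATA.  Steps that a request's policy
 * skips are pre-marked as completed before sequencing starts (see
 * blk_insert_flush() passing REQ_FSEQ_ACTIONS & ~policy), so ffz() never
 * lands on them.
 */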

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;

	blk_clear_rq_complete(rq);
}

static void mq_flush_run(struct work_struct *work)
{
	struct request *rq;

	rq = container_of(work, struct request, mq_flush_work);

	memset(&rq->csd, 0, sizeof(rq->csd));
	blk_mq_run_request(rq, true, false);
}

static bool blk_flush_queue_rq(struct request *rq)
{
	if (rq->q->mq_ops) {
		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
		return false;
	} else {
		list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}
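
/*
 * Commentary on the split above (an interpretation, not authoritative):
 * flush steps are advanced from completion context.  On the legacy path
 * it is safe to splice the request onto q->queue_head directly and let
 * the caller kick the queue, hence the %true return ("requests were added
 * to the dispatch queue").  For blk-mq the request is instead handed to
 * kblockd and re-run from process context via mq_flush_run(), so %false
 * is returned and no immediate queue kick is needed.
 */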

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_io(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q);
	return kicked | queued;
}
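
/*
 * Worked example (illustrative): a write with policy
 * REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH walks the switch
 * above four times.  The first call (seq == 0, from blk_insert_flush())
 * lands the request on the pending list; once the flush completes, the
 * next call moves it to flush_data_in_flight and dispatches the data
 * write; after the data write, it re-enters the pending list for
 * POSTFLUSH; finally REQ_FSEQ_DONE restores and ends the request.  An
 * error at any step jumps straight to REQ_FSEQ_DONE.
 */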

static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;

	if (q->mq_ops)
		spin_lock_irqsave(&q->mq_flush_lock, flags);

	running = &q->flush_queue[q->flush_running_idx];
	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 *    the queue.
	 * 2. When a flush request is running on a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request is
	 *    finished to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || q->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	q->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
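
/*
 * Double-buffering timeline (illustrative): while a flush is in flight,
 * flush_pending_idx != flush_running_idx; newly arriving PRE/POSTFLUSH
 * requests queue on the pending list while the running list waits on the
 * in-flight flush.  On completion, flush_end_io() above toggles
 * flush_running_idx to match flush_pending_idx again, advances every
 * request on the old running list, and blk_kick_flush() (called from
 * blk_flush_complete_seq()) may immediately issue the next flush for the
 * list that was accumulating in the meantime.  C1 guarantees the two
 * lists are never both in flight, so two buffers suffice.
 */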

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing a flush
 * request.  Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	q->flush_pending_idx ^= 1;

	if (q->mq_ops) {
		struct blk_mq_ctx *ctx = first_rq->mq_ctx;
		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

		blk_mq_rq_init(hctx, q->flush_rq);
		q->flush_rq->mq_ctx = ctx;

		/*
		 * Reuse the tag value from the first waiting request;
		 * with blk-mq the tag is generated during request
		 * allocation and drivers can rely on it being inside
		 * the range they asked for.
		 */
		q->flush_rq->tag = first_rq->tag;
	} else {
		blk_rq_init(q, q->flush_rq);
	}

	q->flush_rq->cmd_type = REQ_TYPE_FS;
	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq->rq_disk = first_rq->rq_disk;
	q->flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(q->flush_rq);
}
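
/*
 * Batching example for C2/C3 (illustrative): if several FUA-translated
 * writes finish their shared PREFLUSH together, their POSTFLUSHes all
 * accumulate on the pending list.  As long as any of them is still
 * executing DATA, the C2 test above defers the flush, so one POSTFLUSH
 * eventually covers the whole batch; C3's FLUSH_PENDING_TIMEOUT (5
 * seconds) caps how long a steady stream of such requests can keep
 * deferring it.
 */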

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	unsigned long flags;

	ctx = rq->mq_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&q->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions or from __blk_mq_run_hw_queue() to dispatch a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_io(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_run_request(rq, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&q->mq_flush_lock);
		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&q->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
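
/*
 * Flag-adjustment example (illustrative): on a queue whose flush_flags
 * advertise REQ_FLUSH but not REQ_FUA, an incoming REQ_FLUSH|REQ_FUA
 * write leaves blk_insert_flush() with both flags cleared from
 * cmd_flags; the cache-flush semantics are carried instead by the
 * sequenced PREFLUSH and POSTFLUSH steps recorded in @policy.
 */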

/**
 * blk_abort_flushes - @q is being aborted, abort flush requests
 * @q: request_queue being aborted
 *
 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
 * FLUSH/FUA requests for abortion.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_abort_flushes(struct request_queue *q)
{
	struct request *rq, *n;
	int i;

	/*
	 * Requests in flight for data are already owned by the dispatch
	 * queue or the device driver.  Just restore for normal completion.
	 */
	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
	}

	/*
	 * We need to give away requests on flush queues.  Restore for
	 * normal completion and put them on the dispatch queue.
	 */
	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
					 flush.list) {
			list_del_init(&rq->flush.list);
			blk_flush_restore_request(rq);
			list_add_tail(&rq->queuelist, &q->queue_head);
		}
	}
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up
	 * here (e.g. a loop device without a backing file), and so issuing
	 * a flush here will panic.  Ensure there is a request function
	 * before issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

void blk_mq_init_flush(struct request_queue *q)
{
	spin_lock_init(&q->mq_flush_lock);
}
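
/*
 * Usage sketch for blkdev_issue_flush() (illustrative, not part of this
 * file): a filesystem that needs previously completed writes made durable,
 * e.g. in its fsync path, can issue a cache flush directly:
 *
 *	int err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
 *	if (err)
 *		return err;	// surface the error to the fsync caller
 *
 * Passing NULL for @error_sector is fine when the caller doesn't care
 * where the flush failed.
 */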