/linux/block/

blk-crypto-internal.h
    40    return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req), in bio_crypt_ctx_back_mergeable()
    54    return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req), in bio_crypt_ctx_merge_rq()

blk-mq.c
    825   rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
    861   int total_bytes = blk_rq_bytes(req); in blk_complete_request()
    916   * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
    947   if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req)) in blk_update_request()
    1029  if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { in blk_update_request()
    1089  if (blk_rq_bytes(req) & (bdev_logical_block_size(bio->bi_bdev) - 1)) in blk_rq_passthrough_stats()
    1151  if (blk_update_request(rq, error, blk_rq_bytes(rq))) in blk_mq_end_request()
    1389  blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { in blk_add_rq_to_plug()
    3277  if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) in blk_insert_cloned_request()
    3366  rq->__data_len = blk_rq_bytes(rq_sr in blk_rq_prep_clone()
    [remaining matches not shown]
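The blk-mq.c hits document the completion contract: passing the result of blk_rq_bytes() as @nr_bytes to blk_update_request() (which is exactly what blk_mq_end_request() does at line 1151) retires the whole request, while a smaller value leaves the remainder pending. A minimal sketch of a driver completion path built on that contract; my_dev_complete() and the requeue policy are assumptions for illustration, not kernel code:

#include <linux/blk-mq.h>

/* Hedged sketch: my_dev_complete() is a hypothetical driver helper. */
static void my_dev_complete(struct request *rq, unsigned int done_bytes,
			    blk_status_t sts)
{
	if (done_bytes >= blk_rq_bytes(rq)) {
		/* Everything transferred: blk_mq_end_request() internally does
		 * blk_update_request(rq, sts, blk_rq_bytes(rq)) and finishes rq. */
		blk_mq_end_request(rq, sts);
		return;
	}

	/* Partial completion: retire only done_bytes.  blk_update_request()
	 * returns true while payload remains, so push the rest back for a
	 * retry (policy is driver-specific; requeueing is just one option). */
	if (blk_update_request(rq, sts, done_bytes))
		blk_mq_requeue_request(rq, true);
	else
		__blk_mq_end_request(rq, sts);
}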
bsg-lib.c
    223   buf->payload_len = blk_rq_bytes(req); in bsg_map_buffer()

blk-merge.c
    754   req->__data_len += blk_rq_bytes(next); in attempt_merge()
/linux/drivers/scsi/

scsi_lib.c
    738   return blk_rq_bytes(rq); in scsi_rq_err_bytes()
    754   BUG_ON(blk_rq_bytes(rq) && !bytes); in scsi_rq_err_bytes()
    989   } else if (blk_rq_bytes(req) == 0 && sense_current) { in scsi_io_completion_nz_result()
    992   * good_bytes != blk_rq_bytes(req) as the signal for an error. in scsi_io_completion_nz_result()
    1077  if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) { in scsi_io_completion()
    1084  if (scsi_end_request(req, blk_stat, blk_rq_bytes(req))) in scsi_io_completion()
    1153  if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) { in scsi_alloc_sgtables()
    1155  (rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1; in scsi_alloc_sgtables()
    1296  BUG_ON(blk_rq_bytes(req)); in scsi_setup_scsi_cmnd()
    1301  cmd->transfersize = blk_rq_bytes(re in scsi_setup_scsi_cmnd()
    [remaining matches not shown]
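The scsi_lib.c hits at lines 1153-1155 are the DMA padding arithmetic: when blk_rq_bytes(rq) is not aligned to the queue's dma_pad_mask, the driver appends (mask & ~len) + 1 pad bytes to reach the next (mask + 1) boundary. A standalone, runnable illustration of that expression; the mask and length values are made up for the example:

#include <stdio.h>

/* Pad bytes needed to round len up to the next (mask + 1) boundary,
 * mirroring the expression used by scsi_alloc_sgtables(). */
static unsigned int dma_pad_len(unsigned int len, unsigned int mask)
{
	return (mask & ~len) + 1;
}

int main(void)
{
	unsigned int mask = 3;    /* assumed dma_pad_mask: align to 4 bytes */
	unsigned int len  = 510;  /* assumed request size in bytes */

	if (len & mask)           /* same guard as the kernel: only pad if misaligned */
		printf("pad %u byte(s): %u -> %u\n",
		       dma_pad_len(len, mask), len, len + dma_pad_len(len, mask));
	else
		printf("%u bytes already aligned\n", len);
	return 0;
}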
sd.c
    2299  good_bytes = blk_rq_bytes(req); in sd_done()
    2303  scsi_set_resid(SCpnt, blk_rq_bytes(req)); in sd_done()
/linux/drivers/block/

zloop.c
    463   iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq)); in zloop_rw()
    472   nr_bvec, blk_rq_bytes(rq)); in zloop_rw()
    567   if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) { in zloop_complete_rq()
    583   if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) { in zloop_complete_rq()
    585   zone_no, cmd->ret, blk_rq_bytes(rq)); in zloop_complete_rq()

loop.c
    251   ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq)); in lo_fallocate()
    279   if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || in lo_complete_rq()
    378   iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq)); in lo_rw_aio()

ataflop.c
    464   blk_rq_bytes(fd_request))); in fd_end_request_cur()

floppy.c
    2448  if (remaining > blk_rq_bytes(current_req) && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) { in copy_buffer()

rbd.c
    4727  u64 length = blk_rq_bytes(rq); in rbd_queue_workfn()
/linux/include/linux/

blk-mq.h
    1072  * blk_rq_bytes() : bytes left in the entire request
    1083  static inline unsigned int blk_rq_bytes(const struct request *rq) in blk_rq_bytes() function
    1099  return blk_rq_bytes(rq) >> SECTOR_SHIFT; in blk_rq_sectors()
    1122  return blk_rq_bytes(rq); in blk_rq_payload_bytes()
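The blk-mq.h hits are the accessor definitions themselves: blk_rq_bytes() is the number of bytes left in the entire request, blk_rq_sectors() is that value shifted down by SECTOR_SHIFT (512-byte units), and blk_rq_payload_bytes() falls back to blk_rq_bytes() for ordinary requests. A small hedged sketch showing the request length from different views; my_show_rq() is a hypothetical debug helper, not a kernel function:

#include <linux/blk-mq.h>

/* Hypothetical debug helper: print the same request geometry three ways. */
static void my_show_rq(const struct request *rq)
{
	pr_info("rq: %u bytes = %u sectors, starting at sector %llu\n",
		blk_rq_bytes(rq),                     /* bytes left in the whole request */
		blk_rq_sectors(rq),                   /* blk_rq_bytes(rq) >> SECTOR_SHIFT */
		(unsigned long long)blk_rq_pos(rq));  /* first sector of the request */
}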
/linux/kernel/trace/

blktrace.c
    820   blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT, in blk_add_trace_rq_insert()
    826   blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE, in blk_add_trace_rq_issue()
    832   blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE, in blk_add_trace_rq_merge()
    838   blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE, in blk_add_trace_rq_requeue()
    1021  __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), in blk_add_trace_rq_remap()
    1048  __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, in blk_add_driver_data()

/linux/include/scsi/

scsi_cmnd.h
    239   return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift; in scsi_logical_block_count()

/linux/drivers/mtd/ubi/

block.c
    185   int to_read = blk_rq_bytes(req); in ubiblock_read()

/linux/drivers/memstick/core/

mspro_block.c
    649   count = blk_rq_bytes(msb->block_req); in mspro_block_issue_req()
    693   t_len = blk_rq_bytes(msb->block_req); in mspro_block_complete_req()
/linux/drivers/nvme/host/

core.c
    348   blk_rq_bytes(req) >> ns->head->lba_shift, in nvme_log_error()
    937   cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); in nvme_setup_write_zeroes()
    968   if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q)) in nvme_valid_atomic_write()
    974   u64 end = start + blk_rq_bytes(req) - 1; in nvme_valid_atomic_write()
    977   if (blk_rq_bytes(req) > boundary_bytes) in nvme_valid_atomic_write()
    1026  cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); in nvme_setup_rw()
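The nvme core.c hits at lines 937 and 1026 convert the request's byte count into the command's block-count field: shift by the namespace's lba_shift, then subtract one, because NVMe encodes the number of logical blocks as a zero-based value (0 means one block). A standalone illustration of that conversion with assumed values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int lba_shift = 12;        /* assumed 4 KiB logical blocks */
	unsigned int bytes     = 64 * 1024; /* assumed 64 KiB request */

	/* NVMe's NLB field is zero-based: n blocks are encoded as n - 1. */
	uint16_t nlb = (uint16_t)((bytes >> lba_shift) - 1);

	printf("%u bytes of 4 KiB blocks -> NLB = %u (i.e. %u blocks)\n",
	       bytes, nlb, nlb + 1);
	return 0;
}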
pci.c
    1692  blk_rq_bytes(req)); in nvme_timeout()

/linux/arch/um/drivers/

ubd_kern.c
    1206  io_req->io_desc[0].length = blk_rq_bytes(req); in ubd_map_req()

/linux/io_uring/

rsrc.c
    976   imu->len = blk_rq_bytes(rq); in io_buffer_register_bvec()

/linux/drivers/md/

dm-mpath.c
    517   size_t nr_bytes = blk_rq_bytes(rq); in multipath_clone_and_map()
/linux/drivers/block/null_blk/

main.c
    1315  if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) { in null_handle_throttled()
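The null_blk hit charges each request's byte count against a per-interval budget with atomic_long_sub_return(); a negative result means the throttle allowance is exhausted and the request must wait. A minimal sketch of that pattern; the budget variable, the give-back on failure, and my_throttle_rq() are assumptions for illustration, not null_blk's exact logic:

#include <linux/atomic.h>
#include <linux/blk-mq.h>

/* Hypothetical byte-budget throttle; `budget` would be refilled
 * periodically, e.g. from a timer. */
static atomic_long_t budget = ATOMIC_LONG_INIT(0);

static bool my_throttle_rq(struct request *rq)
{
	/* Charge the request's size; going negative means the interval's
	 * allowance is spent, so undo the charge and ask the caller to defer. */
	if (atomic_long_sub_return(blk_rq_bytes(rq), &budget) < 0) {
		atomic_long_add(blk_rq_bytes(rq), &budget);
		return false;
	}
	return true;
}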
/linux/drivers/mmc/core/

block.c
    2127  } else if (!blk_rq_bytes(req)) { in mmc_blk_mq_complete_rq()

/linux/drivers/ata/

libata-scsi.c
    1515  req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; in ata_check_nblocks()

/linux/drivers/s390/block/

dasd_eckd.c
    4679  data_size = blk_rq_bytes(req); in dasd_eckd_build_cp()