Cross-reference listing for blk_rq_pos() across the Linux kernel tree;
each match shows the file line number and the enclosing function.

/linux/include/linux/
  t10-pi.h
    47: return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;   in t10_pi_ref_tag()
    72: return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));   in ext_pi_ref_tag()
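
The two t10-pi.h helpers turn a request's start position (in 512-byte sectors) into a protection-information reference tag: the position is rescaled to the protection interval (typically the logical block size) and truncated to 32 bits for T10 PI or 48 bits for the extended formats. A minimal userspace sketch of the same arithmetic, with lower_48_bits() reimplemented here for illustration:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

/* Keep only the low 48 bits, like the kernel's lower_48_bits(). */
static uint64_t lower_48_bits(uint64_t n)
{
        return n & ((1ULL << 48) - 1);
}

/* 32-bit reference tag: start LBA truncated to 32 bits. */
static uint32_t ref_tag(uint64_t start_sector, unsigned int shift)
{
        return (start_sector >> (shift - SECTOR_SHIFT)) & 0xffffffff;
}

/* 48-bit reference tag for the extended PI formats. */
static uint64_t ext_ref_tag(uint64_t start_sector, unsigned int shift)
{
        return lower_48_bits(start_sector >> (shift - SECTOR_SHIFT));
}

int main(void)
{
        /* Sector 8 on a 4096-byte-interval device (shift = 12) is LBA 1. */
        printf("%u %llu\n", ref_tag(8, 12),
               (unsigned long long)ext_ref_tag(8, 12));
        return 0;
}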

  blktrace_api.h
    121: if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)   in blk_rq_trace_sector()
    123: return blk_rq_pos(rq);   in blk_rq_trace_sector()

/linux/block/
  elevator.c
    67: #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
    241: if (blk_rq_pos(rq) < blk_rq_pos(__rq))   in elv_rb_add()
    243: else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))   in elv_rb_add()
    268: if (sector < blk_rq_pos(rq))   in elv_rb_find()
    270: else if (sector > blk_rq_pos(rq))   in elv_rb_find()
    361: __rq = elv_rqhash_find(q, blk_rq_pos(rq));   in elv_attempt_insert_merge()
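
Two indexing schemes are visible in elevator.c. The merge hash keys each request by the sector just past its end (rq_hash_key), so a bio that starts exactly where a request ends can be found for a back merge, while the tree maintained by elv_rb_add()/elv_rb_find() is ordered by start sector. A simplified sketch of both, using a plain binary tree instead of the kernel's rb-tree:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct rq {
        sector_t pos;		/* start sector, blk_rq_pos() */
        sector_t nr_sectors;	/* length, blk_rq_sectors() */
        struct rq *left, *right;
};

/* Hash key: one past the request's last sector, mirroring
 * rq_hash_key(rq) = blk_rq_pos(rq) + blk_rq_sectors(rq). */
static sector_t rq_hash_key(const struct rq *rq)
{
        return rq->pos + rq->nr_sectors;
}

/* Walk a tree ordered by start sector, as elv_rb_find() does. */
static struct rq *rq_tree_find(struct rq *root, sector_t sector)
{
        while (root) {
                if (sector < root->pos)
                        root = root->left;
                else if (sector > root->pos)
                        root = root->right;
                else
                        return root;
        }
        return NULL;
}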

  blk-merge.c
    517: blk_rq_get_max_sectors(req, blk_rq_pos(req))) {   in ll_back_merge_fn()
    552: blk_rq_get_max_sectors(req, blk_rq_pos(req)))   in req_attempt_discard_merge()
    574: blk_rq_get_max_sectors(req, blk_rq_pos(req)))   in ll_merge_requests_fn()
    669: else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))   in blk_try_req_merge()
    839: else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)   in blk_try_merge()
    841: else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)   in blk_try_merge()
    928: blk_rq_get_max_sectors(req, blk_rq_pos(req)))   in bio_attempt_discard_merge()
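
The blk_try_merge() lines above capture the geometry of merging: a bio that starts at the request's end sector extends it at the back, and a bio that ends at the request's start sector extends it at the front; everything else is left alone (the kernel additionally checks size limits via blk_rq_get_max_sectors()). A sketch of just the positional test; the enum names here are illustrative, not the kernel's:

#include <stdint.h>

typedef uint64_t sector_t;

enum merge { NO_MERGE, BACK_MERGE, FRONT_MERGE };

/* Mirrors the sector comparisons in blk_try_merge(): test the bio's
 * start against both ends of the request's [pos, pos + sectors) range. */
static enum merge try_merge(sector_t rq_pos, sector_t rq_sectors,
                            sector_t bio_sector, sector_t bio_sectors)
{
        if (rq_pos + rq_sectors == bio_sector)
                return BACK_MERGE;      /* bio continues the request */
        if (rq_pos - bio_sectors == bio_sector)
                return FRONT_MERGE;     /* bio ends where the request starts */
        return NO_MERGE;
}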

  bfq-iosched.c
    511: s1 = blk_rq_pos(rq1);   in bfq_choose_req()
    512: s2 = blk_rq_pos(rq2);   in bfq_choose_req()
    749: if (sector > blk_rq_pos(bfqq->next_rq))   in bfq_rq_pos_tree_lookup()
    751: else if (sector < blk_rq_pos(bfqq->next_rq))   in bfq_rq_pos_tree_lookup()
    815: blk_rq_pos(bfqq->next_rq), &parent, &p);   in bfq_pos_tree_add_move()
    1041: return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));   in bfq_find_next_rq()
    2377: return abs(blk_rq_pos(rq) - last_pos);   in get_sdist()
    2503: blk_rq_pos(req) <   in bfq_request_merged()
    2504: blk_rq_pos(container_of(rb_prev(&req->rb_node),   in bfq_request_merged()
    2664: return blk_rq_pos(io_struct);   in bfq_io_struct_pos()
    [all …]
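
bfq uses the start sector both to pick the request closest to the current head position (bfq_choose_req()) and to estimate a queue's seek pattern: get_sdist() is simply the absolute distance between the last completed position and the next request's start. A sketch, assuming unsigned sector arithmetic:

typedef unsigned long long sector_t;

/* Seek distance between the last position and a request's start,
 * as in bfq's get_sdist(). */
static sector_t get_sdist(sector_t last_pos, sector_t pos)
{
        return pos > last_pos ? pos - last_pos : last_pos - pos;
}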

  mq-deadline.c
    145: if (blk_rq_pos(rq) >= pos) {   in deadline_from_pos()
    420: dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);   in __dd_dispatch_request()
    612: BUG_ON(sector != blk_rq_pos(__rq));   in dd_request_merge()

  blk-zoned.c
    877: sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);   in blk_zone_write_plug_init_request()
    881: disk_get_zone_wplug(disk, blk_rq_pos(req));   in blk_zone_write_plug_init_request()

/linux/include/scsi/
  scsi_cmnd.h
    225: return blk_rq_pos(scsi_cmd_to_rq(scmd));   in scsi_get_sector()
    232: return blk_rq_pos(scsi_cmd_to_rq(scmd)) >> shift;   in scsi_get_lba()
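
scsi_get_sector() returns the raw 512-byte sector, while scsi_get_lba() converts it to a device logical block address with a right shift; the shift is the log2 ratio between the device's block size and 512. A sketch of that conversion:

#include <stdint.h>

#define SECTOR_SHIFT 9

/* sector -> LBA, as scsi_get_lba() does. For a 4096-byte logical
 * block the effective shift is ilog2(4096) - SECTOR_SHIFT = 3,
 * so sector 24 maps to LBA 3. */
static uint64_t sector_to_lba(uint64_t sector, unsigned int block_shift)
{
        return sector >> (block_shift - SECTOR_SHIFT);
}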

/linux/drivers/s390/block/
  dasd_fba.c
    334: first_rec = blk_rq_pos(req) >> block->s2b_shift;   in dasd_fba_build_cp_discard()
    336: (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;   in dasd_fba_build_cp_discard()
    451: first_rec = blk_rq_pos(req) >> block->s2b_shift;   in dasd_fba_build_cp_regular()
    453: (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;   in dasd_fba_build_cp_regular()
    489: block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));   in dasd_fba_build_cp_regular()

  dasd_diag.c
    531: first_rec = blk_rq_pos(req) >> block->s2b_shift;   in dasd_diag_build_cp()
    533: (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;   in dasd_diag_build_cp()

  dasd_eckd.c
    3196: first_trk = blk_rq_pos(req) >> block->s2b_shift;   in dasd_eckd_ese_format()
    3199: (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;   in dasd_eckd_ese_format()
    3282: first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;   in dasd_eckd_ese_read()
    3285: (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;   in dasd_eckd_ese_read()
    4671: first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;   in dasd_eckd_build_cp()
    4674: (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;   in dasd_eckd_build_cp()
    4752: start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;   in dasd_eckd_build_cp_raw()
    4753: end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %   in dasd_eckd_build_cp_raw()
    4766: first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;   in dasd_eckd_build_cp_raw()
    4767: last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /   in dasd_eckd_build_cp_raw()
    [all …]
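
All three DASD variants above translate the request's sector range into an inclusive device-block range with the same two lines: shift the start sector right by s2b_shift for the first block, and shift the last covered sector (start + length - 1) for the last block. A sketch of that pattern with hypothetical names:

typedef unsigned long long sector_t;

/* First and last device block covered by [pos, pos + nr_sectors),
 * mirroring the first_rec/last_rec computation in the DASD drivers;
 * s2b_shift is log2 of the sectors-per-block ratio. */
static void sector_range_to_blocks(sector_t pos, sector_t nr_sectors,
                                   unsigned int s2b_shift,
                                   sector_t *first, sector_t *last)
{
        *first = pos >> s2b_shift;
        *last  = (pos + nr_sectors - 1) >> s2b_shift;
}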

/linux/drivers/block/
  z2ram.c
    72: unsigned long start = blk_rq_pos(req) << 9;   in z2_queue_rq()
    80: (unsigned long long)blk_rq_pos(req),   in z2_queue_rq()
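
z2ram addresses its memory by byte, so it converts the start sector with a left shift by 9 (x512); ubi/block.c and mtd_blkdevs.c further down do the same before rescaling to their own block size. A sketch of both steps:

typedef unsigned long long sector_t;

/* Byte offset of a request's first sector (512-byte sectors),
 * as in z2_queue_rq(): start = blk_rq_pos(req) << 9. */
static unsigned long sector_to_byte_offset(sector_t sector)
{
        return (unsigned long)(sector << 9);
}

/* mtd_blkdevs.c then rescales bytes to device blocks:
 * block = (sector << 9) >> blkshift. */
static unsigned long sector_to_dev_block(sector_t sector,
                                         unsigned int blkshift)
{
        return (unsigned long)((sector << 9) >> blkshift);
}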

  ps3disk.c
    116: start_sector = blk_rq_pos(req) * priv->blocking_factor;   in ps3disk_submit_request_sg()

  zloop.c
    142: return blk_rq_pos(rq) >> zlo->zone_shift;   in rq_zone_no()
    363: sector_t sector = blk_rq_pos(rq);   in zloop_rw()
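
zloop finds the zone a request targets with a single shift: zone sizes are a power of two in sectors, so the zone index is the start sector shifted right by log2 of the zone size. A sketch:

typedef unsigned long long sector_t;

/* Zone index for a start sector, as zloop's rq_zone_no() does;
 * zone_shift is log2 of the zone size in 512-byte sectors. */
static unsigned int rq_zone_no(sector_t pos, unsigned int zone_shift)
{
        return (unsigned int)(pos >> zone_shift);
}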

  floppy.c
    2297: block = current_count_sectors + blk_rq_pos(req);   in request_done()
    2308: write_errors[current_drive].first_error_sector = blk_rq_pos(req);   in request_done()
    2311: write_errors[current_drive].last_error_sector = blk_rq_pos(req);   in request_done()
    2570: raw_cmd->cmd[TRACK] = (int)blk_rq_pos(current_req) / max_sector;   in make_raw_rw_request()
    2571: fsector_t = (int)blk_rq_pos(current_req) % max_sector;   in make_raw_rw_request()
    2863: current_req, (long)blk_rq_pos(current_req),   in floppy_queue_rq()
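
make_raw_rw_request() splits the linear start sector into a track and a sector-within-track with a divide and a remainder by the sectors-per-track count (max_sector in the driver). A sketch:

/* Linear sector -> (track, sector-in-track), as in
 * make_raw_rw_request(); sectors_per_track corresponds to
 * max_sector in the driver. */
static void to_track_sector(unsigned int sector,
                            unsigned int sectors_per_track,
                            unsigned int *track, unsigned int *fsector)
{
        *track   = sector / sectors_per_track;
        *fsector = sector % sectors_per_track;
}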

/linux/drivers/md/
  dm-rq.c
    134: blk_rq_pos(orig), tio->n_sectors, true,   in rq_end_stats()
    385: blk_rq_pos(rq));   in map_request()
    443: blk_rq_pos(orig), tio->n_sectors, false, 0,   in dm_start_request()

/linux/drivers/scsi/
  sd_zbc.c
    304: sector_t sector = blk_rq_pos(rq);   in sd_zbc_cmnd_checks()
    334: sector_t sector = blk_rq_pos(rq);   in sd_zbc_setup_zone_mgmt_cmnd()

  sr.c
    331: good_bytes = (error_sector - blk_rq_pos(rq)) << 9;   in sr_done()
    430: if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||   in sr_init_command()
    446: block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);   in sr_init_command()
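
sr.c works in CD blocks of s_size bytes (normally 2048), i.e. s_size >> 9 sectors each: sr_init_command() rejects requests whose start sector is not block-aligned and otherwise divides to get the block number, while sr_done() converts a sector delta back to bytes with << 9. A sketch of the alignment check and conversion:

#include <stdbool.h>

/* s_size is the CD block size in bytes, typically 2048,
 * so s_size >> 9 is the number of 512-byte sectors per block. */
static bool cd_aligned(unsigned int sector, unsigned int s_size)
{
        return sector % (s_size >> 9) == 0;
}

static unsigned int cd_block(unsigned int sector, unsigned int s_size)
{
        return sector / (s_size >> 9);
}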

  sd.c
    913: u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));   in sd_setup_unmap_cmnd()
    1003: u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));   in sd_setup_write_same16_cmnd()
    1030: u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));   in sd_setup_write_same10_cmnd()
    1056: u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));   in sd_setup_write_zeroes_cmnd()
    1325: sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));   in sd_setup_read_write_cmnd()
    1346: if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {   in sd_setup_read_write_cmnd()
    1351: if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {   in sd_setup_read_write_cmnd()
    1417: (unsigned long long)blk_rq_pos(rq),   in sd_setup_read_write_cmnd()
    2252: start_lba = sectors_to_logical(sdev, blk_rq_pos(req));   in sd_completed_bytes()
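
The lines from sd_setup_read_write_cmnd() show the validation done before the CDB is built: the range must not run past the disk's capacity, and on disks with logical blocks larger than 512 bytes both the start and the length must be block-aligned (mask is the sectors-per-block ratio minus one). A sketch of the two checks:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Mirrors the capacity and alignment checks in
 * sd_setup_read_write_cmnd(); mask = (block size / 512) - 1. */
static bool rw_request_valid(sector_t pos, sector_t nr_sectors,
                             sector_t capacity, sector_t mask)
{
        if (pos + nr_sectors > capacity)
                return false;           /* runs past end of device */
        if ((pos & mask) || (nr_sectors & mask))
                return false;           /* not aligned to logical block */
        return true;
}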

/linux/drivers/nvme/host/
  zns.c
    241: c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));   in nvme_setup_zone_mgmt_send()

/linux/drivers/mmc/core/
  block.c
    1228: from = blk_rq_pos(req);   in mmc_blk_issue_erase_rq()
    1284: from = blk_rq_pos(req);   in mmc_blk_issue_secdiscard_rq()
    1363: if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))   in mmc_apply_rel_rw()
    1465: brq->data.blk_addr = blk_rq_pos(req);   in mmc_blk_data_prep()
    1489: (blk_rq_pos(req) + blk_rq_sectors(req) ==   in mmc_blk_data_prep()
    1731: brq->cmd.arg = blk_rq_pos(req);   in mmc_blk_rw_rq_prep()
    1782: brq->cmd.ext_addr = blk_rq_pos(req) >> 32;   in mmc_blk_rw_rq_prep()
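
The two mmc_blk_rw_rq_prep() lines show how the start address is carried in the command: the low 32 bits go in cmd.arg and, on devices that support addresses above 32 bits, the upper bits go in an extended-address field (on older byte-addressed cards the argument would be a byte address instead). A sketch of the split:

#include <stdint.h>

/* Split a 64-bit sector address the way mmc_blk_rw_rq_prep() does:
 * low 32 bits in the command argument, the rest in the extended
 * address used by large-capacity devices. */
static void mmc_split_addr(uint64_t sector, uint32_t *arg, uint32_t *ext)
{
        *arg = (uint32_t)sector;
        *ext = (uint32_t)(sector >> 32);
}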

/linux/drivers/mtd/ubi/
  block.c
    184: u64 pos = blk_rq_pos(req) << 9;   in ubiblock_read()

/linux/drivers/mtd/
  mtd_blkdevs.c
    54: block = blk_rq_pos(req) << 9 >> tr->blkshift;   in do_blktrans_request()

/linux/drivers/cdrom/
  gdrom.c
    583: block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;   in gdrom_readdisk_dma()

/linux/drivers/block/null_blk/
  main.c
    1279: sector_t sector = blk_rq_pos(rq);   in null_handle_data_transfer()
    1598: cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),   in null_poll()
    1649: sector_t sector = blk_rq_pos(rq);   in null_queue_rq()