/linux/fs/netfs/

read_pgpriv2.c
     23  loff_t fpos = folio_pos(folio), i_size;  in netfs_pgpriv2_copy_folio() local
     34  if (fpos >= i_size) {  in netfs_pgpriv2_copy_folio()
     41  if (fpos + fsize > creq->i_size)  in netfs_pgpriv2_copy_folio()
     44  if (flen > i_size - fpos) {  in netfs_pgpriv2_copy_folio()
     45  flen = i_size - fpos;  in netfs_pgpriv2_copy_folio()
     47  } else if (flen == i_size - fpos) {  in netfs_pgpriv2_copy_folio()
     75  atomic64_set(&creq->issued_to, fpos + cache->submit_off);  in netfs_pgpriv2_copy_folio()
     77  part = netfs_advance_write(creq, cache, fpos + cache->submit_off,  in netfs_pgpriv2_copy_folio()
     88  atomic64_set(&creq->issued_to, fpos + fsize);  in netfs_pgpriv2_copy_folio()
    185  unsigned long long fpos, fend;  in netfs_pgpriv2_unlock_copied_folios() local
    [all …]

write_issue.c
    331  loff_t fpos = folio_pos(folio), i_size;  in netfs_write_folio() local
    346  if (fpos >= i_size) {  in netfs_write_folio()
    357  if (fpos + fsize > wreq->i_size)  in netfs_write_folio()
    370  if (flen > i_size - fpos)  in netfs_write_folio()
    371  flen = i_size - fpos;  in netfs_write_folio()
    372  } else if (flen > i_size - fpos) {  in netfs_write_folio()
    373  flen = i_size - fpos;  in netfs_write_folio()
    377  } else if (flen == i_size - fpos) {  in netfs_write_folio()
    496  atomic64_set(&wreq->issued_to, fpos + stream->submit_off);  in netfs_write_folio()
    498  part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,  in netfs_write_folio()
    [all …]

write_collect.c
    140  unsigned long long fpos, fend;  in netfs_writeback_unlock_folios() local
    149  fpos = folio_pos(folio);  in netfs_writeback_unlock_folios()
    154  fend = min_t(unsigned long long, fpos + flen, wreq->i_size);  in netfs_writeback_unlock_folios()
    163  wreq->cleaned_to = fpos + fsize;  in netfs_writeback_unlock_folios()
    179  if (fpos + fsize >= collected_to)  in netfs_writeback_unlock_folios()

read_collect.c
    123  unsigned long long fpos, fend;  in netfs_read_unlock_folios() local
    139  fpos = folio_pos(folio);  in netfs_read_unlock_folios()
    140  fend = umin(fpos + fsize, rreq->i_size);  in netfs_read_unlock_folios()
    149  WRITE_ONCE(rreq->cleaned_to, fpos + fsize);  in netfs_read_unlock_folios()
    168  if (fpos + fsize >= collected_to)  in netfs_read_unlock_folios()

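The netfs hits above share one idiom: take fpos = folio_pos(folio), then clamp how much of the folio is written or copied so it never runs past i_size. A minimal sketch of that clamping step under those assumptions (the helper name and standalone form are illustrative, not the actual netfs code):

#include <linux/mm.h>
#include <linux/pagemap.h>

static size_t sample_clamp_folio_len(struct folio *folio, loff_t i_size)
{
        loff_t fpos = folio_pos(folio);         /* byte offset of the folio in the file */
        size_t fsize = folio_size(folio);
        size_t flen = fsize;

        if (fpos >= i_size)
                return 0;                       /* folio lies entirely beyond EOF */
        if (flen > i_size - fpos)
                flen = i_size - fpos;           /* trim the final, partial folio */
        return flen;
}

The real callers additionally track dirty sub-folio regions and per-stream submit offsets, which the sketch leaves out.
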
/linux/fs/afs/

dir_search.c
     73  size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;  in afs_dir_find_block() local
     86  if (!fq || blpos < fpos) {  in afs_dir_find_block()
     89  fpos = 0;  in afs_dir_find_block()
     97  if (blend <= fpos + fsize) {  in afs_dir_find_block()
    100  if (WARN_ON_ONCE(folio_pos(folio) != fpos))  in afs_dir_find_block()
    104  iter->fpos = fpos;  in afs_dir_find_block()
    105  iter->block = kmap_local_folio(folio, blpos - fpos);  in afs_dir_find_block()
    108  fpos …  in afs_dir_find_block()
    [all …]

dir_edit.c
    117  size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;  in afs_dir_get_block() local
    140  if (blend <= fpos + fsize) {  in afs_dir_get_block()
    143  if (WARN_ON_ONCE(folio_pos(folio) != fpos))  in afs_dir_get_block()
    147  iter->fpos = fpos;  in afs_dir_get_block()
    148  return kmap_local_folio(folio, blpos - fpos);  in afs_dir_get_block()
    150  fpos += fsize;  in afs_dir_get_block()

dir.c
     31  loff_t fpos, u64 ino, unsigned dtype);
     33  loff_t fpos, u64 ino, unsigned dtype);
    570  int nlen, loff_t fpos, u64 ino, unsigned dtype)  in afs_lookup_one_filldir() argument
    638  int nlen, loff_t fpos, u64 ino, unsigned dtype)  in afs_lookup_filldir() argument

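In the afs directory code above, fpos is the file position at which the current folio starts; a directory block at byte position blpos is then mapped at its offset inside that folio. A sketch of that mapping step with a hypothetical helper name (the real iterator state kept in the afs code is not reproduced here):

#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Caller must kunmap_local() the returned pointer when finished. */
static void *sample_map_dir_block(struct folio *folio, size_t blpos)
{
        size_t fpos = folio_pos(folio);         /* file offset this folio starts at */

        /* the block must lie wholly inside the folio */
        if (WARN_ON_ONCE(blpos < fpos || blpos >= fpos + folio_size(folio)))
                return NULL;

        /* kmap_local_folio() takes the block's offset within the folio */
        return kmap_local_folio(folio, blpos - fpos);
}
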
/linux/fs/proc/

kcore.c
    328  loff_t *fpos = &iocb->ki_pos;  in read_kcore_iter() local
    349  if (buflen && *fpos < sizeof(struct elfhdr)) {  in read_kcore_iter()
    371  tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);  in read_kcore_iter()
    372  if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) {  in read_kcore_iter()
    378  *fpos += tsz;  in read_kcore_iter()
    382  if (buflen && *fpos < phdrs_offset + kcore_phdrs_len) {  in read_kcore_iter()
    414  phdrs_offset + kcore_phdrs_len - *fpos);  in read_kcore_iter()
    415  if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,  in read_kcore_iter()
    424  *fpos += tsz;  in read_kcore_iter()
    428  if (buflen && *fpos < notes_offset …  in read_kcore_iter()
    [all …]

vmcore.c
    333  static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)  in __read_vmcore() argument
    340  if (!iov_iter_count(iter) || *fpos >= vmcore_size)  in __read_vmcore()
    343  iov_iter_truncate(iter, vmcore_size - *fpos);  in __read_vmcore()
    346  if (*fpos < elfcorebuf_sz) {  in __read_vmcore()
    347  tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));  in __read_vmcore()
    348  if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)  in __read_vmcore()
    350  *fpos += tsz;  in __read_vmcore()
    359  if (*fpos < elfcorebuf_sz + elfnotes_sz) {  in __read_vmcore()
    373  if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {  in __read_vmcore()
    375  (size_t)*fpos, iov_iter_count …  in __read_vmcore()
    [all …]

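Both /proc readers above treat *fpos as a cursor into a sequence of in-memory regions (ELF header, program headers, notes, then memory): each region copies at most its remaining bytes with copy_to_iter() and advances *fpos. A sketch of one such region copy under those assumptions (sample_read_header and its arguments are illustrative):

#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/uio.h>

static ssize_t sample_read_header(const void *hdr, size_t hdr_len,
                                  struct iov_iter *iter, loff_t *fpos)
{
        size_t tsz;

        if (*fpos >= hdr_len)
                return 0;                       /* position is past this region */

        /* copy no more than what remains of the region from *fpos */
        tsz = min_t(size_t, iov_iter_count(iter), hdr_len - *fpos);
        if (copy_to_iter((const char *)hdr + *fpos, tsz, iter) != tsz)
                return -EFAULT;

        *fpos += tsz;                           /* advance the file position */
        return tsz;
}
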
/linux/arch/mips/alchemy/common/

dma.c
    100  int au1000_dma_read_proc(char *buf, char **start, off_t fpos,  in au1000_dma_read_proc() argument
    113  if (fpos >= len) {  in au1000_dma_read_proc()
    118  *start = buf + fpos;  in au1000_dma_read_proc()
    119  len -= fpos;  in au1000_dma_read_proc()

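au1000_dma_read_proc() uses the legacy read_proc-style calling convention, where the handler formats a full report into buf and then returns only the window starting at fpos via *start. A sketch of that windowing arithmetic, reconstructed from the lines above — the parameters after fpos are not shown in the listing, so the count/eof/data arguments here are assumptions:

#include <linux/types.h>

static int sample_dma_read_proc(char *buf, char **start, off_t fpos,
                                int count, int *eof, void *data)
{
        int len = 0;

        /* ... format the full report into buf here, advancing len ... */

        if (fpos >= len) {                      /* reader is already past the end */
                *eof = 1;
                return 0;
        }
        *start = buf + fpos;                    /* hand back the window at fpos */
        len -= fpos;
        if (len > count)
                len = count;                    /* never exceed the caller's buffer */
        else
                *eof = 1;
        return len;
}
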
/linux/net/rds/

tcp.c
     65  void *buffer, size_t *lenp, loff_t *fpos);
     67  void *buffer, size_t *lenp, loff_t *fpos);
    685  void *buffer, size_t *lenp, loff_t *fpos)  in rds_tcp_skbuf_handler() argument
    689  err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);  in rds_tcp_skbuf_handler()
    706  void *buffer, size_t *lenp, loff_t *fpos)  in rds_tcp_sndbuf_handler() argument
    711  return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);  in rds_tcp_sndbuf_handler()
    715  void *buffer, size_t *lenp, loff_t *fpos)  in rds_tcp_rcvbuf_handler() argument
    720  return rds_tcp_skbuf_handler(rtn, ctl, write, buffer, lenp, fpos);  in rds_tcp_rcvbuf_handler()

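In rds/tcp.c, fpos is simply the sysctl file position handed through to the generic integer handler. A sketch of that proc_handler shape (the handler name and post-write step are illustrative; depending on kernel version the handler takes struct ctl_table * or const struct ctl_table * — the const form is used here):

#include <linux/sysctl.h>

static int sample_bufsize_handler(const struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *fpos)
{
        int err;

        /* let the generic integer handler parse or print the value */
        err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
        if (err < 0 || !write)
                return err;

        /* ... on a successful write, propagate the new value here ... */
        return 0;
}
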
/linux/fs/

binfmt_flat.c
    182  static int decompress_exec(struct linux_binprm *bprm, loff_t fpos, char *dst,  in decompress_exec() argument
    189  pr_debug("decompress_exec(offset=%llx,buf=%p,len=%lx)\n", fpos, dst, len);  in decompress_exec()
    203  ret = kernel_read(bprm->file, buf, LBUFSIZE, &fpos);  in decompress_exec()
    273  ret = kernel_read(bprm->file, buf, LBUFSIZE, &fpos);  in decompress_exec()
    416  loff_t fpos;  in load_flat_file() local
    567  fpos = ntohl(hdr->data_start);  in load_flat_file()
    570  result = decompress_exec(bprm, fpos, (char *)datapos,  in load_flat_file()
    575  result = read_code(bprm->file, datapos, fpos,  in load_flat_file()

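binfmt_flat.c passes &fpos to kernel_read(), which advances the position on every successful read, so repeated calls walk through the binary image. A sketch of that cursor pattern under those assumptions (the chunked loop and names are illustrative, not the decompress_exec() logic):

#include <linux/fs.h>

static int sample_read_chunks(struct file *file, void *buf, size_t bufsize,
                              loff_t start)
{
        loff_t fpos = start;                    /* byte offset within the file */
        ssize_t ret;

        do {
                /* kernel_read() advances fpos by the number of bytes read */
                ret = kernel_read(file, buf, bufsize, &fpos);
                if (ret < 0)
                        return ret;
                /* ... consume ret bytes from buf ... */
        } while (ret > 0);

        return 0;
}
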
/linux/include/trace/events/

io_uring.h
    622  TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),
    624  TP_ARGS(ctx, fpos, wanted, got),
    628  __field(u64, fpos)
    635  __entry->fpos = fpos;
    640  TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
    641  __entry->ctx, __entry->fpos,

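The io_uring.h hits are pieces of a TRACE_EVENT() definition whose fpos field records a file position alongside the wanted/got byte counts. A condensed sketch of that event shape, reconstructed around the listed TP_PROTO/TP_printk lines — the event name sample_short_write is hypothetical, and the surrounding trace-header boilerplate (TRACE_SYSTEM, the multi-read guard, the define_trace.h include) is omitted:

#include <linux/tracepoint.h>

TRACE_EVENT(sample_short_write,

        TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),

        TP_ARGS(ctx, fpos, wanted, got),

        TP_STRUCT__entry(
                __field(void *, ctx)
                __field(u64,    fpos)
                __field(u64,    wanted)
                __field(u64,    got)
        ),

        TP_fast_assign(
                __entry->ctx    = ctx;
                __entry->fpos   = fpos;
                __entry->wanted = wanted;
                __entry->got    = got;
        ),

        TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
                  __entry->ctx, __entry->fpos, __entry->wanted, __entry->got)
);
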
/linux/fs/adfs/

dir_f.c
    233  adfs_f_setpos(struct adfs_dir *dir, unsigned int fpos)  in adfs_f_setpos() argument
    235  if (fpos >= ADFS_NUM_DIR_ENTRIES)  in adfs_f_setpos()
    238  dir->pos = 5 + fpos * 26;  in adfs_f_setpos()

dir_fplus.c
    159  adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)  in adfs_fplus_setpos() argument
    163  if (fpos <= le32_to_cpu(dir->bighead->bigdirentries)) {  in adfs_fplus_setpos()
    164  dir->pos = fpos;  in adfs_fplus_setpos()

adfs.h
    129  int (*setpos)(struct adfs_dir *dir, unsigned int fpos);

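The adfs setpos operations above validate a readdir index (fpos) and translate it into the directory format's internal cursor; for the old F format that is a 5-byte header followed by fixed 26-byte entries. A sketch of that translation with stand-in types (struct sample_dir is not the adfs structure, and the bounds check is simplified):

#include <linux/errno.h>

struct sample_dir {
        unsigned int pos;                       /* format-specific directory cursor */
        unsigned int nr_entries;                /* entries in this directory */
};

static int sample_setpos(struct sample_dir *dir, unsigned int fpos)
{
        if (fpos >= dir->nr_entries)
                return -ENOENT;                 /* index past the last entry */

        /* fixed-size entries: a 5-byte header, then 26 bytes per entry */
        dir->pos = 5 + fpos * 26;
        return 0;
}
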
/linux/drivers/sbus/char/

bbc_envctrl.c
    557  struct bbc_fan_control *fp, *fpos;  in destroy_all_fans() local
    559  list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {  in destroy_all_fans()

/linux/fs/smb/client/

cifsfs.c
   1243  unsigned long long fpos, fend;  in cifs_flush_folio() local
   1253  fpos = folio_pos(folio);  in cifs_flush_folio()
   1254  fend = fpos + size - 1;  in cifs_flush_folio()
   1255  *_fstart = min_t(unsigned long long, *_fstart, fpos);  in cifs_flush_folio()
   1257  if ((first && pos == fpos) || (!first && pos == fend))  in cifs_flush_folio()
   1260  rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);  in cifs_flush_folio()

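cifs_flush_folio() derives the folio's inclusive byte range [fpos, fend] and writes just that range back. A reduced sketch of that step, assuming the simplest case (the first/pos short-circuit from the listing is dropped and the wrapper name is illustrative):

#include <linux/fs.h>
#include <linux/pagemap.h>

static int sample_flush_folio(struct inode *inode, struct folio *folio)
{
        unsigned long long fpos = folio_pos(folio);
        unsigned long long fend = fpos + folio_size(folio) - 1; /* inclusive end */

        /* write back and wait on just the bytes covered by this folio */
        return filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
}
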
/linux/drivers/net/ethernet/microchip/sparx5/lan969x/

lan969x.c
    303  .fpos = lan969x_fpos,

/linux/mm/

nommu.c
    975  loff_t fpos;  in do_mmap_private() local
    977  fpos = vma->vm_pgoff;  in do_mmap_private()
    978  fpos <<= PAGE_SHIFT;  in do_mmap_private()
    980  ret = kernel_read(vma->vm_file, base, len, &fpos);  in do_mmap_private()

internal.h
   1354  struct folio *folio, loff_t fpos, size_t size);

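In mm/nommu.c, the page-granular mapping offset vm_pgoff is scaled to a byte position before the backing file is read directly into the privately allocated region. A sketch of that conversion-and-read step (the helper name and the surrounding error handling are illustrative):

#include <linux/fs.h>
#include <linux/mm.h>

static ssize_t sample_read_mapping(struct vm_area_struct *vma,
                                   void *base, size_t len)
{
        loff_t fpos;

        fpos = vma->vm_pgoff;                   /* offset is stored in pages ... */
        fpos <<= PAGE_SHIFT;                    /* ... so scale it to bytes */

        /* read the file contents into the privately allocated region */
        return kernel_read(vma->vm_file, base, len, &fpos);
}
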
/linux/drivers/net/ethernet/microchip/sparx5/

sparx5_main_regs.h
   2581  BIT(regs->fpos[FP_CPU_PROC_CTRL_AARCH64_MODE_ENA])
   2588  BIT(regs->fpos[FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS])
   2595  BIT(regs->fpos[FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS])
   2602  BIT(regs->fpos[FP_CPU_PROC_CTRL_BE_EXCEP_MODE])
   2609  BIT(regs->fpos[FP_CPU_PROC_CTRL_VINITHI])
   2616  BIT(regs->fpos[FP_CPU_PROC_CTRL_CFGTE])
   2623  BIT(regs->fpos[FP_CPU_PROC_CTRL_CP15S_DISABLE])
   2630  BIT(regs->fpos[FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE])
   2658  BIT(regs->fpos[FP_CPU_PROC_CTRL_L2_FLUSH_REQ])
   2677  BIT(regs->fpos[FP_DEV2G5_PHAD_CTRL_PHAD_EN…
    [all …]

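In the sparx5/lan969x driver, fpos is a per-chip table of single-bit field positions indexed by a field ID, which is how shared register code can write BIT(regs->fpos[FP_...]) and still get the right bit on each SoC (the lan969x.c hit installs that chip's table via .fpos = lan969x_fpos). A minimal sketch of the lookup with made-up field IDs and positions:

#include <linux/bits.h>
#include <linux/types.h>

enum sample_field_id {                          /* stand-ins for the FP_* IDs above */
        SAMPLE_FP_MODE_ENA,
        SAMPLE_FP_FLUSH_REQ,
        SAMPLE_FP_COUNT,
};

struct sample_regs {
        const u8 *fpos;                         /* bit position of each field on this chip */
};

/* one table per supported chip; the positions below are made up */
static const u8 sample_chip_a_fpos[SAMPLE_FP_COUNT] = {
        [SAMPLE_FP_MODE_ENA]  = 0,
        [SAMPLE_FP_FLUSH_REQ] = 7,
};

static u32 sample_field_mask(const struct sample_regs *regs,
                             enum sample_field_id id)
{
        return BIT(regs->fpos[id]);             /* chip-specific bit for a shared field */
}

A chip description would then point .fpos at its own table (here sample_chip_a_fpos), mirroring the .fpos = lan969x_fpos assignment seen above.
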
/linux/arch/mips/include/asm/mach-au1x00/

au1000_dma.h
    125  extern int au1000_dma_read_proc(char *buf, char **start, off_t fpos,

/linux/fs/erofs/

zdata.c
   1041  erofs_off_t fpos = offset + cur - map->m_la;  in z_erofs_scan_folio() local
   1044  cur + min(map->m_llen - fpos, end - cur),  in z_erofs_scan_folio()
   1045  EROFS_I(inode)->z_fragmentoff + fpos);  in z_erofs_scan_folio()

/linux/fs/ceph/

dir.c
     72  loff_t fpos = ((loff_t)high << 28) | (loff_t)off;  in ceph_make_fpos() local
     74  fpos |= HASH_ORDER;  in ceph_make_fpos()
     75  return fpos;  in ceph_make_fpos()

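ceph_make_fpos() packs a directory fragment/chunk value into the upper bits of an loff_t and the offset within it into the low 28 bits, optionally tagging the result with a hash-order flag. A sketch of that packing — the flag's bit position here is a stand-in, since only the name HASH_ORDER appears in the listing:

#include <linux/types.h>

#define SAMPLE_HASH_ORDER       (1ULL << 62)    /* hypothetical flag bit */

static loff_t sample_make_fpos(unsigned int high, unsigned int off,
                               bool hash_order)
{
        /* upper bits: directory fragment/chunk; low 28 bits: offset within it */
        loff_t fpos = ((loff_t)high << 28) | (loff_t)off;

        if (hash_order)
                fpos |= SAMPLE_HASH_ORDER;
        return fpos;
}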