Lines matching references to imu

112 struct io_mapped_ubuf *imu = priv;
115 for (i = 0; i < imu->nr_bvecs; i++) {
116 struct folio *folio = page_folio(imu->bvec[i].bv_page);
131 static void io_free_imu(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
133 if (imu->nr_bvecs <= IO_CACHED_BVECS_SEGS)
134 io_cache_free(&ctx->imu_cache, imu);
136 kvfree(imu);
139 static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
141 if (unlikely(refcount_read(&imu->refs) > 1)) {
142 if (!refcount_dec_and_test(&imu->refs))
146 if (imu->acct_pages)
147 io_unaccount_mem(ctx, imu->acct_pages);
148 imu->release(imu->priv);
149 io_free_imu(ctx, imu);
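
The hits at 131-149 are the teardown path: io_buffer_unmap lets go of the buffer only when the last reference is dropped, then runs the release callback, returns the memory accounting, and hands the imu to io_free_imu, which reuses the ctx cache for small allocations and kvfree()s the rest. A minimal, compilable userspace sketch of the same refcount-plus-release-callback shape; buf, buf_put, and buf_release_fn are hypothetical stand-ins, not kernel names:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for the ownership fields of struct io_mapped_ubuf. */
    struct buf {
        atomic_int refs;              /* like imu->refs */
        void (*release)(void *priv);  /* like imu->release */
        void *priv;                   /* like imu->priv */
    };

    static void buf_release_fn(void *priv)
    {
        printf("releasing backing pages for %p\n", priv);
    }

    /* Mirrors io_buffer_unmap(): only the final put runs release + free. */
    static void buf_put(struct buf *b)
    {
        if (atomic_fetch_sub(&b->refs, 1) != 1)
            return;              /* another owner still holds the buffer */
        b->release(b->priv);     /* imu->release(imu->priv) */
        free(b);                 /* io_free_imu(): cache small, kvfree large */
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));
        atomic_init(&b->refs, 2);
        b->release = buf_release_fn;
        b->priv = b;
        buf_put(b);   /* refs 2 -> 1, nothing freed */
        buf_put(b);   /* refs 1 -> 0, release + free */
        return 0;
    }
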
644 struct io_mapped_ubuf *imu;
648 imu = node->buf;
649 for (j = 0; j < imu->nr_bvecs; j++) {
650 if (!PageCompound(imu->bvec[j].bv_page))
652 if (compound_head(imu->bvec[j].bv_page) == hpage)
661 int nr_pages, struct io_mapped_ubuf *imu,
666 imu->acct_pages = 0;
669 imu->acct_pages++;
679 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
683 if (!imu->acct_pages)
686 ret = io_account_mem(ctx, imu->acct_pages);
688 imu->acct_pages = 0;
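
The io_buffer_account_pin hits (661-688) count ordinary pages one at a time but charge a compound (huge) page in a single step, page_size(hpage) >> PAGE_SHIFT, after the check at 644-652 confirms the head page was not already charged for another registered buffer. A tiny worked example of that arithmetic; the 4 KiB PAGE_SHIFT and 2 MiB huge page are assumed illustrative values:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long PAGE_SHIFT = 12;          /* assumed 4 KiB base pages */
        const unsigned long hpage_size = 2UL << 20;   /* assumed 2 MiB huge page */

        /* Mirrors: imu->acct_pages += page_size(hpage) >> PAGE_SHIFT; */
        unsigned long acct_pages = hpage_size >> PAGE_SHIFT;

        printf("one huge page accounts for %lu base pages\n", acct_pages); /* 512 */
        return 0;
    }
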
781 struct io_mapped_ubuf *imu = NULL;
812 imu = io_alloc_imu(ctx, nr_pages);
813 if (!imu)
816 imu->nr_bvecs = nr_pages;
817 ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
823 imu->ubuf = (unsigned long) iov->iov_base;
824 imu->len = iov->iov_len;
825 imu->folio_shift = PAGE_SHIFT;
826 imu->release = io_release_ubuf;
827 imu->priv = imu;
828 imu->is_kbuf = false;
829 imu->dir = IO_IMU_DEST | IO_IMU_SOURCE;
831 imu->folio_shift = data.folio_shift;
832 refcount_set(&imu->refs, 1);
838 node->buf = imu;
844 vec_len = min_t(size_t, size, (1UL << imu->folio_shift) - off);
845 bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
851 if (imu)
852 io_free_imu(ctx, imu);
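
The io_sqe_buffer_register hits (781-852) show the registration sequence for a user buffer: allocate an imu sized for nr_pages, account the pinned pages, fill in the address, length, folio geometry, release hook, and direction mask, set the refcount to 1, and emit one bvec per folio, where only the first segment is shortened by the starting offset within its folio (line 844). A compilable sketch of just that segment-filling loop; struct seg and fill_segments are hypothetical names, and the sizes in main() are assumed examples:

    #include <stdio.h>

    struct seg { unsigned long len, off; };   /* stand-in for a bio_vec's len/offset */

    /* Mirrors the loop around:
     *   vec_len = min_t(size_t, size, (1UL << imu->folio_shift) - off);
     *   bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
     */
    static unsigned fill_segments(struct seg *segs, unsigned long size,
                                  unsigned long off, unsigned folio_shift)
    {
        unsigned long folio_size = 1UL << folio_shift;
        unsigned i = 0;

        while (size) {
            unsigned long room = folio_size - off;
            unsigned long vec_len = size < room ? size : room;

            segs[i].len = vec_len;
            segs[i].off = off;
            size -= vec_len;
            off = 0;          /* only the first segment starts mid-folio */
            i++;
        }
        return i;
    }

    int main(void)
    {
        struct seg segs[8];
        /* assumed: 10000-byte buffer starting 100 bytes into a 4 KiB folio */
        unsigned n = fill_segments(segs, 10000, 100, 12);

        for (unsigned i = 0; i < n; i++)
            printf("seg %u: len=%lu off=%lu\n", i, segs[i].len, segs[i].off);
        return 0;
    }
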
943 struct io_mapped_ubuf *imu;
968 imu = io_alloc_imu(ctx, nr_bvecs);
969 if (!imu) {
975 imu->ubuf = 0;
976 imu->len = blk_rq_bytes(rq);
977 imu->acct_pages = 0;
978 imu->folio_shift = PAGE_SHIFT;
979 imu->nr_bvecs = nr_bvecs;
980 refcount_set(&imu->refs, 1);
981 imu->release = release;
982 imu->priv = rq;
983 imu->is_kbuf = true;
984 imu->dir = 1 << rq_data_dir(rq);
986 bvec = imu->bvec;
990 node->buf = imu;
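
io_buffer_register_bvec (943-990) builds an imu around a block request's existing bvecs instead of pinned user pages: ubuf is 0, is_kbuf is true, priv carries the request, and dir is narrowed to the request's data direction, whereas the user path above permits both IO_IMU_DEST and IO_IMU_SOURCE. The sketch below shows the direction check that lines 1079 and 1493 later apply; the enum values are assumptions for illustration, not the kernel's definitions:

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed illustrative values; the kernel derives these from its iter directions. */
    enum { DIR_DEST = 0, DIR_SOURCE = 1 };
    enum { IMU_DEST = 1 << DIR_DEST, IMU_SOURCE = 1 << DIR_SOURCE };

    static bool dir_allowed(unsigned imu_dir, int ddir)
    {
        /* Mirrors: if (!(imu->dir & (1 << ddir))) ...reject the import... */
        return imu_dir & (1U << ddir);
    }

    int main(void)
    {
        unsigned user_buf = IMU_DEST | IMU_SOURCE;  /* user registration: both ways */
        unsigned kbuf_read = IMU_DEST;              /* kbuf: only the request's direction */

        printf("user buf as source: %d\n", dir_allowed(user_buf, DIR_SOURCE));  /* 1 */
        printf("kbuf as source:     %d\n", dir_allowed(kbuf_read, DIR_SOURCE)); /* 0 */
        return 0;
    }
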
1032 const struct io_mapped_ubuf *imu)
1039 if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
1047 struct io_mapped_ubuf *imu, size_t len, size_t offset)
1051 iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, count);
1054 if (count < imu->len) {
1067 struct io_mapped_ubuf *imu,
1076 ret = validate_fixed_range(buf_addr, len, imu);
1079 if (!(imu->dir & (1 << ddir)))
1082 offset = buf_addr - imu->ubuf;
1084 if (imu->is_kbuf)
1085 return io_import_kbuf(ddir, iter, imu, len, offset);
1097 folio_mask = (1UL << imu->folio_shift) - 1;
1098 bvec = imu->bvec;
1104 seg_skip = 1 + (offset >> imu->folio_shift);
1108 nr_segs = (offset + len + bvec->bv_offset + folio_mask) >> imu->folio_shift;
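
For non-kbuf imports, io_import_fixed (1067-1108) turns the byte offset into a starting bvec and a segment count with shifts and masks only, relying on every bvec after the first covering exactly one folio. A standalone sketch of that arithmetic from lines 1097-1108; the folio_shift, first-segment length, and request window in main() are assumed example values:

    #include <stdio.h>

    int main(void)
    {
        /* assumed example geometry */
        unsigned folio_shift = 12;                 /* 4 KiB folios */
        unsigned long bv_offset = 100;             /* first bvec starts 100 bytes in */
        unsigned long first_len = 3996;            /* first bvec length */
        unsigned long offset = 5000, len = 2000;   /* requested import window */

        unsigned long folio_mask = (1UL << folio_shift) - 1;
        unsigned long seg_skip = 0, nr_segs;

        if (offset >= first_len) {
            /* Mirrors: seg_skip = 1 + (offset >> imu->folio_shift); */
            offset -= first_len;
            seg_skip = 1 + (offset >> folio_shift);
            offset &= folio_mask;
            bv_offset = 0;   /* bvecs past the first are folio-aligned */
        }
        /* Mirrors: nr_segs = (offset + len + bvec->bv_offset + folio_mask) >> folio_shift; */
        nr_segs = (offset + len + bv_offset + folio_mask) >> folio_shift;

        printf("skip %lu segments, span %lu segments, in-segment offset %lu\n",
               seg_skip, nr_segs, offset);
        return 0;
    }
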
1338 struct io_mapped_ubuf *imu,
1342 unsigned long folio_size = 1 << imu->folio_shift;
1356 ret = validate_fixed_range(buf_addr, iov_len, imu);
1365 offset = buf_addr - imu->ubuf;
1370 offset += imu->bvec[0].bv_offset;
1372 src_bvec = imu->bvec + (offset >> imu->folio_shift);
1392 struct io_mapped_ubuf *imu)
1394 unsigned shift = imu->folio_shift;
1404 struct io_mapped_ubuf *imu,
1408 const struct bio_vec *src_bvec = imu->bvec;
1432 const struct io_mapped_ubuf *imu,
1436 const struct bio_vec *bvec = imu->bvec;
1441 ret = validate_fixed_range(offset, iov->iov_len, imu);
1445 for (i = 0; off < offset + iov->iov_len && i < imu->nr_bvecs;
1455 struct io_mapped_ubuf *imu, unsigned *nr_segs)
1469 ret = iov_kern_bvec_size(&iov[i], imu, &max_segs);
1484 struct io_mapped_ubuf *imu;
1492 imu = node->buf;
1493 if (!(imu->dir & (1 << ddir)))
1499 if (imu->is_kbuf) {
1500 int ret = io_kern_bvec_size(iov, nr_iovs, imu, &nr_segs);
1505 nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
1533 if (imu->is_kbuf)
1534 return io_vec_fill_kern_bvec(ddir, iter, imu, iov, nr_iovs, vec);
1536 return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
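
The final cluster (1338-1536) handles vectored imports of registered buffers: io_import_reg_vec checks the direction mask, sizes a temporary bvec array (io_kern_bvec_size for kbufs, io_estimate_bvec_size otherwise), then fills it via io_vec_fill_kern_bvec or io_vec_fill_bvec. The sketch below illustrates only the sizing idea for the user-buffer case, under the assumption (not shown in the listing) that one iovec can span at most len >> folio_shift plus two folios; max_segs_for_iov is a hypothetical name:

    #include <stdio.h>

    /* Upper bound on how many folio-sized segments one iovec can touch:
     * len >> folio_shift full folios, plus at most one partial folio at each end.
     * This is a sketch of the estimate, not the kernel's exact helper.
     */
    static unsigned long max_segs_for_iov(unsigned long len, unsigned folio_shift)
    {
        return (len >> folio_shift) + 2;
    }

    int main(void)
    {
        unsigned folio_shift = 12;                 /* assumed 4 KiB folios */
        unsigned long lens[] = { 100, 4096, 10000 };

        for (unsigned i = 0; i < 3; i++)
            printf("iov_len=%lu -> at most %lu segments\n",
                   lens[i], max_segs_for_iov(lens[i], folio_shift));
        return 0;
    }
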