Lines Matching defs:pgsz
61 unsigned long pgsz)
63 return ib_umem_start_dma_addr(umem) & (pgsz - 1);
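The two matched lines above are the tail of ib_umem_dma_offset(); the listing appears to come from include/rdma/ib_umem.h. Masking the mapping's first DMA address with (pgsz - 1) yields the byte offset of the user buffer inside its first pgsz-aligned DMA block. A minimal consumer sketch under that assumption; struct my_mr and my_mr_set_offset() are illustrative names, not kernel APIs:

#include <rdma/ib_umem.h>

struct my_mr {				/* illustrative driver MR state */
	unsigned long page_offset;
};

static void my_mr_set_offset(struct my_mr *mr, struct ib_umem *umem,
			     unsigned long pgsz)
{
	/* e.g. a umem whose first DMA address is 0x12345234 with
	 * pgsz = 0x1000 gives page_offset = 0x234 */
	mr->page_offset = ib_umem_dma_offset(umem, pgsz);
}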
67 unsigned long pgsz)
69 return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
70 ALIGN_DOWN(umem->iova, pgsz))) /
71 pgsz;
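The matched lines above are the body of ib_umem_num_dma_blocks(): the number of pgsz-aligned DMA blocks needed to cover [iova, iova + length). For example, iova = 0x1234, length = 0x3000, pgsz = 0x1000 gives (ALIGN(0x4234, 0x1000) - ALIGN_DOWN(0x1234, 0x1000)) / 0x1000 = (0x5000 - 0x1000) / 0x1000 = 4 blocks. A sketch of the usual sizing step before walking the blocks; my_alloc_block_list() is an invented example helper:

#include <linux/slab.h>
#include <rdma/ib_umem.h>

/* Size a DMA-address array before iterating the umem's blocks. */
static dma_addr_t *my_alloc_block_list(struct ib_umem *umem,
				       unsigned long pgsz, size_t *nblocks)
{
	*nblocks = ib_umem_num_dma_blocks(umem, pgsz);
	return kcalloc(*nblocks, sizeof(dma_addr_t), GFP_KERNEL);
}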
81 unsigned long pgsz)
84 umem->sgt_append.sgt.nents, pgsz);
85 biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
86 biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
97 * @pgsz: Page size to split the list into
99 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
100 * returned DMA blocks will be aligned to pgsz and span the range:
101 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
105 #define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
106 for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
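The matches at file lines 81-86 belong to __rdma_umem_block_iter_start(), the helper the rdma_umem_for_each_dma_block() macro above expands to: it seeds the block iterator from the umem's scatterlist, pre-advances the walk to the first pgsz block that actually contains user data, and caps the iteration at ib_umem_num_dma_blocks(). The common driver pattern, sketched here with invented names (my_fill_page_list, pas, npas), pairs the macro with rdma_block_iter_dma_address() to collect the aligned DMA address of every block:

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

static int my_fill_page_list(struct ib_umem *umem, u64 iova,
			     unsigned long supported_pgsz_bitmap,
			     dma_addr_t *pas, size_t npas)
{
	struct ib_block_iter biter;
	unsigned long pgsz;
	size_t i = 0;

	/* Largest block size both the device and the mapping support. */
	pgsz = ib_umem_find_best_pgsz(umem, supported_pgsz_bitmap, iova);
	if (!pgsz)
		return -EINVAL;

	if (ib_umem_num_dma_blocks(umem, pgsz) > npas)
		return -ENOMEM;

	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		pas[i++] = rdma_block_iter_dma_address(&biter);

	return 0;
}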
152 unsigned long pgsz;
159 pgsz = roundup_pow_of_two((dma_addr ^ (umem->length - 1 + dma_addr)) + 1);
160 return !!ib_umem_find_best_pgoff(umem, pgsz, U64_MAX);
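The last two matches compute the smallest naturally aligned power-of-two block that could contain the whole mapping: XOR-ing the first and last byte DMA addresses exposes every address bit that changes across the range, and rounding (that + 1) up to a power of two gives the covering block size. Passing that single size to ib_umem_find_best_pgoff() with an unrestricted offset mask (U64_MAX) then returns nonzero only if the umem really is one contiguous DMA range. A hedged reconstruction of the enclosing helper (in recent kernels this is ib_umem_is_contiguous()); treat it as a sketch, not a verbatim copy:

#include <linux/log2.h>
#include <rdma/ib_umem.h>

static bool my_umem_is_contiguous(struct ib_umem *umem)
{
	dma_addr_t dma_addr = ib_umem_start_dma_addr(umem);
	unsigned long pgsz;

	/* Example: dma_addr = 0x12340000, length = 0x6000
	 *   last byte = 0x12345fff
	 *   XOR       = 0x5fff, +1 = 0x6000, pow2 = 0x8000
	 * i.e. the whole range fits in one 0x8000-aligned block. */
	pgsz = roundup_pow_of_two((dma_addr ^ (umem->length - 1 + dma_addr)) + 1);

	/* One block of that size, any offset: nonzero iff contiguous. */
	return !!ib_umem_find_best_pgoff(umem, pgsz, U64_MAX);
}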