Searched refs:cq_entries (Results 1 – 12 of 12) sorted by relevance
/linux/io_uring/

fdinfo.c
     64  unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;  in __io_uring_show_fdinfo()
     70  unsigned int cq_entries, sq_entries;  in __io_uring_show_fdinfo() (local)
    154  cq_entries = min(cq_tail - cq_head, ctx->cq_entries);  in __io_uring_show_fdinfo()
    155  for (i = 0; i < cq_entries; i++) {  in __io_uring_show_fdinfo()
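Both rings are sized to a power of two, which is why the fdinfo code above can build index masks as entries - 1 and treat heads and tails as free-running 32-bit counters. A minimal sketch of that indexing idiom; cq_view, cq_pending, and cq_slot are illustrative names, not kernel symbols:

    #include <stdint.h>

    /* Head/tail run freely and wrap through the mask; this only works
     * because cq_entries is always a power of two. */
    struct cq_view {
            uint32_t head;        /* consumer position */
            uint32_t tail;        /* producer position */
            uint32_t cq_entries;  /* ring size, power of two */
    };

    static inline uint32_t cq_pending(const struct cq_view *cq)
    {
            /* Unsigned subtraction stays correct across 32-bit wraparound. */
            return cq->tail - cq->head;
    }

    static inline uint32_t cq_slot(const struct cq_view *cq, uint32_t idx)
    {
            return idx & (cq->cq_entries - 1);
    }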
io_uring.c
    240  hash_bits = ilog2(p->cq_entries) - 5;  in io_ring_ctx_alloc()
    527  if (!dying && __io_cqring_events(ctx) == ctx->cq_entries)  in __io_cqring_overflow_flush()
    689  if (__io_cqring_events(ctx) < ctx->cq_entries) {  in io_fill_nop_cqe()
    709  unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);  in io_cqe_cache_refill()
    724  if (cqe32 && off + 1 == ctx->cq_entries) {  in io_cqe_cache_refill()
    731  queued = min(__io_cqring_events(ctx), ctx->cq_entries);  in io_cqe_cache_refill()
    732  free = ctx->cq_entries - queued;  in io_cqe_cache_refill()
    734  len = min(free, ctx->cq_entries - off);  in io_cqe_cache_refill()
   1193  min_events = min(min_events, ctx->cq_entries);  in io_iopoll_check()
   2074  unsigned int cq_entries, struct io_rings_layout *rl)  in rings_size() (argument)
   [all …]
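The io_cqe_cache_refill() hits (lines 709–734) show the usual way to carve a contiguous run out of a circular buffer: the run is capped both by the number of free slots and by the distance from the tail's physical slot to the end of the array. A hedged sketch of that computation under the same power-of-two assumption, with illustrative names rather than the kernel's struct fields:

    #include <stdint.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Longest contiguous span that can be filled starting at the tail,
     * mirroring the queued/free/len clamping in io_cqe_cache_refill(). */
    static uint32_t contiguous_free(uint32_t head, uint32_t tail,
                                    uint32_t entries)
    {
            uint32_t off    = tail & (entries - 1);  /* physical tail slot */
            uint32_t queued = tail - head;           /* unreaped CQEs */
            uint32_t free   = entries - queued;      /* total free slots */

            return MIN(free, entries - off);         /* stop at array end */
    }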
register.c
    546  WRITE_ONCE(n.rings->cq_ring_mask, p->cq_entries - 1);  in io_register_resize_rings()
    548  WRITE_ONCE(n.rings->cq_ring_entries, p->cq_entries);  in io_register_resize_rings()
    612  if (tail - old_head > p->cq_entries) {  in io_register_resize_rings()
    622  unsigned src_head = i & (ctx->cq_entries - 1);  in io_register_resize_rings()
    623  unsigned dst_head = i & (p->cq_entries - 1);  in io_register_resize_rings()
    642  ctx->cq_entries = p->cq_entries;  in io_register_resize_rings()
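Lines 622–623 are the heart of the resize path: the same logical index i is masked independently against the old and new sizes, so pending CQEs keep their order while moving between rings of different power-of-two capacity; line 612 first checks that they all fit. A sketch of that copy loop under those assumptions, with a generic element size and an illustrative helper name:

    #include <stdint.h>
    #include <string.h>

    /* Copy pending entries from a power-of-two ring of old_entries slots
     * into one of new_entries slots, preserving logical order, in the
     * spirit of io_register_resize_rings(). */
    static void ring_copy(void *dst, uint32_t new_entries,
                          const void *src, uint32_t old_entries,
                          uint32_t head, uint32_t tail, size_t elem_sz)
    {
            for (uint32_t i = head; i != tail; i++) {
                    uint32_t s = i & (old_entries - 1);
                    uint32_t d = i & (new_entries - 1);

                    memcpy((char *)dst + d * elem_sz,
                           (const char *)src + s * elem_sz, elem_sz);
            }
    }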
wait.c
    196  min_events = min_t(int, min_events, ctx->cq_entries);  in io_cqring_wait()
/linux/drivers/infiniband/hw/hns/

hns_roce_cq.c
    368  static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,  in set_cq_param() (argument)
    373  cq_entries = max(cq_entries, hr_dev->caps.min_cqes);  in set_cq_param()
    374  cq_entries = roundup_pow_of_two(cq_entries);  in set_cq_param()
    375  hr_cq->ib_cq.cqe = cq_entries - 1;  /* used as cqe index */  in set_cq_param()
    376  hr_cq->cq_depth = cq_entries;  in set_cq_param()
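The hns RoCE driver normalizes a requested CQ size the same way io_uring does: clamp to a hardware minimum, then round up to the next power of two so the cq_entries - 1 masking idiom holds. A userspace equivalent of the kernel's roundup_pow_of_two() for 32-bit values, assuming a GCC/Clang builtin:

    #include <stdint.h>

    /* Round v up to the next power of two, like roundup_pow_of_two();
     * __builtin_clz makes this GCC/Clang-specific. */
    static uint32_t roundup_pow_of_two32(uint32_t v)
    {
            if (v <= 1)
                    return 1;
            return 1u << (32 - __builtin_clz(v - 1));
    }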
/linux/include/trace/events/

io_uring.h
     29  TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),
     31  TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
     37  __field( u32, cq_entries )
     45  __entry->cq_entries = cq_entries;
     51  __entry->cq_entries, __entry->flags)
/linux/tools/include/io_uring/

mini_liburing.h
     79  sq->ring_sz += p->cq_entries * sizeof(struct io_uring_cqe);  in io_uring_mmap()
    108  cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);  in io_uring_mmap()
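mini_liburing.h (and lam.c further down) shows the userspace half of the contract: after io_uring_setup() fills in io_uring_params, the CQ ring mapping must span cq_off.cqes plus cq_entries CQEs. A minimal sketch of that mmap call, with error handling trimmed and an illustrative helper name:

    #define _GNU_SOURCE
    #include <linux/io_uring.h>
    #include <sys/mman.h>
    #include <stddef.h>

    /* Map the CQ ring of an already-set-up io_uring instance; the size
     * formula matches the ring_sz computation quoted above. */
    static void *map_cq_ring(int ring_fd, const struct io_uring_params *p,
                             size_t *out_sz)
    {
            *out_sz = p->cq_off.cqes +
                      p->cq_entries * sizeof(struct io_uring_cqe);
            return mmap(NULL, *out_sz, PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_POPULATE, ring_fd,
                        IORING_OFF_CQ_RING);
    }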
/linux/tools/include/uapi/linux/

io_uring.h
    487  __u32 cq_entries;  (member)
/linux/include/linux/

io_uring_types.h
    379  unsigned cq_entries;  (member)
/linux/include/uapi/linux/

io_uring.h
    609  __u32 cq_entries;  (member)
/linux/tools/testing/selftests/x86/

lam.c
    490  cring->ring_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);  in mmap_io_uring()
/linux/tools/testing/selftests/ublk/

kublk.c
     39  p.cq_entries = cq_depth;  in ublk_setup_ring()
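kublk.c is the one hit in this list where cq_entries is an input rather than an output: the value in io_uring_params is honored only together with IORING_SETUP_CQSIZE, otherwise the kernel sizes the CQ ring itself (twice the SQ size by default), and any requested value is rounded up to a power of two. A minimal raw-syscall sketch of that setup, with an illustrative helper name:

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    /* Size the CQ ring independently of the SQ; without the
     * IORING_SETUP_CQSIZE flag, cq_entries would be ignored. */
    static int setup_ring_cqsize(unsigned sq_depth, unsigned cq_depth,
                                 struct io_uring_params *p)
    {
            memset(p, 0, sizeof(*p));
            p->flags = IORING_SETUP_CQSIZE;
            p->cq_entries = cq_depth;

            return (int)syscall(__NR_io_uring_setup, sq_depth, p);
    }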