
Searched refs:inflight (Results 1 – 25 of 57) sorted by relevance


/linux/include/trace/events/
scmi.h
39 bool poll, int inflight),
40 TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll, inflight),
48 __field(int, inflight)
57 __entry->inflight = inflight;
60 TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u inflight=%d",
62 __entry->transfer_id, __entry->poll, __entry->inflight)
95 int status, int inflight),
96 TP_ARGS(transfer_id, msg_id, protocol_id, seq, status, inflight),
104 __field(int, inflight)
[all...]
page_pool.h
17 s32 inflight, u32 hold, u32 release),
19 TP_ARGS(pool, inflight, hold, release),
23 __field(s32, inflight)
31 __entry->inflight = inflight;
37 TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
38 __entry->pool, __entry->inflight, __entry->hold,
wbt.h
127 * @inflight: tracked writes inflight
132 int step, unsigned int inflight),
134 TP_ARGS(bdi, status, step, inflight),
140 __field(unsigned int, inflight)
148 __entry->inflight = inflight;
151 TP_printk("%s: status=%u, step=%d, inflight=%u", __entry->name,
152 __entry->status, __entry->step, __entry->inflight)
/linux/drivers/firmware/arm_scmi/transports/
smc.c
51 * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
68 atomic_t inflight; member
100 atomic_set(&scmi_info->inflight, INFLIGHT_NONE); in smc_channel_lock_init()
105 static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) in smc_xfer_inflight() argument
109 ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq); in smc_xfer_inflight()
119 spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); in smc_channel_lock_acquire()
127 atomic_set(&scmi_info->inflight, INFLIGHT_NONE); in smc_channel_lock_release()
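
The smc.c hits above show the transport serializing access to the shared Tx/Rx area: a transfer claims the channel by compare-and-swapping the inflight flag from INFLIGHT_NONE to its sequence number, spins until that succeeds, and resets the flag on release. Below is a minimal userspace sketch of that claim/release pattern using C11 atomics in place of the kernel's atomic_t and spin_until_cond(); the helper names and the -1 "free" marker are illustrative, not taken from the driver.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define CHANNEL_FREE (-1)   /* stand-in for the driver's INFLIGHT_NONE */

    static atomic_int channel_inflight = CHANNEL_FREE;

    /* Claim the channel for this transfer if nobody else owns it. */
    static bool channel_try_acquire(int seq)
    {
        int expected = CHANNEL_FREE;

        return atomic_compare_exchange_strong(&channel_inflight,
                                              &expected, seq);
    }

    /* Spin until the channel is free, then own it (spin_until_cond() analogue). */
    static void channel_acquire(int seq)
    {
        while (!channel_try_acquire(seq))
            ;
    }

    /* Release so the next transfer can claim the shared memory area. */
    static void channel_release(void)
    {
        atomic_store(&channel_inflight, CHANNEL_FREE);
    }
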
/linux/block/
genhd.c
129 unsigned int inflight[2], bool mq_driver) in bdev_count_inflight_rw()
136 blk_mq_in_driver_rw(part, inflight); in bdev_count_inflight_rw()
150 inflight[READ] = read > 0 ? read : 0; in bdev_count_inflight_rw()
151 inflight[WRITE] = write > 0 ? write : 0; in bdev_count_inflight_rw()
165 unsigned int inflight[2] = {0}; in bdev_count_inflight() local
167 bdev_count_inflight_rw(part, inflight, false); in bdev_count_inflight()
169 return inflight[READ] + inflight[WRITE]; in bdev_count_inflight()
1067 unsigned int inflight; in part_stat_show() local
1069 inflight = bdev_count_inflight(bdev); in part_stat_show()
1070 if (inflight) { in part_stat_show()
[all …]
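
bdev_count_inflight_rw() above derives the in-flight read/write counts by summing per-CPU issue/complete statistics, and because those counters are sampled racily the raw difference can be momentarily negative, so it is clamped to zero. A rough standalone illustration of that clamping follows; the arrays merely stand in for the per-CPU part_stat counters.

    #include <stdio.h>

    #define NR_CPUS 4
    enum { RD, WR };

    /* Stand-ins for the per-CPU "ios issued" / "ios completed" counters. */
    static long issued[NR_CPUS][2];
    static long completed[NR_CPUS][2];

    static void count_inflight_rw(unsigned int inflight[2])
    {
        long rd = 0, wr = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            rd += issued[cpu][RD] - completed[cpu][RD];
            wr += issued[cpu][WR] - completed[cpu][WR];
        }

        /* A racy snapshot can go negative; report 0 instead. */
        inflight[RD] = rd > 0 ? rd : 0;
        inflight[WR] = wr > 0 ? wr : 0;
    }

    int main(void)
    {
        unsigned int inflight[2];

        issued[0][RD] = 3;
        completed[1][RD] = 1;   /* completion observed on another CPU */

        count_inflight_rw(inflight);
        printf("read=%u write=%u\n", inflight[RD], inflight[WR]);
        return 0;
    }
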
blk-wbt.c
199 int inflight, limit; in wbt_rqw_done() local
201 inflight = atomic_dec_return(&rqw->inflight); in wbt_rqw_done()
219 if (inflight && inflight >= limit) in wbt_rqw_done()
223 int diff = limit - inflight; in wbt_rqw_done()
225 if (!inflight || diff >= rwb->wb_background / 2) in wbt_rqw_done()
293 ret += atomic_read(&rwb->rq_wait[i].inflight); in wbt_inflight()
427 unsigned int inflight = wbt_inflight(rwb); in wb_timer_fn() local
435 trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight); in wb_timer_fn()
477 if (rqd->scale_step || inflight) in wb_timer_fn()
826 atomic_read(&rwb->rq_wait[i].inflight)); in wbt_inflight_show()
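
wbt_rqw_done() above decrements the inflight count on completion and wakes throttled writers only once the window has drained enough: never while still at or above the limit, always when the queue is empty, and otherwise only when at least half the background depth is free again. A simplified sketch of just that decision follows; the structure and field names are mocked, only the comparisons mirror the snippet.

    #include <stdbool.h>

    struct mock_rwb {
        int wb_background;      /* background writeback queue depth */
    };

    /* Should completing one request wake up writers waiting on the rq_wait? */
    static bool should_wake(const struct mock_rwb *rwb, int inflight, int limit)
    {
        if (inflight && inflight >= limit)
            return false;                   /* still over the allowed depth */

        return !inflight || (limit - inflight) >= rwb->wb_background / 2;
    }
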
blk-rq-qos.h
25 atomic_t inflight; member
85 atomic_set(&rq_wait->inflight, 0); in rq_wait_init()
blk-iolatency.c
93 * inflight tracking. The number of cgroups which have iolat enabled is
276 atomic_dec(&rqw->inflight); in iolat_cleanup_cb()
305 atomic_inc(&rqw->inflight); in __blkcg_iolatency_throttle()
599 int inflight = 0; in blkcg_iolatency_done_bio() local
621 inflight = atomic_dec_return(&rqw->inflight); in blkcg_iolatency_done_bio()
622 WARN_ON_ONCE(inflight < 0); in blkcg_iolatency_done_bio()
/linux/drivers/crypto/chelsio/
chcr_core.c
56 if (atomic_read(&dev->inflight)) { in detach_work_fn()
60 atomic_read(&dev->inflight)); in detach_work_fn()
65 atomic_read(&dev->inflight)); in detach_work_fn()
103 atomic_set(&dev->inflight, 0); in chcr_dev_add()
122 atomic_set(&dev->inflight, 0); in chcr_dev_init()
232 if (atomic_read(&dev->inflight) != 0) { in chcr_detach_device()
chcr_core.h
99 atomic_t inflight; member
/linux/net/core/
page_pool.c
639 s32 inflight; in page_pool_inflight() local
641 inflight = _distance(hold_cnt, release_cnt); in page_pool_inflight()
644 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); in page_pool_inflight()
645 WARN(inflight < 0, "Negative(%d) inflight packet-pages", in page_pool_inflight()
646 inflight); in page_pool_inflight()
648 inflight = max(0, inflight); in page_pool_inflight()
651 return inflight; in page_pool_inflight()
1151 int inflight; in page_pool_release() local
1154 inflight = page_pool_inflight(pool, true); in page_pool_release()
1158 if (!inflight) in page_pool_release()
[all …]
page_pool_user.c
219 size_t inflight, refsz; in page_pool_nl_fill() local
240 inflight = page_pool_inflight(pool, false); in page_pool_nl_fill()
242 if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) || in page_pool_nl_fill()
244 inflight * refsz)) in page_pool_nl_fill()
/linux/tools/testing/selftests/net/af_unix/
scm_rights.c
270 int inflight, int receiver) in __send_fd() argument
284 self->fd[inflight * 2], in __send_fd()
285 self->fd[inflight * 2], in __send_fd()
316 #define send_fd(inflight, receiver) \ argument
317 __send_fd(_metadata, self, variant, inflight, receiver)
/linux/net/unix/
garbage.c
129 /* If the receiver socket is not inflight, no cyclic in unix_update_graph()
209 struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]); in unix_add_edges() local
212 if (!inflight) in unix_add_edges()
216 edge->predecessor = inflight; in unix_add_edges()
229 fpl->inflight = true; in unix_add_edges()
260 fpl->inflight = false; in unix_del_edges()
267 * inflight graph, and GC will not see it, so no lock needed. in unix_update_edges()
309 if (fpl->inflight) in unix_destroy_fpl()
325 /* The vertex's fd can be received by a non-inflight socket. */ in unix_vertex_dead()
329 /* The vertex's fd can be received by an inflight socke in unix_vertex_dead()
[all...]
/linux/drivers/gpu/drm/i915/gt/
intel_context_types.h
91 struct intel_engine_cs *inflight; member
95 __intel_context_inflight(READ_ONCE((ce)->inflight))
97 __intel_context_inflight_count(READ_ONCE((ce)->inflight))
intel_execlists_submission.c
336 * If the inflight context did not trigger the preemption, then maybe in need_preempt()
529 old = ce->inflight; in execlists_schedule_in()
532 WRITE_ONCE(ce->inflight, ptr_inc(old)); in execlists_schedule_in()
558 * before we clear ce->inflight make sure that the context has been in kick_siblings()
593 GEM_BUG_ON(ce->inflight != engine); in __execlists_schedule_out()
638 WRITE_ONCE(ce->inflight, NULL); in __execlists_schedule_out()
648 GEM_BUG_ON(!ce->inflight); in execlists_schedule_out()
649 ce->inflight = ptr_dec(ce->inflight); in execlists_schedule_out()
650 if (!__intel_context_inflight_count(ce->inflight)) in execlists_schedule_out()
991 const struct intel_engine_cs *inflight; virtual_matches() local
3316 const struct i915_request *inflight; kick_execlists() local
[all...]
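
In these hits ce->inflight is effectively a tagged pointer: the engine address sits in the upper bits while a small submission count is carried in the low bits, adjusted with ptr_inc()/ptr_dec() and masked away when the plain pointer is needed. Below is a hedged illustration of that packing, assuming the pointee is at least 8-byte aligned so three low bits are free; the helper names are mine, not i915's.

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_MASK ((uintptr_t)0x7)   /* 8-byte alignment leaves 3 low bits spare */

    struct engine { long id; };

    static struct engine *tag_ptr(void *tagged)
    {
        return (struct engine *)((uintptr_t)tagged & ~TAG_MASK);
    }

    static unsigned int tag_count(void *tagged)
    {
        return (uintptr_t)tagged & TAG_MASK;
    }

    static void *tag_inc(void *tagged) { return (void *)((uintptr_t)tagged + 1); }
    static void *tag_dec(void *tagged) { return (void *)((uintptr_t)tagged - 1); }

    int main(void)
    {
        static _Alignas(8) struct engine e0 = { .id = 0 };
        void *inflight = &e0;

        inflight = tag_inc(inflight);           /* first submission */
        inflight = tag_inc(inflight);           /* second submission */
        printf("engine=%p count=%u\n", (void *)tag_ptr(inflight),
               tag_count(inflight));

        inflight = tag_dec(inflight);
        inflight = tag_dec(inflight);
        if (!tag_count(inflight))
            inflight = NULL;                    /* last reference gone */
        printf("drained=%d\n", inflight == NULL);
        return 0;
    }
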
intel_timeline.c
423 unsigned long count, ready, inflight; in intel_gt_show_timelines() local
440 inflight = 0; in intel_gt_show_timelines()
449 inflight++; in intel_gt_show_timelines()
453 drm_printf(m, "count: %lu, ready: %lu, inflight: %lu", in intel_gt_show_timelines()
454 count, ready, inflight); in intel_gt_show_timelines()
/linux/net/atm/
pppoatm.c
64 atomic_t inflight; member
75 * inflight == -2 represents an empty queue, -1 one packet, and zero means
139 atomic_dec(&pvcc->inflight); in pppoatm_pop()
244 atomic_inc_not_zero(&pvcc->inflight)) in pppoatm_may_send()
250 * *must* be set before we do the atomic_inc() on pvcc->inflight. in pppoatm_may_send()
274 atomic_inc_not_zero(&pvcc->inflight)) in pppoatm_may_send()
406 atomic_set(&pvcc->inflight, NONE_INFLIGHT); in pppoatm_assign_vcc()
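
The pppoatm hits show transmission being gated by a small signed atomic: it starts at -2 for an empty queue, each queued packet moves it toward zero, and atomic_inc_not_zero() therefore refuses to queue another packet once it has reached zero. A hedged C11 sketch of that convention follows; inc_not_zero() emulates the kernel primitive and the remaining names are illustrative.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* -2: queue empty, -1: one packet queued, 0: no further packet may be queued */
    #define NONE_INFLIGHT (-2)

    static atomic_int inflight = NONE_INFLIGHT;

    /* C11 emulation of the kernel's atomic_inc_not_zero(). */
    static bool inc_not_zero(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old != 0) {
            if (atomic_compare_exchange_weak(v, &old, old + 1))
                return true;
        }
        return false;
    }

    /* May another packet be handed to the ATM device right now? */
    static bool may_send(void)
    {
        return inc_not_zero(&inflight);
    }

    /* Called when the device has finished transmitting one packet. */
    static void packet_done(void)
    {
        atomic_fetch_sub(&inflight, 1);
    }
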
/linux/drivers/infiniband/ulp/rtrs/
rtrs-clt-stats.c
102 atomic_read(&stats->inflight), sum.failover_cnt); in rtrs_clt_stats_rdma_to_str()
158 atomic_set(&s->inflight, 0); in rtrs_clt_reset_all_stats()
182 atomic_inc(&stats->inflight); in rtrs_clt_update_all_stats()
README
100 corresponding path is disconnected, all the inflight IO are failed over to a
131 inflight IO and for the error code.
149 inflight IO and for the error code. The new rkey is sent back using
171 outstanding inflight IO and the error code.
192 outstanding inflight IO and the error code. The new rkey is sent back using
/linux/net/ipv4/
tcp_bbr.c
42 * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
85 BBR_PROBE_RTT, /* cut inflight to min to probe min_rtt */
405 /* Ensure gain cycling gets inflight above BDP even for small BDPs. */ in bbr_quantization_budget()
412 /* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
415 u32 inflight; in bbr_inflight() local
417 inflight = bbr_bdp(sk, bw, gain); in bbr_inflight()
418 inflight = bbr_quantization_budget(sk, inflight); in bbr_inflight()
420 return inflight; in bbr_inflight()
427 * inflight leve
562 u32 inflight, bw; bbr_is_next_cycle_phase() local
[all...]
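
bbr_inflight() in the hits above turns the estimated bottleneck bandwidth, the measured minimum RTT and a gain factor into a target amount of data in flight (a scaled bandwidth-delay product), then pads it with a quantization budget so pacing granularity cannot starve the pipe. The arithmetic sketch below is only illustrative: it works in packets and uses floating point, whereas BBR itself uses fixed-point math and its own padding formula.

    #include <stdint.h>
    #include <stdio.h>

    /* Bandwidth-delay product in packets: bw [pkt/s] * min_rtt [us] * gain. */
    static uint32_t bdp_packets(uint64_t bw_pps, uint32_t min_rtt_us, double gain)
    {
        return (uint32_t)(bw_pps * min_rtt_us / 1000000.0 * gain);
    }

    /* Illustrative headroom so tiny BDPs still probe above the pipe. */
    static uint32_t quantization_budget(uint32_t inflight)
    {
        return inflight + 3;
    }

    static uint32_t target_inflight(uint64_t bw_pps, uint32_t min_rtt_us, double gain)
    {
        return quantization_budget(bdp_packets(bw_pps, min_rtt_us, gain));
    }

    int main(void)
    {
        /* 10,000 pkt/s at a 20 ms min RTT with a 1.25x probing gain. */
        printf("target inflight: %u packets\n",
               target_inflight(10000, 20000, 1.25));           /* 253 */
        return 0;
    }
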
/linux/tools/net/ynl/samples/
page-pool.c
51 if (pp->_present.inflight) in count()
52 s->live[l].refs += pp->inflight; in count()
/linux/io_uring/
tctx.c
64 percpu_counter_destroy(&tctx->inflight); in __io_uring_free()
79 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL); in io_uring_alloc_task_context()
88 percpu_counter_destroy(&tctx->inflight); in io_uring_alloc_task_context()
/linux/drivers/vhost/
scsi.c
107 /* Refcount for the inflight reqs */
143 /* Used to track inflight cmd */
144 struct vhost_scsi_inflight *inflight; member
217 * Reference counting for inflight reqs, used for flush operation. At
223 * Indicate current inflight in use, protected by vq->mutex.
262 struct vhost_scsi_inflight *inflight; member
296 struct vhost_scsi_inflight *inflight; in vhost_scsi_done_inflight() local
298 inflight = container_of(kref, struct vhost_scsi_inflight, kref); in vhost_scsi_done_inflight()
299 complete(&inflight->comp); in vhost_scsi_done_inflight()
314 /* store old inflight */ in vhost_scsi_init_inflight()
332 struct vhost_scsi_inflight *inflight; vhost_scsi_get_inflight() local
342 vhost_scsi_put_inflight(struct vhost_scsi_inflight * inflight) vhost_scsi_put_inflight() argument
422 struct vhost_scsi_inflight *inflight = tv_cmd->inflight; vhost_scsi_release_cmd_res() local
459 struct vhost_scsi_inflight *inflight = tmf->inflight; vhost_scsi_release_tmf_res() local
[all...]
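
The vhost-scsi hits revolve around per-queue inflight tracking used by the flush path: every submitted command takes a reference on the current inflight object, completion drops it, and a flush installs a fresh object, drops the initial reference on the old one and waits for its completion to fire when the last old command finishes. The sketch below mimics that generation/drain idea with C11 atomics; a busy-wait stands in for wait_for_completion(), and all names are mine rather than the driver's.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* One generation of inflight tracking; a flush swaps in a fresh one. */
    struct inflight_gen {
        atomic_int refs;        /* plays the role of the kref          */
        atomic_bool done;       /* plays the role of struct completion */
    };

    static void inflight_init(struct inflight_gen *gen)
    {
        atomic_init(&gen->refs, 1);     /* initial reference, dropped by flush */
        atomic_init(&gen->done, false);
    }

    /* Every submitted command pins the queue's current generation. */
    static struct inflight_gen *inflight_get(struct inflight_gen *cur)
    {
        atomic_fetch_add(&cur->refs, 1);
        return cur;
    }

    /* Command completion unpins it; the last drop signals the flusher. */
    static void inflight_put(struct inflight_gen *gen)
    {
        if (atomic_fetch_sub(&gen->refs, 1) == 1)
            atomic_store(&gen->done, true);
    }

    /* Flush: after installing a new generation, drain the old one. */
    static void inflight_flush(struct inflight_gen *old)
    {
        inflight_put(old);                      /* drop the initial reference */
        while (!atomic_load(&old->done))
            ;       /* busy-wait standing in for wait_for_completion() */
    }
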
/linux/drivers/crypto/cavium/cpt/
cpt_hw_types.h
430 * inflight:8 [7:0](RO/H) Inflight count. Counts the number of instructions
443 u64 inflight:8; member
445 u64 inflight:8;
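
The register layout above documents the inflight instruction count as an 8-bit read-only field in bits [7:0] of a 64-bit queue register, exposed through endian-dependent bitfields. If only that field is needed, a shift-and-mask read such as the hedged helper below avoids depending on bitfield layout at all; the function name is illustrative.

    #include <stdint.h>

    /* Extract the inflight instruction count from bits [7:0] of the register. */
    static inline uint8_t vq_inflight_count(uint64_t reg)
    {
        return (uint8_t)(reg & 0xff);
    }
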
