/linux-6.15/include/trace/events/
  wbt.h
      127  * @inflight: tracked writes inflight
      132  int step, unsigned int inflight),
      134  TP_ARGS(bdi, status, step, inflight),
      140  __field(unsigned int, inflight)
      148  __entry->inflight = inflight;
      151  TP_printk("%s: status=%u, step=%d, inflight=%u", __entry->name,
      152  __entry->status, __entry->step, __entry->inflight)

  page_pool.h
      17  s32 inflight, u32 hold, u32 release),
      19  TP_ARGS(pool, inflight, hold, release),
      23  __field(s32, inflight)
      31  __entry->inflight = inflight;
      37  TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
      38  __entry->pool, __entry->inflight, __entry->hold,

/linux-6.15/block/
  blk-rq-qos.c
      23  return atomic_inc_below(&rq_wait->inflight, limit); in rq_wait_inc_below()
      242  * @acquire_inflight_cb: inc the rqw->inflight counter if we can
      248  * inc the rqw->inflight if we have the ability to do so, or return false if not
      252  * inflight count accordingly.
      268  * inflight counter if we can. Otherwise, prepare for adding ourselves in rq_qos_wait()
      278  * Make sure there is at least one inflight process; otherwise, waiters in rq_qos_wait()
      279  * will never be woken up. Since there may be no inflight process before in rq_qos_wait()
      281  * increase the inflight counter for ourselves. And it is sufficient to in rq_qos_wait()
      303  /* we are now relying on the waker to increase our inflight counter. */ in rq_qos_wait()
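
The rq_wait_inc_below() hit at line 23 is the core of this throttling scheme: a request takes an inflight slot only while the counter is still below the per-queue limit, otherwise it sleeps and, per the comment at line 303, relies on the completion-side waker to take the slot on its behalf. Below is a minimal userspace sketch of the same "increment only if below a limit" compare-and-swap loop; it uses C11 <stdatomic.h> rather than the kernel's atomic_t helpers, and the names are illustrative, not the kernel's.

      /* Sketch only: mirrors the idea of atomic_inc_below(), not the kernel code. */
      #include <stdatomic.h>
      #include <stdbool.h>
      #include <stdio.h>

      static bool inc_below(atomic_int *counter, int limit)
      {
          int cur = atomic_load(counter);

          do {
              if (cur >= limit)
                  return false;   /* limit reached, caller must wait */
          } while (!atomic_compare_exchange_weak(counter, &cur, cur + 1));
          return true;            /* slot taken while still below the limit */
      }

      int main(void)
      {
          atomic_int inflight = 2;

          printf("got slot: %d\n", inc_below(&inflight, 4));  /* 1: 2 -> 3 */
          printf("got slot: %d\n", inc_below(&inflight, 3));  /* 0: already at 3 */
          return 0;
      }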

  genhd.c
      130  unsigned int inflight = 0; in part_in_flight() local
      134  inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) + in part_in_flight()
      137  if ((int)inflight < 0) in part_in_flight()
      138  inflight = 0; in part_in_flight()
      140  return inflight; in part_in_flight()
      144  unsigned int inflight[2]) in part_in_flight_rw()
      148  inflight[0] = 0; in part_in_flight_rw()
      149  inflight[1] = 0; in part_in_flight_rw()
      151  inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu); in part_in_flight_rw()
      152  inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu); in part_in_flight_rw()
      … (more matches not shown)
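
part_in_flight() sums per-CPU read and write counters (line 134) and clamps a negative total to zero (lines 137-138): request starts and completions may be accounted on different CPUs, so individual per-CPU values and even the transient sum can go negative, and only the clamped total is meaningful. A small self-contained sketch of that sum-and-clamp, with plain arrays standing in for the kernel's per-CPU disk statistics:

      /* Illustrative sketch, not the kernel's part_in_flight(). */
      #include <stdio.h>

      #define NR_CPUS 4

      static long in_flight[2][NR_CPUS];  /* [0] = reads, [1] = writes, per CPU */

      static unsigned int part_in_flight_sketch(void)
      {
          long inflight = 0;
          int cpu;

          for (cpu = 0; cpu < NR_CPUS; cpu++)
              inflight += in_flight[0][cpu] + in_flight[1][cpu];

          return inflight < 0 ? 0 : (unsigned int)inflight;  /* clamp transient negatives */
      }

      int main(void)
      {
          in_flight[1][0] = 3;    /* three writes started on CPU 0 */
          in_flight[1][2] = -1;   /* one of them completed on CPU 2 */

          printf("inflight = %u\n", part_in_flight_sketch());  /* prints 2 */
          return 0;
      }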

  blk-wbt.c
      199  int inflight, limit; in wbt_rqw_done() local
      201  inflight = atomic_dec_return(&rqw->inflight); in wbt_rqw_done()
      219  if (inflight && inflight >= limit) in wbt_rqw_done()
      223  int diff = limit - inflight; in wbt_rqw_done()
      225  if (!inflight || diff >= rwb->wb_background / 2) in wbt_rqw_done()
      292  ret += atomic_read(&rwb->rq_wait[i].inflight); in wbt_inflight()
      426  unsigned int inflight = wbt_inflight(rwb); in wb_timer_fn() local
      434  trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight); in wb_timer_fn()
      476  if (rqd->scale_step || inflight) in wb_timer_fn()
      819  seq_printf(m, "%d: inflight %d\n", i, in wbt_inflight_show()
      … (more matches not shown)

/linux-6.15/net/core/
  page_pool.c
      619  s32 inflight; in page_pool_inflight() local
      621  inflight = _distance(hold_cnt, release_cnt); in page_pool_inflight()
      624  trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); in page_pool_inflight()
      625  WARN(inflight < 0, "Negative(%d) inflight packet-pages", in page_pool_inflight()
      626  inflight); in page_pool_inflight()
      628  inflight = max(0, inflight); in page_pool_inflight()
      631  return inflight; in page_pool_inflight()
      662  /* Always account for inflight pages, even if we didn't in __page_pool_release_page_dma()
      1094  int inflight; in page_pool_release() local
      1097  inflight = page_pool_inflight(pool, true); in page_pool_release()
      … (more matches not shown)
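
page_pool_inflight() derives the inflight count as the distance between two monotonically increasing u32 counters, pages handed out (hold_cnt) and pages returned (release_cnt); casting the unsigned difference to s32 keeps the result correct across counter wraparound, a negative value triggers the WARN() at line 625, and the result is clamped at zero. A minimal sketch of that wraparound-safe distance (the helper name mirrors the excerpt, but this is not the kernel function):

      /* Illustrative sketch of the hold/release counter distance. */
      #include <stdint.h>
      #include <stdio.h>

      static int32_t distance(uint32_t hold_cnt, uint32_t release_cnt)
      {
          return (int32_t)(hold_cnt - release_cnt);   /* wraparound-safe */
      }

      int main(void)
      {
          uint32_t hold = UINT32_MAX;         /* about to wrap */
          uint32_t release = UINT32_MAX - 5;  /* 5 pages still out */
          int32_t inflight;

          hold += 10;                         /* u32 wraps around to 9 */
          inflight = distance(hold, release);
          if (inflight < 0)                   /* would trigger the WARN() in the excerpt */
              inflight = 0;

          printf("inflight = %d\n", inflight);    /* 5 outstanding + 10 new = 15 */
          return 0;
      }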

  page_pool_user.c
      219  size_t inflight, refsz; in page_pool_nl_fill() local
      240  inflight = page_pool_inflight(pool, false); in page_pool_nl_fill()
      242  if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) || in page_pool_nl_fill()
      244  inflight * refsz)) in page_pool_nl_fill()

/linux-6.15/net/unix/
  garbage.c
      129  /* If the receiver socket is not inflight, no cyclic in unix_update_graph()
      209  struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]); in unix_add_edges() local
      212  if (!inflight) in unix_add_edges()
      216  edge->predecessor = inflight; in unix_add_edges()
      229  fpl->inflight = true; in unix_add_edges()
      260  fpl->inflight = false; in unix_del_edges()
      267  * inflight graph, and GC will not see it, so no lock needed. in unix_update_edges()
      309  if (fpl->inflight) in unix_destroy_fpl()
      325  /* The vertex's fd can be received by a non-inflight socket. */ in unix_vertex_dead()
      329  /* The vertex's fd can be received by an inflight socket in in unix_vertex_dead()
      … (more matches not shown)

/linux-6.15/drivers/crypto/chelsio/
  chcr_core.c
      56  if (atomic_read(&dev->inflight)) { in detach_work_fn()
      59  pr_debug("Request Inflight Count %d\n", in detach_work_fn()
      60  atomic_read(&dev->inflight)); in detach_work_fn()
      65  atomic_read(&dev->inflight)); in detach_work_fn()
      103  atomic_set(&dev->inflight, 0); in chcr_dev_add()
      122  atomic_set(&dev->inflight, 0); in chcr_dev_init()
      232  if (atomic_read(&dev->inflight) != 0) { in chcr_detach_device()

/linux-6.15/drivers/firmware/arm_scmi/transports/
  smc.c
      51  * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
      68  atomic_t inflight; member
      100  atomic_set(&scmi_info->inflight, INFLIGHT_NONE); in smc_channel_lock_init()
      105  static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) in smc_xfer_inflight() argument
      109  ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq); in smc_xfer_inflight()
      119  spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); in smc_channel_lock_acquire()
      127  atomic_set(&scmi_info->inflight, INFLIGHT_NONE); in smc_channel_lock_release()
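
Here the inflight field is effectively a lock over the shared-memory SMC channel: a transfer claims it by atomic_cmpxchg()'ing INFLIGHT_NONE to its own sequence number (line 109), spins until that succeeds (line 119), and releases it by storing INFLIGHT_NONE again (line 127). A userspace sketch of the same claim/release protocol with C11 atomics; the busy-wait stands in for spin_until_cond():

      /* Illustrative sketch of the SCMI SMC channel ownership flag. */
      #include <stdatomic.h>
      #include <stdbool.h>
      #include <stdio.h>

      #define INFLIGHT_NONE (-1)

      static atomic_int inflight = INFLIGHT_NONE;

      static bool try_claim(int seq)
      {
          int expected = INFLIGHT_NONE;

          /* Succeeds only if nothing currently owns the shared area. */
          return atomic_compare_exchange_strong(&inflight, &expected, seq);
      }

      static void channel_acquire(int seq)
      {
          while (!try_claim(seq))
              ;                   /* spin until the previous owner releases */
      }

      static void channel_release(void)
      {
          atomic_store(&inflight, INFLIGHT_NONE);
      }

      int main(void)
      {
          channel_acquire(42);
          printf("claim while busy: %d\n", try_claim(7));     /* 0 */
          channel_release();
          printf("claim after release: %d\n", try_claim(7));  /* 1 */
          return 0;
      }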

/linux-6.15/net/ipv4/
  tcp_bbr.c
      42  * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
      85  BBR_PROBE_RTT, /* cut inflight to min to probe min_rtt */
      405  /* Ensure gain cycling gets inflight above BDP even for small BDPs. */ in bbr_quantization_budget()
      412  /* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
      415  u32 inflight; in bbr_inflight() local
      417  inflight = bbr_bdp(sk, bw, gain); in bbr_inflight()
      418  inflight = bbr_quantization_budget(sk, inflight); in bbr_inflight()
      420  return inflight; in bbr_inflight()
      427  * inflight level that it estimates has already been "baked in" by previous
      432  * If we're increasing inflight, then we want to know if the transmit of the
      … (more matches not shown)
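
bbr_inflight() (lines 415-420) turns the estimated bottleneck bandwidth and minimum RTT into a target amount of data in flight: the bandwidth-delay product scaled by the current gain, topped up by a small quantization budget so gain cycling still works when the BDP is tiny (line 405). The arithmetic, roughly, in plain floating point rather than the kernel's fixed-point code; every parameter value below is invented for the example:

      /* Illustrative arithmetic only, not the kernel's bbr_inflight(). */
      #include <stdio.h>

      static unsigned int bbr_inflight_sketch(double bw_bytes_per_sec, double min_rtt_sec,
                                              double gain, unsigned int mss)
      {
          double bdp_bytes = bw_bytes_per_sec * min_rtt_sec;  /* the BDP */
          unsigned int inflight = (unsigned int)(gain * bdp_bytes / mss + 0.5);

          /* crude stand-in for bbr_quantization_budget(): keep a small floor */
          if (inflight < 4)
              inflight = 4;
          return inflight;
      }

      int main(void)
      {
          /* 12.5 MB/s bottleneck, 40 ms min RTT, probing gain 1.25, 1460-byte MSS */
          printf("target inflight = %u packets\n",
                 bbr_inflight_sketch(12.5e6, 0.040, 1.25, 1460));
          return 0;
      }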

/linux-6.15/tools/testing/selftests/net/af_unix/
  scm_rights.c
      200  int inflight, int receiver) in __send_fd() argument
      214  self->fd[inflight * 2], in __send_fd()
      215  self->fd[inflight * 2], in __send_fd()
      240  #define send_fd(inflight, receiver) \ argument
      241  __send_fd(_metadata, self, variant, inflight, receiver)

/linux-6.15/tools/testing/selftests/net/
  nl_netdev.py
      75  refs = sum([pp["inflight"] for pp in pp_list])
      87  refs = sum([pp["inflight"] for pp in pp_list if pp.get("ifindex") == nsim.ifindex])
      94  refs = sum([pp["inflight"] for pp in pp_list])

/linux-6.15/net/atm/
  pppoatm.c
      64  atomic_t inflight; member
      75  * inflight == -2 represents an empty queue, -1 one packet, and zero means
      139  atomic_dec(&pvcc->inflight); in pppoatm_pop()
      244  atomic_inc_not_zero(&pvcc->inflight)) in pppoatm_may_send()
      250  * *must* be set before we do the atomic_inc() on pvcc->inflight. in pppoatm_may_send()
      274  atomic_inc_not_zero(&pvcc->inflight)) in pppoatm_may_send()
      406  atomic_set(&pvcc->inflight, NONE_INFLIGHT); in pppoatm_assign_vcc()
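
The comment at line 75 describes a compact encoding: the counter starts at -2 (NONE_INFLIGHT) for an empty queue, each queued packet increments it, and because atomic_inc_not_zero() refuses to increment a value of zero, a third packet is automatically refused once two are already inflight. A sketch of that trick with C11 atomics; the kernel helper is replaced by an explicit compare-and-swap loop:

      /* Illustrative sketch of the pppoatm inflight encoding, not the driver code. */
      #include <stdatomic.h>
      #include <stdbool.h>
      #include <stdio.h>

      #define NONE_INFLIGHT (-2)  /* -2 empty, -1 one packet, 0 two packets (full) */

      static bool inc_not_zero(atomic_int *v)
      {
          int cur = atomic_load(v);

          do {
              if (cur == 0)
                  return false;   /* already full: two packets inflight */
          } while (!atomic_compare_exchange_weak(v, &cur, cur + 1));
          return true;
      }

      int main(void)
      {
          atomic_int inflight = NONE_INFLIGHT;

          printf("queue 1st: %d\n", inc_not_zero(&inflight));   /* 1 */
          printf("queue 2nd: %d\n", inc_not_zero(&inflight));   /* 1 */
          printf("queue 3rd: %d\n", inc_not_zero(&inflight));   /* 0, must wait */
          atomic_fetch_sub(&inflight, 1);                       /* pop: one completed */
          printf("queue again: %d\n", inc_not_zero(&inflight)); /* 1 */
          return 0;
      }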

/linux-6.15/drivers/infiniband/ulp/rtrs/
  README
      100  corresponding path is disconnected, all the inflight IO are failed over to a
      131  inflight IO and for the error code.
      149  inflight IO and for the error code. The new rkey is sent back using
      171  outstanding inflight IO and the error code.
      192  outstanding inflight IO and the error code. The new rkey is sent back using

  rtrs-clt-stats.c
      102  atomic_read(&stats->inflight), sum.failover_cnt); in rtrs_clt_stats_rdma_to_str()
      158  atomic_set(&s->inflight, 0); in rtrs_clt_reset_all_stats()
      182  atomic_inc(&stats->inflight); in rtrs_clt_update_all_stats()

/linux-6.15/drivers/vhost/
  scsi.c
      107  /* Refcount for the inflight reqs */
      138  /* Used to track inflight cmd */
      139  struct vhost_scsi_inflight *inflight; member
      212  * Reference counting for inflight reqs, used for flush operation. At
      218  * Indicate current inflight in use, protected by vq->mutex.
      257  struct vhost_scsi_inflight *inflight; member
      285  struct vhost_scsi_inflight *inflight; in vhost_scsi_done_inflight() local
      287  inflight = container_of(kref, struct vhost_scsi_inflight, kref); in vhost_scsi_done_inflight()
      288  complete(&inflight->comp); in vhost_scsi_done_inflight()
      321  struct vhost_scsi_inflight *inflight; in vhost_scsi_get_inflight() local
      … (more matches not shown)
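
vhost-scsi tracks inflight commands with a kref plus a struct completion: each command holds a reference, and the final put (vhost_scsi_done_inflight(), lines 285-288) completes the completion so a flush can wait for every outstanding command to drain. A much-simplified, single-threaded sketch of that life cycle, with an atomic counter and a flag standing in for kref and struct completion:

      /* Illustrative sketch of the refcount-plus-completion flush pattern. */
      #include <stdatomic.h>
      #include <stdbool.h>
      #include <stdio.h>

      struct inflight_tracker {
          atomic_int refs;        /* kref in the kernel code */
          atomic_bool drained;    /* struct completion in the kernel code */
      };

      static void cmd_start(struct inflight_tracker *t)
      {
          atomic_fetch_add(&t->refs, 1);
      }

      static void cmd_done(struct inflight_tracker *t)
      {
          if (atomic_fetch_sub(&t->refs, 1) == 1)
              atomic_store(&t->drained, true);  /* complete(&inflight->comp) */
      }

      int main(void)
      {
          static struct inflight_tracker t = { .refs = 0, .drained = false };

          cmd_start(&t);
          cmd_start(&t);
          cmd_done(&t);
          printf("drained after 1/2 done: %d\n", atomic_load(&t.drained));  /* 0 */
          cmd_done(&t);
          printf("drained after 2/2 done: %d\n", atomic_load(&t.drained));  /* 1 */
          return 0;
      }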

/linux-6.15/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/
  instruction.json
      105  …scription": "Prefetch response received but was dropped since we don't support inflight upgrades.",
      108  …escription": "Prefetch response received but was dropped since we don't support inflight upgrades."

/linux-6.15/drivers/gpu/drm/i915/
  i915_scheduler_types.h
      118  * @requests: list of requests inflight on this schedule engine
      144  * However, since the we may have recorded the priority of an inflight
      183  * @bump_inflight_request_prio: update priority of an inflight request

/linux-6.15/drivers/gpu/drm/i915/gt/
  intel_execlists_submission.c
      336  * If the inflight context did not trigger the preemption, then maybe in need_preempt()
      529  old = ce->inflight; in execlists_schedule_in()
      532  WRITE_ONCE(ce->inflight, ptr_inc(old)); in execlists_schedule_in()
      558  * before we clear ce->inflight make sure that the context has been in kick_siblings()
      593  GEM_BUG_ON(ce->inflight != engine); in __execlists_schedule_out()
      638  WRITE_ONCE(ce->inflight, NULL); in __execlists_schedule_out()
      648  GEM_BUG_ON(!ce->inflight); in execlists_schedule_out()
      649  ce->inflight = ptr_dec(ce->inflight); in execlists_schedule_out()
      650  if (!__intel_context_inflight_count(ce->inflight)) in execlists_schedule_out()
      991  const struct intel_engine_cs *inflight; in virtual_matches() local
      … (more matches not shown)
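
ce->inflight here is a tagged pointer: it records which engine the context is currently inflight on, with a submission count packed into the pointer's spare low bits, which is what ptr_inc()/ptr_dec() at lines 532 and 649 manipulate; when the count drops to zero the pointer is cleared (line 638). A rough userspace sketch of a count-in-low-bits pointer; the 3-bit count width and the helper names are assumptions for the example, not the i915 definitions:

      /* Illustrative sketch of a pointer with a small count in its low bits. */
      #include <stdint.h>
      #include <stdio.h>

      #define COUNT_MASK ((uintptr_t)0x7)     /* assumed: low 3 bits hold the count */

      struct engine { int id; };

      static void *ptr_inc(void *p) { return (void *)((uintptr_t)p + 1); }
      static void *ptr_dec(void *p) { return (void *)((uintptr_t)p - 1); }
      static unsigned int inflight_count(void *p) { return (uintptr_t)p & COUNT_MASK; }
      static struct engine *inflight_engine(void *p)
      {
          return (struct engine *)((uintptr_t)p & ~COUNT_MASK);
      }

      int main(void)
      {
          static _Alignas(8) struct engine rcs0 = { .id = 0 };
          void *inflight = &rcs0;             /* context bound to engine rcs0 */

          inflight = ptr_inc(inflight);       /* first submission */
          inflight = ptr_inc(inflight);       /* resubmitted while still active */
          printf("engine id=%d count=%u\n",
                 inflight_engine(inflight)->id, inflight_count(inflight));

          inflight = ptr_dec(inflight);
          inflight = ptr_dec(inflight);
          if (!inflight_count(inflight))
              inflight = NULL;                /* schedule-out: context idle */
          printf("idle: %s\n", inflight ? "no" : "yes");
          return 0;
      }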

  intel_timeline.c
      423  unsigned long count, ready, inflight; in intel_gt_show_timelines() local
      440  inflight = 0; in intel_gt_show_timelines()
      449  inflight++; in intel_gt_show_timelines()
      453  drm_printf(m, "count: %lu, ready: %lu, inflight: %lu", in intel_gt_show_timelines()
      454  count, ready, inflight); in intel_gt_show_timelines()

  intel_engine_types.h
      240  * @inflight: the set of contexts submitted and acknowledged by HW
      242  * The set of inflight contexts is managed by reading CS events
      245  * advance our inflight/active tracking accordingly.
      247  struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */]; member
      252  * promote them to the inflight array once HW has signaled the

/linux-6.15/include/linux/
  bpf_mprog.h
      33  * // ensure there are no inflight users of @entry:
      57  * // ensure there are no inflight users of @entry:
      108  * assumes that for the old bpf_mprog_entry there are no inflight users
      227  * bpf_mprog ensured that there are no inflight users anymore. in bpf_mprog_complete_release()

/linux-6.15/drivers/crypto/cavium/cpt/
  cpt_hw_types.h
      239  * when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
      430  * inflight:8 [7:0](RO/H) Inflight count. Counts the number of instructions
      436  * 2. [INFLIGHT] is polled until equals to zero.
      443  u64 inflight:8; member
      445  u64 inflight:8;
      634  * clearing [ENA]) only when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
      639  * See also CPT()_PF_Q()_CTL[CONT_ERR] and CPT()_VQ()_INPROG[INFLIGHT].

/linux-6.15/Documentation/devicetree/bindings/net/dsa/
  brcm,sf2.yaml
      74  brcm,acb-packets-inflight:
      156  brcm,acb-packets-inflight;