/linux-6.15/net/ipv4/ |
D | tcp_htcp.c |
   49  static inline u32 htcp_cong_time(const struct htcp *ca)  in htcp_cong_time() argument
   51  return jiffies - ca->last_cong;  in htcp_cong_time()
   54  static inline u32 htcp_ccount(const struct htcp *ca)  in htcp_ccount() argument
   56  return htcp_cong_time(ca) / ca->minRTT;  in htcp_ccount()
   59  static inline void htcp_reset(struct htcp *ca)  in htcp_reset() argument
   61  ca->undo_last_cong = ca->last_cong;  in htcp_reset()
   62  ca->undo_maxRTT = ca->maxRTT;  in htcp_reset()
   63  ca->undo_old_maxB = ca->old_maxB;  in htcp_reset()
   65  ca->last_cong = jiffies;  in htcp_reset()
   70  struct htcp *ca = inet_csk_ca(sk);  in htcp_cwnd_undo() local
  [all …]
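The two helpers at the top of this entry carry H-TCP's timing: htcp_cong_time() is the number of jiffies since the last congestion event, and htcp_ccount() expresses that elapsed time in units of the connection's minimum RTT. A minimal userspace model of the same arithmetic (values are hypothetical; in the kernel both quantities are kept in jiffies):

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace model of htcp_cong_time()/htcp_ccount(): time since the last
     * congestion event, then that time expressed in minimum-RTT units. */
    static uint32_t cong_time(uint32_t now, uint32_t last_cong)
    {
        return now - last_cong;         /* unsigned math handles tick wraparound */
    }

    static uint32_t ccount(uint32_t now, uint32_t last_cong, uint32_t min_rtt)
    {
        return cong_time(now, last_cong) / min_rtt;
    }

    int main(void)
    {
        /* 1000 ticks since the last loss with a 40-tick minRTT -> 25 RTT "epochs" */
        printf("%u\n", ccount(1050, 50, 40));
        return 0;
    }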
|
D | tcp_cdg.c |
  142  struct cdg *ca = inet_csk_ca(sk);  in tcp_cdg_hystart_update() local
  145  ca->delay_min = min_not_zero(ca->delay_min, ca->rtt.min);  in tcp_cdg_hystart_update()
  146  if (ca->delay_min == 0)  in tcp_cdg_hystart_update()
  152  if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) {  in tcp_cdg_hystart_update()
  153  ca->last_ack = now_us;  in tcp_cdg_hystart_update()
  154  ca->round_start = now_us;  in tcp_cdg_hystart_update()
  155  } else if (before(now_us, ca->last_ack + 3000)) {  in tcp_cdg_hystart_update()
  156  u32 base_owd = max(ca->delay_min / 2U, 125U);  in tcp_cdg_hystart_update()
  158  ca->last_ack = now_us;  in tcp_cdg_hystart_update()
  159  if (after(now_us, ca->round_start + base_owd)) {  in tcp_cdg_hystart_update()
  [all …]
|
D | tcp_nv.c |
   94  u8 nv_min_cwnd; /* nv won't make a ca decision if cwnd is
  100  u8 nv_rtt_cnt; /* RTTs without making ca decision */;
  123  static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)  in tcpnv_reset() argument
  127  ca->nv_reset = 0;  in tcpnv_reset()
  128  ca->nv_no_cong_cnt = 0;  in tcpnv_reset()
  129  ca->nv_rtt_cnt = 0;  in tcpnv_reset()
  130  ca->nv_last_rtt = 0;  in tcpnv_reset()
  131  ca->nv_rtt_max_rate = 0;  in tcpnv_reset()
  132  ca->nv_rtt_start_seq = tp->snd_una;  in tcpnv_reset()
  133  ca->nv_eval_call_cnt = 0;  in tcpnv_reset()
  [all …]
|
D | tcp_cong.c |
   51  struct tcp_congestion_ops *ca = tcp_ca_find(name);  in tcp_ca_find_autoload() local
   54  if (!ca && capable(CAP_NET_ADMIN)) {  in tcp_ca_find_autoload()
   58  ca = tcp_ca_find(name);  in tcp_ca_find_autoload()
   61  return ca;  in tcp_ca_find_autoload()
   77  int tcp_validate_congestion_control(struct tcp_congestion_ops *ca)  in tcp_validate_congestion_control() argument
   80  if (!ca->ssthresh || !ca->undo_cwnd ||  in tcp_validate_congestion_control()
   81  !(ca->cong_avoid || ca->cong_control)) {  in tcp_validate_congestion_control()
   82  pr_err("%s does not implement required ops\n", ca->name);  in tcp_validate_congestion_control()
   92  int tcp_register_congestion_control(struct tcp_congestion_ops *ca)  in tcp_register_congestion_control() argument
   96  ret = tcp_validate_congestion_control(ca);  in tcp_register_congestion_control()
  [all …]
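tcp_validate_congestion_control() spells out the minimum contract a congestion-control module must meet before tcp_register_congestion_control() accepts it: ssthresh, undo_cwnd, and at least one of cong_avoid or cong_control. A sketch of a module that satisfies the check by reusing the exported Reno helpers (the "demo" module is made up; it is not a file in the tree):

    #include <linux/module.h>
    #include <net/tcp.h>

    /* Hypothetical minimal module: delegates everything to the Reno helpers,
     * providing exactly the ops tcp_validate_congestion_control() requires. */
    static struct tcp_congestion_ops demo_cc __read_mostly = {
            .ssthresh   = tcp_reno_ssthresh,
            .undo_cwnd  = tcp_reno_undo_cwnd,
            .cong_avoid = tcp_reno_cong_avoid,
            .owner      = THIS_MODULE,
            .name       = "demo",
    };

    static int __init demo_cc_init(void)
    {
            return tcp_register_congestion_control(&demo_cc);
    }

    static void __exit demo_cc_exit(void)
    {
            tcp_unregister_congestion_control(&demo_cc);
    }

    module_init(demo_cc_init);
    module_exit(demo_cc_exit);
    MODULE_LICENSE("GPL");

Once such a module is loaded, the algorithm can be selected system-wide via the net.ipv4.tcp_congestion_control sysctl or per socket with setsockopt(TCP_CONGESTION).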
|
D | tcp_bic.c |
   60  static inline void bictcp_reset(struct bictcp *ca)  in bictcp_reset() argument
   62  ca->cnt = 0;  in bictcp_reset()
   63  ca->last_max_cwnd = 0;  in bictcp_reset()
   64  ca->last_cwnd = 0;  in bictcp_reset()
   65  ca->last_time = 0;  in bictcp_reset()
   66  ca->epoch_start = 0;  in bictcp_reset()
   67  ca->delayed_ack = 2 << ACK_RATIO_SHIFT;  in bictcp_reset()
   72  struct bictcp *ca = inet_csk_ca(sk);  in bictcp_init() local
   74  bictcp_reset(ca);  in bictcp_init()
   83  static inline void bictcp_update(struct bictcp *ca, u32 cwnd)  in bictcp_update() argument
  [all …]
|
D | tcp_illinois.c |
   60  struct illinois *ca = inet_csk_ca(sk);  in rtt_reset() local
   62  ca->end_seq = tp->snd_nxt;  in rtt_reset()
   63  ca->cnt_rtt = 0;  in rtt_reset()
   64  ca->sum_rtt = 0;  in rtt_reset()
   71  struct illinois *ca = inet_csk_ca(sk);  in tcp_illinois_init() local
   73  ca->alpha = ALPHA_MAX;  in tcp_illinois_init()
   74  ca->beta = BETA_BASE;  in tcp_illinois_init()
   75  ca->base_rtt = 0x7fffffff;  in tcp_illinois_init()
   76  ca->max_rtt = 0;  in tcp_illinois_init()
   78  ca->acked = 0;  in tcp_illinois_init()
  [all …]
|
D | tcp_cubic.c |
  107  static inline void bictcp_reset(struct bictcp *ca)  in bictcp_reset() argument
  109  memset(ca, 0, offsetof(struct bictcp, unused));  in bictcp_reset()
  110  ca->found = 0;  in bictcp_reset()
  121  struct bictcp *ca = inet_csk_ca(sk);  in bictcp_hystart_reset() local
  123  ca->round_start = ca->last_ack = bictcp_clock_us(sk);  in bictcp_hystart_reset()
  124  ca->end_seq = tp->snd_nxt;  in bictcp_hystart_reset()
  125  ca->curr_rtt = ~0U;  in bictcp_hystart_reset()
  126  ca->sample_cnt = 0;  in bictcp_hystart_reset()
  131  struct bictcp *ca = inet_csk_ca(sk);  in cubictcp_init() local
  133  bictcp_reset(ca);  in cubictcp_init()
  [all …]
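bictcp_reset() here shows a common idiom: the private state struct carries a marker field, and memset(ca, 0, offsetof(struct bictcp, unused)) clears every field declared before the marker while leaving the fields after it alone. A standalone model of the idiom, with a hypothetical struct and field names:

    #include <stddef.h>
    #include <string.h>
    #include <stdint.h>

    /* Hypothetical state struct: everything before `preserved` is zeroed on
     * reset; `preserved` and later fields keep their values. */
    struct demo_state {
        uint32_t cnt;
        uint32_t last_max_cwnd;
        uint32_t epoch_start;
        uint32_t preserved;     /* plays the role of tcp_cubic's `unused` marker */
        uint32_t round_start;
    };

    static void demo_reset(struct demo_state *s)
    {
        memset(s, 0, offsetof(struct demo_state, preserved));
    }

    int main(void)
    {
        struct demo_state s = { 1, 2, 3, 4, 5 };

        demo_reset(&s);
        /* s.cnt, s.last_max_cwnd, s.epoch_start are now 0;
         * s.preserved and s.round_start still hold 4 and 5 */
        return 0;
    }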
|
D | tcp_hybla.c |
   36  struct hybla *ca = inet_csk_ca(sk);  in hybla_recalc_param() local
   38  ca->rho_3ls = max_t(u32,  in hybla_recalc_param()
   41  ca->rho = ca->rho_3ls >> 3;  in hybla_recalc_param()
   42  ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;  in hybla_recalc_param()
   43  ca->rho2 = ca->rho2_7ls >> 7;  in hybla_recalc_param()
   49  struct hybla *ca = inet_csk_ca(sk);  in hybla_init() local
   51  ca->rho = 0;  in hybla_init()
   52  ca->rho2 = 0;  in hybla_init()
   53  ca->rho_3ls = 0;  in hybla_init()
   54  ca->rho2_7ls = 0;  in hybla_init()
  [all …]
|
D | tcp_dctcp.c |
   81  static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)  in dctcp_reset() argument
   83  ca->next_seq = tp->snd_nxt;  in dctcp_reset()
   85  ca->old_delivered = tp->delivered;  in dctcp_reset()
   86  ca->old_delivered_ce = tp->delivered_ce;  in dctcp_reset()
   96  struct dctcp *ca = inet_csk_ca(sk);  in dctcp_init() local
   98  ca->prior_rcv_nxt = tp->rcv_nxt;  in dctcp_init()
  100  ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);  in dctcp_init()
  102  ca->loss_cwnd = 0;  in dctcp_init()
  103  ca->ce_state = 0;  in dctcp_init()
  105  dctcp_reset(tp, ca);  in dctcp_init()
  [all …]
|
/linux-6.15/fs/bcachefs/ |
D | sb-members.h |
   21  static inline bool bch2_dev_is_online(struct bch_dev *ca)  in bch2_dev_is_online() argument
   23  return !percpu_ref_is_zero(&ca->io_ref[READ]);  in bch2_dev_is_online()
   31  struct bch_dev *ca = bch2_dev_rcu(c, dev);  in bch2_dev_idx_is_online() local
   32  bool ret = ca && bch2_dev_is_online(ca);  in bch2_dev_idx_is_online()
   38  static inline bool bch2_dev_is_healthy(struct bch_dev *ca)  in bch2_dev_is_healthy() argument
   40  return bch2_dev_is_online(ca) &&  in bch2_dev_is_healthy()
   41  ca->mi.state != BCH_MEMBER_STATE_failed;  in bch2_dev_is_healthy()
   85  struct bch_dev *ca = NULL;  in __bch2_next_dev_idx() local
   90  !(ca = rcu_dereference_check(c->devs[idx],  in __bch2_next_dev_idx()
   94  return ca;  in __bch2_next_dev_idx()
  [all …]
|
D | super.c |
  195  for_each_member_device_rcu(c, ca, NULL)  in bch2_dev_to_fs()
  196  if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {  in bch2_dev_to_fs()
  294  for_each_member_device(c, ca) {  in __bch2_fs_read_only()
  295  bch2_dev_io_ref_stop(ca, WRITE);  in __bch2_fs_read_only()
  296  bch2_dev_allocator_remove(c, ca);  in __bch2_fs_read_only()
  448  __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) {  in __bch2_fs_read_write()
  449  bch2_dev_allocator_add(c, ca);  in __bch2_fs_read_write()
  450  percpu_ref_reinit(&ca->io_ref[WRITE]);  in __bch2_fs_read_write()
  631  for_each_member_device(c, ca)  in __bch2_fs_stop()
  632  bch2_dev_unlink(ca);  in __bch2_fs_stop()
  [all …]
|
D | buckets.h |
   15  static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)  in sector_to_bucket() argument
   17  return div_u64(s, ca->mi.bucket_size);  in sector_to_bucket()
   20  static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)  in bucket_to_sector() argument
   22  return ((sector_t) b) * ca->mi.bucket_size;  in bucket_to_sector()
   25  static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)  in bucket_remainder() argument
   29  div_u64_rem(s, ca->mi.bucket_size, &remainder);  in bucket_remainder()
   33  static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)  in sector_to_bucket_and_offset() argument
   35  return div_u64_rem(s, ca->mi.bucket_size, offset);  in sector_to_bucket_and_offset()
   57  static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)  in gc_bucket() argument
   59  return bucket_valid(ca, b)  in gc_bucket()
  [all …]
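These helpers are plain division and remainder on the per-device bucket size (expressed in sectors); div_u64/div_u64_rem are used so the 64-bit division also works on 32-bit builds. The same arithmetic as a standalone model, with a hypothetical bucket size of 1024 sectors:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace model of sector_to_bucket()/bucket_to_sector()/bucket_remainder(). */
    static const uint64_t bucket_size = 1024;   /* sectors per bucket, hypothetical */

    static uint64_t sector_to_bucket(uint64_t s)  { return s / bucket_size; }
    static uint64_t bucket_to_sector(uint64_t b)  { return b * bucket_size; }
    static uint32_t bucket_remainder(uint64_t s)  { return (uint32_t)(s % bucket_size); }

    int main(void)
    {
        uint64_t s = 5000;

        /* sector 5000 -> bucket 4 at offset 904; bucket 4 starts at sector 4096 */
        printf("bucket %llu, offset %u, start %llu\n",
               (unsigned long long)sector_to_bucket(s),
               bucket_remainder(s),
               (unsigned long long)bucket_to_sector(sector_to_bucket(s)));
        return 0;
    }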
|
D | chardev.c |
   27  /* returns with ref on ca->ref */
   31  struct bch_dev *ca;  in bch2_device_lookup() local
   37  ca = bch2_dev_tryget_noerror(c, dev);  in bch2_device_lookup()
   38  if (!ca)  in bch2_device_lookup()
   48  ca = bch2_dev_lookup(c, path);  in bch2_device_lookup()
   52  return ca;  in bch2_device_lookup()
  210  struct bch_dev *ca;  in bch2_ioctl_disk_remove() local
  222  ca = bch2_device_lookup(c, arg.dev, arg.flags);  in bch2_ioctl_disk_remove()
  223  if (IS_ERR(ca))  in bch2_ioctl_disk_remove()
  224  return PTR_ERR(ca);  in bch2_ioctl_disk_remove()
  [all …]
|
D | alloc_background.c |
  342  struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;  in bch2_alloc_to_text() local
  362  if (ca)  in bch2_alloc_to_text()
  363  prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca));  in bch2_alloc_to_text()
  367  bch2_dev_put(ca);  in bch2_alloc_to_text()
  595  struct bch_dev *ca = NULL;  in bch2_alloc_read() local
  607  ca = bch2_dev_iterate(c, ca, k.k->p.inode);  in bch2_alloc_read()
  612  if (!ca) {  in bch2_alloc_read()
  619  for (u64 b = max_t(u64, ca->mi.first_bucket, start);  in bch2_alloc_read()
  620  b < min_t(u64, ca->mi.nbuckets, end);  in bch2_alloc_read()
  622  *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];  in bch2_alloc_read()
  [all …]
|
/linux-6.15/drivers/media/dvb-core/ |
D | dvb_ca_en50221.c |
   79  /* Information on a CA slot */
  112  /* Private CA-interface information */
  125  /* number of slots supported by this CA interface */
  137  /* Flag indicating if the CA device is open */
  162  static void dvb_ca_private_free(struct dvb_ca_private *ca)  in dvb_ca_private_free() argument
  166  dvb_device_put(ca->dvbdev);  in dvb_ca_private_free()
  167  for (i = 0; i < ca->slot_count; i++)  in dvb_ca_private_free()
  168  vfree(ca->slot_info[i].rx_buffer.data);  in dvb_ca_private_free()
  170  kfree(ca->slot_info);  in dvb_ca_private_free()
  171  kfree(ca);  in dvb_ca_private_free()
  [all …]
|
/linux-6.15/drivers/md/bcache/ |
D | alloc.c |
   76  uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)  in bch_inc_gen() argument
   80  ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));  in bch_inc_gen()
   81  WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);  in bch_inc_gen()
   88  struct cache *ca;  in bch_rescale_priorities() local
  106  ca = c->cache;  in bch_rescale_priorities()
  107  for_each_bucket(b, ca)  in bch_rescale_priorities()
  130  bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)  in bch_can_invalidate_bucket() argument
  132  return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&  in bch_can_invalidate_bucket()
  137  void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)  in __bch_invalidate_one_bucket() argument
  139  lockdep_assert_held(&ca->set->bucket_lock);  in __bch_invalidate_one_bucket()
  [all …]
|
D | super.c |
  358  struct cache *ca = bio->bi_private;  in write_super_endio() local
  361  bch_count_io_errors(ca, bio->bi_status, 0,  in write_super_endio()
  363  closure_put(&ca->set->sb_write);  in write_super_endio()
  376  struct cache *ca = c->cache;  in bcache_write_super() local
  377  struct bio *bio = &ca->sb_bio;  in bcache_write_super()
  383  ca->sb.seq++;  in bcache_write_super()
  385  if (ca->sb.version < version)  in bcache_write_super()
  386  ca->sb.version = version;  in bcache_write_super()
  388  bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);  in bcache_write_super()
  390  bio->bi_private = ca;  in bcache_write_super()
  [all …]
|
/linux-6.15/drivers/media/pci/mantis/ |
D | mantis_ca.c |
   30  struct mantis_ca *ca = en50221->data;  in mantis_ca_read_attr_mem() local
   31  struct mantis_pci *mantis = ca->ca_priv;  in mantis_ca_read_attr_mem()
   38  return mantis_hif_read_mem(ca, addr);  in mantis_ca_read_attr_mem()
   43  struct mantis_ca *ca = en50221->data;  in mantis_ca_write_attr_mem() local
   44  struct mantis_pci *mantis = ca->ca_priv;  in mantis_ca_write_attr_mem()
   51  return mantis_hif_write_mem(ca, addr, data);  in mantis_ca_write_attr_mem()
   56  struct mantis_ca *ca = en50221->data;  in mantis_ca_read_cam_ctl() local
   57  struct mantis_pci *mantis = ca->ca_priv;  in mantis_ca_read_cam_ctl()
   64  return mantis_hif_read_iom(ca, addr);  in mantis_ca_read_cam_ctl()
   69  struct mantis_ca *ca = en50221->data;  in mantis_ca_write_cam_ctl() local
  [all …]
|
D | mantis_hif.c |
   30  static int mantis_hif_sbuf_opdone_wait(struct mantis_ca *ca)  in mantis_hif_sbuf_opdone_wait() argument
   32  struct mantis_pci *mantis = ca->ca_priv;  in mantis_hif_sbuf_opdone_wait()
   35  if (wait_event_timeout(ca->hif_opdone_wq,  in mantis_hif_sbuf_opdone_wait()
   36  ca->hif_event & MANTIS_SBUF_OPDONE,  in mantis_hif_sbuf_opdone_wait()
   43  ca->hif_event &= ~MANTIS_SBUF_OPDONE;  in mantis_hif_sbuf_opdone_wait()
   47  static int mantis_hif_write_wait(struct mantis_ca *ca)  in mantis_hif_write_wait() argument
   49  struct mantis_pci *mantis = ca->ca_priv;  in mantis_hif_write_wait()
   53  if (wait_event_timeout(ca->hif_write_wq,  in mantis_hif_write_wait()
   77  int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr)  in mantis_hif_read_mem() argument
   79  struct mantis_pci *mantis = ca->ca_priv;  in mantis_hif_read_mem()
  [all …]
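mantis_hif_sbuf_opdone_wait() is one half of a wait/wake pair: it sleeps on hif_opdone_wq until the event worker (mantis_evm.c, next entry) sets MANTIS_SBUF_OPDONE in hif_event and wakes the queue, or until the timeout expires. A reduced sketch of that pairing, with made-up names and timeout rather than the driver's actual code:

    #include <linux/types.h>
    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    #define DEMO_OPDONE 0x01    /* hypothetical event bit */

    struct demo_hif {
            wait_queue_head_t opdone_wq;
            u32 event;
    };

    /* Waiter side: sleep until the event bit is set or the timeout elapses.
     * wait_event_timeout() returns 0 on timeout. */
    static int demo_opdone_wait(struct demo_hif *hif)
    {
            if (!wait_event_timeout(hif->opdone_wq,
                                    hif->event & DEMO_OPDONE,
                                    msecs_to_jiffies(500)))
                    return -ETIMEDOUT;

            hif->event &= ~DEMO_OPDONE;     /* consume the event */
            return 0;
    }

    /* Signalling side, called from the interrupt-driven worker. */
    static void demo_opdone_signal(struct demo_hif *hif)
    {
            hif->event |= DEMO_OPDONE;
            wake_up(&hif->opdone_wq);
    }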
|
D | mantis_evm.c |
   29  struct mantis_ca *ca = container_of(work, struct mantis_ca, hif_evm_work);  in mantis_hifevm_work() local
   30  struct mantis_pci *mantis = ca->ca_priv;  in mantis_hifevm_work()
   40  mantis_event_cam_plugin(ca);  in mantis_hifevm_work()
   41  dvb_ca_en50221_camchange_irq(&ca->en50221,  in mantis_hifevm_work()
   49  mantis_event_cam_unplug(ca);  in mantis_hifevm_work()
   50  dvb_ca_en50221_camchange_irq(&ca->en50221,  in mantis_hifevm_work()
   79  ca->sbuf_status = MANTIS_SBUF_DATA_AVAIL;  in mantis_hifevm_work()
   80  ca->hif_event = MANTIS_SBUF_OPDONE;  in mantis_hifevm_work()
   81  wake_up(&ca->hif_opdone_wq);  in mantis_hifevm_work()
   85  int mantis_evmgr_init(struct mantis_ca *ca)  in mantis_evmgr_init() argument
  [all …]
|
/linux-6.15/tools/testing/selftests/bpf/progs/ |
D | bpf_cubic.c | 13 * "while (ca->ack_cnt > delta)" loop is changed to the equivalent 14 * "ca->ack_cnt / delta" operation. 102 static void bictcp_reset(struct bpf_bictcp *ca) in bictcp_reset() argument 104 ca->cnt = 0; in bictcp_reset() 105 ca->last_max_cwnd = 0; in bictcp_reset() 106 ca->last_cwnd = 0; in bictcp_reset() 107 ca->last_time = 0; in bictcp_reset() 108 ca->bic_origin_point = 0; in bictcp_reset() 109 ca->bic_K = 0; in bictcp_reset() 110 ca->delay_min = 0; in bictcp_reset() [all …]
|
/linux-6.15/kernel/sched/ |
D | cpuacct.c |
   42  static inline struct cpuacct *parent_ca(struct cpuacct *ca)  in parent_ca() argument
   44  return css_ca(ca->css.parent);  in parent_ca()
   57  struct cpuacct *ca;  in cpuacct_css_alloc() local
   62  ca = kzalloc(sizeof(*ca), GFP_KERNEL);  in cpuacct_css_alloc()
   63  if (!ca)  in cpuacct_css_alloc()
   66  ca->cpuusage = alloc_percpu(u64);  in cpuacct_css_alloc()
   67  if (!ca->cpuusage)  in cpuacct_css_alloc()
   70  ca->cpustat = alloc_percpu(struct kernel_cpustat);  in cpuacct_css_alloc()
   71  if (!ca->cpustat)  in cpuacct_css_alloc()
   74  return &ca->css;  in cpuacct_css_alloc()
  [all …]
|
/linux-6.15/include/media/ |
D | dvb_ca_en50221.h |
    2  * dvb_ca.h: generic DVB functions for EN50221 CA interfaces
   21  #include <linux/dvb/ca.h>
   37  * struct dvb_ca_en50221- Structure describing a CA interface
   61  int (*read_attribute_mem)(struct dvb_ca_en50221 *ca,
   63  int (*write_attribute_mem)(struct dvb_ca_en50221 *ca,
   66  int (*read_cam_control)(struct dvb_ca_en50221 *ca,
   68  int (*write_cam_control)(struct dvb_ca_en50221 *ca,
   71  int (*read_data)(struct dvb_ca_en50221 *ca,
   73  int (*write_data)(struct dvb_ca_en50221 *ca,
   76  int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot);
  [all …]
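This header defines the contract between the EN50221 core and a hardware driver: the driver fills in the function pointers and stashes its own context in the data field, which is exactly what mantis_ca.c above reads back via en50221->data. A sketch of the wiring under stated assumptions (the demo driver struct and its hardware helper are hypothetical; dvb_ca_en50221_init() is the core's registration call, with dvb_ca_en50221_release() as its teardown counterpart):

    #include <linux/module.h>
    #include <media/dvbdev.h>
    #include <media/dvb_ca_en50221.h>

    struct demo_ca {
            struct dvb_ca_en50221 en50221;  /* handed to the EN50221 core */
            void *hw;                       /* hypothetical hardware context */
    };

    /* Placeholder for a real bus access; a real driver reads the CAM here. */
    static int demo_hw_read(void *hw, int address)
    {
            return 0xff;
    }

    static int demo_read_attr_mem(struct dvb_ca_en50221 *en50221,
                                  int slot, int address)
    {
            struct demo_ca *ca = en50221->data;

            return demo_hw_read(ca->hw, address);
    }

    static int demo_attach(struct demo_ca *ca, struct dvb_adapter *adapter)
    {
            ca->en50221.owner = THIS_MODULE;
            ca->en50221.data = ca;
            ca->en50221.read_attribute_mem = demo_read_attr_mem;
            /* ...the remaining mandatory callbacks are wired up the same way... */

            /* register one slot with the EN50221 core */
            return dvb_ca_en50221_init(adapter, &ca->en50221, 0, 1);
    }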
|
/linux-6.15/drivers/ras/ |
D | cec.c |
  142  static void do_spring_cleaning(struct ce_array *ca)  in do_spring_cleaning() argument
  146  for (i = 0; i < ca->n; i++) {  in do_spring_cleaning()
  147  u8 decay = DECAY(ca->array[i]);  in do_spring_cleaning()
  154  ca->array[i] &= ~(DECAY_MASK << COUNT_BITS);  in do_spring_cleaning()
  155  ca->array[i] |= (decay << COUNT_BITS);  in do_spring_cleaning()
  157  ca->decay_count = 0;  in do_spring_cleaning()
  158  ca->decays_done++;  in do_spring_cleaning()
  186  static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)  in __find_elem() argument
  188  int min = 0, max = ca->n - 1;  in __find_elem()
  194  this_pfn = PFN(ca->array[i]);  in __find_elem()
  [all …]
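__find_elem() works because ce_array keeps its elements sorted by PFN, each u64 packing the PFN in the high bits and a small decay/count value in the low bits (the DECAY_MASK << COUNT_BITS update above touches exactly those low bits). A userspace model of the lookup (the 12-bit split is illustrative, not the driver's exact layout):

    #include <stdint.h>
    #include <stdio.h>

    #define PFN_SHIFT 12                 /* illustrative split */
    #define PFN(e)    ((e) >> PFN_SHIFT)

    /* Binary search over an array sorted by PFN, mirroring __find_elem(). */
    static int find_elem(const uint64_t *array, unsigned int n, uint64_t pfn,
                         unsigned int *to)
    {
        int min = 0, max = (int)n - 1;

        while (min <= max) {
            int i = (min + max) >> 1;
            uint64_t this_pfn = PFN(array[i]);

            if (this_pfn < pfn)
                min = i + 1;
            else if (this_pfn > pfn)
                max = i - 1;
            else {
                *to = (unsigned int)i;
                return i;                /* found */
            }
        }
        *to = (unsigned int)min;         /* insertion slot for a new PFN */
        return -1;                       /* the driver reports "not found" here */
    }

    int main(void)
    {
        const uint64_t a[] = { 0x1000ULL << PFN_SHIFT | 3,
                               0x2000ULL << PFN_SHIFT | 1,
                               0x3000ULL << PFN_SHIFT | 7 };
        unsigned int slot;

        printf("%d\n", find_elem(a, 3, 0x2000, &slot));          /* 1 */
        printf("%d %u\n", find_elem(a, 3, 0x2800, &slot), slot); /* -1 2 */
        return 0;
    }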
|
/linux-6.15/drivers/media/platform/chips-media/coda/ |
D | coda-gdi.c |
   61  * RA[15:0], CA[15:8] are hardwired to contain the 24-bit macroblock
   63  * Bits CA[4:0] are set using XY2CA above. BA[3:0] seems to be unused.
   79  RBC(CA, 0, CA, 0),
   80  RBC(CA, 1, CA, 1),
   81  RBC(CA, 2, CA, 2),
   82  RBC(CA, 3, CA, 3),
   83  RBC(CA, 4, CA, 8),
   84  RBC(CA, 8, CA, 9),
   85  RBC(CA, 9, CA, 10),
   86  RBC(CA, 10, CA, 11),
  [all …]
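The RBC() table above is a bit-routing map: each entry pairs a bit of one address with a bit of another (e.g. RBC(CA, 4, CA, 8) ties CA bit 4 to CA bit 8), so a linear macroblock address can be reshuffled into the tiled layout the GDI expects. A generic model of applying such a table (names and table contents are made up, not Coda's real mapping):

    #include <stdint.h>
    #include <stdio.h>

    /* One routing entry: destination bit <- source bit (hypothetical convention). */
    struct bit_route {
        unsigned int dst;
        unsigned int src;
    };

    /* Hypothetical table in the spirit of the RBC() list: low bits pass
     * straight through, bit 4 of the input lands on bit 8 of the output. */
    static const struct bit_route demo_map[] = {
        { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 }, { 8, 4 },
    };

    static uint32_t remap_bits(uint32_t in)
    {
        uint32_t out = 0;
        size_t i;

        for (i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
            out |= ((in >> demo_map[i].src) & 1U) << demo_map[i].dst;
        return out;
    }

    int main(void)
    {
        printf("0x%x\n", remap_bits(0x1f));   /* 0b11111 -> 0x10f */
        return 0;
    }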
|