/linux/net/ipv4/ |
tcp_htcp.c | ca is the per-socket struct htcp obtained via inet_csk_ca(sk); referenced in htcp_cong_time(), htcp_ccount(), htcp_reset(), htcp_cwnd_undo(), measure_rtt(), measure_achieved_throughput(), htcp_beta_update(), htcp_alpha_update(), htcp_param_update(), htcp_recalc_ssthresh(), htcp_cong_avoid(), htcp_init(), htcp_state(), …
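Every congestion-control module in this directory follows the pattern the tcp_htcp.c hits show: the per-socket scratch area returned by inet_csk_ca(sk) is cast to the algorithm's own state struct and updated from the callbacks. A minimal sketch of that pattern; the struct name my_ca and its field are illustrative, not taken from the kernel:

#include <linux/jiffies.h>
#include <net/inet_connection_sock.h>	/* inet_csk_ca() */
#include <net/tcp.h>

/* Illustrative per-socket state; real modules (htcp, cubic, ...) define
 * their own.  It must fit inside ICSK_CA_PRIV_SIZE bytes.
 */
struct my_ca {
	u32	last_cong;		/* jiffies at the last congestion event */
};

/* Same arithmetic as htcp_cong_time(): time since the last congestion event. */
static inline u32 my_cong_time(const struct sock *sk)
{
	const struct my_ca *ca = inet_csk_ca(sk);

	return jiffies - ca->last_cong;
}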
tcp_nv.c | ca is the per-socket struct tcpnv from inet_csk_ca(sk) (hits also land in field comments about "ca decisions": nv_min_cwnd, nv_rtt_cnt); referenced in tcpnv_reset(), tcpnv_init(), nv_get_bounded_rtt(), tcpnv_cong_avoid(), tcpnv_state(), tcpnv_acked(), tcpnv_get_info(), …
tcp_cdg.c | ca is the per-socket struct cdg from inet_csk_ca(sk); referenced in tcp_cdg_hystart_update(), tcp_cdg_grad(), tcp_cdg_backoff(), tcp_cdg_cong_avoid(), tcp_cdg_acked(), tcp_cdg_ssthresh(), tcp_cdg_cwnd_event(), tcp_cdg_init(), tcp_cdg_release(), …
tcp_cubic.c | ca is the per-socket struct bictcp from inet_csk_ca(sk); referenced in bictcp_reset(), bictcp_hystart_reset(), cubictcp_init(), cubictcp_cwnd_event(), bictcp_update(), cubictcp_cong_avoid(), cubictcp_recalc_ssthresh(), hystart_update(), cubictcp_acked(), …
tcp_cong.c | ca is a struct tcp_congestion_ops pointer; tcp_validate_congestion_control() rejects an ops structure lacking ssthresh, undo_cwnd, or both of cong_avoid/cong_control; referenced in tcp_ca_find_autoload(), tcp_validate_congestion_control(), tcp_register_congestion_control(), tcp_unregister_congestion_control(), tcp_update_congestion_control(), tcp_ca_get_key_by_name(), tcp_ca_get_name_by_key(), tcp_assign_congestion_control(), tcp_reinit_congestion_control(), tcp_set_default_congestion_control(), tcp_get_available_congestion_control(), tcp_get_default_congestion_control(), tcp_get_allowed_congestion_control(), tcp_set_allowed_congestion_control(), tcp_set_congestion_control(), …
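tcp_cong.c is the registration layer the other modules plug into. A hedged skeleton of what a module hands to tcp_register_congestion_control(); the name my_cc is illustrative and the required hooks simply reuse the exported Reno helpers:

#include <linux/module.h>
#include <net/tcp.h>

static void my_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	tcp_reno_cong_avoid(sk, ack, acked);	/* placeholder behaviour */
}

static struct tcp_congestion_ops my_cc __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* required */
	.cong_avoid	= my_cong_avoid,	/* or .cong_control */
	.owner		= THIS_MODULE,
	.name		= "my_cc",
};

static int __init my_cc_module_init(void)
{
	return tcp_register_congestion_control(&my_cc);
}
module_init(my_cc_module_init);

static void __exit my_cc_module_exit(void)
{
	tcp_unregister_congestion_control(&my_cc);
}
module_exit(my_cc_module_exit);

MODULE_LICENSE("GPL");

If any of the required hooks were left NULL, tcp_validate_congestion_control() would refuse the registration at module load time.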
tcp_illinois.c | ca is the per-socket struct illinois from inet_csk_ca(sk); referenced in rtt_reset(), tcp_illinois_init(), tcp_illinois_acked(), max_delay(), avg_delay(), alpha(), update_params(), tcp_illinois_state(), tcp_illinois_cong_avoid(), tcp_illinois_ssthresh(), tcp_illinois_info(), …
tcp_bic.c | ca is the per-socket struct bictcp from inet_csk_ca(sk); referenced in bictcp_reset(), bictcp_init(), bictcp_update(), bictcp_cong_avoid(), bictcp_recalc_ssthresh(), bictcp_acked(), …
tcp_hybla.c | ca is the per-socket struct hybla from inet_csk_ca(sk); referenced in hybla_recalc_param(), hybla_init(), hybla_state(), hybla_cong_avoid(), …
tcp_highspeed.c | ca is the per-socket struct hstcp from inet_csk_ca(sk); referenced in hstcp_init(), hstcp_cong_avoid() (which walks the hstcp_aimd_vals[] table via ca->ai), hstcp_ssthresh(), …
/linux/drivers/media/dvb-core/ |
dvb_ca_en50221.c | ca is a struct dvb_ca_private, the EN50221 CA core's per-device state, reference-counted via dvb_ca_private_get()/dvb_ca_private_put(); referenced in dvb_ca_private_free(), dvb_ca_private_release(), dvb_ca_en50221_check_camstatus(), dvb_ca_en50221_wait_if_status(), dvb_ca_en50221_link_init(), dvb_ca_en50221_read_tuple(), dvb_ca_en50221_parse_attributes(), dvb_ca_en50221_set_configoption(), dvb_ca_en50221_read_data(), dvb_ca_en50221_write_data(), dvb_ca_en50221_slot_shutdown(), the IRQ hooks dvb_ca_en50221_camchange_irq(), dvb_ca_en50221_camready_irq(), dvb_ca_en50221_frda_irq(), the state-machine thread (dvb_ca_en50221_thread_wakeup(), dvb_ca_en50221_thread_update_delay(), dvb_ca_en50221_poll_cam_gone(), dvb_ca_en50221_thread_state_machine(), dvb_ca_en50221_thread()), the CA device file operations (dvb_ca_en50221_io_do_ioctl(), _io_write(), _io_read_condition(), _io_read(), _io_open(), _io_release(), _io_poll()), dvb_ca_en50221_init(), dvb_ca_en50221_release(), …
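dvb_ca_private_get()/dvb_ca_private_put() in the entry above are thin wrappers around a kref embedded in the private struct, with a release callback that frees everything once the last user drops its reference. A generic sketch of that idiom, with illustrative names:

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_priv {
	struct kref	refcount;
	/* ... per-device state ... */
};

static void my_priv_release(struct kref *ref)
{
	struct my_priv *p = container_of(ref, struct my_priv, refcount);

	kfree(p);	/* dvb_ca_private_free() also releases slot buffers etc. */
}

static void my_priv_get(struct my_priv *p)
{
	kref_get(&p->refcount);
}

static void my_priv_put(struct my_priv *p)
{
	kref_put(&p->refcount, my_priv_release);
}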
/linux/drivers/md/bcache/ |
alloc.c | ca is a struct cache (one bcache cache device); referenced in bch_inc_gen(), bch_rescale_priorities(), bch_can_invalidate_bucket(), __bch_invalidate_one_bucket(), bch_invalidate_one_bucket(), invalidate_buckets_lru(), invalidate_buckets_fifo(), invalidate_buckets_random(), invalidate_buckets(), the allocator_wait() macro, bch_allocator_push(), bch_allocator_thread(), bch_bucket_alloc(), __bch_bucket_free(), __bch_bucket_alloc_set(), bch_cache_allocator_start(), …
super.c | ca is a struct cache; referenced in write_super_endio(), bcache_write_super(), __uuid_write(), prio_endio(), prio_io(), bch_prio_write(), prio_read(), bcache_device_unlink(), bcache_device_link(), two CLOSURE_CALLBACK() bodies, bch_cache_set_alloc(), run_cache_set(), register_cache_set(), bch_cache_release(), cache_alloc(), register_cache(), bch_is_open_cache(), …
/linux/fs/bcachefs/ |
sb-members.h | ca is a struct bch_dev (a bcachefs member device); referenced in bch2_dev_is_online(), bch2_dev_idx_is_online(), bch2_dev_is_healthy(), __bch2_next_dev_idx(), __bch2_next_dev(), bch2_dev_get(), __bch2_dev_put(), bch2_dev_put(), bch2_get_next_dev(), bch2_get_next_online_dev(), the for_each_online_member()/for_each_rw_member()/for_each_readable_member() iterators, bucket_valid(), bch2_dev_rcu(), bch2_dev_tryget_noerror(), bch2_dev_tryget(), bch2_dev_bucket_tryget_noerror(), bch2_dev_bucket_tryget(), bch2_dev_iterate_noerror(), bch2_dev_iterate(), bch2_dev_get_ioref(), bch2_dev_btree_bitmap_marked_sectors(), …
super.c | ca is a struct bch_dev; referenced in bch2_dev_to_fs(), __bch2_fs_read_only(), __bch2_fs_read_write(), __bch2_fs_stop(), bch2_fs_free(), bch2_fs_may_start(), bch2_dev_io_ref_stop(), bch2_dev_release(), bch2_dev_free(), __bch2_dev_offline(), bch2_dev_ref_complete(), bch2_dev_unlink(), bch2_dev_sysfs_online(), __bch2_dev_alloc(), bch2_dev_attach(), bch2_dev_alloc(), __bch2_dev_attach_bdev(), bch2_dev_attach_bdev(), bch2_dev_state_allowed(), __bch2_dev_read_only(), __bch2_dev_read_write(), __bch2_dev_set_state(), bch2_dev_set_state(), bch2_dev_remove(), bch2_dev_add(), bch2_dev_online(), bch2_dev_offline(), __bch2_dev_resize_alloc(), bch2_dev_resize(), bch2_fs_bdev_mark_dead(), …
buckets.h | ca is a struct bch_dev; helpers convert between sectors and buckets (a bucket spans ca->mi.bucket_size sectors) and read per-device usage: sector_to_bucket(), bucket_to_sector(), bucket_remainder(), sector_to_bucket_and_offset(), gc_bucket(), bucket_gens(), bucket_gen(), bucket_gen_get_rcu(), bucket_gen_get(), PTR_BUCKET_NR(), PTR_BUCKET_POS(), PTR_BUCKET_POS_OFFSET(), PTR_GC_BUCKET(), dev_ptr_stale_rcu(), dev_ptr_stale(), bch2_dev_usage_read(), bch2_dev_usage_full_read(), bch2_dev_buckets_reserved(), dev_buckets_free(), __dev_buckets_available(), dev_buckets_available(), …
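Most of buckets.h is plain sector/bucket arithmetic: the bucket index, its start sector and the in-bucket offset all come from div_u64()/div_u64_rem() against the device's bucket size. A self-contained sketch of the same arithmetic with the bucket size passed explicitly; the function names are illustrative:

#include <linux/math64.h>
#include <linux/types.h>

/* bucket_size is in 512-byte sectors, mirroring ca->mi.bucket_size. */
static inline u64 my_sector_to_bucket(u64 s, u32 bucket_size)
{
	return div_u64(s, bucket_size);
}

static inline u64 my_bucket_to_sector(u64 bucket, u32 bucket_size)
{
	return bucket * bucket_size;
}

static inline u32 my_bucket_remainder(u64 s, u32 bucket_size)
{
	u32 remainder;

	div_u64_rem(s, bucket_size, &remainder);
	return remainder;
}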
alloc_background.c | ca is a struct bch_dev; referenced in __bch2_alloc_v4_to_text(), bch2_alloc_read(), bch2_bucket_do_index(), bch2_dev_data_type_accounting_mod(), bch2_alloc_key_to_dev_counters(), bch2_trigger_alloc(), next_bucket(), bch2_get_key_or_real_bucket_hole(), bch2_check_alloc_key(), bch2_check_alloc_hole_freespace(), bch2_check_bucket_gens_key(), bch2_check_alloc_info(), bch2_check_alloc_to_lru_ref(), discard_in_flight_add(), discard_in_flight_remove(), bch2_discard_one_bucket(), bch2_do_discards_work(), bch2_dev_do_discards(), bch2_do_discards_fast_one(), bch2_do_discards_fast_work(), bch2_discard_one_bucket_fast(), invalidate_one_bp(), invalidate_one_bucket_by_bps(), invalidate_one_bucket(), next_lru_key(), bch2_do_invalidates_work(), bch2_dev_do_invalidates(), bch2_dev_freespace_init(), bch2_dev_remove_alloc(), bch2_dev_has_open_write_point(), bch2_dev_allocator_set_rw(), bch2_dev_allocator_remove(), bch2_dev_allocator_add(), bch2_dev_allocator_background_exit(), bch2_dev_allocator_background_init(), …
/linux/drivers/media/pci/mantis/ |
mantis_ca.c | ca is the driver's struct mantis_ca, recovered from en50221->data in each dvb_ca_en50221 callback; referenced in mantis_ca_read_attr_mem(), mantis_ca_write_attr_mem(), mantis_ca_read_cam_ctl(), mantis_ca_write_cam_ctl(), mantis_ca_slot_reset(), mantis_ca_slot_shutdown(), mantis_ts_control(), mantis_slot_status(), mantis_ca_init(), mantis_ca_exit(), …
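Each mantis callback starts by recovering the driver's own state from the ->data backpointer of the struct dvb_ca_en50221 it registered with the CA core. A hedged sketch of one such callback; the state struct and the low-level accessor are illustrative stand-ins for the real mantis HIF helpers:

#include <linux/errno.h>
#include <media/dvb_ca_en50221.h>

struct my_ca_state {
	void	*hw;			/* driver-private hardware handle */
};

/* Stand-in for a real bus accessor such as mantis_hif_read_mem(). */
static int my_hw_read_mem(struct my_ca_state *ca, int addr)
{
	return 0;
}

static int my_read_attr_mem(struct dvb_ca_en50221 *en50221, int slot, int addr)
{
	struct my_ca_state *ca = en50221->data;	/* set by the driver at init time */

	if (slot != 0)			/* single-slot hardware */
		return -EINVAL;

	return my_hw_read_mem(ca, addr);
}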
mantis_hif.c | ca is a struct mantis_ca; the HIF accessors sleep on ca->hif_opdone_wq / ca->hif_write_wq with wait_event_timeout(): mantis_hif_sbuf_opdone_wait(), mantis_hif_write_wait(), mantis_hif_read_mem(), mantis_hif_write_mem(), mantis_hif_read_iom(), mantis_hif_write_iom(), mantis_hif_init(), mantis_hif_exit(), …
mantis_evm.c | ca is a struct mantis_ca; the event-manager work handler reports CAM plug/unplug through dvb_ca_en50221_camchange_irq() and wakes HIF waiters on ca->hif_opdone_wq: mantis_hifevm_work(), mantis_evmgr_init(), mantis_evmgr_exit(), …
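The two entries above are the two halves of a common kernel handshake: the accessor sleeps with wait_event_timeout() until the event/work handler sets a flag and calls wake_up(). A reduced sketch of that pairing; the wait queue, flag and 500 ms timeout are illustrative choices, not the driver's exact values:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_opdone_wq);
static unsigned long my_events;
#define MY_OPDONE	BIT(0)

/* IRQ/work side: record that the operation finished and wake the waiter. */
static void my_signal_opdone(void)
{
	my_events |= MY_OPDONE;
	wake_up(&my_opdone_wq);
}

/* Sleeping side: wait for the hardware, then consume the event flag. */
static int my_wait_opdone(void)
{
	if (!wait_event_timeout(my_opdone_wq, my_events & MY_OPDONE,
				msecs_to_jiffies(500)))
		return -ETIMEDOUT;

	my_events &= ~MY_OPDONE;
	return 0;
}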
mantis_pcmcia.c | ca is a struct mantis_ca; tracks ca->slot_state across CAM insertion and extraction: mantis_event_cam_plugin(), mantis_event_cam_unplug(), mantis_pcmcia_init(), mantis_pcmcia_exit(), …
mantis_link.h | declares the struct mantis_ca * interfaces used above: mantis_event_cam_plugin(), mantis_event_cam_unplug(), mantis_pcmcia_init(), mantis_pcmcia_exit(), mantis_evmgr_init(), mantis_evmgr_exit(), mantis_hif_init(), mantis_hif_exit(), mantis_hif_read_mem(), mantis_hif_write_mem(), …
/linux/kernel/sched/ |
cpuacct.c | ca is a struct cpuacct, the cpuacct cgroup controller's per-css state holding per-CPU usage and cpustat counters; referenced in parent_ca(), cpuacct_css_alloc(), cpuacct_css_free(), cpuacct_cpuusage_read(), cpuacct_cpuusage_write(), __cpuusage_read(), cpuusage_write(), __cpuacct_percpu_seq_show(), cpuacct_all_seq_show(), cpuacct_stats_show(), cpuacct_charge(), cpuacct_account_field(), …
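cpuacct_css_alloc() in the entry above allocates two per-CPU regions (a u64 usage counter and a struct kernel_cpustat) and unwinds them if any step fails; the charge paths then only touch the local CPU's slot. A reduced sketch of that allocate-or-unwind pattern with illustrative names:

#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct my_acct {
	u64 __percpu			*cpuusage;
	struct kernel_cpustat __percpu	*cpustat;
};

static struct my_acct *my_acct_alloc(void)
{
	struct my_acct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

	if (!ca)
		return NULL;

	ca->cpuusage = alloc_percpu(u64);
	if (!ca->cpuusage)
		goto out_free_ca;

	ca->cpustat = alloc_percpu(struct kernel_cpustat);
	if (!ca->cpustat)
		goto out_free_cpuusage;

	return ca;

out_free_cpuusage:
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
	return NULL;
}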
/linux/include/media/ |
dvb_ca_en50221.h | includes <linux/dvb/ca.h> and declares struct dvb_ca_en50221, whose driver-supplied callbacks all take the struct dvb_ca_en50221 *ca: read_attribute_mem(), write_attribute_mem(), read_cam_control(), write_cam_control(), read_data(), write_data(), slot_reset(), slot_shutdown(), slot_ts_enable(), …
/linux/tools/testing/selftests/bpf/progs/ |
bpf_dctcp.c | ca is the per-socket struct bpf_dctcp from inet_csk_ca(sk) in this BPF selftest implementation of DCTCP; referenced in dctcp_reset(), dctcp_react_to_loss(), and several BPF_PROG() callback bodies (init plus the other congestion-control hooks that maintain prior_rcv_nxt, dctcp_alpha, loss_cwnd and ce_state), …
/linux/net/bluetooth/hidp/ |
sock.c | ca is a struct hidp_connadd_req built in the HIDP socket ioctl path: do_hidp_sock_ioctl() copies it from user space, NUL-terminates ca.name, resolves ca.ctrl_sock and ca.intr_sock with sockfd_lookup(), calls hidp_connection_add() and copies the struct back on success; also used in hidp_sock_compat_ioctl(), …
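The HIDP ioctl above shows the usual way a binary ioctl argument is taken from user space: copy the whole struct, then force-terminate any embedded string before trusting it. A minimal sketch of that step with an illustrative request struct (the real one is struct hidp_connadd_req from the Bluetooth UAPI):

#include <linux/errno.h>
#include <linux/uaccess.h>

struct my_conn_req {
	int	ctrl_sock;		/* control-channel socket fd */
	int	intr_sock;		/* interrupt-channel socket fd */
	char	name[128];
};

static int my_copy_conn_req(struct my_conn_req *req, void __user *argp)
{
	if (copy_from_user(req, argp, sizeof(*req)))
		return -EFAULT;

	/* Never assume user space NUL-terminated the name. */
	req->name[sizeof(req->name) - 1] = '\0';
	return 0;
}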