
Searched refs:closure (Results 1 – 25 of 57) sorted by relevance

/linux/include/linux/
closure.h
27 * wait synchronously, use closure_sync() - you will sleep until your closure's
63 * If closure's refcount started at 0, complete_some_read() could run before the
66 * complete_some_read()'s thread owned the closure - and whatever state it was
69 * So, closure_init() initializes a closure's refcount to 1 - and when a
74 * on a closure because you called closure_init() or you were run out of a
75 * closure - _always_ use continue_at(). Doing so consistently will help
82 * closure was already on a wait list or not - a closure can only be on one wait
87 * closure_init() takes two arguments - it takes the closure to initialize, and
90 * If parent is non null, the new closure wil
143 struct closure {
154 parent                  member of struct closure
157 closure_get_happened    member of struct closure
171 waiting_on              member of struct closure
[all...]
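The closure.h comments above describe the reference-count discipline: a closure starts at 1, every in-flight operation holds a reference, and the owner either hands off with continue_at() or waits synchronously. Below is a minimal sketch of the synchronous pattern with a stack-allocated closure, an assumption based on those comments rather than code copied from the indexed files:

#include <linux/bio.h>
#include <linux/closure.h>

/* Hedged example: one reference per outstanding bio, dropped in the endio
 * handler; closure_sync() sleeps until only the initial reference remains. */
static void example_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;

        bio_put(bio);
        closure_put(cl);                /* drop the ref held for this bio */
}

static void example_submit_and_wait(struct bio *bio)
{
        struct closure cl;

        closure_init_stack(&cl);        /* refcount starts at 1 */
        bio->bi_private = &cl;
        bio->bi_end_io  = example_endio;

        closure_get(&cl);               /* one ref for the in-flight bio */
        submit_bio(bio);

        closure_sync(&cl);              /* sleep until the bio completes */
}

This mirrors the stack-closure usage visible in the bcache results further down, e.g. bch_btree_node_read() and journal_read_bucket().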
/linux/include/uapi/linux/
firewire-cdev.h
57 * @closure: For arbitrary use by userspace
63 * Data passed in the @closure field for a request will be returned in the
65 * The ioctl used to set @closure depends on the @type of event.
68 __u64 closure; member
74 * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_GET_INFO ioctl
92 __u64 closure; member
104 * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST
116 __u64 closure; member
125 * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST
155 __u64 closure; member
180 __u64 closure; member
207 __u64 closure; member
278 __u64 closure; member
346 __u64 closure; member
385 __u64 closure; member
412 __u64 closure; member
434 __u64 closure; member
475 __u64 closure; member
650 __u64 closure; member
714 __u64 closure; member
842 __u64 closure; member
1110 __u64 closure; member
1136 __u64 closure; member
1159 __u64 closure; member
1175 __u64 closure; member
[all...]
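The @closure documentation above says the field is opaque to the kernel and is echoed back in the corresponding event. A hypothetical userspace sketch of that round trip follows; the UAPI structs and ioctl are the ones listed above, while my_request and the helper names are invented for illustration:

#include <linux/firewire-cdev.h>
#include <stdint.h>
#include <sys/ioctl.h>

struct my_request {                     /* hypothetical per-request state */
        int id;
};

/* Stash a pointer to our state in @closure before submitting; the kernel
 * returns the same value untouched in the matching response event. */
static int send_with_cookie(int fd, struct fw_cdev_send_request *req,
                            struct my_request *state)
{
        req->closure = (__u64)(uintptr_t)state;
        return ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, req);
}

static struct my_request *state_of(const struct fw_cdev_event_response *ev)
{
        return (struct my_request *)(uintptr_t)ev->closure;
}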
/linux/lib/
closure.c
9 #include <linux/closure.h>
21 "closure has guard bits set: %x (%u)", in closure_put_after_sub_checks()
26 "closure ref hit 0 with incorrect flags set: %x (%u)", in closure_put_after_sub_checks()
30 static inline void closure_put_after_sub(struct closure *cl, int flags) in closure_put_after_sub()
44 struct closure *parent = cl->parent; in closure_put_after_sub()
59 void closure_sub(struct closure *cl, int v) in closure_sub()
66 * closure_put - decrement a closure's refcount
68 void closure_put(struct closure *cl) in closure_put()
80 struct closure *cl, *t; in __closure_wake_up()
97 * closure_wait - add a closure t
[all...]
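lib/closure.c also implements wait lists (closure_wait(), __closure_wake_up()). The following is a rough sketch of how a caller might park a closure until a resource becomes available, assumed from the API names above rather than taken from the indexed callers; the wait list and function names are invented:

#include <linux/closure.h>
#include <linux/workqueue.h>

static struct closure_waitlist example_wait;    /* hypothetical wait list */

static void example_retry(struct closure *cl)
{
        /* ... runs on system_wq once example_wait has been woken ... */
}

static void example_alloc_or_park(struct closure *cl)
{
        /* closure_wait() returns false if cl is already on a wait list;
         * a closure can only wait on one list at a time. */
        if (!closure_wait(&example_wait, cl))
                return;

        continue_at(cl, example_retry, system_wq);
}

static void example_resource_freed(void)
{
        closure_wake_up(&example_wait);         /* requeue parked closures */
}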
/linux/drivers/md/bcache/
journal.h
111 struct closure io;
170 struct closure;
177 struct closure *parent);
180 void bch_journal_meta(struct cache_set *c, struct closure *cl);
stats.h
28 struct closure cl;
45 struct closure *parent);
btree.h
26 * specifying read vs. write locking, and the embedded closure is used for
49 * time, so there's a lock, implemented by a pointer to the btree_op closure -
68 * though - but it takes a refcount on the closure in struct btree_op you passed
138 struct closure io;
264 void __bch_btree_node_write(struct btree *b, struct closure *parent);
265 void bch_btree_node_write(struct btree *b, struct closure *parent);
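The btree.h comments above note that asynchronous work takes a refcount on the closure embedded in the struct btree_op the caller passed. A rough illustration of that embedded-closure idiom, using an invented stand-in struct rather than the actual btree_op layout:

#include <linux/closure.h>

struct example_op {                     /* hypothetical stand-in for btree_op */
        struct closure  cl;
        /* ... per-operation state ... */
};

static void example_op_work_done(struct example_op *op)
{
        closure_put(&op->cl);           /* release the worker's reference */
}

static void example_op_start_work(struct example_op *op)
{
        closure_get(&op->cl);           /* pin the op while work is pending */
        /* ... hand op to async machinery that ends in example_op_work_done() ... */
}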
request.h
6 struct closure cl;
request.c
108 static void bch_data_invalidate(struct closure *cl) in bch_data_invalidate()
171 struct closure *cl = bio->bi_private; in bch_data_insert_endio()
290 * @cl: closure pointer.
479 struct closure cl;
502 struct closure *cl = bio->bi_private; in bch_cache_read_endio()
637 struct closure *cl = bio->bi_private; in request_endio()
653 struct closure *cl = bio->bi_private; in backing_request_endio()
964 struct closure *cl = &s->cl; in cached_dev_read()
983 struct closure *cl = &s->cl; in cached_dev_write()
1295 struct closure *c in flash_dev_submit_bio()
[all...]
btree.c
237 struct closure *cl = bio->bi_private; in btree_node_read_endio()
245 struct closure cl; in bch_btree_node_read()
327 struct closure *cl = bio->bi_private; in btree_node_write_endio()
339 struct closure *cl = &b->io; in do_btree_node_write()
402 void __bch_btree_node_write(struct btree *b, struct closure *parent) in __bch_btree_node_write()
433 void bch_btree_node_write(struct btree *b, struct closure *parent) in bch_btree_node_write()
453 struct closure cl; in bch_btree_node_write_sync()
607 struct closure cl; in mca_reap()
748 struct closure cl; in bch_btree_cache_free()
1364 struct closure c in btree_gc_coalesce()
[all...]
journal.c
30 struct closure *cl = bio->bi_private; in journal_read_endio()
43 struct closure cl; in journal_read_bucket()
837 struct closure *cl = &c->journal.io; in journal_try_write()
855 struct closure cl; in journal_wait_for_write()
926 struct closure *parent) in bch_journal()
962 void bch_journal_meta(struct cache_set *c, struct closure *cl) in bch_journal_meta()
movinggc.c
16 struct closure cl;
131 struct closure cl; in read_moving()
super.c
336 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) in bch_write_bdev_super()
338 struct closure *cl = &dc->sb_write; in bch_write_bdev_super()
374 struct closure *cl = &c->sb_write; in bcache_write_super()
401 struct closure *cl = bio->bi_private; in uuid_endio()
417 struct closure *parent) in uuid_io()
419 struct closure *cl = &c->uuid_write; in uuid_io()
457 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) in uuid_read()
501 struct closure cl; in __uuid_write()
592 struct closure *cl = &ca->prio; in prio_io()
614 struct closure c in bch_prio_write()
[all...]
/linux/drivers/firewire/
uapi-test.c
22 KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_response, closure)); in structure_layout_event_response()
34 KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_request3, closure)); in structure_layout_event_request3()
53 KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_response2, closure)); in structure_layout_event_response2()
67 KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_phy_packet2, closure)); in structure_layout_event_phy_packet2()
core-cdev.c
104 __u64 closure; member
384 event->closure = client->bus_reset_closure; in fill_bus_reset_event()
655 rsp->closure = request->closure; in init_request()
662 rsp->closure = request->closure; in init_request()
771 req->closure = handler->closure; in handle_request()
785 req->closure = handler->closure; in handle_request()
[all...]
/linux/drivers/gpu/drm/
drm_edid.c
3088 typedef void detailed_cb(const struct detailed_timing *timing, void *closure);
3091 cea_for_each_detailed_block(const u8 *ext, detailed_cb *cb, void *closure) in cea_for_each_detailed_block() argument
3102 cb((const struct detailed_timing *)(det_base + 18 * i), closure); in cea_for_each_detailed_block()
3106 vtb_for_each_detailed_block(const u8 *ext, detailed_cb *cb, void *closure) in vtb_for_each_detailed_block() argument
3115 cb((const struct detailed_timing *)(det_base + 18 * i), closure); in vtb_for_each_detailed_block()
3119 detailed_cb *cb, void *closure) in drm_for_each_detailed_block() argument
3129 cb(&drm_edid->edid->detailed_timings[i], closure); in drm_for_each_detailed_block()
3135 cea_for_each_detailed_block(ext, cb, closure); in drm_for_each_detailed_block()
3138 vtb_for_each_detailed_block(ext, cb, closure); in drm_for_each_detailed_block()
3827 struct detailed_mode_closure *closure in do_inferred_modes() local
3869 struct detailed_mode_closure closure = { in add_inferred_modes() local
3912 struct detailed_mode_closure *closure = c; in do_established_modes() local
3934 struct detailed_mode_closure closure = { in add_established_modes() local
3961 struct detailed_mode_closure *closure = c; in do_standard_modes() local
3990 struct detailed_mode_closure closure = { in add_standard_modes() local
4070 struct detailed_mode_closure *closure = c; in do_cvt_mode() local
4081 struct detailed_mode_closure closure = { in add_cvt_modes() local
4100 struct detailed_mode_closure *closure = c; in do_detailed_mode() local
4134 struct detailed_mode_closure closure = { in add_detailed_modes() local
5512 struct drm_edid_match_closure *closure = data; in match_identity() local
5555 struct drm_edid_match_closure closure = { in drm_edid_match() local
6471 struct detailed_mode_closure *closure = c; in get_monitor_range() local
6508 struct detailed_mode_closure closure = { in drm_get_monitor_range() local
[all...]
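In drm_edid.c, "closure" is simply an opaque void * threaded from the iterator into each callback invocation, the usual C substitute for a real closure. A standalone sketch of that pattern, with all names invented for the example:

#include <stddef.h>

struct item {
        int value;
};

typedef void item_cb(const struct item *it, void *closure);

/* The iterator never interprets the closure pointer; it only hands the
 * caller's context back to the callback for every element. */
static void for_each_item(const struct item *items, size_t n,
                          item_cb *cb, void *closure)
{
        for (size_t i = 0; i < n; i++)
                cb(&items[i], closure);
}

struct sum_closure {
        int total;
};

static void sum_cb(const struct item *it, void *closure)
{
        struct sum_closure *s = closure;

        s->total += it->value;          /* caller state lives in the closure */
}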
/linux/fs/bcachefs/
alloc_foreground.h
105 struct closure *);
227 struct dev_stripe_state *, struct closure *);
236 struct closure *,
311 void __bch2_wait_on_allocator(struct bch_fs *, struct closure *);
312 static inline void bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl) in bch2_wait_on_allocator()
alloc_foreground.c
210 struct closure *cl) in __try_alloc_bucket()
263 struct closure *cl) in try_alloc_bucket()
287 struct closure *cl) in bch2_bucket_alloc_early()
378 struct closure *cl) in bch2_bucket_alloc_freelist()
453 struct closure *cl, in trace_bucket_alloc2()
491 * @cl: if not NULL, closure to be used to wait if buckets not available
498 struct closure *cl, in bch2_bucket_alloc_trans()
583 struct closure *cl) in bch2_bucket_alloc()
710 struct closure *cl) in bch2_bucket_alloc_set_trans()
764 struct closure *c in bucket_alloc_from_stripe()
[all...]
ec.h
203 struct closure iodone;
261 struct alloc_request *, unsigned, struct closure *);
io_write_types.h
69 struct closure cl;
move.h
26 struct closure cl;
btree_cache.h
31 int bch2_btree_cache_cannibalize_lock(struct btree_trans *, struct closure *);
/linux/sound/drivers/opl3/
opl3_oss.c
11 static int snd_opl3_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure);
131 static int snd_opl3_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) in snd_opl3_open_seq_oss() argument
133 struct snd_opl3 *opl3 = closure; in snd_opl3_open_seq_oss()
/linux/rust/pin-init/
README.md
138 [`impl PinInit<T, E>`] directly from a closure. Of course you have to ensure that the closure
140 (we are calling the parameter to the closure `slot`):
141 - when the closure returns `Ok(())`, then it has completed the initialization successfully, so
143 - when the closure returns `Err(e)`, then the caller may deallocate the memory at `slot`, so
145 - you may assume that `slot` will stay pinned even after the closure returns until `drop` of
184 // - when the closure returns `Ok(())`, then it has successfully initialized and
/linux/include/sound/
seq_oss.h
39 int (*open)(struct snd_seq_oss_arg *p, void *closure);
/linux/sound/synth/emux/
emux_oss.c
19 static int snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure);
96 snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) in snd_emux_open_seq_oss() argument
103 emu = closure; in snd_emux_open_seq_oss()
