/linux/block/

blk-mq-sched.c

    22  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)  -- argument
    24      if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
    27      set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
    31  void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)  -- argument
    33      clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
    36       * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
    39       * meantime new request added to hctx->dispatch is missed to check in
    44      blk_mq_run_hw_queue(hctx, true);
    58      struct blk_mq_hw_ctx *hctx =  -- local in blk_mq_dispatch_hctx_list()
    64      if (rq->mq_hctx != hctx) {
    85  __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)  -- argument
   176  blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)  -- argument
   194  blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx)  -- argument
   213  blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)  -- argument
   268  __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)  -- argument
   317  blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)  -- argument
   340  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_sched_bio_merge()
   380  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_sched_tags_teardown()
   392  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_sched_reg_debugfs()
   404  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_sched_unreg_debugfs()
   550  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_init_sched()
   603  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_sched_free_rqs()
   620  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_exit_sched()
   [all...]

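The first two hits above are the whole restart protocol in miniature: mark sets a state bit only if it is not already set, and restart clears it before re-running the queue. A minimal sketch stitched from the matched lines; the smp_mb() is an assumption based on the ordering comment quoted at lines 36-39 (the matches themselves do not show the barrier):

void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;
        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
        /*
         * Order the clear against the dispatch-list check in
         * blk_mq_run_hw_queue(): without it, a request added to
         * hctx->dispatch after the clear could be missed.
         */
        smp_mb();
        blk_mq_run_hw_queue(hctx, true);
}
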
blk-mq.h

    50  bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
    52  void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
    53  struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
    77   * @type: the hctx type index
   121  extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
   160  struct blk_mq_hw_ctx *hctx;  -- member
   173  int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
   186      struct blk_mq_hw_ctx *hctx)  -- argument of bt_wait_ptr()
   188      if (!hctx)  -- in bt_wait_ptr()
   190      return sbq_wait_ptr(bt, &hctx->wait_index);  -- in bt_wait_ptr()
   196  blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)  -- argument
   202  blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)  -- argument
   226  blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)  -- argument
   244  blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)  -- argument
   281  __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx, int val)  -- argument
   290  __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)  -- argument
   295  __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx, int val)  -- argument
   304  __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)  -- argument
   309  blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx, int val)  -- argument
   316  blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)  -- argument
   322  blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx, int val)  -- argument
   329  blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)  -- argument
   335  __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)  -- argument
   341  __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)  -- argument
   390  hctx_may_queue(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)  -- argument
   [all...]

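Most of the collapsed one-liners above are trivial state-bit or counter helpers. A hedged reconstruction of two of them; the bodies below are inferred from the names and the usual blk-mq state-bit pattern, not quoted from the file:

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

/* mapped == at least one software ctx and a tag set routed to this hctx */
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}
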
blk-mq-sysfs.c

    34      struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,  -- local in blk_mq_hw_sysfs_release()
    37      blk_free_flush_queue(hctx->fq);
    38      sbitmap_free(&hctx->ctx_map);
    39      free_cpumask_var(hctx->cpumask);
    40      kfree(hctx->ctxs);
    41      kfree(hctx);
    53      struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_hw_sysfs_show()
    58      hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
    59      q = hctx->queue;
    65      res = entry->show(hctx, page);
    70  blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx, char *page)  -- argument
    76  blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, char *page)  -- argument
    82  blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)  -- argument
   144  blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)  -- argument
   158  blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)  -- argument
   187  blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)  -- argument
   222  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_sysfs_register()
   256  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_sysfs_unregister()
   270  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_sysfs_unregister_hctxs()
   282  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_sysfs_register_hctxs()
   [all...]

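Lines 34-41 are the classic kobject release idiom: container_of() recovers the hctx that embeds the kobj, after which its resources can be freed. Stitched together from the matches above:

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
                                                  kobj);

        /* tear down everything owned by this hardware context */
        blk_free_flush_queue(hctx->fq);
        sbitmap_free(&hctx->ctx_map);
        free_cpumask_var(hctx->cpumask);
        kfree(hctx->ctxs);
        kfree(hctx);
}
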
blk-mq.c

    51  static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
    53  static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
    60  static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)  -- argument
    62      return !list_empty_careful(&hctx->dispatch) ||
    63          sbitmap_any_bit_set(&hctx->ctx_map) ||
    64          blk_mq_sched_has_work(hctx);
    70  static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,  -- argument
    73      const int bit = ctx->index_hw[hctx->type];
    75      if (!sbitmap_test_bit(&hctx->ctx_map, bit))
    76          sbitmap_set_bit(&hctx->ctx_map, bit);
    79  blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx)  -- argument
   364  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_wake_waiters()
   403  struct blk_mq_hw_ctx *hctx = data->hctx;  -- local in blk_mq_rq_ctx_init()
   773  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  -- local in __blk_mq_free_request()
  1159  blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx, int *tag_array, int nr_tags)  -- argument
  1421  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  -- local in blk_execute_rq_nowait()
  1482  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  -- local in blk_execute_rq()
  1714  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_timeout_work()
  1767  struct blk_mq_hw_ctx *hctx;  -- member
  1774  struct blk_mq_hw_ctx *hctx = flush_data->hctx;  -- local in flush_busy_ctx()
  1789  blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)  -- argument
  1800  struct blk_mq_hw_ctx *hctx;  -- member
  1808  struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;  -- local in dispatch_rq_from_ctx()
  1824  blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start)  -- argument
  1867  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_dispatch_wake()
  1891  blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq)  -- argument
  1985  blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)  -- argument
  2020  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  -- local in blk_mq_prep_dispatch_rq()
  2076  blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued, bool from_schedule)  -- argument
  2088  blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, bool get_budget)  -- argument
  2216  blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)  -- argument
  2229  blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx *hctx)  -- argument
  2240  blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)  -- argument
  2288  blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)  -- argument
  2297  blk_mq_hw_queue_need_run(struct blk_mq_hw_ctx *hctx)  -- argument
  2324  blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)  -- argument
  2377  struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];  -- local in blk_mq_get_sq_hctx()
  2391  struct blk_mq_hw_ctx *hctx, *sq_hctx;  -- local in blk_mq_run_hw_queues()
  2419  struct blk_mq_hw_ctx *hctx, *sq_hctx;  -- local in blk_mq_delay_run_hw_queues()
  2457  blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)  -- argument
  2476  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_stop_hw_queues()
  2484  blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)  -- argument
  2494  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_start_hw_queues()
  2502  blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)  -- argument
  2520  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_start_stopped_hw_queues()
  2531  struct blk_mq_hw_ctx *hctx =  -- local in blk_mq_run_work_fn()
  2548  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  -- local in blk_mq_request_bypass_insert()
  2558  blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list, bool run_queue_async)  -- argument
  2599  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  -- local in blk_mq_insert_request()
  2680  __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, bool last)  -- argument
  2738  blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq)  -- argument
  2772  struct blk_mq_hw_ctx *hctx = rq->mq_hctx;  -- local in blk_mq_request_issue_directly()
  2787  struct blk_mq_hw_ctx *hctx = NULL;  -- local in blk_mq_issue_direct()
  2971  blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list)  -- argument
  3116  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_submit_bio()
  3660  struct blk_mq_hw_ctx *hctx;  -- member
  3674  blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)  -- argument
  3686  blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx, unsigned int this_cpu)  -- argument
  3714  struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,  -- local in blk_mq_hctx_notify_offline()
  3752  blk_mq_cpu_mapped_to_hctx(unsigned int cpu, const struct blk_mq_hw_ctx *hctx)  -- argument
  3762  struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,  -- local in blk_mq_hctx_notify_online()
  3777  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_hctx_notify_dead()
  3807  __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)  -- argument
  3825  blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)  -- argument
  3832  __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)  -- argument
  3848  struct blk_mq_hw_ctx *hctx;  -- local in __blk_mq_remove_cpuhp_list()
  3885  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_add_hw_queues_cpuhp()
  3926  blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  -- argument
  3952  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_exit_hw_queues()
  3965  blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)  -- argument
  3998  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_alloc_hctx()
  4069  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_init_cpu_queues()
  4149  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_map_swqueue()
  4265  struct blk_mq_hw_ctx *hctx;  -- local in queue_set_hctx_shared()
  4366  struct blk_mq_hw_ctx *hctx, *next;  -- local in blk_mq_release()
  4479  blk_mq_hctx_is_reusable(struct blk_mq_hw_ctx *hctx)  -- argument
  4489  struct blk_mq_hw_ctx *hctx = NULL, *tmp;  -- local in blk_mq_alloc_and_init_hctx()
  4522  struct blk_mq_hw_ctx *hctx;  -- local in __blk_mq_realloc_hw_ctxs()
  4923  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_update_nr_requests()
  5125  blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob, unsigned int flags)  -- argument
  5186  struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_cancel_work_sync()
   [all...]

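Lines 60-64 read as one unit: a hardware queue has pending work if its dispatch list is non-empty, any software-queue bit is set in ctx_map, or the elevator reports work. Assembled from those matches:

static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return !list_empty_careful(&hctx->dispatch) ||
                sbitmap_any_bit_set(&hctx->ctx_map) ||
                blk_mq_sched_has_work(hctx);
}
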
blk-mq-debugfs.c

   168      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_state_show()
   171      blk_flags_show(m, hctx->state, hctx_state_name,
   190      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_flags_show()
   194      blk_flags_show(m, hctx->flags, hctx_flag_name,
   298      __acquires(&hctx->lock)  -- in hctx_dispatch_start()
   300      struct blk_mq_hw_ctx *hctx = m->private;  -- local in hctx_dispatch_start()
   302      spin_lock(&hctx->lock);
   303      return seq_list_start(&hctx->dispatch, *pos);
   308      struct blk_mq_hw_ctx *hctx = m->private;  -- local in hctx_dispatch_next()
   310      return seq_list_next(v, &hctx->dispatch, pos);
   316      struct blk_mq_hw_ctx *hctx = m->private;  -- local in hctx_dispatch_stop()
   330      struct blk_mq_hw_ctx *hctx;  -- member
   350      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_busy_show()
   372      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_type_show()
   381      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_ctx_map_show()
   406      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_tags_show()
   422      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_tags_bitmap_show()
   438      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_sched_tags_show()
   454      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_sched_tags_bitmap_show()
   470      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_active_show()
   478      struct blk_mq_hw_ctx *hctx = data;  -- local in hctx_dispatch_busy_show()
   622      struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_debugfs_register()
   642  blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx)  -- argument
   655  blk_mq_debugfs_register_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx)  -- argument
   673  blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)  -- argument
   684      struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_debugfs_register_hctxs()
   693      struct blk_mq_hw_ctx *hctx;  -- local in blk_mq_debugfs_unregister_hctxs()
   771  blk_mq_debugfs_register_sched_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx)  -- argument
   794  blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)  -- argument
   [all...]

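Lines 298-316 form a seq_file iterator that pins hctx->dispatch under hctx->lock for the whole walk: ->start() takes the lock, ->stop() drops it. A sketch assembled from the matches (the hctx_dispatch_stop() body is inferred):

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}
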
kyber-iosched.c

   138   * There is a same mapping between ctx & hctx and kcq & khd,
   443  static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)  -- argument
   445      struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
   446      struct blk_mq_tags *tags = hctx->sched_tags;
   448      kqd->async_depth = hctx->queue->nr_requests * KYBER_ASYNC_PERCENT / 100U;
   452  static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  -- argument
   457      khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
   461      khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
   463              GFP_KERNEL, hctx->numa_node);
   467      for (i = 0; i < hctx->nr_ctx; i++)
   507  kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  -- argument
   559      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(bio->bi_opf, ctx);  -- local in kyber_bio_merge()
   578  kyber_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *rq_list, blk_insert_t flags)  -- argument
   686      struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);  -- local in kyber_domain_wake()
   696  kyber_get_domain_token(struct kyber_queue_data *kqd, struct kyber_hctx_data *khd, struct blk_mq_hw_ctx *hctx)  -- argument
   744  kyber_dispatch_cur_domain(struct kyber_queue_data *kqd, struct kyber_hctx_data *khd, struct blk_mq_hw_ctx *hctx)  -- argument
   791  kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)  -- argument
   837  kyber_has_work(struct blk_mq_hw_ctx *hctx)  -- argument
   959      struct blk_mq_hw_ctx *hctx = data;  -- local in kyber_cur_domain_show()
   968      struct blk_mq_hw_ctx *hctx = data;  -- local in kyber_batching_show()
   [all...]

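Lines 452-467 show Kyber allocating its per-hctx data NUMA-locally: the khd structure and one kcq per software queue both come from hctx->numa_node. A sketch under those lines; error handling and the rest of the khd setup are abbreviated, and kyber_ctx_queue_init() is assumed from the same file:

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct kyber_hctx_data *khd;
        int i;

        /* keep the scheduler data on the hctx's own NUMA node */
        khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
        if (!khd)
                return -ENOMEM;

        /* one kcq per software queue mapped to this hardware queue */
        khd->kcqs = kmalloc_array_node(hctx->nr_ctx, sizeof(*khd->kcqs),
                                       GFP_KERNEL, hctx->numa_node);
        if (!khd->kcqs) {
                kfree(khd);
                return -ENOMEM;
        }

        for (i = 0; i < hctx->nr_ctx; i++)
                kyber_ctx_queue_init(&khd->kcqs[i]);

        hctx->sched_data = khd;
        return 0;
}
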
blk-mq-sched.h

    16  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
    17  void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
    19  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
    35  static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)  -- argument
    37      if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
    38          __blk_mq_sched_restart(hctx);
    80  static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)  -- argument
    82      struct elevator_queue *e = hctx->queue->elevator;
    85      return e->type->ops.has_work(hctx);
    90  static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)  -- argument
   [all...]

blk-mq-debugfs.h

    25      struct blk_mq_hw_ctx *hctx);
    26  void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
    33      struct blk_mq_hw_ctx *hctx);
    34  void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
    44      struct blk_mq_hw_ctx *hctx)  -- argument of the blk_mq_debugfs_register_hctx() stub
    48  static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)  -- argument
    69      struct blk_mq_hw_ctx *hctx)  -- argument of the blk_mq_debugfs_register_sched_hctx() stub
    73  static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)  -- argument

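The header follows the usual config-gated stub pattern: real prototypes when debugfs support is built in, empty static inlines otherwise, so callers never need their own #ifdef. Sketched for the hctx pair (the CONFIG_BLK_DEBUG_FS guard is the assumed gate):

#ifdef CONFIG_BLK_DEBUG_FS
void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                  struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
#else
static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                                struct blk_mq_hw_ctx *hctx)
{
}

static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
}
#endif
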
/linux/drivers/hid/bpf/progs/

XPPen__ArtistPro16Gen2.bpf.c

    88  int BPF_PROG(hid_fix_rdesc_xppen_artistpro16gen2, struct hid_bpf_ctx *hctx)  -- in SEC()
    90      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
   100      if (hctx->hid->product == PID_ARTIST_PRO14_GEN2) {
   105      } else if (hctx->hid->product == PID_ARTIST_PRO19_GEN2) {
   116  static int xppen_16_fix_eraser(struct hid_bpf_ctx *hctx)  -- argument
   118      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
   238  static int xppen_16_fix_angle_offset(struct hid_bpf_ctx *hctx)  -- argument
   240      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
   273      switch (hctx->hid->product) {
   292  int BPF_PROG(xppen_artist_pro_16_device_event, struct hid_bpf_ctx *hctx)  -- in SEC()
   [all...]

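All of the HID-BPF fixups in this directory share one skeleton, visible in the matches: hid_bpf_get_data() maps a bounded window of the descriptor or report, a NULL check guards the refused-access case, and hctx->hid->product (or hctx->size) selects the patch. A generic sketch; the struct_ops registration is omitted, and PID_EXAMPLE_TABLET is a hypothetical stand-in for each driver's own defines:

SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(example_fix_rdesc, struct hid_bpf_ctx *hctx)
{
        __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);

        if (!data)
                return 0; /* EPERM check */

        if (hctx->hid->product == PID_EXAMPLE_TABLET /* hypothetical */) {
                /* patch the offending descriptor bytes here */
        }

        return 0; /* 0 == leave the descriptor unchanged */
}
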
Huion__Kamvas-Pro-19.bpf.c

   255  int BPF_PROG(hid_fix_rdesc_huion_kamvas_pro_19, struct hid_bpf_ctx *hctx)  -- in SEC()
   257      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
   262      if (hctx->size == FW_240524_RDESC_SIZE) {
   284  int BPF_PROG(kamvas_pro_19_fix_3rd_button, struct hid_bpf_ctx *hctx)  -- in SEC()
   286      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
   341      struct hid_bpf_ctx *hctx = hid_bpf_allocate_context(ctx->hid);  -- local in probe()
   343      if (!hctx) {
   348      const char *name = hctx->hid->name;
   357      hid_bpf_release_context(hctx);

XPPen__ACK05.bpf.c

   218  int BPF_PROG(ack05_fix_rdesc, struct hid_bpf_ctx *hctx)  -- in SEC()
   220      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
   221      __s32 rdesc_size = hctx->size;
   235      hid_set_name(hctx->hid, "Disabled by HID-BPF Hanvon Ugee Shortcut Remote");
   265  int BPF_PROG(ack05_fix_events, struct hid_bpf_ctx *hctx)  -- in SEC()
   267      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, PAD_REPORT_LENGTH);
   278      HID_BPF_ASYNC_DELAYED_CALL(switch_to_raw_mode, hctx, 10);
   315      struct hid_bpf_ctx *hctx = hid_bpf_allocate_context(ctx->hid);  -- local in probe()
   317      if (!hctx) {
   323      switch_to_raw_mode(hctx);
   [all...]

XPPen__DecoMini4.bpf.c

   153  int BPF_PROG(hid_rdesc_fixup_xppen_deco_mini_4, struct hid_bpf_ctx *hctx)  -- in SEC()
   155      __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE);
   160      if (hctx->size == RDESC_SIZE_PAD) {
   163      } else if (hctx->size == RDESC_SIZE_PEN) {
   172  int BPF_PROG(hid_device_event_xppen_deco_mini_4, struct hid_bpf_ctx *hctx)  -- in SEC()
   174      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 8 /* size */);

FR-TEC__Raptor-Mach-2.bpf.c

   137  int BPF_PROG(hid_fix_rdesc_raptor_mach_2, struct hid_bpf_ctx *hctx)  -- in SEC()
   139      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
   156  int BPF_PROG(raptor_mach_2_fix_hat_switch, struct hid_bpf_ctx *hctx)  -- in SEC()
   158      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 64 /* size */);

XPPen__Artist24.bpf.c

    93  int BPF_PROG(hid_fix_rdesc_xppen_artist24, struct hid_bpf_ctx *hctx)  -- in SEC()
    95      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
   154  int BPF_PROG(xppen_24_fix_eraser, struct hid_bpf_ctx *hctx)  -- in SEC()
   156      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);

Thrustmaster__TCA-Yoke-Boeing.bpf.c

   100  int BPF_PROG(hid_fix_rdesc_tca_yoke, struct hid_bpf_ctx *hctx)  -- in SEC()
   104      if (hctx->size != expected_length)
   107      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);

Huion__KeydialK20.bpf.c

   345  int BPF_PROG(k20_fix_rdesc, struct hid_bpf_ctx *hctx)  -- in SEC()
   347      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
   348      __s32 rdesc_size = hctx->size;
   389  int BPF_PROG(k20_fix_events, struct hid_bpf_ctx *hctx)  -- in SEC()
   391      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);

hid_bpf_async.h

    85  static int HID_BPF_ASYNC_CB(____##fun##_cb)(struct hid_bpf_ctx *hctx) \
    87          return fun(hctx); \
   168  static int hid_bpf_async_delayed_call(struct hid_bpf_ctx *hctx, u64 milliseconds, int key,  -- argument
   191      elem->hid = hctx->hid->id;

Huion__Inspiroy-2-S.bpf.c

   379  int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)  -- in SEC()
   381      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
   382      __s32 rdesc_size = hctx->size;
   425  int BPF_PROG(inspiroy_2_fix_events, struct hid_bpf_ctx *hctx)  -- in SEC()
   427      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);

HP__Elite-Presenter.bpf.c

    34  int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)  -- in SEC()
    36      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);

Huion__Dial-2.bpf.c

   442  int BPF_PROG(dial_2_fix_rdesc, struct hid_bpf_ctx *hctx)  -- in SEC()
   444      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
   445      __s32 rdesc_size = hctx->size;
   488  int BPF_PROG(dial_2_fix_events, struct hid_bpf_ctx *hctx)  -- in SEC()
   490      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 16 /* size */);

Mistel__MD770.bpf.c

   127  int BPF_PROG(hid_rdesc_fixup_mistel_md770, struct hid_bpf_ctx *hctx)  -- in SEC()
   129      __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE);

/linux/tools/testing/selftests/hid/progs/

hid.c

   311  int BPF_PROG(hid_test_filter_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,  -- argument
   332  int BPF_PROG(hid_test_hidraw_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,  -- argument
   335      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
   345      ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
   363  int BPF_PROG(hid_test_infinite_loop_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,  -- argument
   366      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
   377      ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
   390  int BPF_PROG(hid_test_filter_output_report, struct hid_bpf_ctx *hctx, unsigned char reportnum,  -- argument
   402  int BPF_PROG(hid_test_hidraw_output_report, struct hid_bpf_ctx *hctx, __u64 source)  -- argument
   404      __u8 *data = hid_bpf_get_data(hctx, ...
   423  BPF_PROG(hid_test_infinite_loop_output_report, struct hid_bpf_ctx *hctx, __u64 source)  -- argument
   566  BPF_PROG(hid_test_infinite_loop_input_report, struct hid_bpf_ctx *hctx, enum hid_report_type report_type, __u64 source)  -- argument
   [all...]

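Lines 335-345 exercise the raw-request path: stage a small buffer with hid_bpf_get_data(), then talk to the device synchronously through hid_bpf_hw_request(). A hedged helper-style sketch; the struct_ops/SEC wiring, helper name, and chosen report ID are illustrative, not quoted from hid.c:

static int fetch_first_feature_byte(struct hid_bpf_ctx *hctx)
{
        __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
        int ret;

        if (!data)
                return 0; /* EPERM check */

        data[0] = 1;    /* report ID to query (illustrative) */
        ret = hid_bpf_hw_request(hctx, data, 2, HID_FEATURE_REPORT,
                                 HID_REQ_GET_REPORT);
        if (ret < 0)
                return ret;

        return data[1]; /* first payload byte returned by the device */
}
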
/linux/drivers/crypto/marvell/octeontx2/

cn10k_cpt.c

   158  void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)  -- argument
   160      hctx->w0.aop_valid = 1;
   161      hctx->w0.ctx_hdr_sz = 0;
   162      hctx->w0.ctx_sz = ctx_sz;
   163      hctx->w0.ctx_push_sz = 1;
   170      union cn10k_cpt_hw_ctx *hctx;  -- local in cn10k_cpt_hw_ctx_init()
   179      hctx = kmalloc(CN10K_CPT_HW_CTX_SIZE, GFP_KERNEL);
   180      if (unlikely(!hctx))
   182      cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
   185      kfree(hctx);
   [all...]

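Lines 170-185 outline the context setup: kmalloc the hardware context, DMA-map it, and unwind on mapping failure; cn10k_cpt_hw_ctx_set() then stamps word 0 (lines 158-163). A sketch with an illustrative wrapper name and an assumed DMA direction:

static int example_hw_ctx_init(struct pci_dev *pdev)
{
        union cn10k_cpt_hw_ctx *hctx;
        dma_addr_t cptr_dma;

        hctx = kmalloc(CN10K_CPT_HW_CTX_SIZE, GFP_KERNEL);
        if (unlikely(!hctx))
                return -ENOMEM;

        cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
                                  DMA_BIDIRECTIONAL); /* direction assumed */
        if (dma_mapping_error(&pdev->dev, cptr_dma)) {
                kfree(hctx);
                return -ENOMEM;
        }

        cn10k_cpt_hw_ctx_set(hctx, 1);  /* aop_valid = 1, ctx_sz = 1 */
        /* real code would save hctx and cptr_dma for the engine here */
        return 0;
}
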
/linux/samples/hid/

hid_surface_dial.bpf.c

    14  int BPF_PROG(hid_event, struct hid_bpf_ctx *hctx)  -- argument
    16      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);
   105  int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)  -- argument
   107      __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);

/linux/include/linux/

blk-mq.h

    41      /* use hctx->sched_tags */
   327      /** @cpumask: Map of available CPUs where this hctx can run. */
   357       * this hctx
   441       * @hctx_list: if this hctx is not in use, this is an entry in
   468       * @HCTX_MAX_TYPES: Number of types of hctx.
   481       * @map: One or more ctx -> hctx mappings. One map exists for each
   909  void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
   910  void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
   913  void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
   920  void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
   998  queue_for_each_hw_ctx(q, hctx, i)  -- macro argument
  1001  hctx_for_each_ctx(hctx, ctx, i)  -- macro argument
   [all...]

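The two macros at 998/1001 are the standard iteration helpers. Typical usage, mirroring what the blk-mq.c listing shows blk_mq_run_hw_queues() doing: walk every hardware queue of a request_queue and kick the ones that are not stopped.

struct blk_mq_hw_ctx *hctx;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i) {
        if (blk_mq_hctx_stopped(hctx))
                continue;
        blk_mq_run_hw_queue(hctx, true);
}
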