/linux/drivers/infiniband/hw/ionic/

ionic_datapath.c
   528  spin_lock(&qp->rq_lock);                          in ionic_poll_vcq_cq()
   530  spin_unlock(&qp->rq_lock);                        in ionic_poll_vcq_cq()
   618  spin_lock(&qp->rq_lock);                          in ionic_poll_vcq_cq()
   620  spin_unlock(&qp->rq_lock);                        in ionic_poll_vcq_cq()
  1323  spin_lock_irqsave(&qp->rq_lock, irqflags);        in ionic_post_recv_common()
  1341  spin_unlock_irqrestore(&qp->rq_lock, irqflags);   in ionic_post_recv_common()
  1344  spin_unlock_irqrestore(&qp->rq_lock, irqflags);   in ionic_post_recv_common()
  1347  spin_lock(&qp->rq_lock);                          in ionic_post_recv_common()
  1366  spin_unlock(&qp->rq_lock);                        in ionic_post_recv_common()
|
ionic_ibdev.h
   276  spinlock_t rq_lock;   /* for posting and polling */   member
|
ionic_controlpath.c
  2177  spin_lock_init(&qp->rq_lock);   in ionic_create_qp()
  2437  spin_lock(&qp->rq_lock);        in ionic_flush_qp()
  2443  spin_unlock(&qp->rq_lock);      in ionic_flush_qp()
  2509  spin_lock(&qp->rq_lock);        in ionic_reset_qp()
  2519  spin_unlock(&qp->rq_lock);      in ionic_reset_qp()
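
Read together, the ionic hits show a common RDMA receive-queue pattern: rq_lock is initialized at QP creation, taken with the irqsave variant while posting receive work requests, and taken again while polling, flushing or resetting the queue. Below is a minimal sketch of that pattern, assuming invented names (my_qp, my_qp_post_recv); it illustrates the locking shape, not the ionic driver's actual API.

/*
 * Sketch only: per-QP receive-queue locking as seen in the hits above.
 * my_qp and its helpers are illustrative, not the ionic driver's types.
 */
#include <linux/spinlock.h>
#include <linux/errno.h>

struct my_qp {
	spinlock_t rq_lock;		/* protects the receive ring */
	unsigned int rq_used;
	unsigned int rq_depth;
};

static void my_qp_init(struct my_qp *qp)
{
	spin_lock_init(&qp->rq_lock);	/* once, at QP creation */
}

static int my_qp_post_recv(struct my_qp *qp)
{
	unsigned long irqflags;
	int ret = 0;

	/* Posting can race with completion/flush paths that may run in
	 * IRQ context, hence the irqsave variant on the post path. */
	spin_lock_irqsave(&qp->rq_lock, irqflags);
	if (qp->rq_used >= qp->rq_depth)
		ret = -ENOMEM;		/* ring full */
	else
		qp->rq_used++;		/* claim a slot, write the WQE */
	spin_unlock_irqrestore(&qp->rq_lock, irqflags);

	return ret;
}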
|
/linux/drivers/net/ethernet/intel/ice/

ice_controlq.c
   540  mutex_lock(&cq->rq_lock);      in ice_shutdown_rq()
   562  mutex_unlock(&cq->rq_lock);    in ice_shutdown_rq()
   789  mutex_init(&cq->rq_lock);      in ice_init_ctrlq_locks()
   827  mutex_destroy(&cq->rq_lock);   in ice_destroy_ctrlq_locks()
  1190  mutex_lock(&cq->rq_lock);      in ice_clean_rq_elem()
  1258  mutex_unlock(&cq->rq_lock);    in ice_clean_rq_elem()
|
ice_controlq.h
   101  struct mutex rq_lock;   /* Receive queue lock */   member
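
Unlike the RDMA drivers, the ice control queue protects its receive queue with a mutex: cleaning and shutting down the control RQ happens in process context and may sleep under the lock. A minimal sketch of that lifecycle, assuming invented names (my_ctrlq and its helpers), is:

/*
 * Sketch only: a mutex-protected control receive queue, mirroring the
 * ice_controlq hits above. my_ctrlq is illustrative.
 */
#include <linux/mutex.h>

struct my_ctrlq {
	struct mutex rq_lock;	/* serializes RQ init, cleaning and shutdown */
	bool rq_initialized;
};

static void my_ctrlq_init_locks(struct my_ctrlq *cq)
{
	mutex_init(&cq->rq_lock);
}

static void my_ctrlq_shutdown_rq(struct my_ctrlq *cq)
{
	mutex_lock(&cq->rq_lock);	/* may sleep: process context only */
	if (cq->rq_initialized) {
		/* free descriptors, rings, buffers ... */
		cq->rq_initialized = false;
	}
	mutex_unlock(&cq->rq_lock);
}

static void my_ctrlq_destroy_locks(struct my_ctrlq *cq)
{
	mutex_destroy(&cq->rq_lock);
}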
|
/linux/io_uring/

zcrx.h
    46  spinlock_t rq_lock ____cacheline_aligned_in_smp;
|
zcrx.c
   480  spin_lock_init(&ifq->rq_lock);       in io_zcrx_ifq_alloc()
   782  guard(spinlock_bh)(&ifq->rq_lock);   in io_zcrx_ring_refill()
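
Here the lock is cacheline-aligned because it sits on the refill fast path, and it is taken through the scope-based guard(spinlock_bh)() helper, so the BH-disabling lock is dropped automatically when the scope ends. A sketch of the same shape, assuming made-up names (my_ifq, refill_from_ring):

/*
 * Sketch only: a BH-disabling spinlock released by scope, as in the
 * io_uring zcrx refill path above.
 */
#include <linux/spinlock.h>
#include <linux/cleanup.h>
#include <linux/cache.h>

struct my_ifq {
	/* own cache line: the refill lock is hot and contended */
	spinlock_t rq_lock ____cacheline_aligned_in_smp;
	unsigned int rq_avail;
};

static unsigned int refill_from_ring(struct my_ifq *ifq, unsigned int want)
{
	/* _bh because the ring is also touched from softirq context;
	 * the lock is dropped automatically at function exit. */
	guard(spinlock_bh)(&ifq->rq_lock);

	if (want > ifq->rq_avail)
		want = ifq->rq_avail;
	ifq->rq_avail -= want;
	return want;
}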
|
/linux/Documentation/scheduler/

membarrier.rst
    16  rq_lock(); smp_mb__after_spinlock() in __schedule(). The barrier matches a full
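
This documentation hit refers to the scheduler upgrading the runqueue lock acquisition in __schedule() to a full memory barrier with smp_mb__after_spinlock(), which sys_membarrier() pairs with. A minimal sketch of that idiom follows; the surrounding function is invented for illustration.

/*
 * Sketch only: turning a lock acquire into a full barrier, as the
 * membarrier documentation describes for __schedule().
 */
#include <linux/spinlock.h>

static void context_switch_like_path(raw_spinlock_t *lock)
{
	raw_spin_lock(lock);		/* ACQUIRE ordering only */
	smp_mb__after_spinlock();	/* now a full barrier other CPUs can pair with */

	/* ... pick the next task, switch the mm, ... */

	raw_spin_unlock(lock);
}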
|
/linux/drivers/infiniband/hw/bnxt_re/

ib_verbs.h
    89  spinlock_t rq_lock;   /* protect rq */   member
|
ib_verbs.c
  1713  spin_lock_init(&qp->rq_lock);                  in bnxt_re_create_qp()
  3026  spin_lock_irqsave(&qp->rq_lock, flags);        in bnxt_re_post_recv()
  3069  spin_unlock_irqrestore(&qp->rq_lock, flags);   in bnxt_re_post_recv()
|
/linux/drivers/infiniband/sw/siw/

siw_verbs.c
   369  spin_lock_init(&qp->rq_lock);                  in siw_create_qp()
  1072  spin_lock_irqsave(&qp->rq_lock, flags);        in siw_post_receive()
  1100  spin_unlock_irqrestore(&qp->rq_lock, flags);   in siw_post_receive()
|
siw.h
   449  spinlock_t rq_lock;   member
|
/linux/kernel/sched/

sched.h
  1866  static inline void rq_lock(struct rq *rq, struct rq_flags *rf)   in rq_lock()   function
  1894  DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
  1895          rq_lock(_T->lock, &_T->rf),
  1916  rq_lock(rq, rf);   in this_rq_lock_irq()
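
In the core scheduler, rq_lock() is not a raw spinlock call but a helper that takes the per-runqueue lock and stashes per-acquisition state in a struct rq_flags cookie; DEFINE_LOCK_GUARD_1 then wraps that lock/unlock pair, together with the cookie, into a lock class usable with guard() and scoped_guard(). The sketch below reproduces that wiring with invented names (my_rq, my_rq_flags); it illustrates the cleanup.h guard machinery under those assumptions, not the scheduler's actual definitions.

/*
 * Sketch only: wrapping a lock helper that carries extra state into a
 * scope-based guard, the way kernel/sched/sched.h wraps rq_lock().
 */
#include <linux/spinlock.h>
#include <linux/cleanup.h>

struct my_rq_flags {
	unsigned long flags;		/* per-acquisition state rides here */
};

struct my_rq {
	raw_spinlock_t __lock;
};

static inline void my_rq_lock(struct my_rq *rq, struct my_rq_flags *rf)
{
	raw_spin_lock(&rq->__lock);
	rf->flags = 0;
}

static inline void my_rq_unlock(struct my_rq *rq, struct my_rq_flags *rf)
{
	raw_spin_unlock(&rq->__lock);
}

/* The trailing member declaration becomes part of the guard object, so
 * the rf cookie lives exactly as long as the lock is held. */
DEFINE_LOCK_GUARD_1(my_rq_lock, struct my_rq,
		    my_rq_lock(_T->lock, &_T->rf),
		    my_rq_unlock(_T->lock, &_T->rf),
		    struct my_rq_flags rf)

static void my_rq_users(struct my_rq *rq)
{
	/* Explicit pairing, the style seen in core.c, rt.c, fair.c, ext.c. */
	struct my_rq_flags rf;

	my_rq_lock(rq, &rf);
	/* ... touch runqueue state ... */
	my_rq_unlock(rq, &rf);

	/* Scope-based, the style seen in deadline.c's dl_server_timer(). */
	scoped_guard(my_rq_lock, rq) {
		/* unlocked automatically at the closing brace */
	}
}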
|
core.c
   878  rq_lock(rq, &rf);   in hrtick()
   902  rq_lock(rq, &rf);   in __hrtick_start()
  2475  rq_lock(rq, rf);    in move_queued_task()
  2549  rq_lock(rq, &rf);   in migration_cpu_stop()
  3967  rq_lock(rq, &rf);   in ttwu_queue()
  5588  rq_lock(rq, &rf);   in sched_tick()
  6833  rq_lock(rq, &rf);   in __schedule()
  8145  rq_lock(rq, &rf);   in __balance_push_cpu_stop()
|
deadline.c
  1160  scoped_guard (rq_lock, rq) {   in dl_server_timer()
  1736  rq_lock(rq, &rf);              in inactive_task_timer()
  2232  rq_lock(rq, &rf);              in migrate_task_rq_dl()
|
rt.c
   816  rq_lock(rq, &rf);   in do_sched_rt_period_timer()
|
fair.c
  6079  rq_lock(rq, &rf);            in __cfsb_csd_unthrottle()
  9692  rq_lock(rq, &rf);            in attach_one_task()
  9708  rq_lock(env->dst_rq, &rf);   in attach_tasks()
|
ext.c
  4279  rq_lock(rq, &rf);   in scx_dump_state()
|