
Searched refs:rq_lock (Results 1 – 18 of 18) sorted by relevance

/linux/drivers/infiniband/hw/ionic/
ionic_datapath.c
528 spin_lock(&qp->rq_lock); in ionic_poll_vcq_cq()
530 spin_unlock(&qp->rq_lock); in ionic_poll_vcq_cq()
618 spin_lock(&qp->rq_lock); in ionic_poll_vcq_cq()
620 spin_unlock(&qp->rq_lock); in ionic_poll_vcq_cq()
1323 spin_lock_irqsave(&qp->rq_lock, irqflags); in ionic_post_recv_common()
1341 spin_unlock_irqrestore(&qp->rq_lock, irqflags); in ionic_post_recv_common()
1344 spin_unlock_irqrestore(&qp->rq_lock, irqflags); in ionic_post_recv_common()
1347 spin_lock(&qp->rq_lock); in ionic_post_recv_common()
1366 spin_unlock(&qp->rq_lock); in ionic_post_recv_common()
ionic_ibdev.h
276 spinlock_t rq_lock; /* for posting and polling */ member
ionic_controlpath.c
2177 spin_lock_init(&qp->rq_lock); in ionic_create_qp()
2437 spin_lock(&qp->rq_lock); in ionic_flush_qp()
2443 spin_unlock(&qp->rq_lock); in ionic_flush_qp()
2509 spin_lock(&qp->rq_lock); in ionic_reset_qp()
2519 spin_unlock(&qp->rq_lock); in ionic_reset_qp()
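The ionic hits above show a pattern shared by the RDMA drivers in this list (bnxt_re and siw, further down, do the same): rq_lock is initialized at QP creation, taken with spin_lock_irqsave() on the post-receive path, and taken as a plain spin_lock() on poll and flush paths where the calling context is already known. A minimal sketch of that pattern, using hypothetical demo_* names rather than the ionic driver's actual structures:

    #include <linux/spinlock.h>

    /* Hypothetical QP mirroring the locking pattern above; demo_* names
     * are illustrative, not the ionic driver's actual structures. */
    struct demo_qp {
            spinlock_t rq_lock;     /* protects receive-queue posting and polling */
            unsigned int rq_prod;   /* stand-in for real receive-queue state */
    };

    static void demo_create_qp(struct demo_qp *qp)
    {
            spin_lock_init(&qp->rq_lock);           /* cf. ionic_create_qp() */
    }

    /* Post path: consumers may post with interrupts already disabled,
     * so the flags-saving lock variant is used. */
    static int demo_post_recv(struct demo_qp *qp)
    {
            unsigned long irqflags;

            spin_lock_irqsave(&qp->rq_lock, irqflags);
            qp->rq_prod++;          /* queue one receive work request */
            spin_unlock_irqrestore(&qp->rq_lock, irqflags);
            return 0;
    }

The irqsave variant on the post path is the conservative choice because verbs consumers can call post_recv from contexts where interrupts may already be off.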
/linux/drivers/net/ethernet/intel/ice/
ice_controlq.c
540 mutex_lock(&cq->rq_lock); in ice_shutdown_rq()
562 mutex_unlock(&cq->rq_lock); in ice_shutdown_rq()
789 mutex_init(&cq->rq_lock); in ice_init_ctrlq_locks()
827 mutex_destroy(&cq->rq_lock); in ice_destroy_ctrlq_locks()
1190 mutex_lock(&cq->rq_lock); in ice_clean_rq_elem()
1258 mutex_unlock(&cq->rq_lock); in ice_clean_rq_elem()
ice_controlq.h
101 struct mutex rq_lock; /* Receive queue lock */ member
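Unlike the RDMA drivers, ice guards its control-queue receive side with a mutex rather than a spinlock; control-queue servicing runs in process context, so sleeping under the lock is allowed. A sketch of the init/use/destroy lifecycle visible in the hits above (demo_* names are hypothetical):

    #include <linux/mutex.h>

    /* Hypothetical control queue; demo_* names are illustrative. */
    struct demo_ctrlq {
            struct mutex rq_lock;   /* receive queue lock */
    };

    static void demo_init_ctrlq_locks(struct demo_ctrlq *cq)
    {
            mutex_init(&cq->rq_lock);       /* cf. ice_init_ctrlq_locks() */
    }

    static void demo_clean_rq_elem(struct demo_ctrlq *cq)
    {
            mutex_lock(&cq->rq_lock);
            /* ... dequeue and process one received descriptor; may sleep ... */
            mutex_unlock(&cq->rq_lock);
    }

    static void demo_destroy_ctrlq_locks(struct demo_ctrlq *cq)
    {
            mutex_destroy(&cq->rq_lock);    /* cf. ice_destroy_ctrlq_locks() */
    }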
/linux/io_uring/
zcrx.h
46 spinlock_t rq_lock ____cacheline_aligned_in_smp;
zcrx.c
480 spin_lock_init(&ifq->rq_lock); in io_zcrx_ifq_alloc()
782 guard(spinlock_bh)(&ifq->rq_lock); in io_zcrx_ring_refill()
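The zcrx hits show two details worth noting: the spinlock is annotated ____cacheline_aligned_in_smp so it sits on its own cache line, and io_zcrx_ring_refill() takes it through the scope-based guard(spinlock_bh)() from <linux/cleanup.h>, which pairs spin_lock_bh()/spin_unlock_bh() with the enclosing scope automatically. A sketch under those assumptions (demo_* names hypothetical):

    #include <linux/spinlock.h>
    #include <linux/cleanup.h>
    #include <linux/cache.h>

    /* Hypothetical interface queue; demo_* names are illustrative. */
    struct demo_ifq {
            /* own cache line on SMP, as in zcrx.h, to avoid false sharing */
            spinlock_t rq_lock ____cacheline_aligned_in_smp;
            unsigned int cached_rq_head;
    };

    static void demo_ring_refill(struct demo_ifq *ifq)
    {
            /* spin_lock_bh() here; spin_unlock_bh() runs automatically
             * when the guard goes out of scope at function exit. */
            guard(spinlock_bh)(&ifq->rq_lock);

            ifq->cached_rq_head++;  /* stand-in for consuming refill entries */
    }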
/linux/Documentation/scheduler/
membarrier.rst
16 rq_lock(); smp_mb__after_spinlock() in __schedule(). The barrier matches a full
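The membarrier.rst line refers to the full memory barrier that __schedule() establishes: acquiring the runqueue lock is only an acquire barrier, so smp_mb__after_spinlock() promotes it to a full barrier for sys_membarrier() to pair with. A hypothetical illustration of that pairing (demo_* name is illustrative, not the scheduler's code):

    #include <linux/spinlock.h>

    /* Lock acquisition alone orders only as an acquire; issuing
     * smp_mb__after_spinlock() right after it upgrades the sequence
     * to a full memory barrier, as __schedule() does. */
    static void demo_schedule_prologue(raw_spinlock_t *lock)
    {
            raw_spin_lock(lock);
            smp_mb__after_spinlock();       /* full barrier for membarrier to match */
            /* ... task-switch bookkeeping ordered after the full barrier ... */
            raw_spin_unlock(lock);
    }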
/linux/drivers/infiniband/hw/bnxt_re/
ib_verbs.h
89 spinlock_t rq_lock; /* protect rq */ member
ib_verbs.c
1713 spin_lock_init(&qp->rq_lock); in bnxt_re_create_qp()
3026 spin_lock_irqsave(&qp->rq_lock, flags); in bnxt_re_post_recv()
3069 spin_unlock_irqrestore(&qp->rq_lock, flags); in bnxt_re_post_recv()
/linux/drivers/infiniband/sw/siw/
siw_verbs.c
369 spin_lock_init(&qp->rq_lock); in siw_create_qp()
1072 spin_lock_irqsave(&qp->rq_lock, flags); in siw_post_receive()
1100 spin_unlock_irqrestore(&qp->rq_lock, flags); in siw_post_receive()
siw.h
449 spinlock_t rq_lock; member
/linux/kernel/sched/
sched.h
1866 static inline void rq_lock(struct rq *rq, struct rq_flags *rf) in rq_lock() function
1894 DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
1895 rq_lock(_T->lock, &_T->rf),
1916 rq_lock(rq, rf); in this_rq_lock_irq()
core.c
878 rq_lock(rq, &rf); in hrtick()
902 rq_lock(rq, &rf); in __hrtick_start()
2475 rq_lock(rq, rf); in move_queued_task()
2549 rq_lock(rq, &rf); in migration_cpu_stop()
3967 rq_lock(rq, &rf); in ttwu_queue()
5588 rq_lock(rq, &rf); in sched_tick()
6833 rq_lock(rq, &rf); in __schedule()
8145 rq_lock(rq, &rf); in __balance_push_cpu_stop()
deadline.c
1160 scoped_guard (rq_lock, rq) { in dl_server_timer()
1736 rq_lock(rq, &rf); in inactive_task_timer()
2232 rq_lock(rq, &rf); in migrate_task_rq_dl()
rt.c
816 rq_lock(rq, &rf); in do_sched_rt_period_timer()
fair.c
6079 rq_lock(rq, &rf); in __cfsb_csd_unthrottle()
9692 rq_lock(rq, &rf); in attach_one_task()
9708 rq_lock(env->dst_rq, &rf); in attach_tasks()
ext.c
4279 rq_lock(rq, &rf); in scx_dump_state()
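The scheduler defines rq_lock() as an inline function over the runqueue's raw spinlock and then wraps it with DEFINE_LOCK_GUARD_1 (the sched.h hits at 1894-1895 above), which is what lets deadline.c write scoped_guard(rq_lock, rq) { ... }. A self-contained sketch of that guard machinery with stand-in demo_* types (the real struct rq and struct rq_flags live in kernel/sched/sched.h):

    #include <linux/spinlock.h>
    #include <linux/cleanup.h>

    /* Simplified stand-ins; demo_* names are illustrative. */
    struct demo_rq {
            raw_spinlock_t __lock;
    };

    struct demo_rq_flags {
            unsigned long flags;
    };

    static inline void demo_rq_lock(struct demo_rq *rq, struct demo_rq_flags *rf)
    {
            raw_spin_lock(&rq->__lock);
    }

    static inline void demo_rq_unlock(struct demo_rq *rq, struct demo_rq_flags *rf)
    {
            raw_spin_unlock(&rq->__lock);
    }

    /* Generates a guard class whose instance carries the extra rq_flags
     * member, the same shape sched.h gives the real rq_lock guard. */
    DEFINE_LOCK_GUARD_1(demo_rq_lock, struct demo_rq,
                        demo_rq_lock(_T->lock, &_T->rf),
                        demo_rq_unlock(_T->lock, &_T->rf),
                        struct demo_rq_flags rf)

    static void demo_timer_handler(struct demo_rq *rq)
    {
            scoped_guard(demo_rq_lock, rq) {
                    /* runqueue locked here; unlocked automatically on exit */
            }
    }

The guard form and the explicit rq_lock(rq, &rf)/rq_unlock(rq, &rf) calls seen in core.c, rt.c, fair.c, and ext.c are interchangeable; scoped_guard simply ties the unlock to scope exit.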