// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func)	.held = func,
#else
#define __INIT_HELD(func)
#endif

static const struct {
	void (*sync)(void);
	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
	void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
	int  (*held)(void);
#endif
} gp_ops[] = {
	[RCU_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_held)
	},
	[RCU_SCHED_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_sched_held)
	},
	[RCU_BH_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_bh_held)
	},
};

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };

#define	rss_lock	gp_wait.lock

#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
			 "suspicious rcu_sync_is_idle() usage");
}
EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 * @type: Flavor of RCU with which to synchronize rcu_sync structure
 */
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
	rsp->gp_type = type;
}
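
/*
 * Illustrative reader-side sketch, not part of the original file: how a
 * reader might use rcu_sync_is_idle() (a static inline from
 * <linux/rcu_sync.h>) to choose between its paths.  example_rss,
 * example_fast_read() and example_slow_read() are hypothetical names.
 *
 *	rcu_read_lock();
 *	if (rcu_sync_is_idle(&example_rss))
 *		example_fast_read();	// no updater is active
 *	else
 *		example_slow_read();	// an updater forced readers off the fastpath
 *	rcu_read_unlock();
 */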
/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period, but closely spaced calls to rcu_sync_enter() can optimize
 * away the grace-period wait via a state machine implemented by
 * rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	bool need_wait, need_sync;

	spin_lock_irq(&rsp->rss_lock);
	need_wait = rsp->gp_count++;
	need_sync = rsp->gp_state == GP_IDLE;
	if (need_sync)
		rsp->gp_state = GP_PENDING;
	spin_unlock_irq(&rsp->rss_lock);

	WARN_ON_ONCE(need_wait && need_sync);
	if (need_sync) {
		gp_ops[rsp->gp_type].sync();
		rsp->gp_state = GP_PASSED;
		wake_up_all(&rsp->gp_wait);
	} else if (need_wait) {
		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
	} else {
		/*
		 * Possible when there's a pending CB from a rcu_sync_exit().
		 * Nobody has yet been allowed the 'fast' path and thus we can
		 * avoid doing any sync().  The callback will get 'dropped'.
		 */
		WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	}
}
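
/*
 * A reading aid, not part of the original file: an illustrative timeline
 * of two closely spaced updaters, showing how only the first one pays
 * for a grace period.
 *
 *	CPU 0: rcu_sync_enter()		CPU 1: rcu_sync_enter()
 *	  gp_count 0->1, GP_IDLE,
 *	  so need_sync; GP_PENDING
 *	  gp_ops[...].sync()		  gp_count 1->2, GP_PENDING,
 *					  so need_wait
 *	  gp_state = GP_PASSED		  wait_event(gp_state == GP_PASSED)
 *	  wake_up_all(&gp_wait)  --->	  wait_event() returns
 */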
/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to one of the call_rcu() functions by
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_exit().  It takes action based on events
 * that have taken place in the meantime, so that closely spaced
 * rcu_sync_enter() and rcu_sync_exit() pairs need not wait for a grace
 * period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	WARN_ON_ONCE(rsp->cb_state == CB_IDLE);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * A new rcu_sync_enter() has happened; drop the callback.
		 */
		rsp->cb_state = CB_IDLE;
	} else if (rsp->cb_state == CB_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback
		 * to catch a later GP.
		 */
		rsp->cb_state = CB_PENDING;
		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
	} else {
		/*
		 * We're at least a GP after rcu_sync_exit(); everybody will
		 * now have observed the write-side critical section.  Let
		 * 'em rip!
		 */
		rsp->cb_state = CB_IDLE;
		rsp->gp_state = GP_IDLE;
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
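
/*
 * A reading aid, not part of the original file: summary of the cb_state
 * transitions driven by rcu_sync_func() above and rcu_sync_exit() below.
 *
 *	CB_IDLE    --rcu_sync_exit()-->  CB_PENDING  (callback queued)
 *	CB_PENDING --rcu_sync_exit()-->  CB_REPLAY   (must catch a later GP)
 *	CB_REPLAY  --rcu_sync_func()-->  CB_PENDING  (callback requeued)
 *	CB_PENDING --rcu_sync_func()-->  CB_IDLE     (GP_IDLE too, unless a
 *						      new writer arrived, in
 *						      which case the callback
 *						      is simply dropped)
 */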
/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->cb_state == CB_IDLE) {
			rsp->cb_state = CB_PENDING;
			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
		} else if (rsp->cb_state == CB_PENDING) {
			rsp->cb_state = CB_REPLAY;
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int cb_state;

	WARN_ON_ONCE(rsp->gp_count);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->cb_state == CB_REPLAY)
		rsp->cb_state = CB_PENDING;
	cb_state = rsp->cb_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (cb_state != CB_IDLE) {
		gp_ops[rsp->gp_type].wait();
		WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
	}
}
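
/*
 * End-to-end lifecycle sketch, not part of the original file.  example_rss
 * is a hypothetical name; the calls themselves are the real API defined
 * above.  In-tree, percpu_rw_semaphore drives this sequence from its
 * write side.
 *
 *	static struct rcu_sync example_rss;
 *
 *	rcu_sync_init(&example_rss, RCU_SYNC);	// once, before first use
 *
 *	rcu_sync_enter(&example_rss);		// readers now take the slowpath
 *	// ... write-side critical section ...
 *	rcu_sync_exit(&example_rss);		// fastpath resumes after a GP
 *
 *	rcu_sync_dtor(&example_rss);		// flush any pending callback
 */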