Lines Matching +full:spin +full:- +full:table

35 .. table:: Expected Results
37 +------------------------------------+------------------------------------+
41 +------------------------------------+------------------------------------+
43 +------------------------------------+------------------------------------+
45 +------------------------------------+------------------------------------+
47 +------------------------------------+------------------------------------+
49 +------------------------------------+------------------------------------+
51 +------------------------------------+------------------------------------+
55 .. table:: Possible Results
57 +------------------------------------+------------------------------------+
61 +------------------------------------+------------------------------------+
63 +------------------------------------+------------------------------------+
65 +------------------------------------+------------------------------------+
67 +------------------------------------+------------------------------------+
69 +------------------------------------+------------------------------------+
71 +------------------------------------+------------------------------------+
75 ------------------------------------
108 -----------------------------------------------------
112 single-holder lock: if you can't get the spinlock, you keep trying
121 `What Functions Are Safe To Call From Interrupts? <#sleeping-things>`__),
128 ------------------------------
132 design decision: when no-one else can run at the same time, there is no
148 ----------------------------
160 nf_register_sockopt(). Registration and de-registration
168 -----------------------------------------
182 as well: see `Hard IRQ Context <#hard-irq-context>`__.
184 This works perfectly for UP as well: the spin lock vanishes, and this
190 -----------------------------------------
196 ---------------------------------------
203 -------------------------------
225 ------------------------
232 The same softirq can run on the other CPUs: you can use a per-CPU array
233 (see `Per-CPU Data <#per-cpu-data>`__) for better performance. If you're
256 ----------------------------------------------
272 This works perfectly for UP as well: the spin lock vanishes, and this
290 -------------------------------------
294 architecture-specific whether all interrupts are disabled inside irq
302 - If you are in a process context (any syscall) and want to lock other
306 - Otherwise (== data can be touched in an interrupt), use
310 - Avoid holding spinlock for more than 5 lines of code and across any
313 Table of Minimum Requirements
314 -----------------------------
316 The following table lists the **minimum** locking requirements between
341 Table: Table of Locking Requirements
343 +--------+----------------------------+
345 +--------+----------------------------+
347 +--------+----------------------------+
349 +--------+----------------------------+
351 +--------+----------------------------+
353 +--------+----------------------------+
355 Table: Legend for Locking Requirements Table
366 spin_trylock() does not spin but returns non-zero if it
369 disabled the contexts that might interrupt you and acquire the spin
373 non-zero if it could lock the mutex on the first try or 0 if not. This
385 -------------------
417 if (i->id == id) {
418 i->popularity++;
428 list_del(&obj->list);
430 cache_num--;
436 list_add(&obj->list, &cache);
440 if (!outcast || i->popularity < outcast->popularity)
452 return -ENOMEM;
454 strscpy(obj->name, name, sizeof(obj->name));
455 obj->id = id;
456 obj->popularity = 0;
474 int ret = -ENOENT;
480 strcpy(name, obj->name);
494 grabbing the lock. This is safe, as no-one else can access it until we
498 --------------------------------
504 The change is shown below, in standard patch format: the ``-`` are lines
509 --- cache.c.usercontext 2003-12-09 13:58:54.000000000 +1100
510 +++ cache.c.interrupt 2003-12-09 14:07:49.000000000 +1100
511 @@ -12,7 +12,7 @@
515 -static DEFINE_MUTEX(cache_lock);
520 @@ -55,6 +55,7 @@
527 return -ENOMEM;
528 @@ -63,30 +64,33 @@
529 obj->id = id;
530 obj->popularity = 0;
532 - mutex_lock(&cache_lock);
535 - mutex_unlock(&cache_lock);
542 - mutex_lock(&cache_lock);
547 - mutex_unlock(&cache_lock);
554 int ret = -ENOENT;
557 - mutex_lock(&cache_lock);
562 strcpy(name, obj->name);
564 - mutex_unlock(&cache_lock);
581 ----------------------------------
589 we'd need to make this non-static so the rest of the code can use it.
596 worse, add another object, re-using the same address.
598 As there is only one lock, you can't hold it forever: no-one else would
608 --- cache.c.interrupt 2003-12-09 14:25:43.000000000 +1100
609 +++ cache.c.refcnt 2003-12-09 14:33:05.000000000 +1100
610 @@ -7,6 +7,7 @@
618 @@ -17,6 +18,35 @@
624 + if (--obj->refcnt == 0)
630 + obj->refcnt++;
654 @@ -35,6 +65,7 @@
657 list_del(&obj->list);
659 cache_num--;
662 @@ -63,6 +94,7 @@
663 strscpy(obj->name, name, sizeof(obj->name));
664 obj->id = id;
665 obj->popularity = 0;
666 + obj->refcnt = 1; /* The cache holds a reference */
670 @@ -79,18 +111,15 @@
674 -int cache_find(int id, char *name)
678 - int ret = -ENOENT;
683 - if (obj) {
684 - ret = 0;
685 - strcpy(name, obj->name);
686 - }
690 - return ret;
712 although for anything non-trivial using spinlocks is clearer. The
719 --- cache.c.refcnt 2003-12-09 15:00:35.000000000 +1100
720 +++ cache.c.refcnt-atomic 2003-12-11 15:49:42.000000000 +1100
721 @@ -7,7 +7,7 @@
725 - unsigned int refcnt;
730 @@ -18,33 +18,15 @@
734 -static void __object_put(struct object *obj)
735 -{
736 - if (--obj->refcnt == 0)
737 - kfree(obj);
738 -}
739 -
740 -static void __object_get(struct object *obj)
741 -{
742 - obj->refcnt++;
743 -}
744 -
747 - unsigned long flags;
748 -
749 - spin_lock_irqsave(&cache_lock, flags);
750 - __object_put(obj);
751 - spin_unlock_irqrestore(&cache_lock, flags);
752 + if (atomic_dec_and_test(&obj->refcnt))
758 - unsigned long flags;
759 -
760 - spin_lock_irqsave(&cache_lock, flags);
761 - __object_get(obj);
762 - spin_unlock_irqrestore(&cache_lock, flags);
763 + atomic_inc(&obj->refcnt);
767 @@ -65,7 +47,7 @@
770 list_del(&obj->list);
771 - __object_put(obj);
773 cache_num--;
776 @@ -94,7 +76,7 @@
777 strscpy(obj->name, name, sizeof(obj->name));
778 obj->id = id;
779 obj->popularity = 0;
780 - obj->refcnt = 1; /* The cache holds a reference */
781 + atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
785 @@ -119,7 +101,7 @@
789 - __object_get(obj);
796 ---------------------------------
802 - You can make ``cache_lock`` non-static, and tell people to grab that
805 - You can provide a cache_obj_rename() which grabs this
809 - You can make the ``cache_lock`` protect only the cache itself, and
812 Theoretically, you can make the locks as fine-grained as one lock for
816 - One lock which protects the infrastructure (the ``cache`` list in
819 - One lock which protects the infrastructure (including the list
823 - Multiple locks to protect the infrastructure (eg. one lock per hash
824 chain), possibly with a separate per-object lock.
826 Here is the "lock-per-object" implementation:
830 --- cache.c.refcnt-atomic 2003-12-11 15:50:54.000000000 +1100
831 +++ cache.c.perobjectlock 2003-12-11 17:15:03.000000000 +1100
832 @@ -6,11 +6,17 @@
847 - int popularity;
851 @@ -77,6 +84,7 @@
852 obj->id = id;
853 obj->popularity = 0;
854 atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
855 + spin_lock_init(&obj->lock);
861 ``cache_lock`` rather than the per-object lock: this is because it (like
881 -----------------------------
884 twice: it will spin forever, waiting for the lock to be released
887 stay-up-five-nights-talk-to-fluffy-code-bunnies kind of problem.
892 by the softirq while it holds the lock, and the softirq will then spin
905 A more complex problem is the so-called 'deadly embrace', involving two
906 or more locks. Say you have a hash table: each entry in the table is a
919 +-----------------------+-----------------------+
922 | Grab lock A -> OK | Grab lock B -> OK |
923 +-----------------------+-----------------------+
924 | Grab lock B -> spin | Grab lock A -> spin |
925 +-----------------------+-----------------------+
927 Table: Consequences
929 The two CPUs will spin forever, waiting for the other to give up their
933 -------------------
942 are never held around calls to non-trivial functions outside the same
964 -------------------------------
978 struct foo *next = list->next;
979 del_timer(&list->timer);
1001 struct foo *next = list->next;
1002 if (!del_timer(&list->timer)) {
1038 the last one to grab the lock (ie. is the lock cache-hot for this CPU):
1041 increment takes about 58ns, a lock which is cache-hot on this CPU takes
1047 by splitting locks into parts (such as in our final per-object-lock
1056 ------------------------
1071 --------------------------------
1084 new->next = list->next;
1086 list->next = new;
1108 list->next = old->next;
1117 don't realize that the pre-fetched contents is wrong when the ``next``
1131 destroy the object once all pre-existing readers are finished.
1133 until all pre-existing are finished.
1149 --- cache.c.perobjectlock 2003-12-11 17:15:03.000000000 +1100
1150 +++ cache.c.rcupdate 2003-12-11 17:55:14.000000000 +1100
1151 @@ -1,15 +1,18 @@
1161 - /* These two protected by cache_lock. */
1171 @@ -40,7 +43,7 @@
1175 - list_for_each_entry(i, &cache, list) {
1177 if (i->id == id) {
1178 i->popularity++;
1180 @@ -49,19 +52,25 @@
1194 - list_del(&obj->list);
1195 - object_put(obj);
1196 + list_del_rcu(&obj->list);
1197 cache_num--;
1198 + call_rcu(&obj->rcu, cache_delete_rcu);
1204 - list_add(&obj->list, &cache);
1205 + list_add_rcu(&obj->list, &cache);
1209 @@ -104,12 +114,11 @@
1213 - unsigned long flags;
1215 - spin_lock_irqsave(&cache_lock, flags);
1220 - spin_unlock_irqrestore(&cache_lock, flags);
1245 __cache_find() by making it non-static, and such
1252 Per-CPU Data
1253 ------------
1257 count of a common condition, you could use a spin lock and a single
1266 Of particular use for simple per-cpu counters is the ``local_t`` type,
1276 ----------------------------------------
1308 --------------------------
1316 - Accesses to userspace:
1318 - copy_from_user()
1320 - copy_to_user()
1322 - get_user()
1324 - put_user()
1326    -  kmalloc(GFP_KERNEL) <kmalloc>`
1328 - mutex_lock_interruptible() and
1338 --------------------------------
1343 - printk()
1345 - kfree()
1347 - add_timer() and del_timer()
1352 .. kernel-doc:: include/linux/mutex.h
1355 .. kernel-doc:: kernel/locking/mutex.c
1361 .. kernel-doc:: kernel/futex.c
1367 - ``Documentation/locking/spinlocks.rst``: Linus Torvalds' spinlocking
1370 - Unix Systems for Modern Architectures: Symmetric Multiprocessing and
1417 Symmetric Multi-Processor: kernels compiled for multiple-CPU machines.
1430 A dynamically-registrable software interrupt, which is guaranteed to
1434 A dynamically-registrable software interrupt, which is run at (or close
1439 Uni-Processor: Non-SMP. (``CONFIG_SMP=n``).