| /linux/include/linux/ |
| rbtree.h |
    110  bool leftmost) in rb_insert_color_cached() argument
    112  if (leftmost) in rb_insert_color_cached()
    121  struct rb_node *leftmost = NULL; in rb_erase_cached() local
    124  leftmost = root->rb_leftmost = rb_next(node); in rb_erase_cached()
    128  return leftmost; in rb_erase_cached()
    170  bool leftmost = true; in rb_add_cached() local
    178  leftmost = false; in rb_add_cached()
    183  rb_insert_color_cached(node, tree, leftmost); in rb_add_cached()
    185  return leftmost ? node : NULL; in rb_add_cached()
    226  bool leftmost = true; in rb_find_add_cached() local
    [all …]
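
All of these hits follow one pattern: the caller walks down from the root, remembering whether it ever descended to the right; if it never did, the new node is the leftmost, and rb_insert_color_cached() updates the cached pointer while rebalancing. A minimal kernel-style sketch of that insertion idiom (struct mynode and its key field are hypothetical, not part of rbtree.h):

    #include <linux/rbtree.h>

    struct mynode {
        struct rb_node rb;      /* embedded rbtree linkage */
        unsigned long key;      /* hypothetical sort key */
    };

    static void mynode_insert(struct mynode *new, struct rb_root_cached *root)
    {
        struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        /* Standard descent: track the parent and which child link to use. */
        while (*link) {
            struct mynode *cur = rb_entry(*link, struct mynode, rb);

            parent = *link;
            if (new->key < cur->key) {
                link = &parent->rb_left;
            } else {
                link = &parent->rb_right;
                leftmost = false;       /* went right at least once */
            }
        }

        rb_link_node(&new->rb, parent, link);
        /* Rebalance and, if we never went right, update rb_leftmost. */
        rb_insert_color_cached(&new->rb, root, leftmost);
    }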
|
| interval_tree_generic.h |
    44   bool leftmost = true; \
    55   leftmost = false; \
    62   leftmost, &ITPREFIX ## _augment); \
    118  ITSTRUCT *node, *leftmost; \
    140  leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \
    141  if (ITSTART(leftmost) > last) \
|
| timerqueue.h |
    25   struct rb_node *leftmost = rb_first_cached(&head->rb_root); in timerqueue_getnext() local
    27   return rb_entry_safe(leftmost, struct timerqueue_node, node); in timerqueue_getnext()
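
timerqueue_getnext() is the payoff of the caching: the minimum element is an O(1) pointer read via rb_first_cached(). The same idiom, sketched against the hypothetical struct mynode above:

    /* Return the smallest-keyed node, or NULL if the tree is empty. */
    static struct mynode *mynode_first(struct rb_root_cached *root)
    {
        return rb_entry_safe(rb_first_cached(root), struct mynode, rb);
    }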
|
| rbtree_augmented.h |
    70   bool leftmost = true; in rb_add_augmented_cached() local
    78   leftmost = false; in rb_add_augmented_cached()
    84   rb_insert_augmented_cached(node, tree, leftmost, augment); in rb_add_augmented_cached()
    86   return leftmost ? node : NULL; in rb_add_augmented_cached()
|
| /linux/tools/perf/util/ |
| rblist.c |
    17   bool leftmost = true; in rblist__add_node() local
    29   leftmost = false; in rblist__add_node()
    40   rb_insert_color_cached(new_node, &rblist->entries, leftmost); in rblist__add_node()
    59   bool leftmost = true; in __rblist__findnew() local
    71   leftmost = false; in __rblist__findnew()
    82   &rblist->entries, leftmost); in __rblist__findnew()
|
| srcline.c |
    268  bool leftmost = true; in srcline__tree_insert() local
    286  leftmost = false; in srcline__tree_insert()
    290  rb_insert_color_cached(&node->rb_node, tree, leftmost); in srcline__tree_insert()
    361  bool leftmost = true; in inlines__tree_insert() local
    370  leftmost = false; in inlines__tree_insert()
    374  rb_insert_color_cached(&inlines->rb_node, tree, leftmost); in inlines__tree_insert()
|
| hist.c |
    692   bool leftmost = true; in hists__findnew_entry() local
    741   leftmost = false; in hists__findnew_entry()
    754   rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost); in hists__findnew_entry()
    1636  bool leftmost = true; in hierarchy_insert_entry() local
    1652  leftmost = false; in hierarchy_insert_entry()
    1692  rb_insert_color_cached(&new->rb_node_in, root, leftmost); in hierarchy_insert_entry()
    1756  bool leftmost = true; in hists__collapse_insert_entry() local
    1794  leftmost = false; in hists__collapse_insert_entry()
    1800  rb_insert_color_cached(&he->rb_node_in, root, leftmost); in hists__collapse_insert_entry()
    1939  bool leftmost = true; in hierarchy_insert_output_entry() local
    [all …]
|
| /linux/tools/include/linux/ |
| interval_tree_generic.h |
    44   bool leftmost = true; \
    55   leftmost = false; \
    62   leftmost, &ITPREFIX ## _augment); \
    122  ITSTRUCT *node, *leftmost; \
    144  leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \
    145  if (ITSTART(leftmost) > last) \
|
| rbtree.h |
    131  bool leftmost) in rb_insert_color_cached() argument
    133  if (leftmost) in rb_insert_color_cached()
    183  bool leftmost = true; in rb_add_cached() local
    191  leftmost = false; in rb_add_cached()
    196  rb_insert_color_cached(node, tree, leftmost); in rb_add_cached()
|
| /linux/kernel/locking/ |
| rtmutex_common.h |
    124  struct rb_node *leftmost = rb_first_cached(&lock->waiters); in rt_mutex_waiter_is_top_waiter() local
    126  return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter; in rt_mutex_waiter_is_top_waiter()
    131  struct rb_node *leftmost = rb_first_cached(&lock->waiters); in rt_mutex_top_waiter() local
    136  if (leftmost) { in rt_mutex_top_waiter()
    137  w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry); in rt_mutex_top_waiter()
|
| /linux/fs/f2fs/ |
| extent_cache.c |
    223  bool *leftmost) in __lookup_extent_node_ret() argument
    240  *leftmost = true; in __lookup_extent_node_ret()
    250  *leftmost = false; in __lookup_extent_node_ret()
    293  bool leftmost) in __attach_extent_node() argument
    307  rb_insert_color_cached(&en->rb_node, &et->root, leftmost); in __attach_extent_node()
    581  bool leftmost) in __insert_extent_tree() argument
    594  leftmost = true; in __insert_extent_tree()
    605  leftmost = false; in __insert_extent_tree()
    618  en = __attach_extent_node(sbi, et, ei, parent, p, leftmost); in __insert_extent_tree()
    668  bool leftmost = false; in __update_extent_tree_range() local
    [all …]
|
| /linux/lib/ |
| rbtree_test.c |
    54   bool leftmost = true; in insert_cached() local
    62   leftmost = false; in insert_cached()
    67   rb_insert_color_cached(&node->rb, root, leftmost); in insert_cached()
    117  bool leftmost = true; in insert_augmented_cached() local
    128  leftmost = false; in insert_augmented_cached()
    135  leftmost, &augment_callbacks); in insert_augmented_cached()
|
| /linux/kernel/bpf/ |
| range_tree.c |
    91   bool leftmost = true; in __range_size_insert() local
    99   leftmost = false; in __range_size_insert()
    104  rb_insert_color_cached(&rn->rb_range_size, root, leftmost); in __range_size_insert()
|
| /linux/drivers/gpu/drm/ |
| drm_mm.c |
    170  bool leftmost; in drm_mm_interval_tree_add_node() local
    187  leftmost = false; in drm_mm_interval_tree_add_node()
    191  leftmost = true; in drm_mm_interval_tree_add_node()
    203  leftmost = false; in drm_mm_interval_tree_add_node()
    208  rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, in drm_mm_interval_tree_add_node()
|
| /linux/Documentation/translations/zh_TW/dev-tools/ |
| gdb-kernel-debugging.rst |
    126  (gdb) set $leftmost = $lx_per_cpu(hrtimer_bases).clock_base[0].active.rb_root.rb_leftmost
    127  (gdb) p *$container_of($leftmost, "struct hrtimer", "node")
|
| /linux/Documentation/translations/zh_CN/dev-tools/ |
| gdb-kernel-debugging.rst |
    130  (gdb) set $leftmost = $lx_per_cpu(hrtimer_bases).clock_base[0].active.rb_root.rb_leftmost
    131  (gdb) p *$container_of($leftmost, "struct hrtimer", "node")
|
| /linux/Documentation/userspace-api/media/v4l/ |
| pixfmt-intro.rst |
    29   leftmost pixel of the topmost row. Following that is the pixel
    34   leftmost pixel of the second row from the top, and so on. The last row
|
| pixfmt-v4l2-mplane.rst |
    43   - Distance in bytes between the leftmost pixels in two adjacent
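
Both pixfmt excerpts describe the same memory layout rule: rows are stored top to bottom, pixels left to right, and bytesperline is the stride between the leftmost pixels of two adjacent rows. A hedged helper showing the resulting address arithmetic (the function itself is illustrative; only the bytesperline notion comes from the V4L2 docs):

    #include <stddef.h>

    /*
     * Byte offset of pixel (x, y) in a single-plane image, assuming a
     * packed format with a whole number of bytes per pixel (e.g. 4 for
     * XRGB32). Rows may be padded, which is why bytesperline can be
     * larger than width * bytes_per_pixel.
     */
    static size_t pixel_offset(unsigned int x, unsigned int y,
                               size_t bytesperline, size_t bytes_per_pixel)
    {
        return (size_t)y * bytesperline + (size_t)x * bytes_per_pixel;
    }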
|
| /linux/net/sched/ |
| sch_etf.c |
    168  bool leftmost = true; in etf_enqueue_timesortedlist() local
    183  leftmost = false; in etf_enqueue_timesortedlist()
    189  rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost); in etf_enqueue_timesortedlist()
|
| /linux/Documentation/scheduler/ |
| sched-design-CFS.rst |
    76   p->se.vruntime key. CFS picks the "leftmost" task from this tree and sticks to it.
    79   to become the "leftmost task" and thus get on the CPU within a deterministic
    86   becomes the "leftmost task" of the time-ordered rbtree it maintains (plus a
    87   small amount of "granularity" distance relative to the leftmost task so that we
    88   do not over-schedule tasks and trash the cache), then the new leftmost task is
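
The CFS excerpt is the design behind several of the code hits above: entities are keyed by vruntime, and the next task to run is the cached leftmost. A much-simplified, hypothetical sketch of that ordering (the real code lives in kernel/sched/fair.c and differs in many details):

    struct entity_sketch {
        struct rb_node run_node;
        unsigned long long vruntime;    /* virtual runtime, the sort key */
    };

    static bool entity_sketch_less(struct rb_node *a, const struct rb_node *b)
    {
        return rb_entry(a, struct entity_sketch, run_node)->vruntime <
               rb_entry(b, struct entity_sketch, run_node)->vruntime;
    }

    /* Picking the next task is an O(1) read of the cached leftmost node. */
    static struct entity_sketch *pick_next_sketch(struct rb_root_cached *timeline)
    {
        struct rb_node *left = rb_first_cached(timeline);

        return left ? rb_entry(left, struct entity_sketch, run_node) : NULL;
    }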
|
| /linux/kernel/sched/ |
| deadline.c |
    589   struct rb_node *leftmost; in enqueue_pushable_dl_task() local
    593   leftmost = rb_add_cached(&p->pushable_dl_tasks, in enqueue_pushable_dl_task()
    596   if (leftmost) in enqueue_pushable_dl_task()
    609   struct rb_node *leftmost; in dequeue_pushable_dl_task() local
    614   leftmost = rb_erase_cached(&p->pushable_dl_tasks, root); in dequeue_pushable_dl_task()
    615   if (leftmost) in dequeue_pushable_dl_task()
    616   dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline; in dequeue_pushable_dl_task()
    1817  struct rb_node *leftmost = rb_first_cached(&dl_rq->root); in dec_dl_deadline() local
    1818  struct sched_dl_entity *entry = __node_2_dle(leftmost); in dec_dl_deadline()
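
The deadline.c hits show a second use of the cached leftmost: rb_add_cached() and rb_erase_cached() return the (new) leftmost node, so the caller can refresh a derived "earliest" value only when it actually changed. A hedged sketch with the hypothetical struct mynode from above:

    static bool mynode_less(struct rb_node *a, const struct rb_node *b)
    {
        return rb_entry(a, struct mynode, rb)->key <
               rb_entry(b, struct mynode, rb)->key;
    }

    static void mynode_add(struct mynode *node, struct rb_root_cached *root,
                           unsigned long *cached_min_key)
    {
        struct rb_node *leftmost = rb_add_cached(&node->rb, root, mynode_less);

        /* Non-NULL return means @node became the leftmost: refresh cache. */
        if (leftmost)
            *cached_min_key = node->key;
    }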
|
| /linux/Documentation/process/debugging/ |
| gdb-kernel-debugging.rst |
    137  (gdb) set $leftmost = $lx_per_cpu(hrtimer_bases).clock_base[0].active.rb_root.rb_leftmost
    138  (gdb) p *$container_of($leftmost, "struct hrtimer", "node")
|
| /linux/Documentation/translations/zh_CN/core-api/ |
| rbtree.rst |
    281  * Iterate to find the leftmost such node N.
    293  return node; /* node is leftmost match */
|
| /linux/Documentation/core-api/ |
| rbtree.rst |
    199  Computing the leftmost (smallest) node is quite a common task for binary
    212  leftmost node. This allows rb_root_cached to exist wherever rb_root does,
    319  * Iterate to find the leftmost such node N.
    331  return node; /* node is leftmost match */
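
Lines 319-331 of that document cover the "leftmost match" search the excerpt quotes: on an equal key, remember the match but keep descending left, since an equal key may exist further left. A hedged reconstruction with the hypothetical struct mynode:

    static struct mynode *mynode_find_leftmost(struct rb_root_cached *root,
                                               unsigned long key)
    {
        struct rb_node *n = root->rb_root.rb_node;
        struct mynode *match = NULL;

        while (n) {
            struct mynode *cur = rb_entry(n, struct mynode, rb);

            if (key < cur->key) {
                n = n->rb_left;
            } else if (key > cur->key) {
                n = n->rb_right;
            } else {
                match = cur;        /* remember, but look further left */
                n = n->rb_left;
            }
        }
        return match;   /* leftmost match, or NULL */
    }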
|
| /linux/Documentation/bpf/ |
| map_lpm_trie.rst |
    105  from leftmost leaf first. This means that iteration will return more
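
The LPM trie note is observable from userspace: key iteration starts at the leftmost leaf, so more specific prefixes come back before less specific ones. A hedged iteration sketch using the libbpf wrapper bpf_map_get_next_key() (the IPv4 key layout mirrors struct bpf_lpm_trie_key: a 32-bit prefixlen followed by the address bytes; the struct name lpm_key is hypothetical):

    #include <bpf/bpf.h>
    #include <linux/types.h>
    #include <stdio.h>

    struct lpm_key {
        __u32 prefixlen;
        __u8  data[4];          /* IPv4 address bytes */
    };

    static void dump_lpm_keys(int map_fd)
    {
        struct lpm_key cur, next;
        void *prev = NULL;      /* NULL asks for the first (leftmost) key */

        while (!bpf_map_get_next_key(map_fd, prev, &next)) {
            printf("%u.%u.%u.%u/%u\n", next.data[0], next.data[1],
                   next.data[2], next.data[3], next.prefixlen);
            cur = next;
            prev = &cur;
        }
    }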
|