Search results for refs:rb (results 1 – 25 of 381), sorted by relevance

/linux/kernel/events/
ring_buffer.c
22 atomic_set(&handle->rb->poll, EPOLLIN | EPOLLRDNORM); in perf_output_wakeup()
42 struct perf_buffer *rb = handle->rb; in perf_output_get_handle() local
50 (*(volatile unsigned int *)&rb->nest)++; in perf_output_get_handle()
51 handle->wakeup = local_read(&rb->wakeup); in perf_output_get_handle()
56 struct perf_buffer *rb = handle->rb; in perf_output_put_handle() local
64 nest = READ_ONCE(rb->nest); in perf_output_put_handle()
66 WRITE_ONCE(rb->nest, nest - 1); in perf_output_put_handle()
80 head = local_read(&rb->head); in perf_output_put_handle()
114 WRITE_ONCE(rb->user_page->data_head, head); in perf_output_put_handle()
122 WRITE_ONCE(rb->nest, 0); in perf_output_put_handle()
[all …]
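The excerpt above shows the two halves of the perf output handle: perf_output_get_handle() bumps rb->nest and snapshots the wakeup mark, and perf_output_put_handle() publishes rb->head to user_page->data_head only once the outermost writer drops the nest count back down. A minimal user-space sketch of that publish-on-outermost-exit idea, using C11 atomics and hypothetical toy_* names rather than the kernel's primitives:

	#include <stdatomic.h>

	struct toy_rb {
		_Atomic unsigned long	head;		/* position visible to the reader */
		unsigned long		local_head;	/* producer-private write position */
		unsigned int		nest;		/* write-section nesting depth */
	};

	static void toy_get_handle(struct toy_rb *rb)
	{
		rb->nest++;			/* outermost writer sees nest == 1 */
	}

	static void toy_put_handle(struct toy_rb *rb)
	{
		if (--rb->nest)
			return;			/* an outer write section is still open */

		/* Outermost exit: release ordering makes the data visible before the head. */
		atomic_store_explicit(&rb->head, rb->local_head, memory_order_release);
	}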
internal.h
63 extern void rb_free(struct perf_buffer *rb);
67 struct perf_buffer *rb; in rb_free_rcu() local
69 rb = container_of(rcu_head, struct perf_buffer, rcu_head); in rb_free_rcu()
70 rb_free(rb); in rb_free_rcu()
73 static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause) in rb_toggle_paused() argument
75 if (!pause && rb->nr_pages) in rb_toggle_paused()
76 rb->paused = 0; in rb_toggle_paused()
78 rb->paused = 1; in rb_toggle_paused()
84 extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
86 extern void rb_free_aux(struct perf_buffer *rb);
[all …]
/linux/tools/lib/bpf/
ringbuf.c
60 static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r) in ringbuf_free_ring() argument
63 munmap(r->consumer_pos, rb->page_size); in ringbuf_free_ring()
67 munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1)); in ringbuf_free_ring()
75 int ring_buffer__add(struct ring_buffer *rb, int map_fd, in ring_buffer__add() argument
102 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings)); in ring_buffer__add()
105 rb->rings = tmp; in ring_buffer__add()
107 tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events)); in ring_buffer__add()
110 rb->events = tmp; in ring_buffer__add()
115 rb->rings[rb->ring_cnt] = r; in ring_buffer__add()
123 tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0); in ring_buffer__add()
[all …]
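For context, the user-space side of this API is small. A hedged consumer sketch built on the real libbpf entry points ring_buffer__new(), ring_buffer__poll() and ring_buffer__free(); the map fd argument and the handle_event() callback are placeholders:

	#include <bpf/libbpf.h>
	#include <stdio.h>

	static int handle_event(void *ctx, void *data, size_t size)
	{
		/* data points at one record submitted by the BPF program */
		printf("got %zu-byte sample\n", size);
		return 0;	/* a negative return aborts consumption */
	}

	int consume_samples(int ringbuf_map_fd)
	{
		struct ring_buffer *rb;
		int err;

		rb = ring_buffer__new(ringbuf_map_fd, handle_event, NULL, NULL);
		if (!rb)
			return -1;

		/* ring_buffer__poll() returns the number of records consumed, or a negative error */
		while ((err = ring_buffer__poll(rb, 100 /* ms */)) >= 0)
			;

		ring_buffer__free(rb);
		return err;
	}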
/linux/drivers/scsi/bfa/
bfa_ioc_ct.c
185 void __iomem *rb; in bfa_ioc_ct_reg_init() local
188 rb = bfa_ioc_bar0(ioc); in bfa_ioc_ct_reg_init()
190 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; in bfa_ioc_ct_reg_init()
191 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; in bfa_ioc_ct_reg_init()
192 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; in bfa_ioc_ct_reg_init()
195 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; in bfa_ioc_ct_reg_init()
196 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_ct_reg_init()
197 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_ct_reg_init()
198 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; in bfa_ioc_ct_reg_init()
199 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; in bfa_ioc_ct_reg_init()
[all …]
bfa_ioc_cb.c
138 void __iomem *rb; in bfa_ioc_cb_reg_init() local
141 rb = bfa_ioc_bar0(ioc); in bfa_ioc_cb_reg_init()
143 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; in bfa_ioc_cb_reg_init()
144 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; in bfa_ioc_cb_reg_init()
145 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; in bfa_ioc_cb_reg_init()
148 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; in bfa_ioc_cb_reg_init()
149 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_cb_reg_init()
150 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_cb_reg_init()
152 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); in bfa_ioc_cb_reg_init()
153 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); in bfa_ioc_cb_reg_init()
[all …]
/linux/drivers/net/ethernet/brocade/bna/
bfa_ioc_ct.c
49 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
51 static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
251 void __iomem *rb; in bfa_ioc_ct_reg_init() local
254 rb = bfa_ioc_bar0(ioc); in bfa_ioc_ct_reg_init()
256 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; in bfa_ioc_ct_reg_init()
257 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; in bfa_ioc_ct_reg_init()
258 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; in bfa_ioc_ct_reg_init()
261 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; in bfa_ioc_ct_reg_init()
262 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_ct_reg_init()
263 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_ct_reg_init()
[all …]
/linux/fs/xfs/scrub/
bmap_repair.c
99 struct xrep_bmap *rb, in xrep_bmap_discover_shared() argument
103 struct xfs_scrub *sc = rb->sc; in xrep_bmap_discover_shared()
123 rb->reflink_scan = RLS_SET_IFLAG; in xrep_bmap_discover_shared()
131 struct xrep_bmap *rb, in xrep_bmap_from_rmap() argument
143 struct xfs_scrub *sc = rb->sc; in xrep_bmap_from_rmap()
151 if (rb->reflink_scan == RLS_UNKNOWN && !unwritten) { in xrep_bmap_from_rmap()
152 error = xrep_bmap_discover_shared(rb, startblock, blockcount); in xrep_bmap_from_rmap()
163 fa = xfs_bmap_validate_extent(sc->ip, rb->whichfork, &irec); in xrep_bmap_from_rmap()
169 trace_xrep_bmap_found(sc->ip, rb->whichfork, &irec); in xrep_bmap_from_rmap()
174 error = xfarray_append(rb->bmap_records, &rbe); in xrep_bmap_from_rmap()
[all …]
/linux/kernel/bpf/
ringbuf.c
81 struct bpf_ringbuf *rb; member
98 struct bpf_ringbuf *rb; in bpf_ringbuf_area_alloc() local
135 rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, in bpf_ringbuf_area_alloc()
137 if (rb) { in bpf_ringbuf_area_alloc()
139 rb->pages = pages; in bpf_ringbuf_area_alloc()
140 rb->nr_pages = nr_pages; in bpf_ringbuf_area_alloc()
141 return rb; in bpf_ringbuf_area_alloc()
153 struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work); in bpf_ringbuf_notify() local
155 wake_up_all(&rb->waitq); in bpf_ringbuf_notify()
171 struct bpf_ringbuf *rb; in bpf_ringbuf_alloc() local
[all …]
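bpf_ringbuf_area_alloc() above maps the data pages twice in a row (nr_meta_pages + 2 * nr_data_pages) so a record that wraps the end of the buffer can still be addressed contiguously. The producer that fills such a map is a BPF program; a minimal sketch using the real bpf_ringbuf_reserve()/bpf_ringbuf_submit() helpers, with the map name, event layout and attach point chosen only for illustration:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct my_event {
		__u32 pid;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
		__uint(max_entries, 256 * 1024);	/* power-of-2 multiple of the page size */
	} events SEC(".maps");

	SEC("tracepoint/syscalls/sys_enter_execve")
	int trace_execve(void *ctx)
	{
		struct my_event *e;

		e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
		if (!e)
			return 0;	/* buffer full: drop this sample */

		e->pid = bpf_get_current_pid_tgid() >> 32;
		bpf_ringbuf_submit(e, 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";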
range_tree.c
45 static struct range_node *rb_to_range_node(struct rb_node *rb) in rb_to_range_node() argument
47 return rb_entry(rb, struct range_node, rb_range_size); in rb_to_range_node()
58 struct rb_node *rb = rt->range_size_root.rb_root.rb_node; in __find_range() local
61 while (rb) { in __find_range()
62 struct range_node *rn = rb_to_range_node(rb); in __find_range()
66 rb = rb->rb_right; in __find_range()
68 rb = rb->rb_left; in __find_range()
89 struct rb_node **link = &root->rb_root.rb_node, *rb = NULL; in __range_size_insert() local
94 rb = *link; in __range_size_insert()
95 if (size > rn_size(rb_to_range_node(rb))) { in __range_size_insert()
[all …]
/linux/drivers/hid/intel-ish-hid/ishtp/
client-buffers.c
23 struct ishtp_cl_rb *rb; in ishtp_cl_alloc_rx_ring() local
28 rb = ishtp_io_rb_init(cl); in ishtp_cl_alloc_rx_ring()
29 if (!rb) { in ishtp_cl_alloc_rx_ring()
33 ret = ishtp_io_rb_alloc_buf(rb, len); in ishtp_cl_alloc_rx_ring()
37 list_add_tail(&rb->list, &cl->free_rb_list.list); in ishtp_cl_alloc_rx_ring()
99 struct ishtp_cl_rb *rb; in ishtp_cl_free_rx_ring() local
105 rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, in ishtp_cl_free_rx_ring()
107 list_del(&rb->list); in ishtp_cl_free_rx_ring()
108 kfree(rb->buffer.data); in ishtp_cl_free_rx_ring()
109 kfree(rb); in ishtp_cl_free_rx_ring()
[all …]
client.c
25 struct ishtp_cl_rb *rb; in ishtp_read_list_flush() local
30 list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list) in ishtp_read_list_flush()
31 if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) { in ishtp_read_list_flush()
32 list_del(&rb->list); in ishtp_read_list_flush()
34 list_add_tail(&rb->list, &cl->free_rb_list.list); in ishtp_read_list_flush()
593 struct ishtp_cl_rb *rb; in ishtp_cl_read_start() local
623 rb = NULL; in ishtp_cl_read_start()
627 rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list); in ishtp_cl_read_start()
628 list_del_init(&rb->list); in ishtp_cl_read_start()
631 rb->cl = cl; in ishtp_cl_read_start()
[all …]
/linux/lib/
rbtree_test.c
21 struct rb_node rb; member
40 if (key < rb_entry(parent, struct test_node, rb)->key) in insert()
46 rb_link_node(&node->rb, parent, new); in insert()
47 rb_insert_color(&node->rb, &root->rb_root); in insert()
58 if (key < rb_entry(parent, struct test_node, rb)->key) in insert_cached()
66 rb_link_node(&node->rb, parent, new); in insert_cached()
67 rb_insert_color_cached(&node->rb, root, leftmost); in insert_cached()
72 rb_erase(&node->rb, &root->rb_root); in erase()
77 rb_erase_cached(&node->rb, root); in erase_cached()
84 struct test_node, rb, u32, augmented, NODE_VAL) in RB_DECLARE_CALLBACKS_MAX() argument
[all …]
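The insert()/insert_cached() paths above follow the standard rbtree recipe from Documentation/core-api/rbtree.rst: walk down to the correct link, rb_link_node(), then rb_insert_color(). The matching lookup is the mirror image and needs no rebalancing; a sketch against a cut-down test_node (the real one in rbtree_test.c also carries val/augmented fields):

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct test_node {
		struct rb_node	rb;
		u32		key;
	};

	static struct test_node *test_search(struct rb_root *root, u32 key)
	{
		struct rb_node *node = root->rb_node;

		while (node) {
			struct test_node *this = rb_entry(node, struct test_node, rb);

			if (key < this->key)
				node = node->rb_left;
			else if (key > this->key)
				node = node->rb_right;
			else
				return this;
		}
		return NULL;
	}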
/linux/kernel/printk/
printk_ringbuffer.c
565 static bool data_make_reusable(struct printk_ringbuffer *rb, in data_make_reusable() argument
571 struct prb_data_ring *data_ring = &rb->text_data_ring; in data_make_reusable()
572 struct prb_desc_ring *desc_ring = &rb->desc_ring; in data_make_reusable()
633 static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos) in data_push_tail() argument
635 struct prb_data_ring *data_ring = &rb->text_data_ring; in data_push_tail()
676 if (!data_make_reusable(rb, tail_lpos, lpos, &next_lpos)) { in data_push_tail()
768 static bool desc_push_tail(struct printk_ringbuffer *rb, in desc_push_tail() argument
771 struct prb_desc_ring *desc_ring = &rb->desc_ring; in desc_push_tail()
813 if (!data_push_tail(rb, desc.text_blk_lpos.next)) in desc_push_tail()
876 static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out) in desc_reserve() argument
[all …]
printk_ringbuffer.h
109 struct printk_ringbuffer *rb; member
322 bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
324 bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
329 void prb_init(struct printk_ringbuffer *rb,
372 #define prb_for_each_record(from, rb, s, r) \ argument
373 for ((s) = from; prb_read_valid(rb, s, r); (s) = (r)->info->seq + 1)
390 #define prb_for_each_info(from, rb, s, i, lc) \ argument
391 for ((s) = from; prb_read_valid_info(rb, s, i, lc); (s) = (i)->seq + 1)
393 bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
395 bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
[all …]
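The prb_for_each_record() macro above drives a reader loop: it keeps calling prb_read_valid() and advances the sequence number past each record it returns. A hedged reader sketch, assuming a ringbuffer already set up with prb_init() and using prb_rec_init_rd() to point the record at caller-owned buffers (the buffer size is arbitrary):

	static u64 count_records(struct printk_ringbuffer *rb)
	{
		static char text[1024];
		struct printk_info info;
		struct printk_record r;
		u64 seq, count = 0;

		prb_rec_init_rd(&r, &info, text, sizeof(text));

		prb_for_each_record(0, rb, seq, &r)
			count++;	/* r.info and text describe the current record here */

		return count;
	}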
/linux/drivers/gpu/drm/
drm_mm.c
152 INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, in INTERVAL_TREE_DEFINE() argument
168 struct rb_node **link, *rb; in drm_mm_interval_tree_add_node() local
175 rb = &hole_node->rb; in drm_mm_interval_tree_add_node()
176 while (rb) { in drm_mm_interval_tree_add_node()
177 parent = rb_entry(rb, struct drm_mm_node, rb); in drm_mm_interval_tree_add_node()
182 rb = rb_parent(rb); in drm_mm_interval_tree_add_node()
185 rb = &hole_node->rb; in drm_mm_interval_tree_add_node()
186 link = &hole_node->rb.rb_right; in drm_mm_interval_tree_add_node()
189 rb = NULL; in drm_mm_interval_tree_add_node()
195 rb = *link; in drm_mm_interval_tree_add_node()
[all …]
drm_prime.c
100 struct rb_node **p, *rb; in drm_prime_add_buf_handle() local
110 rb = NULL; in drm_prime_add_buf_handle()
115 rb = *p; in drm_prime_add_buf_handle()
116 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb); in drm_prime_add_buf_handle()
118 p = &rb->rb_right; in drm_prime_add_buf_handle()
120 p = &rb->rb_left; in drm_prime_add_buf_handle()
122 rb_link_node(&member->dmabuf_rb, rb, p); in drm_prime_add_buf_handle()
125 rb = NULL; in drm_prime_add_buf_handle()
130 rb = *p; in drm_prime_add_buf_handle()
131 pos = rb_entry(rb, struct drm_prime_member, handle_rb); in drm_prime_add_buf_handle()
[all …]
/linux/mm/
interval_tree.c
23 INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
38 if (!prev->shared.rb.rb_right) { in vma_interval_tree_insert_after()
40 link = &prev->shared.rb.rb_right; in vma_interval_tree_insert_after()
42 parent = rb_entry(prev->shared.rb.rb_right, in vma_interval_tree_insert_after()
43 struct vm_area_struct, shared.rb); in vma_interval_tree_insert_after()
46 while (parent->shared.rb.rb_left) { in vma_interval_tree_insert_after()
47 parent = rb_entry(parent->shared.rb.rb_left, in vma_interval_tree_insert_after()
48 struct vm_area_struct, shared.rb); in vma_interval_tree_insert_after()
52 link = &parent->shared.rb.rb_left; in vma_interval_tree_insert_after()
56 rb_link_node(&node->shared.rb, &parent->shared.rb, link); in vma_interval_tree_insert_after()
[all …]
/linux/drivers/misc/mchp_pci1xxxx/
mchp_pci1xxxx_otpe2p.c
99 void __iomem *rb = priv->reg_base; in is_eeprom_responsive() local
104 rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); in is_eeprom_responsive()
106 rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); in is_eeprom_responsive()
111 true, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); in is_eeprom_responsive()
124 void __iomem *rb = priv->reg_base; in pci1xxxx_eeprom_read() local
141 writel(EEPROM_CMD_EPC_BUSY_BIT | (off + byte), rb + in pci1xxxx_eeprom_read()
148 rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG)); in pci1xxxx_eeprom_read()
154 buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG)); in pci1xxxx_eeprom_read()
165 void __iomem *rb = priv->reg_base; in pci1xxxx_eeprom_write() local
182 writel(*(value + byte), rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG)); in pci1xxxx_eeprom_write()
[all …]
/linux/Documentation/translations/zh_CN/core-api/
rbtree.rst
271 node = rb_entry(root->rb_node, struct interval_tree_node, rb);
274 if (node->rb.rb_left) {
276 rb_entry(node->rb.rb_left,
277 struct interval_tree_node, rb);
294 if (node->rb.rb_right) {
295 node = rb_entry(node->rb.rb_right,
296 struct interval_tree_node, rb);
311 if (node->rb.rb_left) {
312 subtree_last = rb_entry(node->rb.rb_left,
313 struct interval_tree_node, rb)->__subtree_last;
[all …]
/linux/arch/arm64/crypto/
sm3-neon-core.S
42 #define rb w4 macro
356 ldp ra, rb, [RSTATE, #0]
401 R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0)
402 R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0)
403 R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0)
404 R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0)
407 R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0)
408 R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0)
409 R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12)
410 R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12)
[all …]
/linux/drivers/target/iscsi/
iscsi_target_configfs.c
44 ssize_t rb; in lio_target_np_driver_show() local
48 rb = sysfs_emit(page, "1\n"); in lio_target_np_driver_show()
50 rb = sysfs_emit(page, "0\n"); in lio_target_np_driver_show()
52 return rb; in lio_target_np_driver_show()
474 ssize_t rb; \
479 rb = snprintf(page, PAGE_SIZE, \
483 rb = snprintf(page, PAGE_SIZE, "%u\n", \
488 return rb; \
530 ssize_t rb = 0; in lio_target_nacl_info_show() local
536 rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator" in lio_target_nacl_info_show()
[all …]
/linux/fs/jffs2/
nodelist.h
230 struct rb_node rb; member
271 struct rb_node rb; member
334 return rb_entry(node, struct jffs2_node_frag, rb); in frag_first()
344 return rb_entry(node, struct jffs2_node_frag, rb); in frag_last()
347 #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
348 #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
349 #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
350 #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
351 #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
352 #define frag_erase(frag, list) rb_erase(&frag->rb, list)
[all …]
/linux/drivers/firmware/arm_scmi/
raw_mode.c
261 struct scmi_raw_buffer *rb = NULL; in scmi_raw_buffer_get() local
266 rb = list_first_entry(head, struct scmi_raw_buffer, node); in scmi_raw_buffer_get()
267 list_del_init(&rb->node); in scmi_raw_buffer_get()
271 return rb; in scmi_raw_buffer_get()
275 struct scmi_raw_buffer *rb) in scmi_raw_buffer_put() argument
280 rb->msg.len = rb->max_len; in scmi_raw_buffer_put()
283 list_add_tail(&rb->node, &q->free_bufs); in scmi_raw_buffer_put()
288 struct scmi_raw_buffer *rb) in scmi_raw_buffer_enqueue() argument
293 list_add_tail(&rb->node, &q->msg_q); in scmi_raw_buffer_enqueue()
302 struct scmi_raw_buffer *rb = NULL; in scmi_raw_buffer_dequeue_unlocked() local
[all …]
/linux/tools/testing/selftests/bpf/benchs/
run_bench_ringbufs.sh
10 for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
15 for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
20 for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
43 for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
/linux/net/sunrpc/xprtrdma/
verbs.c
82 static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
83 static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
861 struct rpcrdma_regbuf *rb; in rpcrdma_req_setup() local
868 rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize), in rpcrdma_req_setup()
870 if (!rb) in rpcrdma_req_setup()
873 if (!__rpcrdma_regbuf_dma_map(r_xprt, rb)) in rpcrdma_req_setup()
876 req->rl_rdmabuf = rb; in rpcrdma_req_setup()
877 xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb)); in rpcrdma_req_setup()
881 rpcrdma_regbuf_free(rb); in rpcrdma_req_setup()
1246 struct rpcrdma_regbuf *rb; in rpcrdma_regbuf_alloc_node() local
[all …]
