/linux/kernel/cgroup/
rstat.c
   101  if (llist_on_list(&rstatc->lnode))  in css_rstat_updated()
   106  * and may try to insert the same per-cpu lnode into the llist. Note  in css_rstat_updated()
   110  * fact that lnode points to itself when not on a list and then use  in css_rstat_updated()
   113  * successful and the winner will eventually add the per-cpu lnode to  in css_rstat_updated()
   116  self = &rstatc->lnode;  in css_rstat_updated()
   118  if (this_cpu_cmpxchg(rstatc_pcpu->lnode.next, self, NULL) != self)  in css_rstat_updated()
   122  llist_add(&rstatc->lnode, lhead);  in css_rstat_updated()
   157  struct llist_node *lnode;  in css_process_update_tree() (local)
   159  while ((lnode = llist_del_first_init(lhead))) {  in css_process_update_tree()
   177  rstatc = container_of(lnode, struc…  in css_process_update_tree()
   (further matches elided)
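The comment fragments at lines 106-113 describe the trick these matches implement: a per-cpu lnode whose next pointer points at itself while off-list, so a single cmpxchg both tests list membership and elects the one updater allowed to queue the node. Below is a minimal userspace model of that claim-then-add step; it uses C11 atomics in place of the kernel's this_cpu_cmpxchg() and llist primitives, and every name in it is invented for illustration.

        /* Userspace model of the self-pointing llist_node trick from
         * css_rstat_updated(): off-list nodes satisfy next == node, so
         * one cmpxchg detects membership and claims the insert.
         */
        #include <stdatomic.h>
        #include <stdio.h>

        struct lnode {
                _Atomic(struct lnode *) next;
        };

        struct lhead {
                _Atomic(struct lnode *) first;
        };

        /* Off-list nodes are self-linked. */
        static void lnode_init(struct lnode *n)
        {
                atomic_store(&n->next, n);
        }

        /* One winner per off-list node: moves next from self to NULL. */
        static int lnode_claim(struct lnode *n)
        {
                struct lnode *self = n;

                return atomic_compare_exchange_strong(&n->next, &self, NULL);
        }

        /* Lockless push, as llist_add() does with cmpxchg on the head. */
        static void lhead_add(struct lhead *h, struct lnode *n)
        {
                struct lnode *first = atomic_load(&h->first);

                do {
                        atomic_store(&n->next, first);
                } while (!atomic_compare_exchange_weak(&h->first, &first, n));
        }

        int main(void)
        {
                struct lhead head = { NULL };
                struct lnode a;

                lnode_init(&a);
                if (lnode_claim(&a))            /* this caller won: finish the add */
                        lhead_add(&head, &a);
                if (!lnode_claim(&a))           /* already queued: claim must fail */
                        puts("duplicate insert avoided");
                return 0;
        }
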
/linux/drivers/scsi/csiostor/
csio_scsi.c
    93  return ((ioreq->lnode == sld->lnode) &&  in csio_scsi_match_io()
    98  return ((ioreq->lnode == sld->lnode) &&  in csio_scsi_match_io()
   101  return (ioreq->lnode == sld->lnode);  in csio_scsi_match_io()
   203  struct csio_hw *hw = req->lnode->hwp;  in csio_scsi_init_cmd_wr()
   258  struct csio_hw *hw = req->lnode->hwp;  in csio_scsi_cmd()
   362  struct csio_hw *hw = req->lnode->hwp;  in csio_scsi_init_read_wr()
   415  struct csio_hw *hw = req->lnode->hwp;  in csio_scsi_init_write_wr()
   (further matches elided)
csio_lnode.c
   108  * csio_ln_match_by_portid - lookup lnode using given portid.
   112  * If found, returns lnode matching given portid otherwise returns NULL.
   120  /* Match siblings lnode with portid */  in csio_ln_lookup_by_portid()
   131  * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
   134  * Returns - If found, returns lnode matching given vnp id
   151  /* Match sibling lnode */  in csio_ln_lookup_by_vnpi()
   171  * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
   175  * If found, returns lnode matching given wwpn, returns NULL otherwise.
   191  /* Match sibling lnode */  in csio_lnode_lookup_by_wwpn()
   270  struct csio_lnode *ln = fdmi_req->lnode;  in csio_ln_fdmi_done()
   (further matches elided)
csio_scsi.h
   187  struct csio_lnode *lnode;  (member)
csio_wr.h
   251  struct csio_lnode *lnode;  /* Owner lnode */  (member)
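The csio_lnode.c kerneldoc lines above describe three lookups that walk sibling lnodes matching a portid, vnp id, or wwpn. Here is a sketch of that lookup shape, under the assumption of a plain singly linked sibling list and invented field names (the real driver iterates list_head siblings under its own locking):

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        struct lnode {
                uint8_t portid;                 /* invented key field */
                struct lnode *next_sibling;
        };

        /* First sibling whose portid matches, NULL if none: the shape of
         * csio_ln_lookup_by_portid(); the vnpi and wwpn variants differ
         * only in the key compared.
         */
        static struct lnode *lookup_by_portid(struct lnode *head, uint8_t portid)
        {
                for (struct lnode *ln = head; ln; ln = ln->next_sibling)
                        if (ln->portid == portid)
                                return ln;
                return NULL;
        }

        int main(void)
        {
                struct lnode b = { 2, NULL }, a = { 1, &b };

                printf("found portid 2: %s\n",
                       lookup_by_portid(&a, 2) ? "yes" : "no");
                return 0;
        }
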
/linux/drivers/accel/habanalabs/common/
memory.c
  1429  struct hl_vm_hw_block_list_node *lnode =  in hw_block_vm_close() (local)
  1431  struct hl_ctx *ctx = lnode->ctx;  in hw_block_vm_close()
  1434  new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);  in hw_block_vm_close()
  1436  lnode->mapped_size = new_mmap_size;  in hw_block_vm_close()
  1441  list_del(&lnode->node);  in hw_block_vm_close()
  1444  kfree(lnode);  in hw_block_vm_close()
  1462  struct hl_vm_hw_block_list_node *lnode;  in hl_hw_block_mmap() (local)
  1485  lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);  in hl_hw_block_mmap()
  1486  if (!lnode)  in hl_hw_block_mmap()
  2922  struct hl_vm_hw_block_list_node *lnode, *tmp;  in hl_hw_block_mem_fini() (local)
  (further matches elided)
debugfs.c
   238  struct hl_vm_hw_block_list_node *lnode;  in vm_show() (local)
   289  list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {  in vm_show()
   292  lnode->vaddr, lnode->block_size, lnode->mapped_size,  in vm_show()
   293  lnode->id);  in vm_show()
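The memory.c matches outline the lifetime of an hl_vm_hw_block_list_node: hl_hw_block_mmap() kzallocs one per mapping, and hw_block_vm_close() shrinks mapped_size by the span being unmapped, unlinking and freeing the node once nothing remains. A userspace model of that close-path bookkeeping, with hypothetical types and a hand-rolled doubly linked list in place of the kernel's:

        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct hw_block_node {
                uint64_t mapped_size;           /* bytes still mapped */
                struct hw_block_node *prev, *next;
        };

        static void node_unlink(struct hw_block_node *n)
        {
                n->prev->next = n->next;
                n->next->prev = n->prev;
        }

        /* Close path: a VMA covering [start, end) of this block goes away. */
        static void hw_block_close(struct hw_block_node *n,
                                   uint64_t start, uint64_t end)
        {
                uint64_t remaining = n->mapped_size - (end - start);

                if (remaining) {
                        n->mapped_size = remaining;     /* partial unmap: keep node */
                        return;
                }
                node_unlink(n);                         /* last mapping gone */
                free(n);
        }

        int main(void)
        {
                struct hw_block_node head = { 0, &head, &head };
                struct hw_block_node *n = malloc(sizeof(*n));

                n->mapped_size = 0x2000;
                n->prev = &head; n->next = &head;       /* single-entry list */
                head.next = n; head.prev = n;

                hw_block_close(n, 0x1000, 0x2000);      /* unmap half */
                printf("still mapped: %#lx\n",
                       (unsigned long)head.next->mapped_size);
                hw_block_close(head.next, 0x0, 0x1000); /* rest gone, frees n */
                printf("list empty: %s\n", head.next == &head ? "yes" : "no");
                return 0;
        }
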
/linux/block/
blk-cgroup.c
  1064  struct llist_node *lnode;  in __blkcg_rstat_flush() (local)
  1070  lnode = llist_del_all(lhead);  in __blkcg_rstat_flush()
  1071  if (!lnode)  in __blkcg_rstat_flush()
  1085  llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {  in __blkcg_rstat_flush()
  1127  llist_add(&parent->iostat.lnode, plhead);  in __blkcg_rstat_flush()
  2241  llist_add(&bis->lnode, lhead);  in blk_cgroup_bio_start()
blk-cgroup.h
    49  struct llist_node lnode;  (member)
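blk-cgroup pairs a lockless producer (blk_cgroup_bio_start() pushing bis->lnode) with a flush that detaches the whole list at once via llist_del_all() and then walks it with the _safe iterator, since entries may be re-queued toward the parent mid-walk. A userspace model of that detach-and-drain pattern; the stat fields are invented:

        #include <stdatomic.h>
        #include <stddef.h>
        #include <stdio.h>

        struct lnode { struct lnode *next; };

        struct iostat {
                long queued;            /* producer side, invented for the model */
                long flushed;
                struct lnode lnode;
        };

        #define lnode_to_iostat(n) \
                ((struct iostat *)((char *)(n) - offsetof(struct iostat, lnode)))

        /* Producer: lockless push, as blk_cgroup_bio_start() does. */
        static void push(_Atomic(struct lnode *) *head, struct lnode *n)
        {
                struct lnode *first = atomic_load(head);

                do {
                        n->next = first;
                } while (!atomic_compare_exchange_weak(head, &first, n));
        }

        /* Consumer: detach everything at once, then a "_safe" walk that
         * saves ->next before each entry is recycled, as the kernel's
         * llist_for_each_entry_safe must because entries get re-queued.
         */
        static void flush(_Atomic(struct lnode *) *head)
        {
                struct lnode *n = atomic_exchange(head, NULL); /* llist_del_all */

                while (n) {
                        struct lnode *next = n->next;
                        struct iostat *is = lnode_to_iostat(n);

                        is->flushed += is->queued;
                        is->queued = 0;
                        is->lnode.next = NULL;  /* entry may be queued again */
                        n = next;
                }
        }

        int main(void)
        {
                _Atomic(struct lnode *) head = NULL;
                struct iostat a = { .queued = 3 }, b = { .queued = 5 };

                push(&head, &a.lnode);
                push(&head, &b.lnode);
                flush(&head);
                printf("flushed: a=%ld b=%ld\n", a.flushed, b.flushed);
                return 0;
        }
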
/linux/drivers/gpu/drm/exynos/
exynos_drm_g2d.c
   361  struct g2d_cmdlist_node *lnode;  in g2d_add_cmdlist_to_inuse() (local)
   367  lnode = list_entry(file_priv->inuse_cmdlist.prev,  in g2d_add_cmdlist_to_inuse()
   369  lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;  in g2d_add_cmdlist_to_inuse()
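Here lnode is the tail of the file's in-use cmdlist list: line 369 patches the tail buffer's last slot with the new node's DMA address so the G2D engine hops from one command buffer to the next. A sketch of that chaining step with simplified, hypothetical types:

        #include <stdint.h>
        #include <stdio.h>

        struct g2d_cmdlist {
                uint32_t data[8];
                int last;               /* index of the chain slot */
        };

        struct g2d_node {
                struct g2d_cmdlist *cmdlist;
                uint32_t dma_addr;      /* device-visible address of cmdlist */
        };

        /* Patch the previous tail so the hardware walks on to the new
         * buffer, as line 369 above does with node->dma_addr.
         */
        static void chain(struct g2d_node *tail, struct g2d_node *node)
        {
                tail->cmdlist->data[tail->cmdlist->last] = node->dma_addr;
        }

        int main(void)
        {
                struct g2d_cmdlist cl_a = { .last = 7 }, cl_b = { { 0 } };
                struct g2d_node a = { &cl_a, 0x1000 }, b = { &cl_b, 0x2000 };

                chain(&a, &b);
                printf("tail slot now points at %#x\n", cl_a.data[cl_a.last]);
                return 0;
        }
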
/linux/include/linux/
cgroup-defs.h
   382  struct llist_node lnode;  /* lockless list for update */  (member)
filter.h
  1269  return list_empty(&fp->aux->ksym.lnode) ||  in bpf_prog_kallsyms_verify_off()
  1270  fp->aux->ksym.lnode.prev == LIST_POISON2;  in bpf_prog_kallsyms_verify_off()
bpf.h
  1239  struct list_head lnode;  (member)
  1431  .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
/linux/kernel/bpf/
core.c
   129  INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);  in bpf_prog_alloc_no_stats()
   131  INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);  in bpf_prog_alloc_no_stats()
   670  WARN_ON_ONCE(!list_empty(&ksym->lnode));  in bpf_ksym_add()
   671  list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);  in bpf_ksym_add()
   678  if (list_empty(&ksym->lnode))  in __bpf_ksym_del()
   682  list_del_rcu(&ksym->lnode);  in __bpf_ksym_del()
   824  list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {  in bpf_get_kallsym()
bpf_struct_ops.c
   638  INIT_LIST_HEAD_RCU(&ksym->lnode);  in bpf_struct_ops_ksym_init()
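Across filter.h, bpf.h, and core.c the convention is that an empty (self-linked) ksym.lnode means "not registered": bpf_ksym_add() warns if the node is already linked, __bpf_ksym_del() bails out for unregistered entries, and filter.h additionally treats a LIST_POISON2 prev as "already deleted". A userspace model of that discipline, minus RCU and poisoning (this sketch re-initializes on delete instead):

        #include <assert.h>
        #include <stdio.h>

        struct list_head { struct list_head *prev, *next; };

        static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
        static int list_empty(const struct list_head *h) { return h->next == h; }

        static void list_add_tail(struct list_head *n, struct list_head *h)
        {
                n->prev = h->prev; n->next = h;
                h->prev->next = n; h->prev = n;
        }

        static void list_del(struct list_head *n)
        {
                n->prev->next = n->next;
                n->next->prev = n->prev;
                INIT_LIST_HEAD(n);      /* kernel poisons instead; self-link here */
        }

        int main(void)
        {
                struct list_head ksyms, lnode;

                INIT_LIST_HEAD(&ksyms);
                INIT_LIST_HEAD(&lnode);

                assert(list_empty(&lnode));     /* not yet registered */
                list_add_tail(&lnode, &ksyms);  /* register once */
                if (!list_empty(&lnode))
                        list_del(&lnode);       /* del only when on-list */
                puts("ksym registered and removed once");
                return 0;
        }
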
/linux/net/netfilter/
nf_conntrack_sip.c
   812  hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {  in refresh_signalling_expectation()
   835  hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {  in flush_expectations()
nf_nat_sip.c
   340  hlist_for_each_entry(pair_exp, &help->expectations, lnode) {  in nf_nat_sip_expected()
nf_conntrack_netlink.c
  3244  hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {  in ctnetlink_exp_ct_dump_table()
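In the SIP helper, lnode threads each expectation into the per-conntrack help->expectations hlist; the two nf_conntrack_sip.c walks use the _safe variant because they may unlink entries as they go. A sketch of such a delete-during-walk, with a singly linked list and an invented class field standing in for the real hlist and expectation state:

        #include <stdio.h>
        #include <stdlib.h>

        struct expect {
                int cls;                /* e.g. signalling vs. media, invented */
                struct expect *next;
        };

        /* Drop every expectation of the given class. Saving ->next before
         * an entry is unlinked is what makes hlist_for_each_entry_safe
         * safe to use while deleting, as in flush_expectations() above.
         */
        static void flush_class(struct expect **head, int cls)
        {
                struct expect **pp = head, *exp, *next;

                for (exp = *head; exp; exp = next) {
                        next = exp->next;
                        if (exp->cls == cls) {
                                *pp = next;     /* unlink mid-walk */
                                free(exp);
                        } else {
                                pp = &exp->next;
                        }
                }
        }

        int main(void)
        {
                struct expect *head = NULL;

                for (int i = 0; i < 4; i++) {
                        struct expect *e = malloc(sizeof(*e));

                        e->cls = i & 1;
                        e->next = head;
                        head = e;
                }
                flush_class(&head, 1);
                for (struct expect *e = head; e; e = e->next)
                        printf("kept cls=%d\n", e->cls);
                return 0;
        }
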
/linux/tools/power/x86/turbostat/
turbostat.c
  5941  int pkg, node, lnode, cpu, cpux;  in set_node_data() (local)
  5950  lnode = 0;  in set_node_data()
  5957  cpus[cpu].logical_node_id = lnode;  in set_node_data()
  5966  cpus[cpux].logical_node_id = lnode;  in set_node_data()
  5970  lnode++;  in set_node_data()
  5971  if (lnode > topo.nodes_per_pkg)  in set_node_data()
  5972  topo.nodes_per_pkg = lnode;  in set_node_data()
  9286  "cpu %d pkg %d die %d l3 %d node %d lnode %d core %d thread %d\n",  in topology_probe()
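The turbostat hit is the odd one out: lnode here is not a list node but a counter. set_node_data() renumbers physical NUMA nodes as 0, 1, ... within each package, stamping every CPU that shares the physical node, and records the running maximum as topo.nodes_per_pkg. A simplified standalone version over a fixed toy topology (array sizes and values invented):

        #include <stdio.h>

        #define NCPUS 8

        struct cpu { int pkg; int node; int lnode; };

        int main(void)
        {
                /* physical node ids are global; lnode renumbers per package */
                struct cpu cpus[NCPUS] = {
                        {0, 0, -1}, {0, 0, -1}, {0, 1, -1}, {0, 1, -1},
                        {1, 2, -1}, {1, 2, -1}, {1, 3, -1}, {1, 3, -1},
                };
                int nodes_per_pkg = 0;

                for (int pkg = 0; pkg < 2; pkg++) {
                        int lnode = 0;

                        for (int i = 0; i < NCPUS; i++) {
                                if (cpus[i].pkg != pkg || cpus[i].lnode >= 0)
                                        continue;
                                /* every cpu sharing this physical node gets
                                 * the same logical id */
                                for (int j = i; j < NCPUS; j++)
                                        if (cpus[j].pkg == pkg &&
                                            cpus[j].node == cpus[i].node)
                                                cpus[j].lnode = lnode;
                                lnode++;
                                if (lnode > nodes_per_pkg)
                                        nodes_per_pkg = lnode;
                        }
                }
                for (int i = 0; i < NCPUS; i++)
                        printf("cpu %d pkg %d node %d lnode %d\n",
                               i, cpus[i].pkg, cpus[i].node, cpus[i].lnode);
                printf("nodes_per_pkg = %d\n", nodes_per_pkg);
                return 0;
        }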