Lines matching refs: vn
944 #define for_each_vmap_node(vn) \
945 for ((vn) = &vmap_nodes[0]; \
946 (vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)
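The macro at lines 944-946 simply walks the global vmap_nodes array from index 0 up to nr_vmap_nodes. Its typical use, mirroring the call sites matched further down (for example lines 5037-5039), is a per-node loop that takes that node's lock before touching its tree or list; a minimal sketch:

        struct vmap_node *vn;

        for_each_vmap_node(vn) {
                spin_lock(&vn->busy.lock);
                /* ... operate on vn->busy.root / vn->busy.head ... */
                spin_unlock(&vn->busy.lock);
        }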
1117 struct vmap_node *vn;
1122 for_each_vmap_node(vn) {
1123 spin_lock(&vn->busy.lock);
1124 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
1129 spin_unlock(&vn->busy.lock);
1138 vn = addr_to_node(va_start_lowest);
1140 spin_lock(&vn->busy.lock);
1141 *va = __find_vmap_area(va_start_lowest, &vn->busy.root);
1144 return vn;
1146 spin_unlock(&vn->busy.lock);
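Lines 1117-1146 belong to a lookup that scans every node for the area with the lowest va_start at or above addr, then re-takes the owning node's busy.lock and re-checks before returning with that lock still held. A rough reconstruction of that control flow, based only on the matched lines; the retry after a concurrent removal is an assumption and is reduced to a NULL return here:

static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
        unsigned long va_start_lowest = 0;
        struct vmap_node *vn;

        /* Pass one: find the lowest matching start across all nodes. */
        for_each_vmap_node(vn) {
                spin_lock(&vn->busy.lock);
                *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);

                if (*va && (!va_start_lowest || (*va)->va_start < va_start_lowest))
                        va_start_lowest = (*va)->va_start;

                spin_unlock(&vn->busy.lock);
        }

        if (!va_start_lowest)
                return NULL;

        /* Pass two: re-lock the owning node and confirm the area. */
        vn = addr_to_node(va_start_lowest);

        spin_lock(&vn->busy.lock);
        *va = __find_vmap_area(va_start_lowest, &vn->busy.root);
        if (*va)
                return vn;              /* success: busy.lock stays held */

        spin_unlock(&vn->busy.lock);
        return NULL;                    /* area vanished; the real code restarts the scan */
}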
1856 struct vmap_node *vn = addr_to_node(va->va_start);
1861 spin_lock(&vn->busy.lock);
1862 unlink_va(va, &vn->busy.root);
1863 spin_unlock(&vn->busy.lock);
1898 size_to_va_pool(struct vmap_node *vn, unsigned long size)
1903 return &vn->pool[idx];
1926 node_pool_del_va(struct vmap_node *vn, unsigned long size,
1934 vp = size_to_va_pool(vn, size);
1938 spin_lock(&vn->pool_lock);
1962 spin_unlock(&vn->pool_lock);
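Lines 1898-1962 indicate that each node keeps an array of per-size pools of cached areas (vn->pool[], vn->pool_lock) and that a request size is mapped to one pool before an entry is taken out under pool_lock. A hedged sketch of that path; the index formula, the MAX_VA_SIZE_PAGES bound, the struct vmap_pool type name and the node_pool_pop_va() wrapper are assumptions, only the locking shape is taken from the matched lines:

static struct vmap_pool *
size_to_va_pool(struct vmap_node *vn, unsigned long size)
{
        unsigned int idx = (size - 1) / PAGE_SIZE;      /* assumed mapping */

        return (idx < MAX_VA_SIZE_PAGES) ? &vn->pool[idx] : NULL;
}

/* Hypothetical helper: pop one cached area from the matching pool. */
static struct vmap_area *
node_pool_pop_va(struct vmap_node *vn, unsigned long size)
{
        struct vmap_pool *vp = size_to_va_pool(vn, size);
        struct vmap_area *va = NULL;

        if (!vp)
                return NULL;

        spin_lock(&vn->pool_lock);
        if (!list_empty(&vp->head)) {
                va = list_first_entry(&vp->head, struct vmap_area, list);
                list_del_init(&va->list);
                WRITE_ONCE(vp->len, vp->len - 1);
        }
        spin_unlock(&vn->pool_lock);

        return va;
}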
2015 struct vmap_node *vn;
2082 vn = addr_to_node(va->va_start);
2084 spin_lock(&vn->busy.lock);
2085 insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
2086 spin_unlock(&vn->busy.lock);
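Lines 1856-1863 and 2082-2086 are the two halves of the same invariant: a busy area always lives in the node derived from its start address, and both linking and unlinking happen under that node's busy.lock. A minimal sketch of the pair; the wrapper names are hypothetical:

static void busy_tree_link(struct vmap_area *va)
{
        struct vmap_node *vn = addr_to_node(va->va_start);

        spin_lock(&vn->busy.lock);
        insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
        spin_unlock(&vn->busy.lock);
}

static void busy_tree_unlink(struct vmap_area *va)
{
        struct vmap_node *vn = addr_to_node(va->va_start);

        spin_lock(&vn->busy.lock);
        unlink_va(va, &vn->busy.root);
        spin_unlock(&vn->busy.lock);
}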
2186 decay_va_pool_node(struct vmap_node *vn, bool full_decay)
2197 if (list_empty(&vn->pool[i].head))
2201 spin_lock(&vn->pool_lock);
2202 list_replace_init(&vn->pool[i].head, &tmp_list);
2203 spin_unlock(&vn->pool_lock);
2205 pool_len = n_decay = vn->pool[i].len;
2206 WRITE_ONCE(vn->pool[i].len, 0);
2228 spin_lock(&vn->pool_lock);
2229 list_replace_init(&tmp_list, &vn->pool[i].head);
2230 WRITE_ONCE(vn->pool[i].len, pool_len);
2231 spin_unlock(&vn->pool_lock);
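Lines 2186-2231 show a detach/process/reattach pattern: the whole pool list is moved to a temporary list under pool_lock, its length is latched and reset, the decay work runs unlocked, and the survivors are put back with the adjusted length. A sketch of one pool's decay; the keep-most-of-it policy when full_decay is false is an assumption, and returning the shed areas to the free space is omitted:

static void decay_one_pool(struct vmap_node *vn, int i, bool full_decay)
{
        unsigned long pool_len, n_decay;
        struct vmap_area *va, *nva;
        LIST_HEAD(tmp_list);

        if (list_empty(&vn->pool[i].head))
                return;

        /* Detach the pool so the decay work runs without the lock. */
        spin_lock(&vn->pool_lock);
        list_replace_init(&vn->pool[i].head, &tmp_list);
        spin_unlock(&vn->pool_lock);

        pool_len = n_decay = vn->pool[i].len;
        WRITE_ONCE(vn->pool[i].len, 0);

        if (!full_decay)
                n_decay >>= 2;          /* assumed: shed roughly a quarter */

        list_for_each_entry_safe(va, nva, &tmp_list, list) {
                if (!n_decay--)
                        break;

                list_del_init(&va->list);
                pool_len--;
                /* hand 'va' back to the global free space here (omitted) */
        }

        /* Reattach whatever survived, with the corrected length. */
        spin_lock(&vn->pool_lock);
        list_replace_init(&tmp_list, &vn->pool[i].head);
        WRITE_ONCE(vn->pool[i].len, pool_len);
        spin_unlock(&vn->pool_lock);
}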
2239 kasan_release_vmalloc_node(struct vmap_node *vn)
2244 start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
2245 end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
2247 list_for_each_entry(va, &vn->purge_list, list) {
2259 struct vmap_node *vn = container_of(work,
2266 kasan_release_vmalloc_node(vn);
2268 vn->nr_purged = 0;
2270 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
2277 vn->nr_purged++;
2279 if (is_vn_id_valid(vn_id) && !vn->skip_populate)
2280 if (node_pool_add_va(vn, va))
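Lines 2259-2280 sketch the deferred-purge worker: it recovers its node from the work item, releases the KASAN shadow for the whole batch, then drains purge_list while counting into nr_purged; areas carrying a valid node id are put back into that node's pools unless the node asked to skip repopulation. A hedged reconstruction; the origin of vn_id is assumed and the fallback to the global free space is omitted:

static void purge_vmap_node(struct work_struct *work)
{
        struct vmap_node *vn = container_of(work,
                struct vmap_node, purge_work);
        struct vmap_area *va, *n_va;

        kasan_release_vmalloc_node(vn);
        vn->nr_purged = 0;

        list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
                unsigned int vn_id = decode_vn_id(va->flags);   /* assumed origin */

                list_del_init(&va->list);
                vn->nr_purged++;

                /* Prefer recycling into this node's size pools when allowed. */
                if (is_vn_id_valid(vn_id) && !vn->skip_populate)
                        if (node_pool_add_va(vn, va))
                                continue;

                /*
                 * Otherwise the area would be merged back into the global
                 * free space; that path is omitted from this sketch.
                 */
        }
}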
2302 struct vmap_node *vn;
2312 for_each_vmap_node(vn) {
2313 INIT_LIST_HEAD(&vn->purge_list);
2314 vn->skip_populate = full_pool_decay;
2315 decay_va_pool_node(vn, full_pool_decay);
2317 if (RB_EMPTY_ROOT(&vn->lazy.root))
2320 spin_lock(&vn->lazy.lock);
2321 WRITE_ONCE(vn->lazy.root.rb_node, NULL);
2322 list_replace_init(&vn->lazy.head, &vn->purge_list);
2323 spin_unlock(&vn->lazy.lock);
2325 start = min(start, list_first_entry(&vn->purge_list,
2328 end = max(end, list_last_entry(&vn->purge_list,
2331 cpumask_set_cpu(node_to_id(vn), &purge_nodes);
2343 vn = &vmap_nodes[i];
2346 INIT_WORK(&vn->purge_work, purge_vmap_node);
2349 schedule_work_on(i, &vn->purge_work);
2351 schedule_work(&vn->purge_work);
2355 vn->purge_work.func = NULL;
2356 purge_vmap_node(&vn->purge_work);
2357 nr_purged_areas += vn->nr_purged;
2362 vn = &vmap_nodes[i];
2364 if (vn->purge_work.func) {
2365 flush_work(&vn->purge_work);
2366 nr_purged_areas += vn->nr_purged;
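Lines 2302-2366 outline a two-phase purge. Phase one walks every node: it resets the node's purge_list, records whether pool repopulation should be skipped, decays the pools, and, for nodes with a non-empty lazy tree, detaches the whole lazy list into purge_list under lazy.lock and marks the node in a mask. Phase two dispatches one purge_vmap_node() work item per marked node and waits for them, summing nr_purged. A compressed sketch; the real code can also run some workers inline (lines 2355-2357) and flushes the TLB over the spanned range (lines 2325-2328), both left out here:

static bool purge_all_nodes_sketch(bool full_pool_decay)
{
        unsigned long nr_purged_areas = 0;
        cpumask_t purge_nodes;
        struct vmap_node *vn;
        int i;

        cpumask_clear(&purge_nodes);

        /* Phase one: detach every node's lazy list into its purge_list. */
        for_each_vmap_node(vn) {
                INIT_LIST_HEAD(&vn->purge_list);
                vn->skip_populate = full_pool_decay;
                decay_va_pool_node(vn, full_pool_decay);

                if (RB_EMPTY_ROOT(&vn->lazy.root))
                        continue;

                spin_lock(&vn->lazy.lock);
                WRITE_ONCE(vn->lazy.root.rb_node, NULL);
                list_replace_init(&vn->lazy.head, &vn->purge_list);
                spin_unlock(&vn->lazy.lock);

                cpumask_set_cpu(node_to_id(vn), &purge_nodes);
        }

        /* Phase two: one purge worker per marked node, then wait for all. */
        for_each_cpu(i, &purge_nodes) {
                vn = &vmap_nodes[i];

                INIT_WORK(&vn->purge_work, purge_vmap_node);
                if (cpumask_test_cpu(i, cpu_online_mask))
                        schedule_work_on(i, &vn->purge_work);
                else
                        schedule_work(&vn->purge_work);
        }

        for_each_cpu(i, &purge_nodes) {
                vn = &vmap_nodes[i];
                flush_work(&vn->purge_work);
                nr_purged_areas += vn->nr_purged;
        }

        return nr_purged_areas > 0;
}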
2404 struct vmap_node *vn;
2417 vn = is_vn_id_valid(vn_id) ?
2420 spin_lock(&vn->lazy.lock);
2421 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2422 spin_unlock(&vn->lazy.lock);
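Lines 2404-2422 show the lazy-free side: the target node is the one recorded in the area's node id when that id is valid, otherwise the node owning the start address, and the insertion into the lazy tree happens under lazy.lock. A short sketch; id_to_node() and the origin of vn_id are assumed from context rather than taken from the matched lines:

        vn = is_vn_id_valid(vn_id) ?
                id_to_node(vn_id) : addr_to_node(va->va_start);

        spin_lock(&vn->lazy.lock);
        insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
        spin_unlock(&vn->lazy.lock);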
2446 struct vmap_node *vn;
2468 vn = &vmap_nodes[i];
2470 spin_lock(&vn->busy.lock);
2471 va = __find_vmap_area(addr, &vn->busy.root);
2472 spin_unlock(&vn->busy.lock);
2483 struct vmap_node *vn;
2492 vn = &vmap_nodes[i];
2494 spin_lock(&vn->busy.lock);
2495 va = __find_vmap_area(addr, &vn->busy.root);
2497 unlink_va(va, &vn->busy.root);
2498 spin_unlock(&vn->busy.lock);
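Lines 2446-2472 and 2483-2498 both loop over vmap_nodes[i] rather than going straight to addr_to_node(addr), presumably because an area that spans several node zones is stored only in the node owning its va_start. A speculative sketch of such a lookup; the starting-index helper and the backward walk are assumptions, and find_unlink_vmap_area (lines 2483-2498) would additionally unlink the area under the same lock before returning:

static struct vmap_area *find_vmap_area_sketch(unsigned long addr)
{
        struct vmap_node *vn;
        struct vmap_area *va;
        int i, j;

        i = j = addr_to_node_id(addr);          /* assumed helper */
        do {
                vn = &vmap_nodes[i];

                spin_lock(&vn->busy.lock);
                va = __find_vmap_area(addr, &vn->busy.root);
                spin_unlock(&vn->busy.lock);

                if (va)
                        return va;

                /* A spanning area lives where it starts: try the previous node. */
                i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes;
        } while (i != j);

        return NULL;
}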
2722 struct vmap_node *vn;
2730 vn = addr_to_node(vb->va->va_start);
2731 spin_lock(&vn->busy.lock);
2732 unlink_va(vb->va, &vn->busy.root);
2733 spin_unlock(&vn->busy.lock);
4406 struct vmap_node *vn;
4421 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
4422 if (!vn)
4488 spin_unlock(&vn->busy.lock);
4489 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
4492 if (vn)
4493 spin_unlock(&vn->busy.lock);
4499 if (vn)
4500 spin_unlock(&vn->busy.lock);
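Lines 4406-4500 are a walk over busy areas: find_vmap_area_exceed_addr_lock() hands back the owning node with busy.lock already held, the caller processes the area, drops that lock, and asks for the next area starting past the previous one; an early exit can leave a lock held, hence the conditional unlocks at lines 4492-4493 and 4499-4500. A toy walker showing only the lock hand-off (it sums sizes instead of copying data):

static unsigned long walk_areas_from(unsigned long addr, unsigned long limit)
{
        struct vmap_node *vn;
        struct vmap_area *va;
        unsigned long seen = 0;

        vn = find_vmap_area_exceed_addr_lock(addr, &va);
        if (!vn)
                return 0;

        do {
                seen += va->va_end - va->va_start;
                if (seen >= limit)
                        break;          /* leaves vn->busy.lock held */

                addr = va->va_end;
                spin_unlock(&vn->busy.lock);
        } while ((vn = find_vmap_area_exceed_addr_lock(addr, &va)));

        if (vn)
                spin_unlock(&vn->busy.lock);

        return seen;
}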
4835 struct vmap_node *vn = addr_to_node(vas[area]->va_start);
4837 spin_lock(&vn->busy.lock);
4838 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
4841 spin_unlock(&vn->busy.lock);
4956 struct vmap_node *vn;
4961 vn = addr_to_node(addr);
4963 if (!spin_trylock(&vn->busy.lock))
4966 va = __find_vmap_area(addr, &vn->busy.root);
4968 spin_unlock(&vn->busy.lock);
4976 spin_unlock(&vn->busy.lock);
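Lines 4956-4976 take the node's busy.lock with spin_trylock() and give up on contention, presumably because this dump path can be reached from contexts where the lock is already held. A short sketch with the reporting step left out:

static bool vmap_dump_obj_sketch(unsigned long addr)
{
        struct vmap_node *vn = addr_to_node(addr);
        struct vmap_area *va;

        if (!spin_trylock(&vn->busy.lock))
                return false;

        va = __find_vmap_area(addr, &vn->busy.root);
        if (!va) {
                spin_unlock(&vn->busy.lock);
                return false;
        }

        /* ... report the area while the lock is held ... */
        spin_unlock(&vn->busy.lock);
        return true;
}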
5013 struct vmap_node *vn;
5016 for_each_vmap_node(vn) {
5017 spin_lock(&vn->lazy.lock);
5018 list_for_each_entry(va, &vn->lazy.head, list) {
5023 spin_unlock(&vn->lazy.lock);
5029 struct vmap_node *vn;
5037 for_each_vmap_node(vn) {
5038 spin_lock(&vn->busy.lock);
5039 list_for_each_entry(va, &vn->busy.head, list) {
5094 spin_unlock(&vn->busy.lock);
5159 struct vmap_node *vn;
5180 vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
5181 if (vn) {
5185 vmap_nodes = vn;
5192 for_each_vmap_node(vn) {
5193 vn->busy.root = RB_ROOT;
5194 INIT_LIST_HEAD(&vn->busy.head);
5195 spin_lock_init(&vn->busy.lock);
5197 vn->lazy.root = RB_ROOT;
5198 INIT_LIST_HEAD(&vn->lazy.head);
5199 spin_lock_init(&vn->lazy.lock);
5202 INIT_LIST_HEAD(&vn->pool[i].head);
5203 WRITE_ONCE(vn->pool[i].len, 0);
5206 spin_lock_init(&vn->pool_lock);
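Lines 5159-5206 cover node setup: the node array is allocated with GFP_NOWAIT | __GFP_NOWARN, and every node gets empty busy and lazy trees, initialised locks, and zeroed pools. A sketch of that initialisation; the nr_vmap_nodes update, the MAX_VA_SIZE_PAGES bound, and the assumption that a failed allocation simply leaves a pre-existing single static node in place are not shown in the matched lines:

static void vmap_init_nodes_sketch(int n)
{
        struct vmap_node *vn;
        int i;

        vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
        if (vn) {
                vmap_nodes = vn;
                nr_vmap_nodes = n;      /* assumed update */
        }

        /* On allocation failure this still walks the default node(s). */
        for_each_vmap_node(vn) {
                vn->busy.root = RB_ROOT;
                INIT_LIST_HEAD(&vn->busy.head);
                spin_lock_init(&vn->busy.lock);

                vn->lazy.root = RB_ROOT;
                INIT_LIST_HEAD(&vn->lazy.head);
                spin_lock_init(&vn->lazy.lock);

                for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
                        INIT_LIST_HEAD(&vn->pool[i].head);
                        WRITE_ONCE(vn->pool[i].len, 0);
                }

                spin_lock_init(&vn->pool_lock);
        }
}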
5214 struct vmap_node *vn;
5217 for_each_vmap_node(vn) {
5219 count += READ_ONCE(vn->pool[i].len);
5228 struct vmap_node *vn;
5230 for_each_vmap_node(vn)
5231 decay_va_pool_node(vn, true);
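Lines 5214-5231 read like a count/scan pair for reclaim: the count side sums the per-node pool lengths locklessly with READ_ONCE (which is why updates elsewhere use WRITE_ONCE), and the scan side performs a full decay of every node's pools. A sketch with hypothetical function names; the shrinker wiring itself is not part of the matched lines:

static unsigned long vmap_pools_count(void)
{
        unsigned long count = 0;
        struct vmap_node *vn;
        int i;

        for_each_vmap_node(vn)
                for (i = 0; i < MAX_VA_SIZE_PAGES; i++)
                        count += READ_ONCE(vn->pool[i].len);

        return count;
}

static void vmap_pools_drain(void)
{
        struct vmap_node *vn;

        for_each_vmap_node(vn)
                decay_va_pool_node(vn, true);
}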
5240 struct vmap_node *vn;
5277 vn = addr_to_node(va->va_start);
5278 insert_vmap_area(va, &vn->busy.root, &vn->busy.head);