Lines matching refs:lru (symbol references to lru in kernel/bpf/bpf_lru_list.c)
136 static void __bpf_lru_list_rotate_active(struct bpf_lru *lru, in __bpf_lru_list_rotate_active() argument
150 if (++i == lru->nr_scans || node == first_node) in __bpf_lru_list_rotate_active()
163 static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru, in __bpf_lru_list_rotate_inactive() argument
179 while (i < lru->nr_scans) { in __bpf_lru_list_rotate_inactive()
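
The four lines above are the heart of the aging scheme: a second-chance scan over an active/inactive list pair. __bpf_lru_list_rotate_active() walks the active list from its tail, giving referenced nodes another round at the head and demoting unreferenced ones to inactive; __bpf_lru_list_rotate_inactive() promotes referenced inactive nodes back to active. Both passes stop after lru->nr_scans nodes. A simplified kernel-style sketch of the pattern follows; the demo_* names are mine, locking is elided, and the real inactive pass additionally keeps a cursor (l->next_inactive_rotation) so successive scans resume where the previous one stopped:

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_node {
            struct list_head list;
            bool ref;       /* set on lookup, cleared when the node is moved */
    };

    static void demo_rotate_active(struct list_head *active,
                                   struct list_head *inactive,
                                   unsigned int nr_scans)
    {
            struct demo_node *node, *tmp, *first;
            unsigned int i = 0;

            if (list_empty(active))
                    return;
            first = list_first_entry(active, struct demo_node, list);
            list_for_each_entry_safe_reverse(node, tmp, active, list) {
                    if (node->ref) {
                            node->ref = false;
                            list_move(&node->list, active);   /* second chance */
                    } else {
                            list_move(&node->list, inactive); /* demote */
                    }
                    if (++i == nr_scans || node == first)
                            break;
            }
    }

    static void demo_rotate_inactive(struct list_head *active,
                                     struct list_head *inactive,
                                     unsigned int nr_scans)
    {
            struct demo_node *node, *tmp;
            unsigned int i = 0;

            list_for_each_entry_safe_reverse(node, tmp, inactive, list) {
                    if (node->ref) {
                            node->ref = false;
                            list_move(&node->list, active);   /* promote */
                    }
                    if (++i == nr_scans)
                            break;
            }
    }
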
203 __bpf_lru_list_shrink_inactive(struct bpf_lru *lru, in __bpf_lru_list_shrink_inactive() argument
217 } else if (lru->del_from_htab(lru->del_arg, node)) { in __bpf_lru_list_shrink_inactive()
224 if (++i == lru->nr_scans) in __bpf_lru_list_shrink_inactive()
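
__bpf_lru_list_shrink_inactive() is the polite eviction pass: it scans up to lru->nr_scans nodes from the tail of the inactive list, rescues referenced nodes back to active, and reclaims the rest only when the owner agrees via the lru->del_from_htab(lru->del_arg, node) callback (the hash table can refuse, e.g. when the element has already been unlinked concurrently). A sketch under the same naming caveats as above:

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_node {
            struct list_head list;
            bool ref;
    };

    static unsigned int
    demo_shrink_inactive(struct list_head *active, struct list_head *inactive,
                         struct list_head *free_list,
                         unsigned int tgt_nshrink, unsigned int nr_scans,
                         bool (*del_from_htab)(void *arg, struct demo_node *node),
                         void *del_arg)
    {
            struct demo_node *node, *tmp;
            unsigned int nshrunk = 0, i = 0;

            list_for_each_entry_safe_reverse(node, tmp, inactive, list) {
                    if (node->ref) {
                            node->ref = false;
                            list_move(&node->list, active);    /* rescue */
                    } else if (del_from_htab(del_arg, node)) {
                            list_move(&node->list, free_list); /* reclaimed */
                            if (++nshrunk == tgt_nshrink)
                                    break;
                    }
                    if (++i == nr_scans)
                            break;
            }
            return nshrunk;
    }
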
234 static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l) in __bpf_lru_list_rotate() argument
237 __bpf_lru_list_rotate_active(lru, l); in __bpf_lru_list_rotate()
239 __bpf_lru_list_rotate_inactive(lru, l); in __bpf_lru_list_rotate()
252 static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru, in __bpf_lru_list_shrink() argument
263 nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink, in __bpf_lru_list_shrink()
276 if (lru->del_from_htab(lru->del_arg, node)) { in __bpf_lru_list_shrink()
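
__bpf_lru_list_rotate() (lines 234-239) ties the two scans together, running the active pass only when the inactive list is running low. __bpf_lru_list_shrink() then adds a fallback to the polite pass: if scanning the inactive tail reclaimed nothing, it ignores reference bits and force-evicts the first node the callback will release, preferring the inactive list and falling back to active. A sketch of just that fallback, same caveats as above:

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_node {
            struct list_head list;
    };

    static bool demo_force_shrink(struct list_head *active,
                                  struct list_head *inactive,
                                  struct list_head *free_list,
                                  bool (*del_from_htab)(void *arg, struct demo_node *node),
                                  void *del_arg)
    {
            struct list_head *victims = !list_empty(inactive) ? inactive : active;
            struct demo_node *node, *tmp;

            list_for_each_entry_safe_reverse(node, tmp, victims, list) {
                    if (del_from_htab(del_arg, node)) { /* ref bit ignored on purpose */
                            list_move(&node->list, free_list);
                            return true;
                    }
            }
            return false;
    }
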
315 static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru, in bpf_lru_list_pop_free_to_local() argument
318 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_lru_list_pop_free_to_local()
326 __bpf_lru_list_rotate(lru, l); in bpf_lru_list_pop_free_to_local()
332 if (++nfree == lru->target_free) in bpf_lru_list_pop_free_to_local()
336 if (nfree < lru->target_free) in bpf_lru_list_pop_free_to_local()
337 __bpf_lru_list_shrink(lru, l, lru->target_free - nfree, in bpf_lru_list_pop_free_to_local()
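
bpf_lru_list_pop_free_to_local() batches work under the global list lock: it flushes the CPU's stale pending nodes back into the global active/inactive lists, rotates once, then moves up to lru->target_free nodes from the global free list onto the CPU-local free list, shrinking for the remainder when the free list comes up short. The batching is what keeps the global lock off the per-lookup fast path. The shape of the refill loop, with the lock, flush, and rotation elided:

    #include <linux/list.h>

    struct demo_node {
            struct list_head list;
    };

    static void demo_pop_free_to_local(struct list_head *global_free,
                                       struct list_head *local_free,
                                       unsigned int target_free)
    {
            struct demo_node *node, *tmp;
            unsigned int nfree = 0;

            list_for_each_entry_safe(node, tmp, global_free, list) {
                    list_move(&node->list, local_free);
                    if (++nfree == target_free)
                            break;
            }
            /* if (nfree < target_free), the real code shrinks the
             * active/inactive lists for the remaining nodes, still
             * under the global list's raw spinlock */
    }
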
344 static void __local_list_add_pending(struct bpf_lru *lru, in __local_list_add_pending() argument
350 *(u32 *)((void *)node + lru->hash_offset) = hash; in __local_list_add_pending()
372 __local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l) in __local_list_pop_pending() argument
382 lru->del_from_htab(lru->del_arg, node)) { in __local_list_pop_pending()
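
The *(u32 *)((void *)node + lru->hash_offset) store (line 350, and again at line 418) is how the LRU layer writes into an element whose layout it never sees: at init time the owner passes the distance from the embedded bpf_lru_node to its hash field, and the LRU records the hash through that offset whenever it hands a node out. __local_list_pop_pending() (lines 372-382) is the reverse path, reclaiming a handed-out node once del_from_htab() lets it go. A standalone userspace illustration of the offset trick, with a made-up element layout:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct lru_node {           /* stand-in for struct bpf_lru_node */
            int cpu;
            int state;
    };

    struct htab_elem {          /* hypothetical owner layout */
            struct lru_node lru_node;
            uint32_t hash;
    };

    int main(void)
    {
            struct htab_elem elem;
            /* what the owner would pass as hash_offset to bpf_lru_init() */
            size_t hash_offset = offsetof(struct htab_elem, hash) -
                                 offsetof(struct htab_elem, lru_node);
            struct lru_node *node = &elem.lru_node;

            /* the write the LRU layer performs when popping a node */
            *(uint32_t *)((char *)node + hash_offset) = 0x1234abcd;

            printf("elem.hash = 0x%x\n", elem.hash);  /* 0x1234abcd */
            return 0;
    }
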
396 static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru, in bpf_percpu_lru_pop_free() argument
405 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_pop_free()
409 __bpf_lru_list_rotate(lru, l); in bpf_percpu_lru_pop_free()
413 __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list, in bpf_percpu_lru_pop_free()
418 *(u32 *)((void *)node + lru->hash_offset) = hash; in bpf_percpu_lru_pop_free()
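
In the per-CPU flavor (selected by BPF_F_NO_COMMON_LRU in the hashtab code), each CPU owns a complete active/inactive/free triple, so bpf_percpu_lru_pop_free() never touches another CPU's state: rotate, shrink only if the free list is empty, then hand out the head of the free list, which immediately joins the inactive list and has to earn its way to active. The order of operations, with the irq-safe lock and the hash write elided and rotate/shrink passed in as stand-ins for the sketches above:

    #include <linux/list.h>

    struct demo_node {
            struct list_head list;
    };

    static struct demo_node *
    demo_percpu_pop(struct list_head *free_list, struct list_head *inactive,
                    void (*rotate)(void), void (*shrink)(void))
    {
            struct demo_node *node = NULL;

            rotate();                       /* age this CPU's lists */
            if (list_empty(free_list))
                    shrink();               /* refill by evicting */
            if (!list_empty(free_list)) {
                    node = list_first_entry(free_list, struct demo_node, list);
                    list_move(&node->list, inactive); /* in use, not yet active */
            }
            return node;
    }
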
428 static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru, in bpf_common_lru_pop_free() argument
432 struct bpf_common_lru *clru = &lru->common_lru; in bpf_common_lru_pop_free()
444 bpf_lru_list_pop_free_to_local(lru, loc_l); in bpf_common_lru_pop_free()
449 __local_list_add_pending(lru, loc_l, cpu, node, hash); in bpf_common_lru_pop_free()
473 node = __local_list_pop_pending(lru, steal_loc_l); in bpf_common_lru_pop_free()
484 __local_list_add_pending(lru, loc_l, cpu, node, hash); in bpf_common_lru_pop_free()
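
bpf_common_lru_pop_free() tries three sources in order: this CPU's local free list, a refill of that list via bpf_lru_list_pop_free_to_local(), and finally stealing from the other CPUs, taking a victim's free node when one exists and otherwise one of its pending nodes (subject to the same del_from_htab() approval). clru->next_steal remembers where the last steal stopped, so the cost spreads across CPUs. A toy model of the steal order, using counters instead of lists and glossing over the callback check:

    #define NCPUS 4

    struct demo_local {
            int nfree;      /* nodes on this CPU's local free list */
            int npending;   /* nodes handed out but still reclaimable */
    };

    /* Returns the CPU stolen from, or -1 if every list is empty.
     * next_steal plays the role of clru->next_steal. */
    static int demo_steal(struct demo_local locals[NCPUS], int next_steal)
    {
            int first = next_steal, steal = next_steal;

            do {
                    if (locals[steal].nfree > 0) {
                            locals[steal].nfree--;    /* prefer a free node */
                            return steal;
                    }
                    if (locals[steal].npending > 0) {
                            locals[steal].npending--; /* else reclaim pending */
                            return steal;
                    }
                    steal = (steal + 1) % NCPUS;
            } while (steal != first);

            return -1;
    }
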
491 struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash) in bpf_lru_pop_free() argument
493 if (lru->percpu) in bpf_lru_pop_free()
494 return bpf_percpu_lru_pop_free(lru, hash); in bpf_lru_pop_free()
496 return bpf_common_lru_pop_free(lru, hash); in bpf_lru_pop_free()
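
bpf_lru_pop_free() is the public entry point, dispatching on lru->percpu. Callers get back the struct bpf_lru_node embedded in their own element and recover the container, in the spirit of how kernel/bpf/hashtab.c consumes this API; the element layout below is illustrative:

    #include <linux/container_of.h>
    #include "bpf_lru_list.h"           /* in-tree header, kernel/bpf/ */

    struct my_elem {
            struct bpf_lru_node lru_node;   /* embedded LRU bookkeeping */
            u32 hash;                       /* found via hash_offset */
    };

    static struct my_elem *my_elem_pop(struct bpf_lru *lru, u32 hash)
    {
            struct bpf_lru_node *node = bpf_lru_pop_free(lru, hash);

            /* NULL only if even forced eviction reclaimed nothing */
            return node ? container_of(node, struct my_elem, lru_node) : NULL;
    }
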
499 static void bpf_common_lru_push_free(struct bpf_lru *lru, in bpf_common_lru_push_free() argument
512 loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu); in bpf_common_lru_push_free()
530 bpf_lru_list_push_free(&lru->common_lru.lru_list, node); in bpf_common_lru_push_free()
533 static void bpf_percpu_lru_push_free(struct bpf_lru *lru, in bpf_percpu_lru_push_free() argument
539 l = per_cpu_ptr(lru->percpu_lru, node->cpu); in bpf_percpu_lru_push_free()
548 void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node) in bpf_lru_push_free() argument
550 if (lru->percpu) in bpf_lru_push_free()
551 bpf_percpu_lru_push_free(lru, node); in bpf_lru_push_free()
553 bpf_common_lru_push_free(lru, node); in bpf_lru_push_free()
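
On the free side, the common flavor looks at where the node currently lives: a node still pending on its owner CPU's local list (node->cpu was recorded when it was handed out) goes straight back to that CPU's local free list, avoiding the global lock entirely, while anything else lands on the global free list. A self-contained userspace model of that dispatch, minus the locking and the re-check of the node's state under the owner's lock:

    enum demo_type { DEMO_GLOBAL, DEMO_LOCAL_PENDING, DEMO_LOCAL_FREE };

    struct demo_node {
            enum demo_type type;
            int cpu;                    /* owner CPU while locally pending */
            struct demo_node *next;
    };

    struct demo_list {
            struct demo_node *head;
    };

    struct demo_lru {
            struct demo_list global_free;
            struct demo_list local_free[4];     /* one per CPU */
    };

    static void demo_push(struct demo_list *l, struct demo_node *n)
    {
            n->next = l->head;
            l->head = n;
    }

    static void demo_push_free(struct demo_lru *lru, struct demo_node *node)
    {
            if (node->type == DEMO_LOCAL_PENDING) {
                    node->type = DEMO_LOCAL_FREE;
                    demo_push(&lru->local_free[node->cpu], node);
                    return;
            }
            node->type = DEMO_GLOBAL;
            demo_push(&lru->global_free, node);
    }
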
556 static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, in bpf_common_lru_populate() argument
560 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_common_lru_populate()
573 lru->target_free = clamp((nr_elems / num_possible_cpus()) / 2, in bpf_common_lru_populate()
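
target_free (line 573) sizes each refill batch taken by bpf_lru_list_pop_free_to_local(): half of one CPU's fair share of the elements, clamped to [1, LOCAL_FREE_TARGET], so a small map on a many-CPU machine is not drained into a single CPU's local free list. A worked example, assuming LOCAL_FREE_TARGET is 128 as in current kernel/bpf/bpf_lru_list.c:

    #include <stdio.h>

    #define LOCAL_FREE_TARGET 128   /* assumed from kernel/bpf/bpf_lru_list.c */

    static unsigned int clampu(unsigned int v, unsigned int lo, unsigned int hi)
    {
            return v < lo ? lo : v > hi ? hi : v;
    }

    int main(void)
    {
            unsigned int nr_elems = 1024, nr_possible_cpus = 64;
            unsigned int target_free =
                    clampu(nr_elems / nr_possible_cpus / 2, 1, LOCAL_FREE_TARGET);

            /* 1024 / 64 / 2 = 8, well under the old fixed batch of 128 */
            printf("target_free = %u\n", target_free);
            return 0;
    }
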
577 static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, in bpf_percpu_lru_populate() argument
592 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_populate()
608 void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset, in bpf_lru_populate() argument
611 if (lru->percpu) in bpf_lru_populate()
612 bpf_percpu_lru_populate(lru, buf, node_offset, elem_size, in bpf_lru_populate()
615 bpf_common_lru_populate(lru, buf, node_offset, elem_size, in bpf_lru_populate()
646 int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset, in bpf_lru_init() argument
652 lru->percpu_lru = alloc_percpu(struct bpf_lru_list); in bpf_lru_init()
653 if (!lru->percpu_lru) in bpf_lru_init()
659 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_lru_init()
662 lru->nr_scans = PERCPU_NR_SCANS; in bpf_lru_init()
664 struct bpf_common_lru *clru = &lru->common_lru; in bpf_lru_init()
678 lru->nr_scans = LOCAL_NR_SCANS; in bpf_lru_init()
681 lru->percpu = percpu; in bpf_lru_init()
682 lru->del_from_htab = del_from_htab; in bpf_lru_init()
683 lru->del_arg = del_arg; in bpf_lru_init()
684 lru->hash_offset = hash_offset; in bpf_lru_init()
689 void bpf_lru_destroy(struct bpf_lru *lru) in bpf_lru_destroy() argument
691 if (lru->percpu) in bpf_lru_destroy()
692 free_percpu(lru->percpu_lru); in bpf_lru_destroy()
694 free_percpu(lru->common_lru.local_list); in bpf_lru_destroy()
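
Lines 608-694 cover the whole lifecycle: bpf_lru_init() picks the common or per-CPU shape and records the eviction callback and offsets, bpf_lru_populate() threads a caller-preallocated element array onto the free list(s), and bpf_lru_destroy() frees whichever per-CPU allocation init made. A kernel-style usage sketch modeled on kernel/bpf/hashtab.c; my_elem and my_delete are illustrative and error handling is minimal:

    #include <linux/stddef.h>
    #include "bpf_lru_list.h"           /* in-tree header, kernel/bpf/ */

    struct my_elem {
            struct bpf_lru_node lru_node;
            u32 hash;
    };

    /* Eviction callback: return true once the element is unlinked from
     * the owning structure and may be recycled. */
    static bool my_delete(void *arg, struct bpf_lru_node *node)
    {
            return true;
    }

    static int my_lru_setup(struct bpf_lru *lru, void *elems, u32 nr_elems)
    {
            int err;

            err = bpf_lru_init(lru, false /* common LRU, not per-CPU */,
                               offsetof(struct my_elem, hash) -
                               offsetof(struct my_elem, lru_node),
                               my_delete, NULL);
            if (err)
                    return err;

            /* hand over the preallocated array: every node starts out
             * on the free list */
            bpf_lru_populate(lru, elems, offsetof(struct my_elem, lru_node),
                             sizeof(struct my_elem), nr_elems);
            return 0;
    }

Note the division of labor this implies: the LRU layer owns only the node bookkeeping, while memory for the elements themselves stays with the caller, which is why bpf_lru_populate() takes a caller-provided buffer rather than allocating one.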