Lines Matching +full:len +full:- +full:or +full:- +full:limit

1 // SPDX-License-Identifier: GPL-2.0
4 * change in the future and be a per-client cache.
64 * XXX: these limits are per-container, so memory used will increase
70 unsigned int limit; in nfsd_cache_size_limit() local
71 unsigned long low_pages = totalram_pages() - totalhigh_pages(); in nfsd_cache_size_limit()
73 limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10); in nfsd_cache_size_limit()
74 return min_t(unsigned int, limit, 256*1024); in nfsd_cache_size_limit()
82 nfsd_hashsize(unsigned int limit) in nfsd_hashsize() argument
84 return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE); in nfsd_hashsize()
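
nfsd_cache_size_limit() and nfsd_hashsize() size the DRC from low memory: roughly 16 * sqrt(low pages), shifted by PAGE_SHIFT-10, hard-capped at 256k entries, with the bucket count being that limit divided by a target per-bucket chain length and rounded up to a power of two. Below is a user-space sketch of the same arithmetic, assuming 4 KiB pages and a TARGET_BUCKET_SIZE of 64 (neither value is visible in these lines); isqrt() and roundup_pow2() are local stand-ins for the kernel's int_sqrt() and roundup_pow_of_two().

/*
 * Sketch only: user-space model of the DRC sizing above.  The page size
 * and TARGET_BUCKET_SIZE are assumptions (4 KiB pages, 64 entries per
 * bucket), not taken from the lines shown here.
 */
#include <stdio.h>

#define PAGE_SHIFT_SKETCH         12
#define TARGET_BUCKET_SIZE_SKETCH 64

static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

static unsigned long roundup_pow2(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long low_pages = 1UL << 20;	/* 4 GiB of low memory in 4 KiB pages */
	unsigned long limit = (16 * isqrt(low_pages)) << (PAGE_SHIFT_SKETCH - 10);

	if (limit > 256 * 1024)
		limit = 256 * 1024;

	printf("max_drc_entries: %lu\n", limit);	/* 65536 here */
	printf("hash buckets:    %lu\n",
	       roundup_pow2(limit / TARGET_BUCKET_SIZE_SKETCH));	/* 1024 here */
	return 0;
}

With those assumed values, ilog2(1024) yields the maskbits of 10 that nfsd_reply_cache_init() stores further down.
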
95 rp->c_state = RC_UNUSED; in nfsd_cacherep_alloc()
96 rp->c_type = RC_NOCACHE; in nfsd_cacherep_alloc()
97 RB_CLEAR_NODE(&rp->c_node); in nfsd_cacherep_alloc()
98 INIT_LIST_HEAD(&rp->c_lru); in nfsd_cacherep_alloc()
100 memset(&rp->c_key, 0, sizeof(rp->c_key)); in nfsd_cacherep_alloc()
101 rp->c_key.k_xid = rqstp->rq_xid; in nfsd_cacherep_alloc()
102 rp->c_key.k_proc = rqstp->rq_proc; in nfsd_cacherep_alloc()
103 rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp)); in nfsd_cacherep_alloc()
104 rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp))); in nfsd_cacherep_alloc()
105 rp->c_key.k_prot = rqstp->rq_prot; in nfsd_cacherep_alloc()
106 rp->c_key.k_vers = rqstp->rq_vers; in nfsd_cacherep_alloc()
107 rp->c_key.k_len = rqstp->rq_arg.len; in nfsd_cacherep_alloc()
108 rp->c_key.k_csum = csum; in nfsd_cacherep_alloc()
115 if (rp->c_type == RC_REPLBUFF) in nfsd_cacherep_free()
116 kfree(rp->c_replvec.iov_base); in nfsd_cacherep_free()
128 list_del(&rp->c_lru); in nfsd_cacherep_dispose()
139 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) in nfsd_cacherep_unlink_locked()
140 nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len); in nfsd_cacherep_unlink_locked()
141 if (rp->c_state != RC_UNUSED) { in nfsd_cacherep_unlink_locked()
142 rb_erase(&rp->c_node, &b->rb_head); in nfsd_cacherep_unlink_locked()
143 list_del(&rp->c_lru); in nfsd_cacherep_unlink_locked()
144 atomic_dec(&nn->num_drc_entries); in nfsd_cacherep_unlink_locked()
161 spin_lock(&b->cache_lock); in nfsd_reply_cache_free()
163 spin_unlock(&b->cache_lock); in nfsd_reply_cache_free()
171 return drc_slab ? 0 : -ENOMEM; in nfsd_drc_slab_create()
180 * nfsd_net_reply_cache_init - per net namespace reply cache set-up
187 return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM); in nfsd_net_reply_cache_init()
191 * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
197 nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM); in nfsd_net_reply_cache_destroy()
205 nn->max_drc_entries = nfsd_cache_size_limit(); in nfsd_reply_cache_init()
206 atomic_set(&nn->num_drc_entries, 0); in nfsd_reply_cache_init()
207 hashsize = nfsd_hashsize(nn->max_drc_entries); in nfsd_reply_cache_init()
208 nn->maskbits = ilog2(hashsize); in nfsd_reply_cache_init()
210 nn->drc_hashtbl = kvzalloc(array_size(hashsize, in nfsd_reply_cache_init()
211 sizeof(*nn->drc_hashtbl)), GFP_KERNEL); in nfsd_reply_cache_init()
212 if (!nn->drc_hashtbl) in nfsd_reply_cache_init()
213 return -ENOMEM; in nfsd_reply_cache_init()
215 nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s", in nfsd_reply_cache_init()
216 nn->nfsd_name); in nfsd_reply_cache_init()
217 if (!nn->nfsd_reply_cache_shrinker) in nfsd_reply_cache_init()
220 nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan; in nfsd_reply_cache_init()
221 nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count; in nfsd_reply_cache_init()
222 nn->nfsd_reply_cache_shrinker->seeks = 1; in nfsd_reply_cache_init()
223 nn->nfsd_reply_cache_shrinker->private_data = nn; in nfsd_reply_cache_init()
225 shrinker_register(nn->nfsd_reply_cache_shrinker); in nfsd_reply_cache_init()
228 INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head); in nfsd_reply_cache_init()
229 spin_lock_init(&nn->drc_hashtbl[i].cache_lock); in nfsd_reply_cache_init()
231 nn->drc_hashsize = hashsize; in nfsd_reply_cache_init()
235 kvfree(nn->drc_hashtbl); in nfsd_reply_cache_init()
237 return -ENOMEM; in nfsd_reply_cache_init()
245 shrinker_free(nn->nfsd_reply_cache_shrinker); in nfsd_reply_cache_shutdown()
247 for (i = 0; i < nn->drc_hashsize; i++) { in nfsd_reply_cache_shutdown()
248 struct list_head *head = &nn->drc_hashtbl[i].lru_head; in nfsd_reply_cache_shutdown()
251 nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i], in nfsd_reply_cache_shutdown()
256 kvfree(nn->drc_hashtbl); in nfsd_reply_cache_shutdown()
257 nn->drc_hashtbl = NULL; in nfsd_reply_cache_shutdown()
258 nn->drc_hashsize = 0; in nfsd_reply_cache_shutdown()
269 rp->c_timestamp = jiffies; in lru_put_end()
270 list_move_tail(&rp->c_lru, &b->lru_head); in lru_put_end()
276 unsigned int hash = hash_32((__force u32)xid, nn->maskbits); in nfsd_cache_bucket_find()
278 return &nn->drc_hashtbl[hash]; in nfsd_cache_bucket_find()
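
nfsd_cache_bucket_find() selects a bucket by hashing only the RPC XID down to maskbits bits. A sketch of that bucket selection follows; the multiplicative constant is an assumption about how the kernel's hash_32() is commonly implemented, not something shown in these lines.

/*
 * Sketch only: map an RPC XID to a DRC bucket index the way
 * nfsd_cache_bucket_find() does, i.e. hash_32(xid, maskbits).
 * GOLDEN_RATIO_32_SKETCH approximates the kernel's multiplicative
 * hash constant; treat it as an assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32_SKETCH 0x61C88647u

static uint32_t hash_32_sketch(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32_SKETCH) >> (32 - bits);
}

int main(void)
{
	unsigned int maskbits = 10;	/* 1024 buckets, as in the sizing example above */
	uint32_t xid = 0x1234abcd;	/* arbitrary RPC transaction ID */

	printf("bucket %u of %u\n", hash_32_sketch(xid, maskbits), 1u << maskbits);
	return 0;
}
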
283 * If @max is zero, do not limit the number of removed entries.
289 unsigned long expiry = jiffies - RC_EXPIRE; in nfsd_prune_bucket_locked()
293 lockdep_assert_held(&b->cache_lock); in nfsd_prune_bucket_locked()
295 /* The bucket LRU is ordered oldest-first. */ in nfsd_prune_bucket_locked()
296 list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) { in nfsd_prune_bucket_locked()
299 * in-progress, but do keep scanning the list. in nfsd_prune_bucket_locked()
301 if (rp->c_state == RC_INPROG) in nfsd_prune_bucket_locked()
304 if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries && in nfsd_prune_bucket_locked()
305 time_before(expiry, rp->c_timestamp)) in nfsd_prune_bucket_locked()
309 list_add(&rp->c_lru, dispose); in nfsd_prune_bucket_locked()
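
The prune loop walks the bucket's oldest-first LRU: RC_INPROG entries are skipped, the walk can stop at the first entry that is both within the max_drc_entries budget and newer than the expiry cutoff, and everything else is moved onto the caller's dispose list for freeing outside the lock. A sketch of just that keep-or-evict test; RC_EXPIRE_SECS stands in for the kernel's RC_EXPIRE, which is assumed (not shown here) to be 120 seconds' worth of jiffies.

/*
 * Sketch only: the eviction predicate used by the prune loop, in
 * user-space terms.  The constant and field names are local
 * approximations, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define RC_EXPIRE_SECS 120

static bool should_evict(time_t now, time_t entry_timestamp,
			 unsigned int num_entries, unsigned int max_entries)
{
	/* Keep the entry (and stop scanning) only if the cache is under
	 * budget and the entry was touched within the expiry window. */
	if (num_entries <= max_entries &&
	    now - entry_timestamp < RC_EXPIRE_SECS)
		return false;
	return true;
}

int main(void)
{
	time_t now = time(NULL);

	printf("%d\n", should_evict(now, now - 300, 1000, 65536));	/* 1: expired */
	printf("%d\n", should_evict(now, now - 10, 70000, 65536));	/* 1: over budget */
	printf("%d\n", should_evict(now, now - 10, 1000, 65536));	/* 0: keep */
	return 0;
}
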
317 * nfsd_reply_cache_count - count_objects method for the DRC shrinker
329 struct nfsd_net *nn = shrink->private_data; in nfsd_reply_cache_count()
331 return atomic_read(&nn->num_drc_entries); in nfsd_reply_cache_count()
335 * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
341 * has not exceeded its max_drc_entries limit. in nfsd_reply_cache_scan()
348 struct nfsd_net *nn = shrink->private_data; in nfsd_reply_cache_scan()
353 for (i = 0; i < nn->drc_hashsize; i++) { in nfsd_reply_cache_scan()
354 struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i]; in nfsd_reply_cache_scan()
356 if (list_empty(&b->lru_head)) in nfsd_reply_cache_scan()
359 spin_lock(&b->cache_lock); in nfsd_reply_cache_scan()
361 spin_unlock(&b->cache_lock); in nfsd_reply_cache_scan()
364 if (freed > sc->nr_to_scan) in nfsd_reply_cache_scan()
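
nfsd_reply_cache_count() and nfsd_reply_cache_scan() implement the usual shrinker contract: count reports how many entries could be reclaimed, and scan frees entries until it has covered sc->nr_to_scan, returning the number actually freed. A toy user-space rendering of that contract (a real shrinker is driven by the kernel's memory-reclaim path, not by a loop like this):

/*
 * Sketch only: the count/scan shape of the two shrinker methods above,
 * reduced to a single global counter.
 */
#include <stdio.h>

static unsigned long cache_entries = 5000;

static unsigned long drc_count(void)
{
	return cache_entries;		/* how much could be freed */
}

static unsigned long drc_scan(unsigned long nr_to_scan)
{
	unsigned long freed = nr_to_scan < cache_entries ? nr_to_scan
							 : cache_entries;

	cache_entries -= freed;		/* actually free that many */
	return freed;
}

int main(void)
{
	printf("countable: %lu\n", drc_count());
	printf("freed:     %lu\n", drc_scan(128));
	printf("remaining: %lu\n", drc_count());
	return 0;
}
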
371 * nfsd_cache_csum - Checksum incoming NFS Call arguments
385 * Returns a 32-bit checksum value, as defined in RFC 793.
390 unsigned int base, len; in nfsd_cache_csum() local
403 len = min_t(unsigned int, subbuf.head[0].iov_len, remaining); in nfsd_cache_csum()
404 csum = csum_partial(subbuf.head[0].iov_base, len, csum); in nfsd_cache_csum()
405 remaining -= len; in nfsd_cache_csum()
413 len = min_t(unsigned int, PAGE_SIZE - base, remaining); in nfsd_cache_csum()
414 csum = csum_partial(p, len, csum); in nfsd_cache_csum()
415 remaining -= len; in nfsd_cache_csum()
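
nfsd_cache_csum() checksums only the leading bytes of the NFS Call header, walking first the head iovec and then the page array of the xdr_buf. A rough user-space stand-in over a flat buffer follows; RC_CSUMLEN_SKETCH mirrors the kernel's RC_CSUMLEN (assumed to be 256, not visible in these lines), and the summing is a simplification of csum_partial().

/*
 * Sketch only: a weak checksum over the first RC_CSUMLEN_SKETCH bytes
 * of a flat buffer, standing in for the iovec/page walk above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RC_CSUMLEN_SKETCH 256	/* assumed to mirror the kernel's RC_CSUMLEN */

static uint32_t csum_sketch(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	if (len > RC_CSUMLEN_SKETCH)
		len = RC_CSUMLEN_SKETCH;
	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (i < len)			/* odd trailing byte */
		sum += (uint32_t)buf[i] << 8;
	return sum;
}

int main(void)
{
	unsigned char call_hdr[512];

	memset(call_hdr, 0xab, sizeof(call_hdr));
	printf("csum = 0x%08x\n", csum_sketch(call_hdr, sizeof(call_hdr)));
	return 0;
}
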
426 if (key->c_key.k_xid == rp->c_key.k_xid && in nfsd_cache_key_cmp()
427 key->c_key.k_csum != rp->c_key.k_csum) { in nfsd_cache_key_cmp()
432 return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key)); in nfsd_cache_key_cmp()
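
nfsd_cache_key_cmp() special-cases a matching XID with a mismatched checksum (the elided body counts it as a payload miss) before falling back to a memcmp() of the whole key, which also supplies the ordering used by the rb-tree search below. A simplified sketch; the struct here is a stand-in for illustration, not the kernel's key layout.

/*
 * Sketch only: the shape of the DRC key comparison.  Field types and
 * layout are approximations chosen so the struct has no padding.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct drc_key_sketch {
	uint32_t xid;		/* RPC transaction ID */
	uint32_t csum;		/* checksum of the Call header */
	uint16_t proc;		/* NFS procedure */
	uint16_t prot;		/* transport protocol */
	uint32_t vers;		/* NFS version */
	uint32_t len;		/* Call argument length */
};

static int key_cmp_sketch(const struct drc_key_sketch *key,
			  const struct drc_key_sketch *rp)
{
	if (key->xid == rp->xid && key->csum != rp->csum) {
		/* same XID, different payload: the real code logs a payload miss */
		return 1;
	}
	/* memcmp() also provides the ordering for the rb-tree walk */
	return memcmp(key, rp, sizeof(*key));
}

int main(void)
{
	struct drc_key_sketch a = { .xid = 7, .csum = 0x1111 };
	struct drc_key_sketch b = { .xid = 7, .csum = 0x2222 };

	printf("%d\n", key_cmp_sketch(&a, &b) != 0);	/* 1: not a duplicate */
	b.csum = a.csum;
	printf("%d\n", key_cmp_sketch(&a, &b) == 0);	/* 1: duplicate */
	return 0;
}
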
437 * Must be called with cache_lock held. Returns the found entry or
445 struct rb_node **p = &b->rb_head.rb_node, in nfsd_cache_insert()
457 p = &parent->rb_left; in nfsd_cache_insert()
459 p = &parent->rb_right; in nfsd_cache_insert()
465 rb_link_node(&key->c_node, parent, p); in nfsd_cache_insert()
466 rb_insert_color(&key->c_node, &b->rb_head); in nfsd_cache_insert()
469 if (entries > nn->longest_chain) { in nfsd_cache_insert()
470 nn->longest_chain = entries; in nfsd_cache_insert()
471 nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries); in nfsd_cache_insert()
472 } else if (entries == nn->longest_chain) { in nfsd_cache_insert()
474 nn->longest_chain_cachesize = min_t(unsigned int, in nfsd_cache_insert()
475 nn->longest_chain_cachesize, in nfsd_cache_insert()
476 atomic_read(&nn->num_drc_entries)); in nfsd_cache_insert()
484 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
486 * @start: starting byte in @rqstp->rq_arg of the NFS Call header
487 * @len: size of the NFS Call header, in bytes
502 unsigned int len, struct nfsd_cacherep **cacherep) in nfsd_cache_lookup() argument
508 int type = rqstp->rq_cachetype; in nfsd_cache_lookup()
517 csum = nfsd_cache_csum(&rqstp->rq_arg, start, len); in nfsd_cache_lookup()
528 b = nfsd_cache_bucket_find(rqstp->rq_xid, nn); in nfsd_cache_lookup()
529 spin_lock(&b->cache_lock); in nfsd_cache_lookup()
534 rp->c_state = RC_INPROG; in nfsd_cache_lookup()
536 spin_unlock(&b->cache_lock); in nfsd_cache_lookup()
541 atomic_inc(&nn->num_drc_entries); in nfsd_cache_lookup()
546 /* We found a matching entry which is either in progress or done. */ in nfsd_cache_lookup()
553 if (rp->c_state == RC_INPROG) in nfsd_cache_lookup()
559 if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure) in nfsd_cache_lookup()
563 switch (rp->c_type) { in nfsd_cache_lookup()
567 xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat); in nfsd_cache_lookup()
571 if (!nfsd_cache_append(rqstp, &rp->c_replvec)) in nfsd_cache_lookup()
576 WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type); in nfsd_cache_lookup()
582 spin_unlock(&b->cache_lock); in nfsd_cache_lookup()
588 * nfsd_cache_update - Update an entry in the duplicate reply cache.
592 * @statp: pointer to Reply's NFS status code, or NULL
595 * executed and the complete reply is in rqstp->rq_res.
598 * the toplevel loop requires max-sized buffers, which would be a waste
612 struct kvec *resv = &rqstp->rq_res.head[0], *cachv; in nfsd_cache_update()
614 int len; in nfsd_cache_update() local
620 b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn); in nfsd_cache_update()
622 len = resv->iov_len - ((char*)statp - (char*)resv->iov_base); in nfsd_cache_update()
623 len >>= 2; in nfsd_cache_update()
626 if (!statp || len > (256 >> 2)) { in nfsd_cache_update()
633 if (len != 1) in nfsd_cache_update()
634 printk("nfsd: RC_REPLSTAT/reply len %d!\n", len); in nfsd_cache_update()
635 rp->c_replstat = *statp; in nfsd_cache_update()
638 cachv = &rp->c_replvec; in nfsd_cache_update()
639 bufsize = len << 2; in nfsd_cache_update()
640 cachv->iov_base = kmalloc(bufsize, GFP_KERNEL); in nfsd_cache_update()
641 if (!cachv->iov_base) { in nfsd_cache_update()
645 cachv->iov_len = bufsize; in nfsd_cache_update()
646 memcpy(cachv->iov_base, statp, bufsize); in nfsd_cache_update()
652 spin_lock(&b->cache_lock); in nfsd_cache_update()
655 rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags); in nfsd_cache_update()
656 rp->c_type = cachetype; in nfsd_cache_update()
657 rp->c_state = RC_DONE; in nfsd_cache_update()
658 spin_unlock(&b->cache_lock); in nfsd_cache_update()
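
nfsd_cache_update() measures the cacheable part of the reply from the NFS status word to the end of the head kvec, converts it to 32-bit XDR words, and refuses to cache anything over 256 bytes (64 words); RC_REPLSTAT keeps the single status word inline, while RC_REPLBUFF copies len << 2 bytes into a freshly allocated buffer. A worked example of that arithmetic with user-space stand-ins for the kvec and pointers:

/*
 * Sketch only: the byte/word bookkeeping of nfsd_cache_update(),
 * with local types standing in for struct kvec and the reply head.
 */
#include <stdint.h>
#include <stdio.h>

struct kvec_sketch {
	void  *iov_base;
	size_t iov_len;
};

int main(void)
{
	uint32_t reply[32];				/* encoded reply head */
	struct kvec_sketch resv = { reply, 48 };	/* 48 bytes encoded so far */
	uint32_t *statp = &reply[5];			/* NFS status word at byte 20 */

	/* bytes from the status word to the end of the head, then XDR words */
	long len = resv.iov_len - ((char *)statp - (char *)resv.iov_base);
	len >>= 2;

	printf("cacheable words:  %ld\n", len);			/* (48 - 20) / 4 = 7 */
	printf("fits in DRC:      %s\n", len <= (256 >> 2) ? "yes" : "no");
	printf("RC_REPLBUFF copy: %ld bytes\n", len << 2);	/* 28 bytes */
	return 0;
}
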
667 p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len); in nfsd_cache_append()
670 memcpy(p, data->iov_base, data->iov_len); in nfsd_cache_append()
671 xdr_commit_encode(&rqstp->rq_res_stream); in nfsd_cache_append()
676 * Note that fields may be added, removed or reordered in the future. Programs
682 struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info, in nfsd_reply_cache_stats_show()
685 seq_printf(m, "max entries: %u\n", nn->max_drc_entries); in nfsd_reply_cache_stats_show()
687 atomic_read(&nn->num_drc_entries)); in nfsd_reply_cache_stats_show()
688 seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits); in nfsd_reply_cache_stats_show()
690 percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE])); in nfsd_reply_cache_stats_show()
698 percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES])); in nfsd_reply_cache_stats_show()
699 seq_printf(m, "longest chain len: %u\n", nn->longest_chain); in nfsd_reply_cache_stats_show()
700 seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize); in nfsd_reply_cache_stats_show()
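
nfsd_reply_cache_stats_show() backs the reply_cache_stats file, and the comment above it warns scraping programs to key off the printed labels rather than line positions. A minimal reader in that spirit; the /proc/fs/nfsd/reply_cache_stats path assumes the nfsd filesystem is mounted there, and error handling is pared down.

/*
 * Sketch only: read the stats file and match on a label taken from the
 * seq_printf() calls above, instead of relying on line order.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/fs/nfsd/reply_cache_stats", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		unsigned int val;

		/* key off the label, not the line number */
		if (sscanf(line, "max entries: %u", &val) == 1)
			printf("DRC can hold up to %u entries\n", val);
	}
	fclose(f);
	return 0;
}
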