Lines matching full:cache (identifier search over the Linux kernel's NFSv4.2 xattr cache, fs/nfs/nfs42xattr.c)

6  * User extended attribute client-side cache functions.
21 * a cache structure attached to NFS inodes. This structure is allocated
22 * when needed, and freed when the cache is zapped.
24 * The cache structure contains a hash table of entries, and a pointer
25 * to a special-cased entry for the listxattr cache.
28 * counting. The cache entries use a similar refcounting scheme.
30 * This makes freeing a cache, both from the shrinker and from the
31 * zap cache path, easy. It also means that, in current use cases,
40 * Two shrinkers deal with the cache entries themselves: one for
45 * The other shrinker frees the cache structures themselves.
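
To make the overview concrete: the structures these matches manipulate look roughly like the sketch below. It is reconstructed from the fields referenced in the lines that follow (ref, nent, buckets[].hlist/lock/cache/draining, listxattr, listxattr_lock, inode, lru, dispose); the hash size and exact layout are assumptions, not verbatim source.

#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#define NFS4_XATTR_HASH_SIZE	64	/* assumed; a power of two, see the jhash mask below */

struct nfs4_xattr_bucket {
	spinlock_t lock;			/* protects hlist and draining */
	struct hlist_head hlist;		/* chained xattr entries */
	struct nfs4_xattr_cache *cache;		/* back pointer for the entry shrinker */
	bool draining;				/* set while the cache is being discarded */
};

struct nfs4_xattr_cache {
	struct kref ref;			/* cache lifetime is refcounted */
	struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
	struct list_head lru;			/* linkage into the cache LRU */
	struct list_head dispose;		/* shrinker scan staging list */
	atomic_long_t nent;			/* number of hashed entries */
	spinlock_t listxattr_lock;		/* protects listxattr */
	struct inode *inode;			/* owning inode, NULL once unlinked */
	struct nfs4_xattr_entry *listxattr;	/* special-cased listxattr entry */
};
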
64 struct nfs4_xattr_cache *cache; member
106 nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache) in nfs4_xattr_hash_init() argument
111 INIT_HLIST_HEAD(&cache->buckets[i].hlist); in nfs4_xattr_hash_init()
112 spin_lock_init(&cache->buckets[i].lock); in nfs4_xattr_hash_init()
113 cache->buckets[i].cache = cache; in nfs4_xattr_hash_init()
114 cache->buckets[i].draining = false; in nfs4_xattr_hash_init()
125 * Wrapper functions to add a cache entry to the right LRU.
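
The "right LRU" is one of the two entry LRUs mentioned in the overview: a normal one, and a more aggressively shrunk one for large (> PAGE_SIZE) values. A minimal sketch, assuming an NFS4_XATTR_ENTRY_EXTVAL flag marks entries whose value was allocated separately:

static struct list_lru nfs4_xattr_entry_lru;
static struct list_lru nfs4_xattr_large_entry_lru;

static void
nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	/* Large values go on the LRU that reclaim presses harder on. */
	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	list_lru_add(lru, &entry->lru);
}
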
150 * This function allocates cache entries. They are the normal
152 * cache. Those allocations use the same entry so that they can be
155 * xattr cache entries are allocated together with names. If the
163 * @name: Name of the extended attribute. NULL for listxattr cache
165 * @value: Value of attribute, or listxattr cache. NULL if the
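
A sketch of the combined allocation the comment describes, under these assumptions: the value is passed in directly (the real code can also copy it from pages later), entries that fit in one page use a single kmalloc(), larger values get a separate kvmalloc() buffer, and the helper name and entry field names are hypothetical:

static struct nfs4_xattr_entry *
example_alloc_entry(const char *name, const void *value, size_t len)
{
	struct nfs4_xattr_entry *entry;
	size_t slen = name != NULL ? strlen(name) + 1 : 0;
	char *buf;

	if (sizeof(*entry) + slen + len <= PAGE_SIZE) {
		/* Entry, name and value share one kmalloc'ed buffer. */
		entry = kmalloc(sizeof(*entry) + slen + len, GFP_KERNEL);
		if (entry == NULL)
			return NULL;
		entry->flags = 0;
		entry->xattr_value = (char *)(entry + 1) + slen;
	} else {
		/* Large value: separate, aggressively reclaimed buffer. */
		entry = kmalloc(sizeof(*entry) + slen, GFP_KERNEL);
		if (entry == NULL)
			return NULL;
		entry->xattr_value = kvmalloc(len, GFP_KERNEL);
		if (entry->xattr_value == NULL) {
			kfree(entry);
			return NULL;
		}
		entry->flags = NFS4_XATTR_ENTRY_EXTVAL;
	}
	memcpy(entry->xattr_value, value, len);

	buf = (char *)(entry + 1);
	if (name != NULL) {
		memcpy(buf, name, slen);
		entry->xattr_name = buf;
	} else {
		entry->xattr_name = NULL;	/* listxattr cache entry */
	}
	entry->xattr_size = len;
	kref_init(&entry->ref);

	return entry;
}
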
270 struct nfs4_xattr_cache *cache; in nfs4_xattr_free_cache_cb() local
273 cache = container_of(kref, struct nfs4_xattr_cache, ref); in nfs4_xattr_free_cache_cb()
276 if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist))) in nfs4_xattr_free_cache_cb()
278 cache->buckets[i].draining = false; in nfs4_xattr_free_cache_cb()
281 cache->listxattr = NULL; in nfs4_xattr_free_cache_cb()
283 kmem_cache_free(nfs4_xattr_cache_cachep, cache); in nfs4_xattr_free_cache_cb()
290 struct nfs4_xattr_cache *cache; in nfs4_xattr_alloc_cache() local
292 cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, in nfs4_xattr_alloc_cache()
294 if (cache == NULL) in nfs4_xattr_alloc_cache()
297 kref_init(&cache->ref); in nfs4_xattr_alloc_cache()
298 atomic_long_set(&cache->nent, 0); in nfs4_xattr_alloc_cache()
300 return cache; in nfs4_xattr_alloc_cache()
304 * Set the listxattr cache, which is a special-cased cache entry.
306 * the cache is being drained - this prevents a new listxattr
307 * cache from being added to what is now a stale cache.
310 nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache, in nfs4_xattr_set_listcache() argument
316 spin_lock(&cache->listxattr_lock); in nfs4_xattr_set_listcache()
318 old = cache->listxattr; in nfs4_xattr_set_listcache()
325 cache->listxattr = new; in nfs4_xattr_set_listcache()
334 spin_unlock(&cache->listxattr_lock); in nfs4_xattr_set_listcache()
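
Assembled from the matched lines above, with the gaps filled in as a sketch; the entry LRU helpers and the free callback name are assumptions that follow the naming used elsewhere in this listing:

static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
			 struct nfs4_xattr_entry *new)
{
	struct nfs4_xattr_entry *old;
	int ret = 1;

	spin_lock(&cache->listxattr_lock);

	old = cache->listxattr;

	if (old == ERR_PTR(-ESTALE)) {
		/* Cache is being drained; refuse to install anything. */
		ret = 0;
		goto out;
	}

	cache->listxattr = new;
	if (new != NULL && new != ERR_PTR(-ESTALE))
		nfs4_xattr_entry_lru_add(new);

	if (old != NULL) {
		nfs4_xattr_entry_lru_del(old);
		kref_put(&old->ref, nfs4_xattr_free_entry_cb);
	}
out:
	spin_unlock(&cache->listxattr_lock);

	return ret;
}
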
340 * Unlink a cache from its parent inode, clearing out an invalid
341 * cache. Must be called with i_lock held.
364 * Discard a cache. Called by get_cache() if there was an old,
365 * invalid cache. Can also be called from a shrinker callback.
367 * The cache is dead, it has already been unlinked from its inode,
368 * and no longer appears on the cache LRU list.
376 * any way to 'find' this cache. Then, remove the entries from the hash
379 * At that point, the cache will remain empty and can be freed when the final
385 nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache) in nfs4_xattr_discard_cache() argument
392 nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE)); in nfs4_xattr_discard_cache()
395 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache()
407 atomic_long_set(&cache->nent, 0); in nfs4_xattr_discard_cache()
409 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_discard_cache()
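
Putting those pieces together, the discard path looks roughly like this (a sketch; the entry hlist member name hnode and the entry helpers are assumptions):

static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
	unsigned int i;
	struct nfs4_xattr_entry *entry;
	struct nfs4_xattr_bucket *bucket;
	struct hlist_node *n;

	/* Block any new listxattr entry from being installed. */
	nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		bucket = &cache->buckets[i];

		spin_lock(&bucket->lock);
		bucket->draining = true;
		hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
			nfs4_xattr_entry_lru_del(entry);
			hlist_del_init(&entry->hnode);
			kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		}
		spin_unlock(&bucket->lock);
	}

	atomic_long_set(&cache->nent, 0);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
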
413 * Get a referenced copy of the cache structure. Avoid doing allocs
417 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
418 * and acts accordingly, replacing the cache when needed. For the read case
419 * (!add), this means that the caller must make sure that the cache
421 * revalidate_inode to do this. The attribute cache timeout (for the
430 struct nfs4_xattr_cache *cache, *oldcache, *newcache; in nfs4_xattr_get_cache() local
434 cache = oldcache = NULL; in nfs4_xattr_get_cache()
441 cache = nfsi->xattr_cache; in nfs4_xattr_get_cache()
443 if (cache != NULL) in nfs4_xattr_get_cache()
444 kref_get(&cache->ref); in nfs4_xattr_get_cache()
448 if (add && cache == NULL) { in nfs4_xattr_get_cache()
451 cache = nfs4_xattr_alloc_cache(); in nfs4_xattr_get_cache()
452 if (cache == NULL) in nfs4_xattr_get_cache()
458 * The cache was invalidated again. Give up, in nfs4_xattr_get_cache()
463 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_get_cache()
464 cache = NULL; in nfs4_xattr_get_cache()
475 kref_get(&cache->ref); in nfs4_xattr_get_cache()
476 nfsi->xattr_cache = cache; in nfs4_xattr_get_cache()
477 cache->inode = inode; in nfs4_xattr_get_cache()
478 list_lru_add(&nfs4_xattr_cache_lru, &cache->lru); in nfs4_xattr_get_cache()
484 * If there was a race, throw away the cache we just in nfs4_xattr_get_cache()
489 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_get_cache()
490 cache = newcache; in nfs4_xattr_get_cache()
496 * Discard the now orphaned old cache. in nfs4_xattr_get_cache()
501 return cache; in nfs4_xattr_get_cache()
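
The entry points further down all wrap this function in the same get/use/put pattern. A hypothetical caller:

static int example_lookup(struct inode *inode)
{
	struct nfs4_xattr_cache *cache;

	cache = nfs4_xattr_get_cache(inode, 0);	/* 0: read side, don't create */
	if (cache == NULL)
		return -ENOENT;

	/* ... look up or list entries ... */

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	return 0;
}
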
505 nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name) in nfs4_xattr_hash_bucket() argument
507 return &cache->buckets[jhash(name, strlen(name), 0) & in nfs4_xattr_hash_bucket()
508 (ARRAY_SIZE(cache->buckets) - 1)]; in nfs4_xattr_hash_bucket()
527 nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache, in nfs4_xattr_hash_add() argument
534 bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name); in nfs4_xattr_hash_add()
549 atomic_long_inc(&cache->nent); in nfs4_xattr_hash_add()
565 nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name) in nfs4_xattr_hash_remove() argument
570 bucket = nfs4_xattr_hash_bucket(cache, name); in nfs4_xattr_hash_remove()
578 atomic_long_dec(&cache->nent); in nfs4_xattr_hash_remove()
588 nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name) in nfs4_xattr_hash_find() argument
593 bucket = nfs4_xattr_hash_bucket(cache, name); in nfs4_xattr_hash_find()
607 * Entry point to retrieve an entry from the cache.
612 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_get() local
616 cache = nfs4_xattr_get_cache(inode, 0); in nfs4_xattr_cache_get()
617 if (cache == NULL) in nfs4_xattr_cache_get()
621 entry = nfs4_xattr_hash_find(cache, name); in nfs4_xattr_cache_get()
624 dprintk("%s: cache hit '%s', len %lu\n", __func__, in nfs4_xattr_cache_get()
637 dprintk("%s: cache miss '%s'\n", __func__, name); in nfs4_xattr_cache_get()
641 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_get()
647 * Retrieve a cached list of xattrs from the cache.
651 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_list() local
655 cache = nfs4_xattr_get_cache(inode, 0); in nfs4_xattr_cache_list()
656 if (cache == NULL) in nfs4_xattr_cache_list()
659 spin_lock(&cache->listxattr_lock); in nfs4_xattr_cache_list()
661 entry = cache->listxattr; in nfs4_xattr_cache_list()
677 spin_unlock(&cache->listxattr_lock); in nfs4_xattr_cache_list()
679 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_list()
685 * Add an xattr to the cache.
687 * This also invalidates the xattr list cache.
692 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_add() local
698 cache = nfs4_xattr_get_cache(inode, 1); in nfs4_xattr_cache_add()
699 if (cache == NULL) in nfs4_xattr_cache_add()
706 (void)nfs4_xattr_set_listcache(cache, NULL); in nfs4_xattr_cache_add()
708 if (!nfs4_xattr_hash_add(cache, entry)) in nfs4_xattr_cache_add()
712 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_add()
717 * Remove an xattr from the cache.
719 * This also invalidates the xattr list cache.
723 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_remove() local
727 cache = nfs4_xattr_get_cache(inode, 0); in nfs4_xattr_cache_remove()
728 if (cache == NULL) in nfs4_xattr_cache_remove()
731 (void)nfs4_xattr_set_listcache(cache, NULL); in nfs4_xattr_cache_remove()
732 nfs4_xattr_hash_remove(cache, name); in nfs4_xattr_cache_remove()
734 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_remove()
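
Both mutating entry points share one invariant: the listxattr entry is invalidated before the hash table changes, so a stale cached list can never be served afterwards. A hypothetical helper making the shared sequence explicit:

static int example_mutate(struct inode *inode, const char *name,
			  struct nfs4_xattr_entry *entry /* NULL = remove */)
{
	struct nfs4_xattr_cache *cache;
	int ret = 0;

	cache = nfs4_xattr_get_cache(inode, entry != NULL);
	if (cache == NULL)
		return -ENOMEM;

	/* Invalidate the cached listxattr result first. */
	(void)nfs4_xattr_set_listcache(cache, NULL);

	if (entry != NULL) {
		if (!nfs4_xattr_hash_add(cache, entry))
			ret = -ESTALE;	/* bucket was draining */
	} else {
		nfs4_xattr_hash_remove(cache, name);
	}

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	return ret;
}
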
738 * Cache listxattr output, replacing any possible old one.
743 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_set_list() local
746 cache = nfs4_xattr_get_cache(inode, 1); in nfs4_xattr_cache_set_list()
747 if (cache == NULL) in nfs4_xattr_cache_set_list()
755 * This is just there to be able to get to bucket->cache, in nfs4_xattr_cache_set_list()
759 entry->bucket = &cache->buckets[0]; in nfs4_xattr_cache_set_list()
761 if (!nfs4_xattr_set_listcache(cache, entry)) in nfs4_xattr_cache_set_list()
765 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_set_list()
769 * Zap the entire cache. Called when an inode is evicted.
784 * The entry LRU is shrunk more aggressively than the cache LRU,
787 * Cache structures are freed only when they've become empty, after
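
One way this policy is commonly expressed with the shrinker API is through the seeks field: a lower value makes reclaim press harder on that LRU. The exact values, batch size, and flags below are assumptions:

static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfs4_xattr_cache_shrinker = {
	.count_objects	= nfs4_xattr_cache_count,
	.scan_objects	= nfs4_xattr_cache_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_MEMCG_AWARE,
};

/* Lower .seeks: reclaim presses harder on large entries. */
static struct shrinker nfs4_xattr_large_entry_shrinker = {
	.count_objects	= nfs4_xattr_entry_count,
	.scan_objects	= nfs4_xattr_entry_scan,
	.seeks		= 1,
	.batch		= 512,
	.flags		= SHRINKER_MEMCG_AWARE,
};
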
829 struct nfs4_xattr_cache *cache = container_of(item, in cache_lru_isolate() local
832 if (atomic_long_read(&cache->nent) > 1) in cache_lru_isolate()
836 * If a cache structure is on the LRU list, we know that in cache_lru_isolate()
840 inode = cache->inode; in cache_lru_isolate()
845 kref_get(&cache->ref); in cache_lru_isolate()
847 cache->inode = NULL; in cache_lru_isolate()
850 list_lru_isolate(lru, &cache->lru); in cache_lru_isolate()
854 list_add_tail(&cache->dispose, dispose); in cache_lru_isolate()
863 struct nfs4_xattr_cache *cache; in nfs4_xattr_cache_scan() local
868 cache = list_first_entry(&dispose, struct nfs4_xattr_cache, in nfs4_xattr_cache_scan()
870 list_del_init(&cache->dispose); in nfs4_xattr_cache_scan()
871 nfs4_xattr_discard_cache(cache); in nfs4_xattr_cache_scan()
872 kref_put(&cache->ref, nfs4_xattr_free_cache_cb); in nfs4_xattr_cache_scan()
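
The scan callback above follows the standard list_lru dispose pattern: isolate candidates onto a private list under the LRU lock, then tear them down outside it. Assembled as a sketch from the matched lines:

static unsigned long
nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_cache *cache;

	freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
	    cache_lru_isolate, &dispose);
	while (!list_empty(&dispose)) {
		cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
		    dispose);
		list_del_init(&cache->dispose);
		nfs4_xattr_discard_cache(cache);
		kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	}

	return freed;
}
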
894 struct nfs4_xattr_cache *cache; in entry_lru_isolate() local
899 cache = bucket->cache; in entry_lru_isolate()
902 * Unhook the entry from its parent (either a cache bucket in entry_lru_isolate()
903 * or a cache structure if it's a listxattr buf), so that in entry_lru_isolate()
911 /* Regular cache entry */ in entry_lru_isolate()
918 atomic_long_dec(&cache->nent); in entry_lru_isolate()
923 /* Listxattr cache entry */ in entry_lru_isolate()
924 if (!spin_trylock(&cache->listxattr_lock)) in entry_lru_isolate()
929 cache->listxattr = NULL; in entry_lru_isolate()
932 spin_unlock(&cache->listxattr_lock); in entry_lru_isolate()
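
The trylock is the standard rule for isolate callbacks: the walker already holds the LRU lock, so the callback must never block on another lock; contended items are skipped and revisited on a later pass. A hypothetical callback showing the shape:

static enum lru_status
example_entry_isolate(struct list_head *item, struct list_lru_one *lru,
		      spinlock_t *lru_lock, void *arg)
{
	struct nfs4_xattr_entry *entry = container_of(item,
	    struct nfs4_xattr_entry, lru);
	struct nfs4_xattr_bucket *bucket = entry->bucket;

	/* lru_lock is held by the walker: never block under it. */
	if (!spin_trylock(&bucket->lock))
		return LRU_SKIP;

	/* ... unhook the entry and drop its reference ... */

	list_lru_isolate(lru, item);
	spin_unlock(&bucket->lock);

	return LRU_REMOVED;
}
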
985 struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p; in nfs4_xattr_cache_init_once() local
987 spin_lock_init(&cache->listxattr_lock); in nfs4_xattr_cache_init_once()
988 atomic_long_set(&cache->nent, 0); in nfs4_xattr_cache_init_once()
989 nfs4_xattr_hash_init(cache); in nfs4_xattr_cache_init_once()
990 cache->listxattr = NULL; in nfs4_xattr_cache_init_once()
991 INIT_LIST_HEAD(&cache->lru); in nfs4_xattr_cache_init_once()
992 INIT_LIST_HEAD(&cache->dispose); in nfs4_xattr_cache_init_once()
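
The constructor above is wired up through kmem_cache_create(), so cache objects come out of the slab already initialized; the slab name and flags below are assumptions:

int __init nfs4_xattr_cache_init(void)
{
	nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
	    sizeof(struct nfs4_xattr_cache), 0,
	    SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
	    nfs4_xattr_cache_init_once);
	if (nfs4_xattr_cache_cachep == NULL)
		return -ENOMEM;

	return 0;
}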