F: fs/iomap/
F: include/linux/iomap.h
+FILESYSTEMS [NETFS LIBRARY]
+M: David Howells <dhowells@redhat.com>
+L: linux-cachefs@redhat.com (moderated for non-subscribers)
+L: linux-fsdevel@vger.kernel.org
+S: Supported
+F: Documentation/filesystems/caching/
+F: Documentation/filesystems/netfs_library.rst
+F: fs/netfs/
+F: include/linux/fscache*.h
+F: include/linux/netfs.h
+F: include/trace/events/fscache.h
+F: include/trace/events/netfs.h
+
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
L: linux-hwmon@vger.kernel.org
F: include/linux/freezer.h
F: kernel/freezer.c
-FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
-M: David Howells <dhowells@redhat.com>
-L: linux-cachefs@redhat.com (moderated for non-subscribers)
-S: Supported
-F: Documentation/filesystems/caching/
-F: fs/fscache/
-F: include/linux/fscache*.h
-
FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
M: Eric Biggers <ebiggers@kernel.org>
M: Theodore Y. Ts'o <tytso@mit.edu>
menu "Caches"
source "fs/netfs/Kconfig"
-source "fs/fscache/Kconfig"
source "fs/cachefiles/Kconfig"
endmenu
# Do not add any filesystems before this line
obj-$(CONFIG_NETFS_SUPPORT) += netfs/
-obj-$(CONFIG_FSCACHE) += fscache/
obj-$(CONFIG_REISERFS_FS) += reiserfs/
obj-$(CONFIG_EXT4_FS) += ext4/
# We place ext4 before ext2 so that clean ext3 root fs's do NOT mount using the
+++ /dev/null
-# SPDX-License-Identifier: GPL-2.0-only
-
-config FSCACHE
- tristate "General filesystem local caching manager"
- select NETFS_SUPPORT
- help
- This option enables a generic filesystem caching manager that can be
- used by various network and other filesystems to cache data locally.
- Different sorts of caches can be plugged in, depending on the
- resources available.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_STATS
- bool "Gather statistical information on local caching"
- depends on FSCACHE && PROC_FS
- select NETFS_STATS
- help
- This option causes statistical information to be gathered on local
- caching and exported through file:
-
- /proc/fs/fscache/stats
-
- The gathering of statistics adds a certain amount of overhead to
-	  execution as quite a few stats are gathered, and on a
- multi-CPU system these may be on cachelines that keep bouncing
- between CPUs. On the other hand, the stats are very useful for
- debugging purposes. Saying 'Y' here is recommended.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_DEBUG
- bool "Debug FS-Cache"
- depends on FSCACHE
- help
- This permits debugging to be dynamically enabled in the local caching
- management module. If this is set, the debugging output may be
-	  enabled by setting bits in /sys/module/fscache/parameters/debug.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
+++ /dev/null
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for general filesystem caching code
-#
-
-fscache-y := \
- cache.o \
- cookie.o \
- io.o \
- main.o \
- volume.o
-
-fscache-$(CONFIG_PROC_FS) += proc.o
-fscache-$(CONFIG_FSCACHE_STATS) += stats.o
-
-obj-$(CONFIG_FSCACHE) := fscache.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache cache handling
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/export.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-static LIST_HEAD(fscache_caches);
-DECLARE_RWSEM(fscache_addremove_sem);
-EXPORT_SYMBOL(fscache_addremove_sem);
-DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
-EXPORT_SYMBOL(fscache_clearance_waiters);
-
-static atomic_t fscache_cache_debug_id;
-
-/*
- * Allocate a cache cookie.
- */
-static struct fscache_cache *fscache_alloc_cache(const char *name)
-{
- struct fscache_cache *cache;
-
- cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (cache) {
- if (name) {
- cache->name = kstrdup(name, GFP_KERNEL);
- if (!cache->name) {
- kfree(cache);
- return NULL;
- }
- }
- refcount_set(&cache->ref, 1);
- INIT_LIST_HEAD(&cache->cache_link);
- cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
- }
- return cache;
-}
-
-static bool fscache_get_cache_maybe(struct fscache_cache *cache,
- enum fscache_cache_trace where)
-{
- bool success;
- int ref;
-
- success = __refcount_inc_not_zero(&cache->ref, &ref);
- if (success)
- trace_fscache_cache(cache->debug_id, ref + 1, where);
- return success;
-}
-
-/*
- * Look up a cache cookie.
- */
-struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
-{
- struct fscache_cache *candidate, *cache, *unnamed = NULL;
-
- /* firstly check for the existence of the cache under read lock */
- down_read(&fscache_addremove_sem);
-
- list_for_each_entry(cache, &fscache_caches, cache_link) {
- if (cache->name && name && strcmp(cache->name, name) == 0 &&
- fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
- goto got_cache_r;
- if (!cache->name && !name &&
- fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
- goto got_cache_r;
- }
-
- if (!name) {
- list_for_each_entry(cache, &fscache_caches, cache_link) {
- if (cache->name &&
- fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
- goto got_cache_r;
- }
- }
-
- up_read(&fscache_addremove_sem);
-
- /* the cache does not exist - create a candidate */
- candidate = fscache_alloc_cache(name);
- if (!candidate)
- return ERR_PTR(-ENOMEM);
-
- /* write lock, search again and add if still not present */
- down_write(&fscache_addremove_sem);
-
- list_for_each_entry(cache, &fscache_caches, cache_link) {
- if (cache->name && name && strcmp(cache->name, name) == 0 &&
- fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
- goto got_cache_w;
- if (!cache->name) {
- unnamed = cache;
- if (!name &&
- fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
- goto got_cache_w;
- }
- }
-
- if (unnamed && is_cache &&
- fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
- goto use_unnamed_cache;
-
- if (!name) {
- list_for_each_entry(cache, &fscache_caches, cache_link) {
- if (cache->name &&
- fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
- goto got_cache_w;
- }
- }
-
- list_add_tail(&candidate->cache_link, &fscache_caches);
- trace_fscache_cache(candidate->debug_id,
- refcount_read(&candidate->ref),
- fscache_cache_new_acquire);
- up_write(&fscache_addremove_sem);
- return candidate;
-
-got_cache_r:
- up_read(&fscache_addremove_sem);
- return cache;
-use_unnamed_cache:
- cache = unnamed;
- cache->name = candidate->name;
- candidate->name = NULL;
-got_cache_w:
- up_write(&fscache_addremove_sem);
- kfree(candidate->name);
- kfree(candidate);
- return cache;
-}
-
-/**
- * fscache_acquire_cache - Acquire a cache-level cookie.
- * @name: The name of the cache.
- *
- * Get a cookie to represent an actual cache. If a name is given and there is
- * a nameless cache record available, this will acquire that and set its name,
- * directing all the volumes using it to this cache.
- *
- * The cache will be switched over to the preparing state if not currently in
- * use, otherwise -EBUSY will be returned.
- */
-struct fscache_cache *fscache_acquire_cache(const char *name)
-{
- struct fscache_cache *cache;
-
- ASSERT(name);
- cache = fscache_lookup_cache(name, true);
- if (IS_ERR(cache))
- return cache;
-
- if (!fscache_set_cache_state_maybe(cache,
- FSCACHE_CACHE_IS_NOT_PRESENT,
- FSCACHE_CACHE_IS_PREPARING)) {
- pr_warn("Cache tag %s in use\n", name);
- fscache_put_cache(cache, fscache_cache_put_cache);
- return ERR_PTR(-EBUSY);
- }
-
- return cache;
-}
-EXPORT_SYMBOL(fscache_acquire_cache);
-
-/**
- * fscache_put_cache - Release a cache-level cookie.
- * @cache: The cache cookie to be released
- * @where: An indication of where the release happened
- *
- * Release the caller's reference on a cache-level cookie. The @where
- * indication should give information about the circumstances in which the call
- * occurs and will be logged through a tracepoint.
- */
-void fscache_put_cache(struct fscache_cache *cache,
- enum fscache_cache_trace where)
-{
-	unsigned int debug_id;
-	bool zero;
-	int ref;
-
-	if (IS_ERR_OR_NULL(cache))
-		return;
-
-	debug_id = cache->debug_id;
-	zero = __refcount_dec_and_test(&cache->ref, &ref);
-	trace_fscache_cache(debug_id, ref - 1, where);
-
- if (zero) {
- down_write(&fscache_addremove_sem);
- list_del_init(&cache->cache_link);
- up_write(&fscache_addremove_sem);
- kfree(cache->name);
- kfree(cache);
- }
-}
-
-/**
- * fscache_relinquish_cache - Reset cache state and release cookie
- * @cache: The cache cookie to be released
- *
- * Reset the state of a cache and release the caller's reference on a cache
- * cookie.
- */
-void fscache_relinquish_cache(struct fscache_cache *cache)
-{
- enum fscache_cache_trace where =
- (cache->state == FSCACHE_CACHE_IS_PREPARING) ?
- fscache_cache_put_prep_failed :
- fscache_cache_put_relinquish;
-
- cache->ops = NULL;
- cache->cache_priv = NULL;
- fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
- fscache_put_cache(cache, where);
-}
-EXPORT_SYMBOL(fscache_relinquish_cache);
-
-/**
- * fscache_add_cache - Declare a cache as being open for business
- * @cache: The cache-level cookie representing the cache
- * @ops: Table of cache operations to use
- * @cache_priv: Private data for the cache record
- *
- * Add a cache to the system, making it available for netfs's to use.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-int fscache_add_cache(struct fscache_cache *cache,
- const struct fscache_cache_ops *ops,
- void *cache_priv)
-{
- int n_accesses;
-
- _enter("{%s,%s}", ops->name, cache->name);
-
- BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);
-
- /* Get a ref on the cache cookie and keep its n_accesses counter raised
- * by 1 to prevent wakeups from transitioning it to 0 until we're
- * withdrawing caching services from it.
- */
- n_accesses = atomic_inc_return(&cache->n_accesses);
- trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
- n_accesses, fscache_access_cache_pin);
-
- down_write(&fscache_addremove_sem);
-
- cache->ops = ops;
- cache->cache_priv = cache_priv;
- fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);
-
- up_write(&fscache_addremove_sem);
- pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
- _leave(" = 0 [%s]", cache->name);
- return 0;
-}
-EXPORT_SYMBOL(fscache_add_cache);
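
Taken together, fscache_acquire_cache(), fscache_add_cache() and
fscache_relinquish_cache() give a cache backend its bring-up sequence. A
minimal sketch of that sequence, in which my_cache_ops, my_cache_priv and
my_backend_probe() are hypothetical stand-ins for a real backend's pieces:

static int my_backend_start(const char *tag)
{
	struct fscache_cache *cache;
	int ret;

	/* Moves the cache object to the PREPARING state, or returns
	 * ERR_PTR(-EBUSY) if the tag is already claimed. */
	cache = fscache_acquire_cache(tag);
	if (IS_ERR(cache))
		return PTR_ERR(cache);

	ret = my_backend_probe(cache);		/* hypothetical setup step */
	if (ret < 0) {
		/* Resets the state and drops our reference. */
		fscache_relinquish_cache(cache);
		return ret;
	}

	/* Declare the cache open for business. */
	return fscache_add_cache(cache, &my_cache_ops, my_cache_priv);
}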
-
-/**
- * fscache_begin_cache_access - Pin a cache so it can be accessed
- * @cache: The cache-level cookie
- * @why: An indication of the circumstances of the access for tracing
- *
- * Attempt to pin the cache to prevent it from going away whilst we're
- * accessing it, returning true if successful. This works as follows:
- *
- * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
- * then we return false to indicate access was not permitted.
- *
- * (2) If the cache tests as live, then we increment the n_accesses count and
- * then recheck the liveness, ending the access if it ceased to be live.
- *
- * (3) When we end the access, we decrement n_accesses and wake up any
- * waiters if it reaches 0.
- *
- * (4) Whilst the cache is caching, n_accesses is kept artificially
- * incremented to prevent wakeups from happening.
- *
- * (5) When the cache is taken offline, the state is changed to prevent new
- * accesses, n_accesses is decremented and we wait for n_accesses to
- * become 0.
- */
-bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
-{
- int n_accesses;
-
- if (!fscache_cache_is_live(cache))
- return false;
-
- n_accesses = atomic_inc_return(&cache->n_accesses);
- smp_mb__after_atomic(); /* Reread live flag after n_accesses */
- trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
- n_accesses, why);
- if (!fscache_cache_is_live(cache)) {
- fscache_end_cache_access(cache, fscache_access_unlive);
- return false;
- }
- return true;
-}
-
-/**
- * fscache_end_cache_access - Unpin a cache at the end of an access.
- * @cache: The cache-level cookie
- * @why: An indication of the circumstances of the access for tracing
- *
- * Unpin a cache after we've accessed it. The @why indicator is merely
- * provided for tracing purposes.
- */
-void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
-{
- int n_accesses;
-
- smp_mb__before_atomic();
- n_accesses = atomic_dec_return(&cache->n_accesses);
- trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
- n_accesses, why);
- if (n_accesses == 0)
- wake_up_var(&cache->n_accesses);
-}
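
As used inside fscache itself, every access to the cache is bracketed by this
begin/end pair. A minimal sketch of the pattern, where do_cache_io() is a
hypothetical placeholder and fscache_access_io_read is assumed to be one of
the defined trace labels:

static int my_cache_op(struct fscache_cache *cache)
{
	int ret;

	/* Fails if the cache isn't (or ceased to be) live. */
	if (!fscache_begin_cache_access(cache, fscache_access_io_read))
		return -ENOBUFS;

	ret = do_cache_io(cache);	/* hypothetical */

	/* Drops n_accesses and wakes the withdrawal path on dec-to-0. */
	fscache_end_cache_access(cache, fscache_access_io_read);
	return ret;
}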
-
-/**
- * fscache_io_error - Note a cache I/O error
- * @cache: The record describing the cache
- *
- * Note that an I/O error occurred in a cache and that it should no longer be
- * used for anything. This also reports the error into the kernel log.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-void fscache_io_error(struct fscache_cache *cache)
-{
- if (fscache_set_cache_state_maybe(cache,
- FSCACHE_CACHE_IS_ACTIVE,
- FSCACHE_CACHE_GOT_IOERROR))
- pr_err("Cache '%s' stopped due to I/O error\n",
- cache->name);
-}
-EXPORT_SYMBOL(fscache_io_error);
-
-/**
- * fscache_withdraw_cache - Withdraw a cache from the active service
- * @cache: The cache cookie
- *
- * Begin the process of withdrawing a cache from service. This stops new
- * cache-level and volume-level accesses from taking place and waits for
- * currently ongoing cache-level accesses to end.
- */
-void fscache_withdraw_cache(struct fscache_cache *cache)
-{
- int n_accesses;
-
- pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
- cache->name, atomic_read(&cache->object_count));
-
- fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);
-
- /* Allow wakeups on dec-to-0 */
- n_accesses = atomic_dec_return(&cache->n_accesses);
- trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
- n_accesses, fscache_access_cache_unpin);
-
- wait_var_event(&cache->n_accesses,
- atomic_read(&cache->n_accesses) == 0);
-}
-EXPORT_SYMBOL(fscache_withdraw_cache);
-
-#ifdef CONFIG_PROC_FS
-static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";
-
-/*
- * Generate a list of caches in /proc/fs/fscache/caches
- */
-static int fscache_caches_seq_show(struct seq_file *m, void *v)
-{
- struct fscache_cache *cache;
-
- if (v == &fscache_caches) {
- seq_puts(m,
- "CACHE REF VOLS OBJS ACCES S NAME\n"
- "======== ===== ===== ===== ===== = ===============\n"
- );
- return 0;
- }
-
- cache = list_entry(v, struct fscache_cache, cache_link);
- seq_printf(m,
- "%08x %5d %5d %5d %5d %c %s\n",
- cache->debug_id,
- refcount_read(&cache->ref),
- atomic_read(&cache->n_volumes),
- atomic_read(&cache->object_count),
- atomic_read(&cache->n_accesses),
- fscache_cache_states[cache->state],
- cache->name ?: "-");
- return 0;
-}
-
-static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
- __acquires(fscache_addremove_sem)
-{
- down_read(&fscache_addremove_sem);
- return seq_list_start_head(&fscache_caches, *_pos);
-}
-
-static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- return seq_list_next(v, &fscache_caches, _pos);
-}
-
-static void fscache_caches_seq_stop(struct seq_file *m, void *v)
- __releases(fscache_addremove_sem)
-{
- up_read(&fscache_addremove_sem);
-}
-
-const struct seq_operations fscache_caches_seq_ops = {
- .start = fscache_caches_seq_start,
- .next = fscache_caches_seq_next,
- .stop = fscache_caches_seq_stop,
- .show = fscache_caches_seq_show,
-};
-#endif /* CONFIG_PROC_FS */
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* netfs cookie management
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * See Documentation/filesystems/caching/netfs-api.rst for more information on
- * the netfs API.
- */
-
-#define FSCACHE_DEBUG_LEVEL COOKIE
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-struct kmem_cache *fscache_cookie_jar;
-
-static void fscache_cookie_lru_timed_out(struct timer_list *timer);
-static void fscache_cookie_lru_worker(struct work_struct *work);
-static void fscache_cookie_worker(struct work_struct *work);
-static void fscache_unhash_cookie(struct fscache_cookie *cookie);
-static void fscache_perform_invalidation(struct fscache_cookie *cookie);
-
-#define fscache_cookie_hash_shift 15
-static struct hlist_bl_head fscache_cookie_hash[1 << fscache_cookie_hash_shift];
-static LIST_HEAD(fscache_cookies);
-static DEFINE_RWLOCK(fscache_cookies_lock);
-static LIST_HEAD(fscache_cookie_lru);
-static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
-DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
-static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
-static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
-static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
-
-void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
-{
- const u8 *k;
-
- pr_err("%c-cookie c=%08x [fl=%lx na=%u nA=%u s=%c]\n",
- prefix,
- cookie->debug_id,
- cookie->flags,
- atomic_read(&cookie->n_active),
- atomic_read(&cookie->n_accesses),
- fscache_cookie_states[cookie->state]);
- pr_err("%c-cookie V=%08x [%s]\n",
- prefix,
- cookie->volume->debug_id,
- cookie->volume->key);
-
- k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
- cookie->inline_key : cookie->key;
- pr_err("%c-key=[%u] '%*phN'\n", prefix, cookie->key_len, cookie->key_len, k);
-}
-
-static void fscache_free_cookie(struct fscache_cookie *cookie)
-{
- if (WARN_ON_ONCE(!list_empty(&cookie->commit_link))) {
- spin_lock(&fscache_cookie_lru_lock);
- list_del_init(&cookie->commit_link);
- spin_unlock(&fscache_cookie_lru_lock);
- fscache_stat_d(&fscache_n_cookies_lru);
- fscache_stat(&fscache_n_cookies_lru_removed);
- }
-
- if (WARN_ON_ONCE(test_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags))) {
- fscache_print_cookie(cookie, 'F');
- return;
- }
-
- write_lock(&fscache_cookies_lock);
- list_del(&cookie->proc_link);
- write_unlock(&fscache_cookies_lock);
- if (cookie->aux_len > sizeof(cookie->inline_aux))
- kfree(cookie->aux);
- if (cookie->key_len > sizeof(cookie->inline_key))
- kfree(cookie->key);
- fscache_stat_d(&fscache_n_cookies);
- kmem_cache_free(fscache_cookie_jar, cookie);
-}
-
-static void __fscache_queue_cookie(struct fscache_cookie *cookie)
-{
- if (!queue_work(fscache_wq, &cookie->work))
- fscache_put_cookie(cookie, fscache_cookie_put_over_queued);
-}
-
-static void fscache_queue_cookie(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where)
-{
- fscache_get_cookie(cookie, where);
- __fscache_queue_cookie(cookie);
-}
-
-/*
- * Initialise the access gate on a cookie by setting a flag to prevent the
- * state machine from being queued when the access counter transitions to 0.
- * We're only interested in this when we withdraw caching services from the
- * cookie.
- */
-static void fscache_init_access_gate(struct fscache_cookie *cookie)
-{
- int n_accesses;
-
- n_accesses = atomic_read(&cookie->n_accesses);
- trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
- n_accesses, fscache_access_cache_pin);
- set_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
-}
-
-/**
- * fscache_end_cookie_access - Unpin a cookie at the end of an access.
- * @cookie: A data file cookie
- * @why: An indication of the circumstances of the access for tracing
- *
- * Unpin a cache cookie after we've accessed it and bring a deferred
- * relinquishment or withdrawal state into effect.
- *
- * The @why indicator is provided for tracing purposes.
- */
-void fscache_end_cookie_access(struct fscache_cookie *cookie,
- enum fscache_access_trace why)
-{
- int n_accesses;
-
- smp_mb__before_atomic();
- n_accesses = atomic_dec_return(&cookie->n_accesses);
- trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
- n_accesses, why);
- if (n_accesses == 0 &&
- !test_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags))
- fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
-}
-EXPORT_SYMBOL(fscache_end_cookie_access);
-
-/*
- * Pin the cache behind a cookie so that we can access it.
- */
-static void __fscache_begin_cookie_access(struct fscache_cookie *cookie,
- enum fscache_access_trace why)
-{
- int n_accesses;
-
- n_accesses = atomic_inc_return(&cookie->n_accesses);
- smp_mb__after_atomic(); /* (Future) read state after is-caching.
- * Reread n_accesses after is-caching
- */
- trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
- n_accesses, why);
-}
-
-/**
- * fscache_begin_cookie_access - Pin a cache so data can be accessed
- * @cookie: A data file cookie
- * @why: An indication of the circumstances of the access for tracing
- *
- * Attempt to pin the cache to prevent it from going away whilst we're
- * accessing data, returning true if successful. This works as follows:
- *
- * (1) If the cookie is not being cached (ie. FSCACHE_COOKIE_IS_CACHING is not
- * set), we return false to indicate access was not permitted.
- *
- * (2) If the cookie is being cached, we increment its n_accesses count and
- * then recheck the IS_CACHING flag, ending the access if it got cleared.
- *
- * (3) When we end the access, we decrement the cookie's n_accesses and wake
- *     up any waiters if it reaches 0.
- *
- * (4) Whilst the cookie is actively being cached, its n_accesses is kept
- * artificially incremented to prevent wakeups from happening.
- *
- * (5) When the cache is taken offline or if the cookie is culled, the flag is
- * cleared to prevent new accesses, the cookie's n_accesses is decremented
- * and we wait for it to become 0.
- *
- * The @why indicator is merely provided for tracing purposes.
- */
-bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
- enum fscache_access_trace why)
-{
- if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
- return false;
- __fscache_begin_cookie_access(cookie, why);
- if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags) ||
- !fscache_cache_is_live(cookie->volume->cache)) {
- fscache_end_cookie_access(cookie, fscache_access_unlive);
- return false;
- }
- return true;
-}
-
-static inline void wake_up_cookie_state(struct fscache_cookie *cookie)
-{
- /* Use a barrier to ensure that waiters see the state variable
- * change, as spin_unlock doesn't guarantee a barrier.
- *
- * See comments over wake_up_bit() and waitqueue_active().
- */
- smp_mb();
- wake_up_var(&cookie->state);
-}
-
-/*
- * Change the state a cookie is at and wake up anyone waiting for that. Impose
- * an ordering between the stuff stored in the cookie and the state member.
- * Paired with fscache_cookie_state().
- */
-static void __fscache_set_cookie_state(struct fscache_cookie *cookie,
- enum fscache_cookie_state state)
-{
- smp_store_release(&cookie->state, state);
-}
-
-static void fscache_set_cookie_state(struct fscache_cookie *cookie,
- enum fscache_cookie_state state)
-{
- spin_lock(&cookie->lock);
- __fscache_set_cookie_state(cookie, state);
- spin_unlock(&cookie->lock);
- wake_up_cookie_state(cookie);
-}
-
-/**
- * fscache_cookie_lookup_negative - Note negative lookup
- * @cookie: The cookie that was being looked up
- *
- * Note that some part of the metadata path in the cache doesn't exist and so
- * we can release any waiting readers in the certain knowledge that there's
- * nothing for them to actually read.
- *
- * This function uses no locking and must only be called from the state machine.
- */
-void fscache_cookie_lookup_negative(struct fscache_cookie *cookie)
-{
- set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
- fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_CREATING);
-}
-EXPORT_SYMBOL(fscache_cookie_lookup_negative);
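
For illustration, a backend's ->lookup_cookie() implementation might call this
when it finds nothing on disk, releasing waiting readers before it creates the
object; every my_* name below is hypothetical:

static bool my_lookup_cookie(struct fscache_cookie *cookie)
{
	struct my_object *obj = my_find_on_disk(cookie);

	if (!obj) {
		/* Negative lookup: nothing to read, so unblock readers
		 * now, then go and create the backing object. */
		fscache_cookie_lookup_negative(cookie);
		obj = my_create_on_disk(cookie);
		if (!obj)
			return false;
	}
	cookie->cache_priv = obj;
	return true;
}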
-
-/**
- * fscache_resume_after_invalidation - Allow I/O to resume after invalidation
- * @cookie: The cookie that was invalidated
- *
- * Tell fscache that invalidation is sufficiently complete that I/O can be
- * allowed again.
- */
-void fscache_resume_after_invalidation(struct fscache_cookie *cookie)
-{
- fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
-}
-EXPORT_SYMBOL(fscache_resume_after_invalidation);
-
-/**
- * fscache_caching_failed - Report that a failure stopped caching on a cookie
- * @cookie: The cookie that was affected
- *
- * Tell fscache that caching on a cookie needs to be stopped due to some sort
- * of failure.
- *
- * This function uses no locking and must only be called from the state machine.
- */
-void fscache_caching_failed(struct fscache_cookie *cookie)
-{
- clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
- fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_FAILED);
- trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
- fscache_cookie_failed);
-}
-EXPORT_SYMBOL(fscache_caching_failed);
-
-/*
- * Set the index key in a cookie. The cookie struct has space for a 16-byte
- * key plus length and hash, but if that's not big enough, cookie->key is
- * instead set to point to a heap buffer, padded to a multiple of 4 bytes,
- * that holds just the key data.
- */
-static int fscache_set_key(struct fscache_cookie *cookie,
- const void *index_key, size_t index_key_len)
-{
- void *buf;
- size_t buf_size;
-
- buf_size = round_up(index_key_len, sizeof(__le32));
-
- if (index_key_len > sizeof(cookie->inline_key)) {
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- cookie->key = buf;
- } else {
- buf = cookie->inline_key;
- }
-
- memcpy(buf, index_key, index_key_len);
- cookie->key_hash = fscache_hash(cookie->volume->key_hash,
- buf, buf_size);
- return 0;
-}
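
A worked example of the sizing rule above, with arbitrarily chosen key
lengths:

/*
 * index_key_len = 12: fits cookie->inline_key (16 bytes), so no
 *                     allocation is made and 12 bytes are hashed.
 * index_key_len = 18: exceeds 16, so kzalloc(round_up(18, 4)) yields
 *                     a 20-byte buffer; the two zeroed pad bytes are
 *                     included in the fscache_hash() calculation.
 */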
-
-static bool fscache_cookie_same(const struct fscache_cookie *a,
- const struct fscache_cookie *b)
-{
- const void *ka, *kb;
-
- if (a->key_hash != b->key_hash ||
- a->volume != b->volume ||
- a->key_len != b->key_len)
- return false;
-
- if (a->key_len <= sizeof(a->inline_key)) {
- ka = &a->inline_key;
- kb = &b->inline_key;
- } else {
- ka = a->key;
- kb = b->key;
- }
- return memcmp(ka, kb, a->key_len) == 0;
-}
-
-static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1);
-
-/*
- * Allocate a cookie.
- */
-static struct fscache_cookie *fscache_alloc_cookie(
- struct fscache_volume *volume,
- u8 advice,
- const void *index_key, size_t index_key_len,
- const void *aux_data, size_t aux_data_len,
- loff_t object_size)
-{
- struct fscache_cookie *cookie;
-
- /* allocate and initialise a cookie */
- cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
- if (!cookie)
- return NULL;
- fscache_stat(&fscache_n_cookies);
-
- cookie->volume = volume;
- cookie->advice = advice;
- cookie->key_len = index_key_len;
- cookie->aux_len = aux_data_len;
- cookie->object_size = object_size;
- if (object_size == 0)
- __set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
-
- if (fscache_set_key(cookie, index_key, index_key_len) < 0)
- goto nomem;
-
- if (cookie->aux_len <= sizeof(cookie->inline_aux)) {
- memcpy(cookie->inline_aux, aux_data, cookie->aux_len);
- } else {
- cookie->aux = kmemdup(aux_data, cookie->aux_len, GFP_KERNEL);
- if (!cookie->aux)
- goto nomem;
- }
-
- refcount_set(&cookie->ref, 1);
- cookie->debug_id = atomic_inc_return(&fscache_cookie_debug_id);
- spin_lock_init(&cookie->lock);
- INIT_LIST_HEAD(&cookie->commit_link);
- INIT_WORK(&cookie->work, fscache_cookie_worker);
- __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
-
- write_lock(&fscache_cookies_lock);
- list_add_tail(&cookie->proc_link, &fscache_cookies);
- write_unlock(&fscache_cookies_lock);
- fscache_see_cookie(cookie, fscache_cookie_new_acquire);
- return cookie;
-
-nomem:
- fscache_free_cookie(cookie);
- return NULL;
-}
-
-static inline bool fscache_cookie_is_dropped(struct fscache_cookie *cookie)
-{
- return READ_ONCE(cookie->state) == FSCACHE_COOKIE_STATE_DROPPED;
-}
-
-static void fscache_wait_on_collision(struct fscache_cookie *candidate,
- struct fscache_cookie *wait_for)
-{
- enum fscache_cookie_state *statep = &wait_for->state;
-
- wait_var_event_timeout(statep, fscache_cookie_is_dropped(wait_for),
- 20 * HZ);
- if (!fscache_cookie_is_dropped(wait_for)) {
-		pr_notice("Potential collision c=%08x old: c=%08x\n",
- candidate->debug_id, wait_for->debug_id);
- wait_var_event(statep, fscache_cookie_is_dropped(wait_for));
- }
-}
-
-/*
- * Attempt to insert the new cookie into the hash. If there's a collision, we
- * wait for the old cookie to complete if it's being relinquished, and report
- * an error otherwise.
- */
-static bool fscache_hash_cookie(struct fscache_cookie *candidate)
-{
- struct fscache_cookie *cursor, *wait_for = NULL;
- struct hlist_bl_head *h;
- struct hlist_bl_node *p;
- unsigned int bucket;
-
- bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
- h = &fscache_cookie_hash[bucket];
-
- hlist_bl_lock(h);
- hlist_bl_for_each_entry(cursor, p, h, hash_link) {
- if (fscache_cookie_same(candidate, cursor)) {
- if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cursor->flags))
- goto collision;
- wait_for = fscache_get_cookie(cursor,
- fscache_cookie_get_hash_collision);
- break;
- }
- }
-
- fscache_get_volume(candidate->volume, fscache_volume_get_cookie);
- atomic_inc(&candidate->volume->n_cookies);
- hlist_bl_add_head(&candidate->hash_link, h);
- set_bit(FSCACHE_COOKIE_IS_HASHED, &candidate->flags);
- hlist_bl_unlock(h);
-
- if (wait_for) {
- fscache_wait_on_collision(candidate, wait_for);
- fscache_put_cookie(wait_for, fscache_cookie_put_hash_collision);
- }
- return true;
-
-collision:
- trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref),
- fscache_cookie_collision);
- pr_err("Duplicate cookie detected\n");
- fscache_print_cookie(cursor, 'O');
- fscache_print_cookie(candidate, 'N');
- hlist_bl_unlock(h);
- return false;
-}
-
-/*
- * Request a cookie to represent a data storage object within a volume.
- *
- * We never let on to the netfs about errors. We may set a negative cookie
- * pointer, but that's okay.
- */
-struct fscache_cookie *__fscache_acquire_cookie(
- struct fscache_volume *volume,
- u8 advice,
- const void *index_key, size_t index_key_len,
- const void *aux_data, size_t aux_data_len,
- loff_t object_size)
-{
- struct fscache_cookie *cookie;
-
- _enter("V=%x", volume->debug_id);
-
- if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
- return NULL;
- if (!aux_data || !aux_data_len) {
- aux_data = NULL;
- aux_data_len = 0;
- }
-
- fscache_stat(&fscache_n_acquires);
-
- cookie = fscache_alloc_cookie(volume, advice,
- index_key, index_key_len,
- aux_data, aux_data_len,
- object_size);
- if (!cookie) {
- fscache_stat(&fscache_n_acquires_oom);
- return NULL;
- }
-
- if (!fscache_hash_cookie(cookie)) {
- fscache_see_cookie(cookie, fscache_cookie_discard);
- fscache_free_cookie(cookie);
- return NULL;
- }
-
- trace_fscache_acquire(cookie);
- fscache_stat(&fscache_n_acquires_ok);
- _leave(" = c=%08x", cookie->debug_id);
- return cookie;
-}
-EXPORT_SYMBOL(__fscache_acquire_cookie);
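
From the netfs side this is normally reached through the
fscache_acquire_cookie() wrapper in include/linux/fscache.h, which compiles
down to a stub when caching is disabled. A hedged sketch of the usual
lifecycle, with struct my_inode and its fid/vers fields as hypothetical
examples:

static void my_netfs_open(struct my_inode *mi, struct fscache_volume *volume)
{
	mi->cookie = fscache_acquire_cookie(volume, 0 /* advice */,
					    &mi->fid, sizeof(mi->fid),
					    &mi->vers, sizeof(mi->vers),
					    i_size_read(&mi->vfs_inode));
	fscache_use_cookie(mi->cookie, false);	/* pin for I/O */
}

static void my_netfs_release(struct my_inode *mi)
{
	loff_t size = i_size_read(&mi->vfs_inode);

	/* Update the coherency data, then hand the cookie back. */
	fscache_unuse_cookie(mi->cookie, &mi->vers, &size);
	fscache_relinquish_cookie(mi->cookie, false /* don't retire */);
	mi->cookie = NULL;
}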
-
-/*
- * Prepare a cache object to be written to.
- */
-static void fscache_prepare_to_write(struct fscache_cookie *cookie)
-{
- cookie->volume->cache->ops->prepare_to_write(cookie);
-}
-
-/*
- * Look up a cookie in the cache.
- */
-static void fscache_perform_lookup(struct fscache_cookie *cookie)
-{
- enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
- bool need_withdraw = false;
-
- _enter("");
-
- if (!cookie->volume->cache_priv) {
- fscache_create_volume(cookie->volume, true);
- if (!cookie->volume->cache_priv) {
- fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
- goto out;
- }
- }
-
- if (!cookie->volume->cache->ops->lookup_cookie(cookie)) {
- if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
- fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
- need_withdraw = true;
- _leave(" [fail]");
- goto out;
- }
-
- fscache_see_cookie(cookie, fscache_cookie_see_active);
- spin_lock(&cookie->lock);
- if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
- __fscache_set_cookie_state(cookie,
- FSCACHE_COOKIE_STATE_INVALIDATING);
- else
- __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
- spin_unlock(&cookie->lock);
- wake_up_cookie_state(cookie);
- trace = fscache_access_lookup_cookie_end;
-
-out:
- fscache_end_cookie_access(cookie, trace);
- if (need_withdraw)
- fscache_withdraw_cookie(cookie);
- fscache_end_volume_access(cookie->volume, cookie, trace);
-}
-
-/*
- * Begin the process of looking up a cookie. We offload the actual process to
- * a worker thread.
- */
-static bool fscache_begin_lookup(struct fscache_cookie *cookie, bool will_modify)
-{
- if (will_modify) {
- set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
- set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
- }
- if (!fscache_begin_volume_access(cookie->volume, cookie,
- fscache_access_lookup_cookie))
- return false;
-
- __fscache_begin_cookie_access(cookie, fscache_access_lookup_cookie);
- __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_LOOKING_UP);
- set_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
- set_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags);
- return true;
-}
-
-/*
- * Start using the cookie for I/O. This prevents the backing object from being
- * reaped by VM pressure.
- */
-void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
-{
- enum fscache_cookie_state state;
- bool queue = false;
- int n_active;
-
- _enter("c=%08x", cookie->debug_id);
-
- if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
- "Trying to use relinquished cookie\n"))
- return;
-
- spin_lock(&cookie->lock);
-
- n_active = atomic_inc_return(&cookie->n_active);
- trace_fscache_active(cookie->debug_id, refcount_read(&cookie->ref),
- n_active, atomic_read(&cookie->n_accesses),
- will_modify ?
- fscache_active_use_modify : fscache_active_use);
-
-again:
- state = fscache_cookie_state(cookie);
- switch (state) {
- case FSCACHE_COOKIE_STATE_QUIESCENT:
- queue = fscache_begin_lookup(cookie, will_modify);
- break;
-
- case FSCACHE_COOKIE_STATE_LOOKING_UP:
- case FSCACHE_COOKIE_STATE_CREATING:
- if (will_modify)
- set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
- break;
- case FSCACHE_COOKIE_STATE_ACTIVE:
- case FSCACHE_COOKIE_STATE_INVALIDATING:
- if (will_modify &&
- !test_and_set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags)) {
- set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
- queue = true;
- }
- /*
- * We could race with cookie_lru which may set LRU_DISCARD bit
- * but has yet to run the cookie state machine. If this happens
- * and another thread tries to use the cookie, clear LRU_DISCARD
- * so we don't end up withdrawing the cookie while in use.
- */
- if (test_and_clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags))
- fscache_see_cookie(cookie, fscache_cookie_see_lru_discard_clear);
- break;
-
- case FSCACHE_COOKIE_STATE_FAILED:
- case FSCACHE_COOKIE_STATE_WITHDRAWING:
- break;
-
- case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
- spin_unlock(&cookie->lock);
- wait_var_event(&cookie->state,
- fscache_cookie_state(cookie) !=
- FSCACHE_COOKIE_STATE_LRU_DISCARDING);
- spin_lock(&cookie->lock);
- goto again;
-
- case FSCACHE_COOKIE_STATE_DROPPED:
- case FSCACHE_COOKIE_STATE_RELINQUISHING:
- WARN(1, "Can't use cookie in state %u\n", state);
- break;
- }
-
- spin_unlock(&cookie->lock);
- if (queue)
- fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_use_cookie);
-
-static void fscache_unuse_cookie_locked(struct fscache_cookie *cookie)
-{
- clear_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags);
- if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
- return;
-
- cookie->unused_at = jiffies;
- spin_lock(&fscache_cookie_lru_lock);
- if (list_empty(&cookie->commit_link)) {
- fscache_get_cookie(cookie, fscache_cookie_get_lru);
- fscache_stat(&fscache_n_cookies_lru);
- }
- list_move_tail(&cookie->commit_link, &fscache_cookie_lru);
-
- spin_unlock(&fscache_cookie_lru_lock);
- timer_reduce(&fscache_cookie_lru_timer,
- jiffies + fscache_lru_cookie_timeout);
-}
-
-/*
- * Stop using the cookie for I/O.
- */
-void __fscache_unuse_cookie(struct fscache_cookie *cookie,
- const void *aux_data, const loff_t *object_size)
-{
- unsigned int debug_id = cookie->debug_id;
- unsigned int r = refcount_read(&cookie->ref);
- unsigned int a = atomic_read(&cookie->n_accesses);
- unsigned int c;
-
- if (aux_data || object_size)
- __fscache_update_cookie(cookie, aux_data, object_size);
-
- /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
- c = atomic_fetch_add_unless(&cookie->n_active, -1, 1);
- if (c != 1) {
- trace_fscache_active(debug_id, r, c - 1, a, fscache_active_unuse);
- return;
- }
-
- spin_lock(&cookie->lock);
- r = refcount_read(&cookie->ref);
- a = atomic_read(&cookie->n_accesses);
- c = atomic_dec_return(&cookie->n_active);
- trace_fscache_active(debug_id, r, c, a, fscache_active_unuse);
- if (c == 0)
- fscache_unuse_cookie_locked(cookie);
- spin_unlock(&cookie->lock);
-}
-EXPORT_SYMBOL(__fscache_unuse_cookie);
-
-/*
- * Perform work upon the cookie, such as committing its cache state,
- * relinquishing it or withdrawing the backing cache. We're protected from the
- * cache going away under us as object withdrawal must come through this
- * non-reentrant work item.
- */
-static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
-{
- enum fscache_cookie_state state;
- bool wake = false;
-
- _enter("c=%x", cookie->debug_id);
-
-again:
- spin_lock(&cookie->lock);
-again_locked:
- state = cookie->state;
- switch (state) {
- case FSCACHE_COOKIE_STATE_QUIESCENT:
-		/* A cookie in the QUIESCENT state is moved to the LOOKING_UP
-		 * state by fscache_use_cookie().
-		 */
-
- if (atomic_read(&cookie->n_accesses) == 0 &&
- test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
- __fscache_set_cookie_state(cookie,
- FSCACHE_COOKIE_STATE_RELINQUISHING);
- wake = true;
- goto again_locked;
- }
- break;
-
- case FSCACHE_COOKIE_STATE_LOOKING_UP:
- spin_unlock(&cookie->lock);
- fscache_init_access_gate(cookie);
- fscache_perform_lookup(cookie);
- goto again;
-
- case FSCACHE_COOKIE_STATE_INVALIDATING:
- spin_unlock(&cookie->lock);
- fscache_perform_invalidation(cookie);
- goto again;
-
- case FSCACHE_COOKIE_STATE_ACTIVE:
- if (test_and_clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags)) {
- spin_unlock(&cookie->lock);
- fscache_prepare_to_write(cookie);
- spin_lock(&cookie->lock);
- }
- if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
- __fscache_set_cookie_state(cookie,
- FSCACHE_COOKIE_STATE_LRU_DISCARDING);
- wake = true;
- goto again_locked;
- }
- fallthrough;
-
- case FSCACHE_COOKIE_STATE_FAILED:
- if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
- fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
-
- if (atomic_read(&cookie->n_accesses) != 0)
- break;
- if (test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
- __fscache_set_cookie_state(cookie,
- FSCACHE_COOKIE_STATE_RELINQUISHING);
- wake = true;
- goto again_locked;
- }
- if (test_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags)) {
- __fscache_set_cookie_state(cookie,
- FSCACHE_COOKIE_STATE_WITHDRAWING);
- wake = true;
- goto again_locked;
- }
- break;
-
- case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
- case FSCACHE_COOKIE_STATE_RELINQUISHING:
- case FSCACHE_COOKIE_STATE_WITHDRAWING:
- if (cookie->cache_priv) {
- spin_unlock(&cookie->lock);
- cookie->volume->cache->ops->withdraw_cookie(cookie);
- spin_lock(&cookie->lock);
- }
-
- if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
- fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
-
- switch (state) {
- case FSCACHE_COOKIE_STATE_RELINQUISHING:
- fscache_see_cookie(cookie, fscache_cookie_see_relinquish);
- fscache_unhash_cookie(cookie);
- __fscache_set_cookie_state(cookie,
- FSCACHE_COOKIE_STATE_DROPPED);
- wake = true;
- goto out;
- case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
- fscache_see_cookie(cookie, fscache_cookie_see_lru_discard);
- break;
- case FSCACHE_COOKIE_STATE_WITHDRAWING:
- fscache_see_cookie(cookie, fscache_cookie_see_withdraw);
- break;
- default:
- BUG();
- }
-
- clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
- clear_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
- clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
- clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
- set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
- __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
- wake = true;
- goto again_locked;
-
- case FSCACHE_COOKIE_STATE_DROPPED:
- break;
-
- default:
- WARN_ONCE(1, "Cookie %x in unexpected state %u\n",
- cookie->debug_id, state);
- break;
- }
-
-out:
- spin_unlock(&cookie->lock);
- if (wake)
- wake_up_cookie_state(cookie);
- _leave("");
-}
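
As a rough summary of the transitions the function above drives
(single-letter forms from the '-LCAIFUWRD' table earlier in this file):

/*
 *  QUIESCENT(-) --use--> LOOKING_UP(L) --> CREATING(C) --> ACTIVE(A)
 *  ACTIVE(A) <--> INVALIDATING(I); a caching failure --> FAILED(F)
 *  ACTIVE/FAILED --> LRU_DISCARDING(U) or WITHDRAWING(W) --> QUIESCENT(-)
 *  QUIESCENT/ACTIVE/FAILED --> RELINQUISHING(R) --> DROPPED(D)
 */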
-
-static void fscache_cookie_worker(struct work_struct *work)
-{
- struct fscache_cookie *cookie = container_of(work, struct fscache_cookie, work);
-
- fscache_see_cookie(cookie, fscache_cookie_see_work);
- fscache_cookie_state_machine(cookie);
- fscache_put_cookie(cookie, fscache_cookie_put_work);
-}
-
-/*
- * Wait for the object to become inactive. The cookie's work item will be
- * scheduled when someone transitions n_accesses to 0 - but if someone's
- * already done that, schedule it anyway.
- */
-static void __fscache_withdraw_cookie(struct fscache_cookie *cookie)
-{
- int n_accesses;
- bool unpinned;
-
- unpinned = test_and_clear_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
-
- /* Need to read the access count after unpinning */
- n_accesses = atomic_read(&cookie->n_accesses);
- if (unpinned)
- trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
- n_accesses, fscache_access_cache_unpin);
- if (n_accesses == 0)
- fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
-}
-
-static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
-{
- fscache_see_cookie(cookie, fscache_cookie_see_lru_do_one);
-
- spin_lock(&cookie->lock);
- if (cookie->state != FSCACHE_COOKIE_STATE_ACTIVE ||
- time_before(jiffies, cookie->unused_at + fscache_lru_cookie_timeout) ||
- atomic_read(&cookie->n_active) > 0) {
- spin_unlock(&cookie->lock);
- fscache_stat(&fscache_n_cookies_lru_removed);
- } else {
- set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
- spin_unlock(&cookie->lock);
- fscache_stat(&fscache_n_cookies_lru_expired);
- _debug("lru c=%x", cookie->debug_id);
- __fscache_withdraw_cookie(cookie);
- }
-
- fscache_put_cookie(cookie, fscache_cookie_put_lru);
-}
-
-static void fscache_cookie_lru_worker(struct work_struct *work)
-{
- struct fscache_cookie *cookie;
- unsigned long unused_at;
-
- spin_lock(&fscache_cookie_lru_lock);
-
- while (!list_empty(&fscache_cookie_lru)) {
- cookie = list_first_entry(&fscache_cookie_lru,
- struct fscache_cookie, commit_link);
- unused_at = cookie->unused_at + fscache_lru_cookie_timeout;
- if (time_before(jiffies, unused_at)) {
- timer_reduce(&fscache_cookie_lru_timer, unused_at);
- break;
- }
-
- list_del_init(&cookie->commit_link);
- fscache_stat_d(&fscache_n_cookies_lru);
- spin_unlock(&fscache_cookie_lru_lock);
- fscache_cookie_lru_do_one(cookie);
- spin_lock(&fscache_cookie_lru_lock);
- }
-
- spin_unlock(&fscache_cookie_lru_lock);
-}
-
-static void fscache_cookie_lru_timed_out(struct timer_list *timer)
-{
- queue_work(fscache_wq, &fscache_cookie_lru_work);
-}
-
-static void fscache_cookie_drop_from_lru(struct fscache_cookie *cookie)
-{
- bool need_put = false;
-
- if (!list_empty(&cookie->commit_link)) {
- spin_lock(&fscache_cookie_lru_lock);
- if (!list_empty(&cookie->commit_link)) {
- list_del_init(&cookie->commit_link);
- fscache_stat_d(&fscache_n_cookies_lru);
- fscache_stat(&fscache_n_cookies_lru_dropped);
- need_put = true;
- }
- spin_unlock(&fscache_cookie_lru_lock);
- if (need_put)
- fscache_put_cookie(cookie, fscache_cookie_put_lru);
- }
-}
-
-/*
- * Remove a cookie from the hash table.
- */
-static void fscache_unhash_cookie(struct fscache_cookie *cookie)
-{
- struct hlist_bl_head *h;
- unsigned int bucket;
-
- bucket = cookie->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
- h = &fscache_cookie_hash[bucket];
-
- hlist_bl_lock(h);
- hlist_bl_del(&cookie->hash_link);
- clear_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags);
- hlist_bl_unlock(h);
- fscache_stat(&fscache_n_relinquishes_dropped);
-}
-
-static void fscache_drop_withdraw_cookie(struct fscache_cookie *cookie)
-{
- fscache_cookie_drop_from_lru(cookie);
- __fscache_withdraw_cookie(cookie);
-}
-
-/**
- * fscache_withdraw_cookie - Mark a cookie for withdrawal
- * @cookie: The cookie to be withdrawn.
- *
- * Allow the cache backend to withdraw the backing for a cookie for its own
- * reasons, even if that cookie is in active use.
- */
-void fscache_withdraw_cookie(struct fscache_cookie *cookie)
-{
- set_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
- fscache_drop_withdraw_cookie(cookie);
-}
-EXPORT_SYMBOL(fscache_withdraw_cookie);
-
-/*
- * Allow the netfs to release a cookie back to the cache.
- * - the object will be marked as recyclable on disk if retire is true
- */
-void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
-{
- fscache_stat(&fscache_n_relinquishes);
- if (retire)
- fscache_stat(&fscache_n_relinquishes_retire);
-
- _enter("c=%08x{%d},%d",
- cookie->debug_id, atomic_read(&cookie->n_active), retire);
-
- if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
- "Cookie c=%x already relinquished\n", cookie->debug_id))
- return;
-
- if (retire)
- set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
- trace_fscache_relinquish(cookie, retire);
-
- ASSERTCMP(atomic_read(&cookie->n_active), ==, 0);
- ASSERTCMP(atomic_read(&cookie->volume->n_cookies), >, 0);
- atomic_dec(&cookie->volume->n_cookies);
-
- if (test_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags)) {
- set_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags);
- fscache_drop_withdraw_cookie(cookie);
- } else {
- fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_DROPPED);
- fscache_unhash_cookie(cookie);
- }
- fscache_put_cookie(cookie, fscache_cookie_put_relinquish);
-}
-EXPORT_SYMBOL(__fscache_relinquish_cookie);
-
-/*
- * Drop a reference to a cookie.
- */
-void fscache_put_cookie(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where)
-{
- struct fscache_volume *volume = cookie->volume;
- unsigned int cookie_debug_id = cookie->debug_id;
- bool zero;
- int ref;
-
- zero = __refcount_dec_and_test(&cookie->ref, &ref);
- trace_fscache_cookie(cookie_debug_id, ref - 1, where);
- if (zero) {
- fscache_free_cookie(cookie);
- fscache_put_volume(volume, fscache_volume_put_cookie);
- }
-}
-EXPORT_SYMBOL(fscache_put_cookie);
-
-/*
- * Get a reference to a cookie.
- */
-struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where)
-{
- int ref;
-
- __refcount_inc(&cookie->ref, &ref);
- trace_fscache_cookie(cookie->debug_id, ref + 1, where);
- return cookie;
-}
-EXPORT_SYMBOL(fscache_get_cookie);
-
-/*
- * Ask the cache to effect invalidation of a cookie.
- */
-static void fscache_perform_invalidation(struct fscache_cookie *cookie)
-{
- if (!cookie->volume->cache->ops->invalidate_cookie(cookie))
- fscache_caching_failed(cookie);
- fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
-}
-
-/*
- * Invalidate an object.
- */
-void __fscache_invalidate(struct fscache_cookie *cookie,
- const void *aux_data, loff_t new_size,
- unsigned int flags)
-{
- bool is_caching;
-
- _enter("c=%x", cookie->debug_id);
-
- fscache_stat(&fscache_n_invalidates);
-
- if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
- "Trying to invalidate relinquished cookie\n"))
- return;
-
- if ((flags & FSCACHE_INVAL_DIO_WRITE) &&
- test_and_set_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
- return;
-
- spin_lock(&cookie->lock);
- set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
- fscache_update_aux(cookie, aux_data, &new_size);
- cookie->inval_counter++;
- trace_fscache_invalidate(cookie, new_size);
-
- switch (cookie->state) {
- case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
- default:
- spin_unlock(&cookie->lock);
- _leave(" [no %u]", cookie->state);
- return;
-
- case FSCACHE_COOKIE_STATE_LOOKING_UP:
- if (!test_and_set_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
- __fscache_begin_cookie_access(cookie, fscache_access_invalidate_cookie);
- fallthrough;
- case FSCACHE_COOKIE_STATE_CREATING:
- spin_unlock(&cookie->lock);
- _leave(" [look %x]", cookie->inval_counter);
- return;
-
- case FSCACHE_COOKIE_STATE_ACTIVE:
- is_caching = fscache_begin_cookie_access(
- cookie, fscache_access_invalidate_cookie);
- if (is_caching)
- __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_INVALIDATING);
- spin_unlock(&cookie->lock);
- wake_up_cookie_state(cookie);
-
- if (is_caching)
- fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
- _leave(" [inv]");
- return;
- }
-}
-EXPORT_SYMBOL(__fscache_invalidate);
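
A netfs would normally reach this through the fscache_invalidate() wrapper in
include/linux/fscache.h when it learns the server copy changed; mi and its
vers field are hypothetical, as in the earlier sketch:

static void my_netfs_data_changed(struct my_inode *mi, loff_t new_size)
{
	/* Bump the coherency data and discard the cached content. */
	fscache_invalidate(mi->cookie, &mi->vers, new_size, 0);
}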
-
-#ifdef CONFIG_PROC_FS
-/*
- * Generate a list of extant cookies in /proc/fs/fscache/cookies
- */
-static int fscache_cookies_seq_show(struct seq_file *m, void *v)
-{
- struct fscache_cookie *cookie;
- unsigned int keylen = 0, auxlen = 0;
- u8 *p;
-
- if (v == &fscache_cookies) {
- seq_puts(m,
- "COOKIE VOLUME REF ACT ACC S FL DEF \n"
- "======== ======== === === === = == ================\n"
- );
- return 0;
- }
-
- cookie = list_entry(v, struct fscache_cookie, proc_link);
-
- seq_printf(m,
- "%08x %08x %3d %3d %3d %c %02lx",
- cookie->debug_id,
- cookie->volume->debug_id,
- refcount_read(&cookie->ref),
- atomic_read(&cookie->n_active),
- atomic_read(&cookie->n_accesses),
- fscache_cookie_states[cookie->state],
- cookie->flags);
-
- keylen = cookie->key_len;
- auxlen = cookie->aux_len;
-
- if (keylen > 0 || auxlen > 0) {
- seq_puts(m, " ");
- p = keylen <= sizeof(cookie->inline_key) ?
- cookie->inline_key : cookie->key;
- for (; keylen > 0; keylen--)
- seq_printf(m, "%02x", *p++);
- if (auxlen > 0) {
- seq_puts(m, ", ");
- p = auxlen <= sizeof(cookie->inline_aux) ?
- cookie->inline_aux : cookie->aux;
- for (; auxlen > 0; auxlen--)
- seq_printf(m, "%02x", *p++);
- }
- }
-
- seq_puts(m, "\n");
- return 0;
-}
-
-static void *fscache_cookies_seq_start(struct seq_file *m, loff_t *_pos)
- __acquires(fscache_cookies_lock)
-{
- read_lock(&fscache_cookies_lock);
- return seq_list_start_head(&fscache_cookies, *_pos);
-}
-
-static void *fscache_cookies_seq_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- return seq_list_next(v, &fscache_cookies, _pos);
-}
-
-static void fscache_cookies_seq_stop(struct seq_file *m, void *v)
-	__releases(fscache_cookies_lock)
-{
- read_unlock(&fscache_cookies_lock);
-}
-
-const struct seq_operations fscache_cookies_seq_ops = {
- .start = fscache_cookies_seq_start,
- .next = fscache_cookies_seq_next,
- .stop = fscache_cookies_seq_stop,
- .show = fscache_cookies_seq_show,
-};
-#endif
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Internal definitions for FS-Cache
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) "FS-Cache: " fmt
-
-#include <linux/slab.h>
-#include <linux/fscache-cache.h>
-#include <trace/events/fscache.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-
-/*
- * cache.c
- */
-#ifdef CONFIG_PROC_FS
-extern const struct seq_operations fscache_caches_seq_ops;
-#endif
-bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
-void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
-struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
-void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
-
-static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
-{
- return smp_load_acquire(&cache->state);
-}
-
-static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
-{
- return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
-}
-
-static inline void fscache_set_cache_state(struct fscache_cache *cache,
- enum fscache_cache_state new_state)
-{
- smp_store_release(&cache->state, new_state);
-}
-
-static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
- enum fscache_cache_state old_state,
- enum fscache_cache_state new_state)
-{
- return try_cmpxchg_release(&cache->state, &old_state, new_state);
-}
-
-/*
- * cookie.c
- */
-extern struct kmem_cache *fscache_cookie_jar;
-#ifdef CONFIG_PROC_FS
-extern const struct seq_operations fscache_cookies_seq_ops;
-#endif
-extern struct timer_list fscache_cookie_lru_timer;
-
-extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
-extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
- enum fscache_access_trace why);
-
-static inline void fscache_see_cookie(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where)
-{
- trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
- where);
-}
-
-/*
- * main.c
- */
-extern unsigned fscache_debug;
-
-extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
-
-/*
- * proc.c
- */
-#ifdef CONFIG_PROC_FS
-extern int __init fscache_proc_init(void);
-extern void fscache_proc_cleanup(void);
-#else
-#define fscache_proc_init() (0)
-#define fscache_proc_cleanup() do {} while (0)
-#endif
-
-/*
- * stats.c
- */
-#ifdef CONFIG_FSCACHE_STATS
-extern atomic_t fscache_n_volumes;
-extern atomic_t fscache_n_volumes_collision;
-extern atomic_t fscache_n_volumes_nomem;
-extern atomic_t fscache_n_cookies;
-extern atomic_t fscache_n_cookies_lru;
-extern atomic_t fscache_n_cookies_lru_expired;
-extern atomic_t fscache_n_cookies_lru_removed;
-extern atomic_t fscache_n_cookies_lru_dropped;
-
-extern atomic_t fscache_n_acquires;
-extern atomic_t fscache_n_acquires_ok;
-extern atomic_t fscache_n_acquires_oom;
-
-extern atomic_t fscache_n_invalidates;
-
-extern atomic_t fscache_n_relinquishes;
-extern atomic_t fscache_n_relinquishes_retire;
-extern atomic_t fscache_n_relinquishes_dropped;
-
-extern atomic_t fscache_n_resizes;
-extern atomic_t fscache_n_resizes_null;
-
-static inline void fscache_stat(atomic_t *stat)
-{
- atomic_inc(stat);
-}
-
-static inline void fscache_stat_d(atomic_t *stat)
-{
- atomic_dec(stat);
-}
-
-#define __fscache_stat(stat) (stat)
-
-int fscache_stats_show(struct seq_file *m, void *v);
-#else
-
-#define __fscache_stat(stat) (NULL)
-#define fscache_stat(stat) do {} while (0)
-#define fscache_stat_d(stat) do {} while (0)
-#endif
-
-/*
- * volume.c
- */
-#ifdef CONFIG_PROC_FS
-extern const struct seq_operations fscache_volumes_seq_ops;
-#endif
-
-struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
- enum fscache_volume_trace where);
-void fscache_put_volume(struct fscache_volume *volume,
- enum fscache_volume_trace where);
-bool fscache_begin_volume_access(struct fscache_volume *volume,
- struct fscache_cookie *cookie,
- enum fscache_access_trace why);
-void fscache_create_volume(struct fscache_volume *volume, bool wait);
-
-/*****************************************************************************/
-/*
- * debug tracing
- */
-#define dbgprintk(FMT, ...) \
- printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
-
-#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
-
-#define kjournal(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-
-#ifdef __KDEBUG
-#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
-#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
-#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
-
-#elif defined(CONFIG_FSCACHE_DEBUG)
-#define _enter(FMT, ...) \
-do { \
- if (__do_kdebug(ENTER)) \
- kenter(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#define _leave(FMT, ...) \
-do { \
- if (__do_kdebug(LEAVE)) \
- kleave(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#define _debug(FMT, ...) \
-do { \
- if (__do_kdebug(DEBUG)) \
- kdebug(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#else
-#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-#endif
-
-/*
- * determine whether a particular optional debugging point should be logged
- * - we need to go through three steps to persuade cpp to correctly join the
- * shorthand in FSCACHE_DEBUG_LEVEL with its prefix
- */
-#define ____do_kdebug(LEVEL, POINT) \
- unlikely((fscache_debug & \
- (FSCACHE_POINT_##POINT << (FSCACHE_DEBUG_ ## LEVEL * 3))))
-#define ___do_kdebug(LEVEL, POINT) \
- ____do_kdebug(LEVEL, POINT)
-#define __do_kdebug(POINT) \
- ___do_kdebug(FSCACHE_DEBUG_LEVEL, POINT)
-
-#define FSCACHE_DEBUG_CACHE 0
-#define FSCACHE_DEBUG_COOKIE 1
-#define FSCACHE_DEBUG_OBJECT 2
-#define FSCACHE_DEBUG_OPERATION 3
-
-#define FSCACHE_POINT_ENTER 1
-#define FSCACHE_POINT_LEAVE 2
-#define FSCACHE_POINT_DEBUG 4
-
-#ifndef FSCACHE_DEBUG_LEVEL
-#define FSCACHE_DEBUG_LEVEL CACHE
-#endif
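-
-/* Worked example (a sketch): each debug level owns a 3-bit group in the
- * fscache_debug module parameter. With FSCACHE_DEBUG_LEVEL set to COOKIE
- * (level 1), _enter() fires when bit FSCACHE_POINT_ENTER << (1 * 3) = 0x8
- * is set, so cookie-level entry tracing can be enabled with:
- *
- *    echo 8 >/sys/module/fscache/parameters/debug
- */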
-
-/*
- * assertions
- */
-#if 1 /* defined(__KDEBUGALL) */
-
-#define ASSERT(X) \
-do { \
- if (unlikely(!(X))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- BUG(); \
- } \
-} while (0)
-
-#define ASSERTCMP(X, OP, Y) \
-do { \
- if (unlikely(!((X) OP (Y)))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- pr_err("%lx " #OP " %lx is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
- BUG(); \
- } \
-} while (0)
-
-#define ASSERTIF(C, X) \
-do { \
- if (unlikely((C) && !(X))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- BUG(); \
- } \
-} while (0)
-
-#define ASSERTIFCMP(C, X, OP, Y) \
-do { \
- if (unlikely((C) && !((X) OP (Y)))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- pr_err("%lx " #OP " %lx is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
- BUG(); \
- } \
-} while (0)
-
-#else
-
-#define ASSERT(X) do {} while (0)
-#define ASSERTCMP(X, OP, Y) do {} while (0)
-#define ASSERTIF(C, X) do {} while (0)
-#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
-
-#endif /* assert or not */
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Cache data I/O routines
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-#define FSCACHE_DEBUG_LEVEL OPERATION
-#include <linux/fscache-cache.h>
-#include <linux/uio.h>
-#include <linux/bvec.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-/**
- * fscache_wait_for_operation - Wait for an object to become accessible
- * @cres: The cache resources for the operation being performed
- * @want_state: The minimum state the object must be at
- *
- * See if the target cache object is at the specified minimum state of
- * accessibility yet, and if not, wait for it.
- */
-bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
- enum fscache_want_state want_state)
-{
- struct fscache_cookie *cookie = fscache_cres_cookie(cres);
- enum fscache_cookie_state state;
-
-again:
- if (!fscache_cache_is_live(cookie->volume->cache)) {
- _leave(" [broken]");
- return false;
- }
-
- state = fscache_cookie_state(cookie);
- _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
-
- switch (state) {
- case FSCACHE_COOKIE_STATE_CREATING:
- case FSCACHE_COOKIE_STATE_INVALIDATING:
- if (want_state == FSCACHE_WANT_PARAMS)
- goto ready; /* There can be no content */
- fallthrough;
- case FSCACHE_COOKIE_STATE_LOOKING_UP:
- case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
- wait_var_event(&cookie->state,
- fscache_cookie_state(cookie) != state);
- goto again;
-
- case FSCACHE_COOKIE_STATE_ACTIVE:
- goto ready;
- case FSCACHE_COOKIE_STATE_DROPPED:
- case FSCACHE_COOKIE_STATE_RELINQUISHING:
- default:
- _leave(" [not live]");
- return false;
- }
-
-ready:
- if (!cres->cache_priv2)
- return cookie->volume->cache->ops->begin_operation(cres, want_state);
- return true;
-}
-EXPORT_SYMBOL(fscache_wait_for_operation);
-
-/*
- * Begin an I/O operation on the cache, waiting till we reach the right state.
- *
- * Attaches the resources required to the operation resources record.
- */
-static int fscache_begin_operation(struct netfs_cache_resources *cres,
- struct fscache_cookie *cookie,
- enum fscache_want_state want_state,
- enum fscache_access_trace why)
-{
- enum fscache_cookie_state state;
- long timeo;
- bool once_only = false;
-
- cres->ops = NULL;
- cres->cache_priv = cookie;
- cres->cache_priv2 = NULL;
- cres->debug_id = cookie->debug_id;
- cres->inval_counter = cookie->inval_counter;
-
- if (!fscache_begin_cookie_access(cookie, why))
- return -ENOBUFS;
-
-again:
- spin_lock(&cookie->lock);
-
- state = fscache_cookie_state(cookie);
- _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
-
- switch (state) {
- case FSCACHE_COOKIE_STATE_LOOKING_UP:
- case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
- case FSCACHE_COOKIE_STATE_INVALIDATING:
- goto wait_for_file_wrangling;
- case FSCACHE_COOKIE_STATE_CREATING:
- if (want_state == FSCACHE_WANT_PARAMS)
- goto ready; /* There can be no content */
- goto wait_for_file_wrangling;
- case FSCACHE_COOKIE_STATE_ACTIVE:
- goto ready;
- case FSCACHE_COOKIE_STATE_DROPPED:
- case FSCACHE_COOKIE_STATE_RELINQUISHING:
- WARN(1, "Can't use cookie in state %u\n", cookie->state);
- goto not_live;
- default:
- goto not_live;
- }
-
-ready:
- spin_unlock(&cookie->lock);
- if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
- goto failed;
- return 0;
-
-wait_for_file_wrangling:
- spin_unlock(&cookie->lock);
- trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
- atomic_read(&cookie->n_accesses),
- fscache_access_io_wait);
- timeo = wait_var_event_timeout(&cookie->state,
- fscache_cookie_state(cookie) != state, 20 * HZ);
- if (timeo <= 1 && !once_only) {
- pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u\n",
- __func__, fscache_cookie_state(cookie), state);
- fscache_print_cookie(cookie, 'O');
- once_only = true;
- }
- goto again;
-
-not_live:
- spin_unlock(&cookie->lock);
-failed:
- cres->cache_priv = NULL;
- cres->ops = NULL;
- fscache_end_cookie_access(cookie, fscache_access_io_not_live);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-
-int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
- struct fscache_cookie *cookie)
-{
- return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
- fscache_access_io_read);
-}
-EXPORT_SYMBOL(__fscache_begin_read_operation);
-
-int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
- struct fscache_cookie *cookie)
-{
- return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
- fscache_access_io_write);
-}
-EXPORT_SYMBOL(__fscache_begin_write_operation);
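-
-/* Example (a sketch, error handling elided; it mirrors the fallback read
- * helpers some network filesystems carry): a read through these entry
- * points, with @cookie, @mapping, @pos and @len assumed to come from the
- * caller:
- *
- *    struct netfs_cache_resources cres = {};
- *    struct iov_iter iter;
- *    int ret;
- *
- *    ret = fscache_begin_read_operation(&cres, cookie);
- *    if (ret < 0)
- *        return ret;
- *    iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, len);
- *    ret = fscache_read(&cres, pos, &iter, NETFS_READ_HOLE_FAIL, NULL, NULL);
- *    fscache_end_operation(&cres);
- */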
-
-/**
- * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback
- * @mapping: The mapping the folio belongs to.
- * @folio: The folio being dirtied.
- * @cookie: The cookie referring to the cache object
- *
- * Set the dirty flag on a folio and pin an in-use cache object in memory
- * so that writeback can later write to it. This is intended
- * to be called from the filesystem's ->dirty_folio() method.
- *
- * Return: true if the dirty flag was set on the folio, false otherwise.
- */
-bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
- struct fscache_cookie *cookie)
-{
- struct inode *inode = mapping->host;
- bool need_use = false;
-
- _enter("");
-
- if (!filemap_dirty_folio(mapping, folio))
- return false;
- if (!fscache_cookie_valid(cookie))
- return true;
-
- if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
- spin_lock(&inode->i_lock);
- if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
- inode->i_state |= I_PINNING_FSCACHE_WB;
- need_use = true;
- }
- spin_unlock(&inode->i_lock);
-
- if (need_use)
- fscache_use_cookie(cookie, true);
- }
- return true;
-}
-EXPORT_SYMBOL(fscache_dirty_folio);
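-
-/* Example wiring (a sketch; myfs_dirty_folio() and myfs_inode_cookie() are
- * illustrative names for the netfs's own helpers):
- *
- *    static bool myfs_dirty_folio(struct address_space *mapping,
- *                                 struct folio *folio)
- *    {
- *        return fscache_dirty_folio(mapping, folio,
- *                                   myfs_inode_cookie(mapping->host));
- *    }
- *
- * The pin taken here is dropped again from the netfs's ->write_inode() once
- * I_PINNING_FSCACHE_WB is seen (see fscache_unpin_writeback()).
- */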
-
-struct fscache_write_request {
- struct netfs_cache_resources cache_resources;
- struct address_space *mapping;
- loff_t start;
- size_t len;
- bool set_bits;
- netfs_io_terminated_t term_func;
- void *term_func_priv;
-};
-
-void __fscache_clear_page_bits(struct address_space *mapping,
- loff_t start, size_t len)
-{
- pgoff_t first = start / PAGE_SIZE;
- pgoff_t last = (start + len - 1) / PAGE_SIZE;
- struct page *page;
-
- if (len) {
- XA_STATE(xas, &mapping->i_pages, first);
-
- rcu_read_lock();
- xas_for_each(&xas, page, last) {
- end_page_fscache(page);
- }
- rcu_read_unlock();
- }
-}
-EXPORT_SYMBOL(__fscache_clear_page_bits);
-
-/*
- * Deal with the completion of writing the data to the cache.
- */
-static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
- bool was_async)
-{
- struct fscache_write_request *wreq = priv;
-
- fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
- wreq->set_bits);
-
- if (wreq->term_func)
- wreq->term_func(wreq->term_func_priv, transferred_or_error,
- was_async);
- fscache_end_operation(&wreq->cache_resources);
- kfree(wreq);
-}
-
-void __fscache_write_to_cache(struct fscache_cookie *cookie,
- struct address_space *mapping,
- loff_t start, size_t len, loff_t i_size,
- netfs_io_terminated_t term_func,
- void *term_func_priv,
- bool cond)
-{
- struct fscache_write_request *wreq;
- struct netfs_cache_resources *cres;
- struct iov_iter iter;
- int ret = -ENOBUFS;
-
- if (len == 0)
- goto abandon;
-
- _enter("%llx,%zx", start, len);
-
- wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
- if (!wreq)
- goto abandon;
- wreq->mapping = mapping;
- wreq->start = start;
- wreq->len = len;
- wreq->set_bits = cond;
- wreq->term_func = term_func;
- wreq->term_func_priv = term_func_priv;
-
- cres = &wreq->cache_resources;
- if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
- fscache_access_io_write) < 0)
- goto abandon_free;
-
- ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);
- if (ret < 0)
- goto abandon_end;
-
- /* TODO: Consider clearing page bits now for space the write isn't
- * covering. This is more complicated than it appears when THPs are
- * taken into account.
- */
-
- iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
- fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
- return;
-
-abandon_end:
- return fscache_wreq_done(wreq, ret, false);
-abandon_free:
- kfree(wreq);
-abandon:
- fscache_clear_page_bits(mapping, start, len, cond);
- if (term_func)
- term_func(term_func_priv, ret, false);
-}
-EXPORT_SYMBOL(__fscache_write_to_cache);
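-
-/* Example (a sketch): a netfs normally reaches this through the
- * fscache_write_to_cache() wrapper after marking the pages being written
- * with the fscache bit, passing @caching to say whether those bits need
- * clearing afterwards:
- *
- *    fscache_write_to_cache(cookie, mapping, start, len, i_size,
- *                           myfs_write_done, inode, caching);
- *
- * where myfs_write_done stands in for the netfs's own
- * netfs_io_terminated_t callback (a NULL @term_func is also acceptable).
- */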
-
-/*
- * Change the size of a backing object.
- */
-void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
-{
- struct netfs_cache_resources cres;
-
- trace_fscache_resize(cookie, new_size);
- if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
- fscache_access_io_resize) == 0) {
- fscache_stat(&fscache_n_resizes);
- set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
-
- /* We cannot defer a resize as we need to do it inside the
- * netfs's inode lock so that we're serialised with respect to
- * writes.
- */
- cookie->volume->cache->ops->resize_cookie(&cres, new_size);
- fscache_end_operation(&cres);
- } else {
- fscache_stat(&fscache_n_resizes_null);
- }
-}
-EXPORT_SYMBOL(__fscache_resize_cookie);
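-
-/* Example (a sketch): reached via the fscache_resize_cookie() wrapper from
- * a netfs's ->setattr(), under the inode lock for the reason given above:
- *
- *    if (attr->ia_valid & ATTR_SIZE)
- *        fscache_resize_cookie(cookie, attr->ia_size);
- */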
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* General filesystem local caching manager
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/module.h>
-#include <linux/init.h>
-#define CREATE_TRACE_POINTS
-#include "internal.h"
-
-MODULE_DESCRIPTION("FS Cache Manager");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
-unsigned fscache_debug;
-module_param_named(debug, fscache_debug, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(fscache_debug,
- "FS-Cache debugging mask");
-
-EXPORT_TRACEPOINT_SYMBOL(fscache_access_cache);
-EXPORT_TRACEPOINT_SYMBOL(fscache_access_volume);
-EXPORT_TRACEPOINT_SYMBOL(fscache_access);
-
-struct workqueue_struct *fscache_wq;
-EXPORT_SYMBOL(fscache_wq);
-
-/*
- * Mixing scores (in bits) for (7,20):
- * Input delta: 1-bit 2-bit
- * 1 round: 330.3 9201.6
- * 2 rounds: 1246.4 25475.4
- * 3 rounds: 1907.1 31295.1
- * 4 rounds: 2042.3 31718.6
- * Perfect: 2048 31744
- * (32*64) (32*31/2 * 64)
- */
-#define HASH_MIX(x, y, a) \
- ( x ^= (a), \
- y ^= x, x = rol32(x, 7),\
- x += y, y = rol32(y,20),\
- y *= 9 )
-
-static inline unsigned int fold_hash(unsigned long x, unsigned long y)
-{
- /* Use arch-optimized multiply if one exists */
- return __hash_32(y ^ __hash_32(x));
-}
-
-/*
- * Generate a hash. This is derived from full_name_hash(), but we want to be
- * sure it is arch independent and that it doesn't change, as bits of the
- * computed hash value might appear on disk. The caller must guarantee that
- * the source data is a multiple of four bytes in size.
- */
-unsigned int fscache_hash(unsigned int salt, const void *data, size_t len)
-{
- const __le32 *p = data;
- unsigned int a, x = 0, y = salt, n = len / sizeof(__le32);
-
- for (; n; n--) {
- a = le32_to_cpu(*p++);
- HASH_MIX(x, y, a);
- }
- return fold_hash(x, y);
-}
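-
-/* For example (a sketch of what volume.c does with its keys): a volume key
- * is length-prefixed and zero-padded to a multiple of four bytes before
- * being hashed, so the same bytes always go in:
- *
- *    hlen = round_up(1 + klen + 1, sizeof(__le32));
- *    key_hash = fscache_hash(0, key, hlen);
- */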
-
-/*
- * initialise the fs caching module
- */
-static int __init fscache_init(void)
-{
- int ret = -ENOMEM;
-
- fscache_wq = alloc_workqueue("fscache", WQ_UNBOUND | WQ_FREEZABLE, 0);
- if (!fscache_wq)
- goto error_wq;
-
- ret = fscache_proc_init();
- if (ret < 0)
- goto error_proc;
-
- fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
- sizeof(struct fscache_cookie),
- 0, 0, NULL);
- if (!fscache_cookie_jar) {
- pr_notice("Failed to allocate a cookie jar\n");
- ret = -ENOMEM;
- goto error_cookie_jar;
- }
-
- pr_notice("Loaded\n");
- return 0;
-
-error_cookie_jar:
- fscache_proc_cleanup();
-error_proc:
- destroy_workqueue(fscache_wq);
-error_wq:
- return ret;
-}
-
-fs_initcall(fscache_init);
-
-/*
- * clean up on module removal
- */
-static void __exit fscache_exit(void)
-{
- _enter("");
-
- kmem_cache_destroy(fscache_cookie_jar);
- fscache_proc_cleanup();
- destroy_workqueue(fscache_wq);
- pr_notice("Unloaded\n");
-}
-
-module_exit(fscache_exit);
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache statistics viewing interface
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include "internal.h"
-
-/*
- * initialise the /proc/fs/fscache/ directory
- */
-int __init fscache_proc_init(void)
-{
- if (!proc_mkdir("fs/fscache", NULL))
- goto error_dir;
-
- if (!proc_create_seq("fs/fscache/caches", S_IFREG | 0444, NULL,
- &fscache_caches_seq_ops))
- goto error;
-
- if (!proc_create_seq("fs/fscache/volumes", S_IFREG | 0444, NULL,
- &fscache_volumes_seq_ops))
- goto error;
-
- if (!proc_create_seq("fs/fscache/cookies", S_IFREG | 0444, NULL,
- &fscache_cookies_seq_ops))
- goto error;
-
-#ifdef CONFIG_FSCACHE_STATS
- if (!proc_create_single("fs/fscache/stats", S_IFREG | 0444, NULL,
- fscache_stats_show))
- goto error;
-#endif
-
- return 0;
-
-error:
- remove_proc_entry("fs/fscache", NULL);
-error_dir:
- return -ENOMEM;
-}
-
-/*
- * clean up the /proc/fs/fscache/ directory
- */
-void fscache_proc_cleanup(void)
-{
- remove_proc_subtree("fs/fscache", NULL);
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache statistics
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include "internal.h"
-
-/*
- * operation counters
- */
-atomic_t fscache_n_volumes;
-atomic_t fscache_n_volumes_collision;
-atomic_t fscache_n_volumes_nomem;
-atomic_t fscache_n_cookies;
-atomic_t fscache_n_cookies_lru;
-atomic_t fscache_n_cookies_lru_expired;
-atomic_t fscache_n_cookies_lru_removed;
-atomic_t fscache_n_cookies_lru_dropped;
-
-atomic_t fscache_n_acquires;
-atomic_t fscache_n_acquires_ok;
-atomic_t fscache_n_acquires_oom;
-
-atomic_t fscache_n_invalidates;
-
-atomic_t fscache_n_updates;
-EXPORT_SYMBOL(fscache_n_updates);
-
-atomic_t fscache_n_relinquishes;
-atomic_t fscache_n_relinquishes_retire;
-atomic_t fscache_n_relinquishes_dropped;
-
-atomic_t fscache_n_resizes;
-atomic_t fscache_n_resizes_null;
-
-atomic_t fscache_n_read;
-EXPORT_SYMBOL(fscache_n_read);
-atomic_t fscache_n_write;
-EXPORT_SYMBOL(fscache_n_write);
-atomic_t fscache_n_no_write_space;
-EXPORT_SYMBOL(fscache_n_no_write_space);
-atomic_t fscache_n_no_create_space;
-EXPORT_SYMBOL(fscache_n_no_create_space);
-atomic_t fscache_n_culled;
-EXPORT_SYMBOL(fscache_n_culled);
-
-/*
- * display the general statistics
- */
-int fscache_stats_show(struct seq_file *m, void *v)
-{
- seq_puts(m, "FS-Cache statistics\n");
- seq_printf(m, "Cookies: n=%d v=%d vcol=%u voom=%u\n",
- atomic_read(&fscache_n_cookies),
- atomic_read(&fscache_n_volumes),
- atomic_read(&fscache_n_volumes_collision),
- atomic_read(&fscache_n_volumes_nomem)
- );
-
- seq_printf(m, "Acquire: n=%u ok=%u oom=%u\n",
- atomic_read(&fscache_n_acquires),
- atomic_read(&fscache_n_acquires_ok),
- atomic_read(&fscache_n_acquires_oom));
-
- seq_printf(m, "LRU : n=%u exp=%u rmv=%u drp=%u at=%ld\n",
- atomic_read(&fscache_n_cookies_lru),
- atomic_read(&fscache_n_cookies_lru_expired),
- atomic_read(&fscache_n_cookies_lru_removed),
- atomic_read(&fscache_n_cookies_lru_dropped),
- timer_pending(&fscache_cookie_lru_timer) ?
- fscache_cookie_lru_timer.expires - jiffies : 0);
-
- seq_printf(m, "Invals : n=%u\n",
- atomic_read(&fscache_n_invalidates));
-
- seq_printf(m, "Updates: n=%u rsz=%u rsn=%u\n",
- atomic_read(&fscache_n_updates),
- atomic_read(&fscache_n_resizes),
- atomic_read(&fscache_n_resizes_null));
-
- seq_printf(m, "Relinqs: n=%u rtr=%u drop=%u\n",
- atomic_read(&fscache_n_relinquishes),
- atomic_read(&fscache_n_relinquishes_retire),
- atomic_read(&fscache_n_relinquishes_dropped));
-
- seq_printf(m, "NoSpace: nwr=%u ncr=%u cull=%u\n",
- atomic_read(&fscache_n_no_write_space),
- atomic_read(&fscache_n_no_create_space),
- atomic_read(&fscache_n_culled));
-
- seq_printf(m, "IO : rd=%u wr=%u\n",
- atomic_read(&fscache_n_read),
- atomic_read(&fscache_n_write));
-
- netfs_stats_show(m);
- return 0;
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Volume-level cache cookie handling.
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL COOKIE
-#include <linux/export.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-#define fscache_volume_hash_shift 10
-static struct hlist_bl_head fscache_volume_hash[1 << fscache_volume_hash_shift];
-static atomic_t fscache_volume_debug_id;
-static LIST_HEAD(fscache_volumes);
-
-static void fscache_create_volume_work(struct work_struct *work);
-
-struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
- enum fscache_volume_trace where)
-{
- int ref;
-
- __refcount_inc(&volume->ref, &ref);
- trace_fscache_volume(volume->debug_id, ref + 1, where);
- return volume;
-}
-
-static void fscache_see_volume(struct fscache_volume *volume,
- enum fscache_volume_trace where)
-{
- int ref = refcount_read(&volume->ref);
-
- trace_fscache_volume(volume->debug_id, ref, where);
-}
-
-/*
- * Pin the cache behind a volume so that we can access it.
- */
-static void __fscache_begin_volume_access(struct fscache_volume *volume,
- struct fscache_cookie *cookie,
- enum fscache_access_trace why)
-{
- int n_accesses;
-
- n_accesses = atomic_inc_return(&volume->n_accesses);
- smp_mb__after_atomic();
- trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
- refcount_read(&volume->ref),
- n_accesses, why);
-}
-
-/**
- * fscache_begin_volume_access - Pin a cache so a volume can be accessed
- * @volume: The volume cookie
- * @cookie: A datafile cookie for a tracing reference (or NULL)
- * @why: An indication of the circumstances of the access for tracing
- *
- * Attempt to pin the cache to prevent it from going away whilst we're
- * accessing a volume, returning true if successful. This works as follows:
- *
- * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
- * then we return false to indicate access was not permitted.
- *
- * (2) If the cache tests as live, then we increment the volume's n_accesses
- * count and then recheck the cache liveness, ending the access if it
- * ceased to be live.
- *
- * (3) When we end the access, we decrement the volume's n_accesses and wake
- * up any waiters if it reaches 0.
- *
- * (4) Whilst the cache is caching, the volume's n_accesses is kept
- * artificially incremented to prevent wakeups from happening.
- *
- * (5) When the cache is taken offline, the state is changed to prevent new
- * accesses, the volume's n_accesses is decremented and we wait for it to
- * become 0.
- *
- * The datafile @cookie and the @why indicator are merely provided for tracing
- * purposes.
- */
-bool fscache_begin_volume_access(struct fscache_volume *volume,
- struct fscache_cookie *cookie,
- enum fscache_access_trace why)
-{
- if (!fscache_cache_is_live(volume->cache))
- return false;
- __fscache_begin_volume_access(volume, cookie, why);
- if (!fscache_cache_is_live(volume->cache)) {
- fscache_end_volume_access(volume, cookie, fscache_access_unlive);
- return false;
- }
- return true;
-}
-
-/**
- * fscache_end_volume_access - Unpin a cache at the end of an access.
- * @volume: The volume cookie
- * @cookie: A datafile cookie for a tracing reference (or NULL)
- * @why: An indication of the circumstances of the access for tracing
- *
- * Unpin a cache volume after we've accessed it. The datafile @cookie and the
- * @why indicator are merely provided for tracing purposes.
- */
-void fscache_end_volume_access(struct fscache_volume *volume,
- struct fscache_cookie *cookie,
- enum fscache_access_trace why)
-{
- int n_accesses;
-
- smp_mb__before_atomic();
- n_accesses = atomic_dec_return(&volume->n_accesses);
- trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
- refcount_read(&volume->ref),
- n_accesses, why);
- if (n_accesses == 0)
- wake_up_var(&volume->n_accesses);
-}
-EXPORT_SYMBOL(fscache_end_volume_access);
-
-static bool fscache_volume_same(const struct fscache_volume *a,
- const struct fscache_volume *b)
-{
- size_t klen;
-
- if (a->key_hash != b->key_hash ||
- a->cache != b->cache ||
- a->key[0] != b->key[0])
- return false;
-
- klen = round_up(a->key[0] + 1, sizeof(__le32));
- return memcmp(a->key, b->key, klen) == 0;
-}
-
-static bool fscache_is_acquire_pending(struct fscache_volume *volume)
-{
- return test_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &volume->flags);
-}
-
-static void fscache_wait_on_volume_collision(struct fscache_volume *candidate,
- unsigned int collidee_debug_id)
-{
- wait_on_bit_timeout(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
- TASK_UNINTERRUPTIBLE, 20 * HZ);
- if (fscache_is_acquire_pending(candidate)) {
- pr_notice("Potential volume collision new=%08x old=%08x\n",
- candidate->debug_id, collidee_debug_id);
- fscache_stat(&fscache_n_volumes_collision);
- wait_on_bit(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
- TASK_UNINTERRUPTIBLE);
- }
-}
-
-/*
- * Attempt to insert the new volume into the hash. If there's a collision, we
- * wait for the old volume to complete if it's being relinquished, and return
- * an error otherwise.
- */
-static bool fscache_hash_volume(struct fscache_volume *candidate)
-{
- struct fscache_volume *cursor;
- struct hlist_bl_head *h;
- struct hlist_bl_node *p;
- unsigned int bucket, collidee_debug_id = 0;
-
- bucket = candidate->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
- h = &fscache_volume_hash[bucket];
-
- hlist_bl_lock(h);
- hlist_bl_for_each_entry(cursor, p, h, hash_link) {
- if (fscache_volume_same(candidate, cursor)) {
- if (!test_bit(FSCACHE_VOLUME_RELINQUISHED, &cursor->flags))
- goto collision;
- fscache_see_volume(cursor, fscache_volume_get_hash_collision);
- set_bit(FSCACHE_VOLUME_COLLIDED_WITH, &cursor->flags);
- set_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &candidate->flags);
- collidee_debug_id = cursor->debug_id;
- break;
- }
- }
-
- hlist_bl_add_head(&candidate->hash_link, h);
- hlist_bl_unlock(h);
-
- if (fscache_is_acquire_pending(candidate))
- fscache_wait_on_volume_collision(candidate, collidee_debug_id);
- return true;
-
-collision:
- fscache_see_volume(cursor, fscache_volume_collision);
- hlist_bl_unlock(h);
- return false;
-}
-
-/*
- * Allocate and initialise a volume representation cookie.
- */
-static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
- const char *cache_name,
- const void *coherency_data,
- size_t coherency_len)
-{
- struct fscache_volume *volume;
- struct fscache_cache *cache;
- size_t klen, hlen;
- u8 *key;
-
- klen = strlen(volume_key);
- if (klen > NAME_MAX)
- return NULL;
-
- if (!coherency_data)
- coherency_len = 0;
-
- cache = fscache_lookup_cache(cache_name, false);
- if (IS_ERR(cache))
- return NULL;
-
- volume = kzalloc(struct_size(volume, coherency, coherency_len),
- GFP_KERNEL);
- if (!volume)
- goto err_cache;
-
- volume->cache = cache;
- volume->coherency_len = coherency_len;
- if (coherency_data)
- memcpy(volume->coherency, coherency_data, coherency_len);
- INIT_LIST_HEAD(&volume->proc_link);
- INIT_WORK(&volume->work, fscache_create_volume_work);
- refcount_set(&volume->ref, 1);
- spin_lock_init(&volume->lock);
-
- /* Stick the length on the front of the key and pad it out to make
- * hashing easier.
- */
- hlen = round_up(1 + klen + 1, sizeof(__le32));
- key = kzalloc(hlen, GFP_KERNEL);
- if (!key)
- goto err_vol;
- key[0] = klen;
- memcpy(key + 1, volume_key, klen);
-
- volume->key = key;
- volume->key_hash = fscache_hash(0, key, hlen);
-
- volume->debug_id = atomic_inc_return(&fscache_volume_debug_id);
- down_write(&fscache_addremove_sem);
- atomic_inc(&cache->n_volumes);
- list_add_tail(&volume->proc_link, &fscache_volumes);
- fscache_see_volume(volume, fscache_volume_new_acquire);
- fscache_stat(&fscache_n_volumes);
- up_write(&fscache_addremove_sem);
- _leave(" = v=%x", volume->debug_id);
- return volume;
-
-err_vol:
- kfree(volume);
-err_cache:
- fscache_put_cache(cache, fscache_cache_put_alloc_volume);
- fscache_stat(&fscache_n_volumes_nomem);
- return NULL;
-}
-
-/*
- * Create a volume's representation on disk. We hold a volume ref and a
- * cache access that we have to release.
- */
-static void fscache_create_volume_work(struct work_struct *work)
-{
- const struct fscache_cache_ops *ops;
- struct fscache_volume *volume =
- container_of(work, struct fscache_volume, work);
-
- fscache_see_volume(volume, fscache_volume_see_create_work);
-
- ops = volume->cache->ops;
- if (ops->acquire_volume)
- ops->acquire_volume(volume);
- fscache_end_cache_access(volume->cache,
- fscache_access_acquire_volume_end);
-
- clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
- fscache_put_volume(volume, fscache_volume_put_create_work);
-}
-
-/*
- * Dispatch a worker thread to create a volume's representation on disk.
- */
-void fscache_create_volume(struct fscache_volume *volume, bool wait)
-{
- if (test_and_set_bit(FSCACHE_VOLUME_CREATING, &volume->flags))
- goto maybe_wait;
- if (volume->cache_priv)
- goto no_wait; /* We raced */
- if (!fscache_begin_cache_access(volume->cache,
- fscache_access_acquire_volume))
- goto no_wait;
-
- fscache_get_volume(volume, fscache_volume_get_create_work);
- if (!schedule_work(&volume->work))
- fscache_put_volume(volume, fscache_volume_put_create_work);
-
-maybe_wait:
- if (wait) {
- fscache_see_volume(volume, fscache_volume_wait_create_work);
- wait_on_bit(&volume->flags, FSCACHE_VOLUME_CREATING,
- TASK_UNINTERRUPTIBLE);
- }
- return;
-no_wait:
- clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
- wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
-}
-
-/*
- * Acquire a volume representation cookie and link it to a (proposed) cache.
- */
-struct fscache_volume *__fscache_acquire_volume(const char *volume_key,
- const char *cache_name,
- const void *coherency_data,
- size_t coherency_len)
-{
- struct fscache_volume *volume;
-
- volume = fscache_alloc_volume(volume_key, cache_name,
- coherency_data, coherency_len);
- if (!volume)
- return ERR_PTR(-ENOMEM);
-
- if (!fscache_hash_volume(volume)) {
- fscache_put_volume(volume, fscache_volume_put_hash_collision);
- return ERR_PTR(-EBUSY);
- }
-
- fscache_create_volume(volume, false);
- return volume;
-}
-EXPORT_SYMBOL(__fscache_acquire_volume);
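-
-/* Example (a sketch; the key string is illustrative): a netfs acquires one
- * volume cookie per superblock through the fscache_acquire_volume() wrapper
- * and drops it again at unmount:
- *
- *    volume = fscache_acquire_volume("afs,example.com,cell0", NULL, NULL, 0);
- *    if (IS_ERR(volume))
- *        return PTR_ERR(volume);
- *    ...
- *    fscache_relinquish_volume(volume, NULL, false);
- */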
-
-static void fscache_wake_pending_volume(struct fscache_volume *volume,
- struct hlist_bl_head *h)
-{
- struct fscache_volume *cursor;
- struct hlist_bl_node *p;
-
- hlist_bl_for_each_entry(cursor, p, h, hash_link) {
- if (fscache_volume_same(cursor, volume)) {
- fscache_see_volume(cursor, fscache_volume_see_hash_wake);
- clear_and_wake_up_bit(FSCACHE_VOLUME_ACQUIRE_PENDING,
- &cursor->flags);
- return;
- }
- }
-}
-
-/*
- * Remove a volume cookie from the hash table.
- */
-static void fscache_unhash_volume(struct fscache_volume *volume)
-{
- struct hlist_bl_head *h;
- unsigned int bucket;
-
- bucket = volume->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
- h = &fscache_volume_hash[bucket];
-
- hlist_bl_lock(h);
- hlist_bl_del(&volume->hash_link);
- if (test_bit(FSCACHE_VOLUME_COLLIDED_WITH, &volume->flags))
- fscache_wake_pending_volume(volume, h);
- hlist_bl_unlock(h);
-}
-
-/*
- * Drop a cache's volume attachments.
- */
-static void fscache_free_volume(struct fscache_volume *volume)
-{
- struct fscache_cache *cache = volume->cache;
-
- if (volume->cache_priv) {
- __fscache_begin_volume_access(volume, NULL,
- fscache_access_relinquish_volume);
- if (volume->cache_priv)
- cache->ops->free_volume(volume);
- fscache_end_volume_access(volume, NULL,
- fscache_access_relinquish_volume_end);
- }
-
- down_write(&fscache_addremove_sem);
- list_del_init(&volume->proc_link);
- atomic_dec(&volume->cache->n_volumes);
- up_write(&fscache_addremove_sem);
-
- if (!hlist_bl_unhashed(&volume->hash_link))
- fscache_unhash_volume(volume);
-
- trace_fscache_volume(volume->debug_id, 0, fscache_volume_free);
- kfree(volume->key);
- kfree(volume);
- fscache_stat_d(&fscache_n_volumes);
- fscache_put_cache(cache, fscache_cache_put_volume);
-}
-
-/*
- * Drop a reference to a volume cookie.
- */
-void fscache_put_volume(struct fscache_volume *volume,
- enum fscache_volume_trace where)
-{
- if (volume) {
- unsigned int debug_id = volume->debug_id;
- bool zero;
- int ref;
-
- zero = __refcount_dec_and_test(&volume->ref, &ref);
- trace_fscache_volume(debug_id, ref - 1, where);
- if (zero)
- fscache_free_volume(volume);
- }
-}
-
-/*
- * Relinquish a volume representation cookie.
- */
-void __fscache_relinquish_volume(struct fscache_volume *volume,
- const void *coherency_data,
- bool invalidate)
-{
- if (WARN_ON(test_and_set_bit(FSCACHE_VOLUME_RELINQUISHED, &volume->flags)))
- return;
-
- if (invalidate) {
- set_bit(FSCACHE_VOLUME_INVALIDATE, &volume->flags);
- } else if (coherency_data) {
- memcpy(volume->coherency, coherency_data, volume->coherency_len);
- }
-
- fscache_put_volume(volume, fscache_volume_put_relinquish);
-}
-EXPORT_SYMBOL(__fscache_relinquish_volume);
-
-/**
- * fscache_withdraw_volume - Withdraw a volume from being cached
- * @volume: Volume cookie
- *
- * Withdraw a cache volume from service, waiting for all accesses to complete
- * before returning.
- */
-void fscache_withdraw_volume(struct fscache_volume *volume)
-{
- int n_accesses;
-
- _debug("withdraw V=%x", volume->debug_id);
-
- /* Allow wakeups on dec-to-0 */
- n_accesses = atomic_dec_return(&volume->n_accesses);
- trace_fscache_access_volume(volume->debug_id, 0,
- refcount_read(&volume->ref),
- n_accesses, fscache_access_cache_unpin);
-
- wait_var_event(&volume->n_accesses,
- atomic_read(&volume->n_accesses) == 0);
-}
-EXPORT_SYMBOL(fscache_withdraw_volume);
-
-#ifdef CONFIG_PROC_FS
-/*
- * Generate a list of volumes in /proc/fs/fscache/volumes
- */
-static int fscache_volumes_seq_show(struct seq_file *m, void *v)
-{
- struct fscache_volume *volume;
-
- if (v == &fscache_volumes) {
- seq_puts(m,
- "VOLUME REF nCOOK ACC FL CACHE KEY\n"
- "======== ===== ===== === == =============== ================\n");
- return 0;
- }
-
- volume = list_entry(v, struct fscache_volume, proc_link);
- seq_printf(m,
- "%08x %5d %5d %3d %02lx %-15.15s %s\n",
- volume->debug_id,
- refcount_read(&volume->ref),
- atomic_read(&volume->n_cookies),
- atomic_read(&volume->n_accesses),
- volume->flags,
- volume->cache->name ?: "-",
- volume->key + 1);
- return 0;
-}
-
-static void *fscache_volumes_seq_start(struct seq_file *m, loff_t *_pos)
- __acquires(&fscache_addremove_sem)
-{
- down_read(&fscache_addremove_sem);
- return seq_list_start_head(&fscache_volumes, *_pos);
-}
-
-static void *fscache_volumes_seq_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- return seq_list_next(v, &fscache_volumes, _pos);
-}
-
-static void fscache_volumes_seq_stop(struct seq_file *m, void *v)
- __releases(&fscache_addremove_sem)
-{
- up_read(&fscache_addremove_sem);
-}
-
-const struct seq_operations fscache_volumes_seq_ops = {
- .start = fscache_volumes_seq_start,
- .next = fscache_volumes_seq_next,
- .stop = fscache_volumes_seq_stop,
- .show = fscache_volumes_seq_show,
-};
-#endif /* CONFIG_PROC_FS */
multi-CPU system these may be on cachelines that keep bouncing
between CPUs. On the other hand, the stats are very useful for
debugging purposes. Saying 'Y' here is recommended.
+
+config FSCACHE
+ tristate "General filesystem local caching manager"
+ select NETFS_SUPPORT
+ help
+ This option enables a generic filesystem caching manager that can be
+ used by various network and other filesystems to cache data locally.
+ Different sorts of caches can be plugged in, depending on the
+ resources available.
+
+ See Documentation/filesystems/caching/fscache.rst for more information.
+
+config FSCACHE_STATS
+ bool "Gather statistical information on local caching"
+ depends on FSCACHE && PROC_FS
+ select NETFS_STATS
+ help
+ This option causes statistical information to be gathered on local
+ caching and exported through the file:
+
+ /proc/fs/fscache/stats
+
+ The gathering of statistics adds a certain amount of overhead to
+ execution as there are quite a few stats gathered, and on a
+ multi-CPU system these may be on cachelines that keep bouncing
+ between CPUs. On the other hand, the stats are very useful for
+ debugging purposes. Saying 'Y' here is recommended.
+
+ See Documentation/filesystems/caching/fscache.rst for more information.
+
+config FSCACHE_DEBUG
+ bool "Debug FS-Cache"
+ depends on FSCACHE
+ help
+ This permits debugging to be dynamically enabled in the local caching
+ management module. If this is set, the debugging output may be
+ enabled by setting bits in /sys/module/fscache/parameters/debug.
+
+ See Documentation/filesystems/caching/fscache.rst for more information.
# SPDX-License-Identifier: GPL-2.0
+fscache-y := \
+ fscache_cache.o \
+ fscache_cookie.o \
+ fscache_io.o \
+ fscache_main.o \
+ fscache_volume.o
+
+fscache-$(CONFIG_PROC_FS) += fscache_proc.o
+fscache-$(CONFIG_FSCACHE_STATS) += fscache_stats.o
+
+obj-$(CONFIG_FSCACHE) := fscache.o
+
netfs-y := \
buffered_read.o \
io.o \
netfs-$(CONFIG_NETFS_STATS) += stats.o
-obj-$(CONFIG_NETFS_SUPPORT) := netfs.o
+obj-$(CONFIG_NETFS_SUPPORT) += netfs.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* FS-Cache cache handling
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/export.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+static LIST_HEAD(fscache_caches);
+DECLARE_RWSEM(fscache_addremove_sem);
+EXPORT_SYMBOL(fscache_addremove_sem);
+DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
+EXPORT_SYMBOL(fscache_clearance_waiters);
+
+static atomic_t fscache_cache_debug_id;
+
+/*
+ * Allocate a cache cookie.
+ */
+static struct fscache_cache *fscache_alloc_cache(const char *name)
+{
+ struct fscache_cache *cache;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (cache) {
+ if (name) {
+ cache->name = kstrdup(name, GFP_KERNEL);
+ if (!cache->name) {
+ kfree(cache);
+ return NULL;
+ }
+ }
+ refcount_set(&cache->ref, 1);
+ INIT_LIST_HEAD(&cache->cache_link);
+ cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
+ }
+ return cache;
+}
+
+static bool fscache_get_cache_maybe(struct fscache_cache *cache,
+ enum fscache_cache_trace where)
+{
+ bool success;
+ int ref;
+
+ success = __refcount_inc_not_zero(&cache->ref, &ref);
+ if (success)
+ trace_fscache_cache(cache->debug_id, ref + 1, where);
+ return success;
+}
+
+/*
+ * Look up a cache cookie.
+ */
+struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
+{
+ struct fscache_cache *candidate, *cache, *unnamed = NULL;
+
+ /* firstly check for the existence of the cache under read lock */
+ down_read(&fscache_addremove_sem);
+
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name && name && strcmp(cache->name, name) == 0 &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_r;
+ if (!cache->name && !name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_r;
+ }
+
+ if (!name) {
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_r;
+ }
+ }
+
+ up_read(&fscache_addremove_sem);
+
+ /* the cache does not exist - create a candidate */
+ candidate = fscache_alloc_cache(name);
+ if (!candidate)
+ return ERR_PTR(-ENOMEM);
+
+ /* write lock, search again and add if still not present */
+ down_write(&fscache_addremove_sem);
+
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name && name && strcmp(cache->name, name) == 0 &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_w;
+ if (!cache->name) {
+ unnamed = cache;
+ if (!name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_w;
+ }
+ }
+
+ if (unnamed && is_cache &&
+ fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
+ goto use_unnamed_cache;
+
+ if (!name) {
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_w;
+ }
+ }
+
+ list_add_tail(&candidate->cache_link, &fscache_caches);
+ trace_fscache_cache(candidate->debug_id,
+ refcount_read(&candidate->ref),
+ fscache_cache_new_acquire);
+ up_write(&fscache_addremove_sem);
+ return candidate;
+
+got_cache_r:
+ up_read(&fscache_addremove_sem);
+ return cache;
+use_unnamed_cache:
+ cache = unnamed;
+ cache->name = candidate->name;
+ candidate->name = NULL;
+got_cache_w:
+ up_write(&fscache_addremove_sem);
+ kfree(candidate->name);
+ kfree(candidate);
+ return cache;
+}
+
+/**
+ * fscache_acquire_cache - Acquire a cache-level cookie.
+ * @name: The name of the cache.
+ *
+ * Get a cookie to represent an actual cache. If a name is given and there is
+ * a nameless cache record available, this will acquire that and set its name,
+ * directing all the volumes using it to this cache.
+ *
+ * The cache will be switched over to the preparing state if not currently in
+ * use, otherwise -EBUSY will be returned.
+ */
+struct fscache_cache *fscache_acquire_cache(const char *name)
+{
+ struct fscache_cache *cache;
+
+ ASSERT(name);
+ cache = fscache_lookup_cache(name, true);
+ if (IS_ERR(cache))
+ return cache;
+
+ if (!fscache_set_cache_state_maybe(cache,
+ FSCACHE_CACHE_IS_NOT_PRESENT,
+ FSCACHE_CACHE_IS_PREPARING)) {
+ pr_warn("Cache tag %s in use\n", name);
+ fscache_put_cache(cache, fscache_cache_put_cache);
+ return ERR_PTR(-EBUSY);
+ }
+
+ return cache;
+}
+EXPORT_SYMBOL(fscache_acquire_cache);
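+
+/* Example (a sketch; the tag name is illustrative): a cache backend such as
+ * cachefiles binds to a cache tag by name, releasing it again if the rest
+ * of its setup fails:
+ *
+ *    cache = fscache_acquire_cache("mycache");
+ *    if (IS_ERR(cache))
+ *        return PTR_ERR(cache);
+ */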
+
+/**
+ * fscache_put_cache - Release a cache-level cookie.
+ * @cache: The cache cookie to be released
+ * @where: An indication of where the release happened
+ *
+ * Release the caller's reference on a cache-level cookie. The @where
+ * indication should give information about the circumstances in which the call
+ * occurs and will be logged through a tracepoint.
+ */
+void fscache_put_cache(struct fscache_cache *cache,
+ enum fscache_cache_trace where)
+{
+ unsigned int debug_id;
+ bool zero;
+ int ref;
+
+ if (IS_ERR_OR_NULL(cache))
+ return;
+
+ debug_id = cache->debug_id;
+ zero = __refcount_dec_and_test(&cache->ref, &ref);
+ trace_fscache_cache(debug_id, ref - 1, where);
+
+ if (zero) {
+ down_write(&fscache_addremove_sem);
+ list_del_init(&cache->cache_link);
+ up_write(&fscache_addremove_sem);
+ kfree(cache->name);
+ kfree(cache);
+ }
+}
+
+/**
+ * fscache_relinquish_cache - Reset cache state and release cookie
+ * @cache: The cache cookie to be released
+ *
+ * Reset the state of a cache and release the caller's reference on a cache
+ * cookie.
+ */
+void fscache_relinquish_cache(struct fscache_cache *cache)
+{
+ enum fscache_cache_trace where =
+ (cache->state == FSCACHE_CACHE_IS_PREPARING) ?
+ fscache_cache_put_prep_failed :
+ fscache_cache_put_relinquish;
+
+ cache->ops = NULL;
+ cache->cache_priv = NULL;
+ fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
+ fscache_put_cache(cache, where);
+}
+EXPORT_SYMBOL(fscache_relinquish_cache);
+
+/**
+ * fscache_add_cache - Declare a cache as being open for business
+ * @cache: The cache-level cookie representing the cache
+ * @ops: Table of cache operations to use
+ * @cache_priv: Private data for the cache record
+ *
+ * Add a cache to the system, making it available for netfses to use.
+ *
+ * See Documentation/filesystems/caching/backend-api.rst for a complete
+ * description.
+ */
+int fscache_add_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ void *cache_priv)
+{
+ int n_accesses;
+
+ _enter("{%s,%s}", ops->name, cache->name);
+
+ BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);
+
+ /* Get a ref on the cache cookie and keep its n_accesses counter raised
+ * by 1 to prevent wakeups from transitioning it to 0 until we're
+ * withdrawing caching services from it.
+ */
+ n_accesses = atomic_inc_return(&cache->n_accesses);
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, fscache_access_cache_pin);
+
+ down_write(&fscache_addremove_sem);
+
+ cache->ops = ops;
+ cache->cache_priv = cache_priv;
+ fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);
+
+ up_write(&fscache_addremove_sem);
+ pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
+ _leave(" = 0 [%s]", cache->name);
+ return 0;
+}
+EXPORT_SYMBOL(fscache_add_cache);
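+
+/* Example bring-up (a sketch; my_cache_ops and my_priv stand in for the
+ * backend's own ops table and private state): once the backing store is
+ * prepared, the backend goes live with:
+ *
+ *    ret = fscache_add_cache(cache, &my_cache_ops, my_priv);
+ *
+ * The extra n_accesses pin taken here is only dropped again by
+ * fscache_withdraw_cache().
+ */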
+
+/**
+ * fscache_begin_cache_access - Pin a cache so it can be accessed
+ * @cache: The cache-level cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing it, returning true if successful. This works as follows:
+ *
+ * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
+ * then we return false to indicate access was not permitted.
+ *
+ * (2) If the cache tests as live, then we increment the n_accesses count and
+ * then recheck the liveness, ending the access if it ceased to be live.
+ *
+ * (3) When we end the access, we decrement n_accesses and wake up any
+ * waiters if it reaches 0.
+ *
+ * (4) Whilst the cache is caching, n_accesses is kept artificially
+ * incremented to prevent wakeups from happening.
+ *
+ * (5) When the cache is taken offline, the state is changed to prevent new
+ * accesses, n_accesses is decremented and we wait for n_accesses to
+ * become 0.
+ */
+bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ if (!fscache_cache_is_live(cache))
+ return false;
+
+ n_accesses = atomic_inc_return(&cache->n_accesses);
+ smp_mb__after_atomic(); /* Reread live flag after n_accesses */
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, why);
+ if (!fscache_cache_is_live(cache)) {
+ fscache_end_cache_access(cache, fscache_access_unlive);
+ return false;
+ }
+ return true;
+}
+
+/**
+ * fscache_end_cache_access - Unpin a cache at the end of an access.
+ * @cache: The cache-level cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache after we've accessed it. The @why indicator is merely
+ * provided for tracing purposes.
+ */
+void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ smp_mb__before_atomic();
+ n_accesses = atomic_dec_return(&cache->n_accesses);
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, why);
+ if (n_accesses == 0)
+ wake_up_var(&cache->n_accesses);
+}
+
+/**
+ * fscache_io_error - Note a cache I/O error
+ * @cache: The record describing the cache
+ *
+ * Note that an I/O error occurred in a cache and that it should no longer be
+ * used for anything. This also reports the error into the kernel log.
+ *
+ * See Documentation/filesystems/caching/backend-api.rst for a complete
+ * description.
+ */
+void fscache_io_error(struct fscache_cache *cache)
+{
+ if (fscache_set_cache_state_maybe(cache,
+ FSCACHE_CACHE_IS_ACTIVE,
+ FSCACHE_CACHE_GOT_IOERROR))
+ pr_err("Cache '%s' stopped due to I/O error\n",
+ cache->name);
+}
+EXPORT_SYMBOL(fscache_io_error);
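+
+/* Example (a sketch): a backend taking itself out of service after a failed
+ * backing-store operation:
+ *
+ *    if (ret == -EIO)
+ *        fscache_io_error(cache);
+ */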
+
+/**
+ * fscache_withdraw_cache - Withdraw a cache from the active service
+ * @cache: The cache cookie
+ *
+ * Begin the process of withdrawing a cache from service. This stops new
+ * cache-level and volume-level accesses from taking place and waits for
+ * currently ongoing cache-level accesses to end.
+ */
+void fscache_withdraw_cache(struct fscache_cache *cache)
+{
+ int n_accesses;
+
+ pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
+ cache->name, atomic_read(&cache->object_count));
+
+ fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);
+
+ /* Allow wakeups on dec-to-0 */
+ n_accesses = atomic_dec_return(&cache->n_accesses);
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, fscache_access_cache_unpin);
+
+ wait_var_event(&cache->n_accesses,
+ atomic_read(&cache->n_accesses) == 0);
+}
+EXPORT_SYMBOL(fscache_withdraw_cache);
+
+#ifdef CONFIG_PROC_FS
+static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";
+
+/*
+ * Generate a list of caches in /proc/fs/fscache/caches
+ */
+static int fscache_caches_seq_show(struct seq_file *m, void *v)
+{
+ struct fscache_cache *cache;
+
+ if (v == &fscache_caches) {
+ seq_puts(m,
+ "CACHE REF VOLS OBJS ACCES S NAME\n"
+ "======== ===== ===== ===== ===== = ===============\n"
+ );
+ return 0;
+ }
+
+ cache = list_entry(v, struct fscache_cache, cache_link);
+ seq_printf(m,
+ "%08x %5d %5d %5d %5d %c %s\n",
+ cache->debug_id,
+ refcount_read(&cache->ref),
+ atomic_read(&cache->n_volumes),
+ atomic_read(&cache->object_count),
+ atomic_read(&cache->n_accesses),
+ fscache_cache_states[cache->state],
+ cache->name ?: "-");
+ return 0;
+}
+
+static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(fscache_addremove_sem)
+{
+ down_read(&fscache_addremove_sem);
+ return seq_list_start_head(&fscache_caches, *_pos);
+}
+
+static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &fscache_caches, _pos);
+}
+
+static void fscache_caches_seq_stop(struct seq_file *m, void *v)
+ __releases(fscache_addremove_sem)
+{
+ up_read(&fscache_addremove_sem);
+}
+
+const struct seq_operations fscache_caches_seq_ops = {
+ .start = fscache_caches_seq_start,
+ .next = fscache_caches_seq_next,
+ .stop = fscache_caches_seq_stop,
+ .show = fscache_caches_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* netfs cookie management
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See Documentation/filesystems/caching/netfs-api.rst for more information on
+ * the netfs API.
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct kmem_cache *fscache_cookie_jar;
+
+static void fscache_cookie_lru_timed_out(struct timer_list *timer);
+static void fscache_cookie_lru_worker(struct work_struct *work);
+static void fscache_cookie_worker(struct work_struct *work);
+static void fscache_unhash_cookie(struct fscache_cookie *cookie);
+static void fscache_perform_invalidation(struct fscache_cookie *cookie);
+
+#define fscache_cookie_hash_shift 15
+static struct hlist_bl_head fscache_cookie_hash[1 << fscache_cookie_hash_shift];
+static LIST_HEAD(fscache_cookies);
+static DEFINE_RWLOCK(fscache_cookies_lock);
+static LIST_HEAD(fscache_cookie_lru);
+static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
+DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
+static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
+static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
+static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
+
+void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
+{
+ const u8 *k;
+
+ pr_err("%c-cookie c=%08x [fl=%lx na=%u nA=%u s=%c]\n",
+ prefix,
+ cookie->debug_id,
+ cookie->flags,
+ atomic_read(&cookie->n_active),
+ atomic_read(&cookie->n_accesses),
+ fscache_cookie_states[cookie->state]);
+ pr_err("%c-cookie V=%08x [%s]\n",
+ prefix,
+ cookie->volume->debug_id,
+ cookie->volume->key);
+
+ k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
+ cookie->inline_key : cookie->key;
+ pr_err("%c-key=[%u] '%*phN'\n", prefix, cookie->key_len, cookie->key_len, k);
+}
+
+static void fscache_free_cookie(struct fscache_cookie *cookie)
+{
+ if (WARN_ON_ONCE(!list_empty(&cookie->commit_link))) {
+ spin_lock(&fscache_cookie_lru_lock);
+ list_del_init(&cookie->commit_link);
+ spin_unlock(&fscache_cookie_lru_lock);
+ fscache_stat_d(&fscache_n_cookies_lru);
+ fscache_stat(&fscache_n_cookies_lru_removed);
+ }
+
+ if (WARN_ON_ONCE(test_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags))) {
+ fscache_print_cookie(cookie, 'F');
+ return;
+ }
+
+ write_lock(&fscache_cookies_lock);
+ list_del(&cookie->proc_link);
+ write_unlock(&fscache_cookies_lock);
+ if (cookie->aux_len > sizeof(cookie->inline_aux))
+ kfree(cookie->aux);
+ if (cookie->key_len > sizeof(cookie->inline_key))
+ kfree(cookie->key);
+ fscache_stat_d(&fscache_n_cookies);
+ kmem_cache_free(fscache_cookie_jar, cookie);
+}
+
+static void __fscache_queue_cookie(struct fscache_cookie *cookie)
+{
+ if (!queue_work(fscache_wq, &cookie->work))
+ fscache_put_cookie(cookie, fscache_cookie_put_over_queued);
+}
+
+static void fscache_queue_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ fscache_get_cookie(cookie, where);
+ __fscache_queue_cookie(cookie);
+}
+
+/*
+ * Initialise the access gate on a cookie by setting a flag to prevent the
+ * state machine from being queued when the access counter transitions to 0.
+ * We're only interested in this when we withdraw caching services from the
+ * cookie.
+ */
+static void fscache_init_access_gate(struct fscache_cookie *cookie)
+{
+ int n_accesses;
+
+ n_accesses = atomic_read(&cookie->n_accesses);
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, fscache_access_cache_pin);
+ set_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
+}
+
+/**
+ * fscache_end_cookie_access - Unpin a cache at the end of an access.
+ * @cookie: A data file cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache cookie after we've accessed it and bring a deferred
+ * relinquishment or withdrawal state into effect.
+ *
+ * The @why indicator is provided for tracing purposes.
+ */
+void fscache_end_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ smp_mb__before_atomic();
+ n_accesses = atomic_dec_return(&cookie->n_accesses);
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, why);
+ if (n_accesses == 0 &&
+ !test_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags))
+ fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
+}
+EXPORT_SYMBOL(fscache_end_cookie_access);
+
+/*
+ * Pin the cache behind a cookie so that we can access it.
+ */
+static void __fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ n_accesses = atomic_inc_return(&cookie->n_accesses);
+ smp_mb__after_atomic(); /* (Future) read state after is-caching.
+ * Reread n_accesses after is-caching
+ */
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, why);
+}
+
+/**
+ * fscache_begin_cookie_access - Pin a cache so data can be accessed
+ * @cookie: A data file cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing data, returning true if successful. This works as follows:
+ *
+ * (1) If the cookie is not being cached (ie. FSCACHE_COOKIE_IS_CACHING is not
+ * set), we return false to indicate access was not permitted.
+ *
+ * (2) If the cookie is being cached, we increment its n_accesses count and
+ * then recheck the IS_CACHING flag, ending the access if it got cleared.
+ *
+ * (3) When we end the access, we decrement the cookie's n_accesses and wake
+ * up any waiters if it reaches 0.
+ *
+ * (4) Whilst the cookie is actively being cached, its n_accesses is kept
+ * artificially incremented to prevent wakeups from happening.
+ *
+ * (5) When the cache is taken offline or if the cookie is culled, the flag is
+ * cleared to prevent new accesses, the cookie's n_accesses is decremented
+ * and we wait for it to become 0.
+ *
+ * The @why indicator is merely provided for tracing purposes.
+ */
+bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
+ return false;
+ __fscache_begin_cookie_access(cookie, why);
+ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags) ||
+ !fscache_cache_is_live(cookie->volume->cache)) {
+ fscache_end_cookie_access(cookie, fscache_access_unlive);
+ return false;
+ }
+ return true;
+}
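+
+/* Typical bracket (a sketch; fscache_access_io_end is one of the trace
+ * reasons provided for the end side):
+ *
+ *    if (fscache_begin_cookie_access(cookie, fscache_access_io_read)) {
+ *        ... touch the cache ...
+ *        fscache_end_cookie_access(cookie, fscache_access_io_end);
+ *    }
+ */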
+
+static inline void wake_up_cookie_state(struct fscache_cookie *cookie)
+{
+ /* Use a barrier to ensure that waiters see the state variable
+ * change, as spin_unlock doesn't guarantee a barrier.
+ *
+ * See comments over wake_up_bit() and waitqueue_active().
+ */
+ smp_mb();
+ wake_up_var(&cookie->state);
+}
+
+/*
+ * Change the state a cookie is at and wake up anyone waiting for that. Impose
+ * an ordering between the stuff stored in the cookie and the state member.
+ * Paired with fscache_cookie_state().
+ */
+static void __fscache_set_cookie_state(struct fscache_cookie *cookie,
+ enum fscache_cookie_state state)
+{
+ smp_store_release(&cookie->state, state);
+}
+
+static void fscache_set_cookie_state(struct fscache_cookie *cookie,
+ enum fscache_cookie_state state)
+{
+ spin_lock(&cookie->lock);
+ __fscache_set_cookie_state(cookie, state);
+ spin_unlock(&cookie->lock);
+ wake_up_cookie_state(cookie);
+}
+
+/**
+ * fscache_cookie_lookup_negative - Note negative lookup
+ * @cookie: The cookie that was being looked up
+ *
+ * Note that some part of the metadata path in the cache doesn't exist and so
+ * we can release any waiting readers in the certain knowledge that there's
+ * nothing for them to actually read.
+ *
+ * This function uses no locking and must only be called from the state machine.
+ */
+void fscache_cookie_lookup_negative(struct fscache_cookie *cookie)
+{
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_CREATING);
+}
+EXPORT_SYMBOL(fscache_cookie_lookup_negative);
+
+/**
+ * fscache_resume_after_invalidation - Allow I/O to resume after invalidation
+ * @cookie: The cookie that was invalidated
+ *
+ * Tell fscache that invalidation is sufficiently complete that I/O can be
+ * allowed again.
+ */
+void fscache_resume_after_invalidation(struct fscache_cookie *cookie)
+{
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
+}
+EXPORT_SYMBOL(fscache_resume_after_invalidation);
+
+/**
+ * fscache_caching_failed - Report that a failure stopped caching on a cookie
+ * @cookie: The cookie that was affected
+ *
+ * Tell fscache that caching on a cookie needs to be stopped due to some sort
+ * of failure.
+ *
+ * This function uses no locking and must only be called from the state machine.
+ */
+void fscache_caching_failed(struct fscache_cookie *cookie)
+{
+ clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_FAILED);
+ trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
+ fscache_cookie_failed);
+}
+EXPORT_SYMBOL(fscache_caching_failed);
+
+/*
+ * Set the index key in a cookie. The cookie struct has space for a 16-byte
+ * inline key plus length and hash; if the key is bigger than that, a buffer
+ * is allocated separately and cookie->key is pointed at it instead.
+ */
+static int fscache_set_key(struct fscache_cookie *cookie,
+ const void *index_key, size_t index_key_len)
+{
+ void *buf;
+ size_t buf_size;
+
+ buf_size = round_up(index_key_len, sizeof(__le32));
+
+ if (index_key_len > sizeof(cookie->inline_key)) {
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ cookie->key = buf;
+ } else {
+ buf = cookie->inline_key;
+ }
+
+ memcpy(buf, index_key, index_key_len);
+ cookie->key_hash = fscache_hash(cookie->volume->key_hash,
+ buf, buf_size);
+ return 0;
+}
+
+static bool fscache_cookie_same(const struct fscache_cookie *a,
+ const struct fscache_cookie *b)
+{
+ const void *ka, *kb;
+
+ if (a->key_hash != b->key_hash ||
+ a->volume != b->volume ||
+ a->key_len != b->key_len)
+ return false;
+
+ if (a->key_len <= sizeof(a->inline_key)) {
+ ka = &a->inline_key;
+ kb = &b->inline_key;
+ } else {
+ ka = a->key;
+ kb = b->key;
+ }
+ return memcmp(ka, kb, a->key_len) == 0;
+}
+
+static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1);
+
+/*
+ * Allocate a cookie.
+ */
+static struct fscache_cookie *fscache_alloc_cookie(
+ struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key, size_t index_key_len,
+ const void *aux_data, size_t aux_data_len,
+ loff_t object_size)
+{
+ struct fscache_cookie *cookie;
+
+ /* allocate and initialise a cookie */
+ cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
+ if (!cookie)
+ return NULL;
+ fscache_stat(&fscache_n_cookies);
+
+ cookie->volume = volume;
+ cookie->advice = advice;
+ cookie->key_len = index_key_len;
+ cookie->aux_len = aux_data_len;
+ cookie->object_size = object_size;
+ if (object_size == 0)
+ __set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+
+ if (fscache_set_key(cookie, index_key, index_key_len) < 0)
+ goto nomem;
+
+ if (cookie->aux_len <= sizeof(cookie->inline_aux)) {
+ memcpy(cookie->inline_aux, aux_data, cookie->aux_len);
+ } else {
+ cookie->aux = kmemdup(aux_data, cookie->aux_len, GFP_KERNEL);
+ if (!cookie->aux)
+ goto nomem;
+ }
+
+ refcount_set(&cookie->ref, 1);
+ cookie->debug_id = atomic_inc_return(&fscache_cookie_debug_id);
+ spin_lock_init(&cookie->lock);
+ INIT_LIST_HEAD(&cookie->commit_link);
+ INIT_WORK(&cookie->work, fscache_cookie_worker);
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+
+ write_lock(&fscache_cookies_lock);
+ list_add_tail(&cookie->proc_link, &fscache_cookies);
+ write_unlock(&fscache_cookies_lock);
+ fscache_see_cookie(cookie, fscache_cookie_new_acquire);
+ return cookie;
+
+nomem:
+ fscache_free_cookie(cookie);
+ return NULL;
+}
+
+static inline bool fscache_cookie_is_dropped(struct fscache_cookie *cookie)
+{
+ return READ_ONCE(cookie->state) == FSCACHE_COOKIE_STATE_DROPPED;
+}
+
+static void fscache_wait_on_collision(struct fscache_cookie *candidate,
+ struct fscache_cookie *wait_for)
+{
+ enum fscache_cookie_state *statep = &wait_for->state;
+
+ wait_var_event_timeout(statep, fscache_cookie_is_dropped(wait_for),
+ 20 * HZ);
+ if (!fscache_cookie_is_dropped(wait_for)) {
+ pr_notice("Potential collision c=%08x old: c=%08x",
+ candidate->debug_id, wait_for->debug_id);
+ wait_var_event(statep, fscache_cookie_is_dropped(wait_for));
+ }
+}
+
+/*
+ * Attempt to insert the new cookie into the hash. If there's a collision, we
+ * wait for the old cookie to be dropped if it's being relinquished, and fail
+ * otherwise.
+ */
+static bool fscache_hash_cookie(struct fscache_cookie *candidate)
+{
+ struct fscache_cookie *cursor, *wait_for = NULL;
+ struct hlist_bl_head *h;
+ struct hlist_bl_node *p;
+ unsigned int bucket;
+
+ bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
+ h = &fscache_cookie_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_for_each_entry(cursor, p, h, hash_link) {
+ if (fscache_cookie_same(candidate, cursor)) {
+ if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cursor->flags))
+ goto collision;
+ wait_for = fscache_get_cookie(cursor,
+ fscache_cookie_get_hash_collision);
+ break;
+ }
+ }
+
+ fscache_get_volume(candidate->volume, fscache_volume_get_cookie);
+ atomic_inc(&candidate->volume->n_cookies);
+ hlist_bl_add_head(&candidate->hash_link, h);
+ set_bit(FSCACHE_COOKIE_IS_HASHED, &candidate->flags);
+ hlist_bl_unlock(h);
+
+ if (wait_for) {
+ fscache_wait_on_collision(candidate, wait_for);
+ fscache_put_cookie(wait_for, fscache_cookie_put_hash_collision);
+ }
+ return true;
+
+collision:
+ trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref),
+ fscache_cookie_collision);
+ pr_err("Duplicate cookie detected\n");
+ fscache_print_cookie(cursor, 'O');
+ fscache_print_cookie(candidate, 'N');
+ hlist_bl_unlock(h);
+ return false;
+}
+
+/*
+ * Request a cookie to represent a data storage object within a volume.
+ *
+ * We never report errors back to the netfs. We may return a NULL cookie
+ * pointer instead, and the netfs is expected to cope with that.
+ */
+struct fscache_cookie *__fscache_acquire_cookie(
+ struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key, size_t index_key_len,
+ const void *aux_data, size_t aux_data_len,
+ loff_t object_size)
+{
+ struct fscache_cookie *cookie;
+
+ _enter("V=%x", volume->debug_id);
+
+ if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
+ return NULL;
+ if (!aux_data || !aux_data_len) {
+ aux_data = NULL;
+ aux_data_len = 0;
+ }
+
+ fscache_stat(&fscache_n_acquires);
+
+ cookie = fscache_alloc_cookie(volume, advice,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ object_size);
+ if (!cookie) {
+ fscache_stat(&fscache_n_acquires_oom);
+ return NULL;
+ }
+
+ if (!fscache_hash_cookie(cookie)) {
+ fscache_see_cookie(cookie, fscache_cookie_discard);
+ fscache_free_cookie(cookie);
+ return NULL;
+ }
+
+ trace_fscache_acquire(cookie);
+ fscache_stat(&fscache_n_acquires_ok);
+ _leave(" = c=%08x", cookie->debug_id);
+ return cookie;
+}
+EXPORT_SYMBOL(__fscache_acquire_cookie);
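+
+/* An illustrative sketch (not part of this patch): a network filesystem
+ * would normally go through the fscache_acquire_cookie() wrapper, keying the
+ * cookie by its file identifier and passing its coherency data as the
+ * auxiliary blob. The vnode fields here are hypothetical:
+ *
+ *	vnode->cache = fscache_acquire_cookie(volume, 0,
+ *					      &vnode->fid, sizeof(vnode->fid),
+ *					      &vnode->status, sizeof(vnode->status),
+ *					      i_size_read(&vnode->inode));
+ */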
+
+/*
+ * Prepare a cache object to be written to.
+ */
+static void fscache_prepare_to_write(struct fscache_cookie *cookie)
+{
+ cookie->volume->cache->ops->prepare_to_write(cookie);
+}
+
+/*
+ * Look up a cookie in the cache.
+ */
+static void fscache_perform_lookup(struct fscache_cookie *cookie)
+{
+ enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
+ bool need_withdraw = false;
+
+ _enter("");
+
+ if (!cookie->volume->cache_priv) {
+ fscache_create_volume(cookie->volume, true);
+ if (!cookie->volume->cache_priv) {
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+ goto out;
+ }
+ }
+
+ if (!cookie->volume->cache->ops->lookup_cookie(cookie)) {
+ if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+ need_withdraw = true;
+ _leave(" [fail]");
+ goto out;
+ }
+
+ fscache_see_cookie(cookie, fscache_cookie_see_active);
+ spin_lock(&cookie->lock);
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_INVALIDATING);
+ else
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
+ spin_unlock(&cookie->lock);
+ wake_up_cookie_state(cookie);
+ trace = fscache_access_lookup_cookie_end;
+
+out:
+ fscache_end_cookie_access(cookie, trace);
+ if (need_withdraw)
+ fscache_withdraw_cookie(cookie);
+ fscache_end_volume_access(cookie->volume, cookie, trace);
+}
+
+/*
+ * Begin the process of looking up a cookie. We offload the actual process to
+ * a worker thread.
+ */
+static bool fscache_begin_lookup(struct fscache_cookie *cookie, bool will_modify)
+{
+ if (will_modify) {
+ set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
+ }
+ if (!fscache_begin_volume_access(cookie->volume, cookie,
+ fscache_access_lookup_cookie))
+ return false;
+
+ __fscache_begin_cookie_access(cookie, fscache_access_lookup_cookie);
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_LOOKING_UP);
+ set_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags);
+ return true;
+}
+
+/*
+ * Start using the cookie for I/O. This prevents the backing object from being
+ * reaped by VM pressure.
+ */
+void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
+{
+ enum fscache_cookie_state state;
+ bool queue = false;
+ int n_active;
+
+ _enter("c=%08x", cookie->debug_id);
+
+ if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
+ "Trying to use relinquished cookie\n"))
+ return;
+
+ spin_lock(&cookie->lock);
+
+ n_active = atomic_inc_return(&cookie->n_active);
+ trace_fscache_active(cookie->debug_id, refcount_read(&cookie->ref),
+ n_active, atomic_read(&cookie->n_accesses),
+ will_modify ?
+ fscache_active_use_modify : fscache_active_use);
+
+again:
+ state = fscache_cookie_state(cookie);
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_QUIESCENT:
+ queue = fscache_begin_lookup(cookie, will_modify);
+ break;
+
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_CREATING:
+ if (will_modify)
+ set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
+ break;
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ if (will_modify &&
+ !test_and_set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags)) {
+ set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
+ queue = true;
+ }
+ /*
+ * We could race with cookie_lru which may set LRU_DISCARD bit
+ * but has yet to run the cookie state machine. If this happens
+ * and another thread tries to use the cookie, clear LRU_DISCARD
+ * so we don't end up withdrawing the cookie while in use.
+ */
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags))
+ fscache_see_cookie(cookie, fscache_cookie_see_lru_discard_clear);
+ break;
+
+ case FSCACHE_COOKIE_STATE_FAILED:
+ case FSCACHE_COOKIE_STATE_WITHDRAWING:
+ break;
+
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ spin_unlock(&cookie->lock);
+ wait_var_event(&cookie->state,
+ fscache_cookie_state(cookie) !=
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING);
+ spin_lock(&cookie->lock);
+ goto again;
+
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ WARN(1, "Can't use cookie in state %u\n", state);
+ break;
+ }
+
+ spin_unlock(&cookie->lock);
+ if (queue)
+ fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_use_cookie);
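+
+/* An illustrative sketch (not part of this patch): a netfs typically
+ * brackets the period a file is open with the fscache_use_cookie() and
+ * fscache_unuse_cookie() wrappers, e.g. in its ->open() and ->release()
+ * methods:
+ *
+ *	fscache_use_cookie(cookie, file->f_mode & FMODE_WRITE);
+ *	...
+ *	fscache_unuse_cookie(cookie, &aux, &i_size);
+ *
+ * where the final aux data and size are passed back so that the cache can
+ * update its coherency information.
+ */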
+
+static void fscache_unuse_cookie_locked(struct fscache_cookie *cookie)
+{
+ clear_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags);
+ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
+ return;
+
+ cookie->unused_at = jiffies;
+ spin_lock(&fscache_cookie_lru_lock);
+ if (list_empty(&cookie->commit_link)) {
+ fscache_get_cookie(cookie, fscache_cookie_get_lru);
+ fscache_stat(&fscache_n_cookies_lru);
+ }
+ list_move_tail(&cookie->commit_link, &fscache_cookie_lru);
+
+ spin_unlock(&fscache_cookie_lru_lock);
+ timer_reduce(&fscache_cookie_lru_timer,
+ jiffies + fscache_lru_cookie_timeout);
+}
+
+/*
+ * Stop using the cookie for I/O.
+ */
+void __fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data, const loff_t *object_size)
+{
+ unsigned int debug_id = cookie->debug_id;
+ unsigned int r = refcount_read(&cookie->ref);
+ unsigned int a = atomic_read(&cookie->n_accesses);
+ unsigned int c;
+
+ if (aux_data || object_size)
+ __fscache_update_cookie(cookie, aux_data, object_size);
+
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ c = atomic_fetch_add_unless(&cookie->n_active, -1, 1);
+ if (c != 1) {
+ trace_fscache_active(debug_id, r, c - 1, a, fscache_active_unuse);
+ return;
+ }
+
+ spin_lock(&cookie->lock);
+ r = refcount_read(&cookie->ref);
+ a = atomic_read(&cookie->n_accesses);
+ c = atomic_dec_return(&cookie->n_active);
+ trace_fscache_active(debug_id, r, c, a, fscache_active_unuse);
+ if (c == 0)
+ fscache_unuse_cookie_locked(cookie);
+ spin_unlock(&cookie->lock);
+}
+EXPORT_SYMBOL(__fscache_unuse_cookie);
+
+/*
+ * Perform work upon the cookie, such as committing its cache state,
+ * relinquishing it or withdrawing the backing cache. We're protected from the
+ * cache going away under us as object withdrawal must come through this
+ * non-reentrant work item.
+ */
+static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
+{
+ enum fscache_cookie_state state;
+ bool wake = false;
+
+ _enter("c=%x", cookie->debug_id);
+
+again:
+ spin_lock(&cookie->lock);
+again_locked:
+ state = cookie->state;
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_QUIESCENT:
+ /* A cookie in the QUIESCENT state is moved to the LOOKING_UP state
+ * by fscache_use_cookie().
+ */
+
+ if (atomic_read(&cookie->n_accesses) == 0 &&
+ test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_RELINQUISHING);
+ wake = true;
+ goto again_locked;
+ }
+ break;
+
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ spin_unlock(&cookie->lock);
+ fscache_init_access_gate(cookie);
+ fscache_perform_lookup(cookie);
+ goto again;
+
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ spin_unlock(&cookie->lock);
+ fscache_perform_invalidation(cookie);
+ goto again;
+
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags)) {
+ spin_unlock(&cookie->lock);
+ fscache_prepare_to_write(cookie);
+ spin_lock(&cookie->lock);
+ }
+ if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING);
+ wake = true;
+ goto again_locked;
+ }
+ fallthrough;
+
+ case FSCACHE_COOKIE_STATE_FAILED:
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
+
+ if (atomic_read(&cookie->n_accesses) != 0)
+ break;
+ if (test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_RELINQUISHING);
+ wake = true;
+ goto again_locked;
+ }
+ if (test_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_WITHDRAWING);
+ wake = true;
+ goto again_locked;
+ }
+ break;
+
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ case FSCACHE_COOKIE_STATE_WITHDRAWING:
+ if (cookie->cache_priv) {
+ spin_unlock(&cookie->lock);
+ cookie->volume->cache->ops->withdraw_cookie(cookie);
+ spin_lock(&cookie->lock);
+ }
+
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
+
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ fscache_see_cookie(cookie, fscache_cookie_see_relinquish);
+ fscache_unhash_cookie(cookie);
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_DROPPED);
+ wake = true;
+ goto out;
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ fscache_see_cookie(cookie, fscache_cookie_see_lru_discard);
+ break;
+ case FSCACHE_COOKIE_STATE_WITHDRAWING:
+ fscache_see_cookie(cookie, fscache_cookie_see_withdraw);
+ break;
+ default:
+ BUG();
+ }
+
+ clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+ wake = true;
+ goto again_locked;
+
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ break;
+
+ default:
+ WARN_ONCE(1, "Cookie %x in unexpected state %u\n",
+ cookie->debug_id, state);
+ break;
+ }
+
+out:
+ spin_unlock(&cookie->lock);
+ if (wake)
+ wake_up_cookie_state(cookie);
+ _leave("");
+}
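+
+/*
+ * An illustrative summary of the transitions effected above (derived from
+ * the code; not itself part of the patch):
+ *
+ *	QUIESCENT      -> LOOKING_UP (fscache_use_cookie) or RELINQUISHING
+ *	LOOKING_UP     -> CREATING (negative lookup), ACTIVE, INVALIDATING,
+ *			  FAILED, or back to QUIESCENT on error
+ *	CREATING       -> ACTIVE or INVALIDATING
+ *	ACTIVE         -> INVALIDATING, LRU_DISCARDING, WITHDRAWING,
+ *			  RELINQUISHING or FAILED
+ *	INVALIDATING   -> ACTIVE (fscache_resume_after_invalidation)
+ *	FAILED         -> RELINQUISHING or WITHDRAWING
+ *	LRU_DISCARDING/WITHDRAWING -> QUIESCENT
+ *	RELINQUISHING  -> DROPPED
+ */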
+
+static void fscache_cookie_worker(struct work_struct *work)
+{
+ struct fscache_cookie *cookie = container_of(work, struct fscache_cookie, work);
+
+ fscache_see_cookie(cookie, fscache_cookie_see_work);
+ fscache_cookie_state_machine(cookie);
+ fscache_put_cookie(cookie, fscache_cookie_put_work);
+}
+
+/*
+ * Trigger the state machine once the object becomes inactive. The cookie's
+ * work item will be scheduled when someone transitions n_accesses to 0 - but
+ * if someone's already done that, schedule it anyway.
+ */
+static void __fscache_withdraw_cookie(struct fscache_cookie *cookie)
+{
+ int n_accesses;
+ bool unpinned;
+
+ unpinned = test_and_clear_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
+
+ /* Need to read the access count after unpinning */
+ n_accesses = atomic_read(&cookie->n_accesses);
+ if (unpinned)
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, fscache_access_cache_unpin);
+ if (n_accesses == 0)
+ fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
+}
+
+static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
+{
+ fscache_see_cookie(cookie, fscache_cookie_see_lru_do_one);
+
+ spin_lock(&cookie->lock);
+ if (cookie->state != FSCACHE_COOKIE_STATE_ACTIVE ||
+ time_before(jiffies, cookie->unused_at + fscache_lru_cookie_timeout) ||
+ atomic_read(&cookie->n_active) > 0) {
+ spin_unlock(&cookie->lock);
+ fscache_stat(&fscache_n_cookies_lru_removed);
+ } else {
+ set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
+ spin_unlock(&cookie->lock);
+ fscache_stat(&fscache_n_cookies_lru_expired);
+ _debug("lru c=%x", cookie->debug_id);
+ __fscache_withdraw_cookie(cookie);
+ }
+
+ fscache_put_cookie(cookie, fscache_cookie_put_lru);
+}
+
+static void fscache_cookie_lru_worker(struct work_struct *work)
+{
+ struct fscache_cookie *cookie;
+ unsigned long unused_at;
+
+ spin_lock(&fscache_cookie_lru_lock);
+
+ while (!list_empty(&fscache_cookie_lru)) {
+ cookie = list_first_entry(&fscache_cookie_lru,
+ struct fscache_cookie, commit_link);
+ unused_at = cookie->unused_at + fscache_lru_cookie_timeout;
+ if (time_before(jiffies, unused_at)) {
+ timer_reduce(&fscache_cookie_lru_timer, unused_at);
+ break;
+ }
+
+ list_del_init(&cookie->commit_link);
+ fscache_stat_d(&fscache_n_cookies_lru);
+ spin_unlock(&fscache_cookie_lru_lock);
+ fscache_cookie_lru_do_one(cookie);
+ spin_lock(&fscache_cookie_lru_lock);
+ }
+
+ spin_unlock(&fscache_cookie_lru_lock);
+}
+
+static void fscache_cookie_lru_timed_out(struct timer_list *timer)
+{
+ queue_work(fscache_wq, &fscache_cookie_lru_work);
+}
+
+static void fscache_cookie_drop_from_lru(struct fscache_cookie *cookie)
+{
+ bool need_put = false;
+
+ if (!list_empty(&cookie->commit_link)) {
+ spin_lock(&fscache_cookie_lru_lock);
+ if (!list_empty(&cookie->commit_link)) {
+ list_del_init(&cookie->commit_link);
+ fscache_stat_d(&fscache_n_cookies_lru);
+ fscache_stat(&fscache_n_cookies_lru_dropped);
+ need_put = true;
+ }
+ spin_unlock(&fscache_cookie_lru_lock);
+ if (need_put)
+ fscache_put_cookie(cookie, fscache_cookie_put_lru);
+ }
+}
+
+/*
+ * Remove a cookie from the hash table.
+ */
+static void fscache_unhash_cookie(struct fscache_cookie *cookie)
+{
+ struct hlist_bl_head *h;
+ unsigned int bucket;
+
+ bucket = cookie->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
+ h = &fscache_cookie_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_del(&cookie->hash_link);
+ clear_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags);
+ hlist_bl_unlock(h);
+ fscache_stat(&fscache_n_relinquishes_dropped);
+}
+
+static void fscache_drop_withdraw_cookie(struct fscache_cookie *cookie)
+{
+ fscache_cookie_drop_from_lru(cookie);
+ __fscache_withdraw_cookie(cookie);
+}
+
+/**
+ * fscache_withdraw_cookie - Mark a cookie for withdrawal
+ * @cookie: The cookie to be withdrawn.
+ *
+ * Allow the cache backend to withdraw the backing for a cookie for its own
+ * reasons, even if that cookie is in active use.
+ */
+void fscache_withdraw_cookie(struct fscache_cookie *cookie)
+{
+ set_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
+ fscache_drop_withdraw_cookie(cookie);
+}
+EXPORT_SYMBOL(fscache_withdraw_cookie);
+
+/*
+ * Allow the netfs to release a cookie back to the cache.
+ * - the object will be marked as recyclable on disk if retire is true
+ */
+void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
+{
+ fscache_stat(&fscache_n_relinquishes);
+ if (retire)
+ fscache_stat(&fscache_n_relinquishes_retire);
+
+ _enter("c=%08x{%d},%d",
+ cookie->debug_id, atomic_read(&cookie->n_active), retire);
+
+ if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
+ "Cookie c=%x already relinquished\n", cookie->debug_id))
+ return;
+
+ if (retire)
+ set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+ trace_fscache_relinquish(cookie, retire);
+
+ ASSERTCMP(atomic_read(&cookie->n_active), ==, 0);
+ ASSERTCMP(atomic_read(&cookie->volume->n_cookies), >, 0);
+ atomic_dec(&cookie->volume->n_cookies);
+
+ if (test_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags)) {
+ set_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags);
+ fscache_drop_withdraw_cookie(cookie);
+ } else {
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_DROPPED);
+ fscache_unhash_cookie(cookie);
+ }
+ fscache_put_cookie(cookie, fscache_cookie_put_relinquish);
+}
+EXPORT_SYMBOL(__fscache_relinquish_cookie);
+
+/*
+ * Drop a reference to a cookie.
+ */
+void fscache_put_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ struct fscache_volume *volume = cookie->volume;
+ unsigned int cookie_debug_id = cookie->debug_id;
+ bool zero;
+ int ref;
+
+ zero = __refcount_dec_and_test(&cookie->ref, &ref);
+ trace_fscache_cookie(cookie_debug_id, ref - 1, where);
+ if (zero) {
+ fscache_free_cookie(cookie);
+ fscache_put_volume(volume, fscache_volume_put_cookie);
+ }
+}
+EXPORT_SYMBOL(fscache_put_cookie);
+
+/*
+ * Get a reference to a cookie.
+ */
+struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ int ref;
+
+ __refcount_inc(&cookie->ref, &ref);
+ trace_fscache_cookie(cookie->debug_id, ref + 1, where);
+ return cookie;
+}
+EXPORT_SYMBOL(fscache_get_cookie);
+
+/*
+ * Ask the cache to effect invalidation of a cookie.
+ */
+static void fscache_perform_invalidation(struct fscache_cookie *cookie)
+{
+ if (!cookie->volume->cache->ops->invalidate_cookie(cookie))
+ fscache_caching_failed(cookie);
+ fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
+}
+
+/*
+ * Invalidate an object.
+ */
+void __fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data, loff_t new_size,
+ unsigned int flags)
+{
+ bool is_caching;
+
+ _enter("c=%x", cookie->debug_id);
+
+ fscache_stat(&fscache_n_invalidates);
+
+ if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
+ "Trying to invalidate relinquished cookie\n"))
+ return;
+
+ if ((flags & FSCACHE_INVAL_DIO_WRITE) &&
+ test_and_set_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
+ return;
+
+ spin_lock(&cookie->lock);
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ fscache_update_aux(cookie, aux_data, &new_size);
+ cookie->inval_counter++;
+ trace_fscache_invalidate(cookie, new_size);
+
+ switch (cookie->state) {
+ case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
+ default:
+ spin_unlock(&cookie->lock);
+ _leave(" [no %u]", cookie->state);
+ return;
+
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ if (!test_and_set_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ __fscache_begin_cookie_access(cookie, fscache_access_invalidate_cookie);
+ fallthrough;
+ case FSCACHE_COOKIE_STATE_CREATING:
+ spin_unlock(&cookie->lock);
+ _leave(" [look %x]", cookie->inval_counter);
+ return;
+
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ is_caching = fscache_begin_cookie_access(
+ cookie, fscache_access_invalidate_cookie);
+ if (is_caching)
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_INVALIDATING);
+ spin_unlock(&cookie->lock);
+ wake_up_cookie_state(cookie);
+
+ if (is_caching)
+ fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
+ _leave(" [inv]");
+ return;
+ }
+}
+EXPORT_SYMBOL(__fscache_invalidate);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Generate a list of extant cookies in /proc/fs/fscache/cookies
+ */
+static int fscache_cookies_seq_show(struct seq_file *m, void *v)
+{
+ struct fscache_cookie *cookie;
+ unsigned int keylen = 0, auxlen = 0;
+ u8 *p;
+
+ if (v == &fscache_cookies) {
+ seq_puts(m,
+ "COOKIE VOLUME REF ACT ACC S FL DEF \n"
+ "======== ======== === === === = == ================\n"
+ );
+ return 0;
+ }
+
+ cookie = list_entry(v, struct fscache_cookie, proc_link);
+
+ seq_printf(m,
+ "%08x %08x %3d %3d %3d %c %02lx",
+ cookie->debug_id,
+ cookie->volume->debug_id,
+ refcount_read(&cookie->ref),
+ atomic_read(&cookie->n_active),
+ atomic_read(&cookie->n_accesses),
+ fscache_cookie_states[cookie->state],
+ cookie->flags);
+
+ keylen = cookie->key_len;
+ auxlen = cookie->aux_len;
+
+ if (keylen > 0 || auxlen > 0) {
+ seq_puts(m, " ");
+ p = keylen <= sizeof(cookie->inline_key) ?
+ cookie->inline_key : cookie->key;
+ for (; keylen > 0; keylen--)
+ seq_printf(m, "%02x", *p++);
+ if (auxlen > 0) {
+ seq_puts(m, ", ");
+ p = auxlen <= sizeof(cookie->inline_aux) ?
+ cookie->inline_aux : cookie->aux;
+ for (; auxlen > 0; auxlen--)
+ seq_printf(m, "%02x", *p++);
+ }
+ }
+
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static void *fscache_cookies_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(fscache_cookies_lock)
+{
+ read_lock(&fscache_cookies_lock);
+ return seq_list_start_head(&fscache_cookies, *_pos);
+}
+
+static void *fscache_cookies_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &fscache_cookies, _pos);
+}
+
+static void fscache_cookies_seq_stop(struct seq_file *m, void *v)
+ __releases(fscache_cookies_lock)
+{
+ read_unlock(&fscache_cookies_lock);
+}
+
+const struct seq_operations fscache_cookies_seq_ops = {
+ .start = fscache_cookies_seq_start,
+ .next = fscache_cookies_seq_next,
+ .stop = fscache_cookies_seq_stop,
+ .show = fscache_cookies_seq_show,
+};
+#endif
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Internal definitions for FS-Cache
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "FS-Cache: " fmt
+
+#include <linux/slab.h>
+#include <linux/fscache-cache.h>
+#include <trace/events/fscache.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+
+/*
+ * cache.c
+ */
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_caches_seq_ops;
+#endif
+bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
+void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
+
+static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
+{
+ return smp_load_acquire(&cache->state);
+}
+
+static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
+{
+ return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
+}
+
+static inline void fscache_set_cache_state(struct fscache_cache *cache,
+ enum fscache_cache_state new_state)
+{
+ smp_store_release(&cache->state, new_state);
+}
+
+static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
+ enum fscache_cache_state old_state,
+ enum fscache_cache_state new_state)
+{
+ return try_cmpxchg_release(&cache->state, &old_state, new_state);
+}
+
+/*
+ * cookie.c
+ */
+extern struct kmem_cache *fscache_cookie_jar;
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_cookies_seq_ops;
+#endif
+extern struct timer_list fscache_cookie_lru_timer;
+
+extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
+extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+
+static inline void fscache_see_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
+ where);
+}
+
+/*
+ * main.c
+ */
+extern unsigned fscache_debug;
+
+extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
+
+/*
+ * proc.c
+ */
+#ifdef CONFIG_PROC_FS
+extern int __init fscache_proc_init(void);
+extern void fscache_proc_cleanup(void);
+#else
+#define fscache_proc_init() (0)
+#define fscache_proc_cleanup() do {} while (0)
+#endif
+
+/*
+ * stats.c
+ */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_volumes;
+extern atomic_t fscache_n_volumes_collision;
+extern atomic_t fscache_n_volumes_nomem;
+extern atomic_t fscache_n_cookies;
+extern atomic_t fscache_n_cookies_lru;
+extern atomic_t fscache_n_cookies_lru_expired;
+extern atomic_t fscache_n_cookies_lru_removed;
+extern atomic_t fscache_n_cookies_lru_dropped;
+
+extern atomic_t fscache_n_acquires;
+extern atomic_t fscache_n_acquires_ok;
+extern atomic_t fscache_n_acquires_oom;
+
+extern atomic_t fscache_n_invalidates;
+
+extern atomic_t fscache_n_relinquishes;
+extern atomic_t fscache_n_relinquishes_retire;
+extern atomic_t fscache_n_relinquishes_dropped;
+
+extern atomic_t fscache_n_resizes;
+extern atomic_t fscache_n_resizes_null;
+
+static inline void fscache_stat(atomic_t *stat)
+{
+ atomic_inc(stat);
+}
+
+static inline void fscache_stat_d(atomic_t *stat)
+{
+ atomic_dec(stat);
+}
+
+#define __fscache_stat(stat) (stat)
+
+int fscache_stats_show(struct seq_file *m, void *v);
+#else
+
+#define __fscache_stat(stat) (NULL)
+#define fscache_stat(stat) do {} while (0)
+#define fscache_stat_d(stat) do {} while (0)
+#endif
+
+/*
+ * volume.c
+ */
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_volumes_seq_ops;
+#endif
+
+struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+bool fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+void fscache_create_volume(struct fscache_volume *volume, bool wait);
+
+/*****************************************************************************/
+/*
+ * debug tracing
+ */
+#define dbgprintk(FMT, ...) \
+ printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+
+#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
+
+#define kjournal(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
+
+#ifdef __KDEBUG
+#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
+#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
+#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
+
+#elif defined(CONFIG_FSCACHE_DEBUG)
+#define _enter(FMT, ...) \
+do { \
+ if (__do_kdebug(ENTER)) \
+ kenter(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _leave(FMT, ...) \
+do { \
+ if (__do_kdebug(LEAVE)) \
+ kleave(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _debug(FMT, ...) \
+do { \
+ if (__do_kdebug(DEBUG)) \
+ kdebug(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#else
+#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
+#endif
+
+/*
+ * determine whether a particular optional debugging point should be logged
+ * - we need to go through three steps to persuade cpp to correctly join the
+ * shorthand in FSCACHE_DEBUG_LEVEL with its prefix
+ */
+#define ____do_kdebug(LEVEL, POINT) \
+ unlikely((fscache_debug & \
+ (FSCACHE_POINT_##POINT << (FSCACHE_DEBUG_ ## LEVEL * 3))))
+#define ___do_kdebug(LEVEL, POINT) \
+ ____do_kdebug(LEVEL, POINT)
+#define __do_kdebug(POINT) \
+ ___do_kdebug(FSCACHE_DEBUG_LEVEL, POINT)
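+
+/* A worked example: with FSCACHE_DEBUG_LEVEL defined to COOKIE (as volume.c
+ * does), __do_kdebug(ENTER) expands to:
+ *
+ *	unlikely((fscache_debug &
+ *		  (FSCACHE_POINT_ENTER << (FSCACHE_DEBUG_COOKIE * 3))))
+ *
+ * i.e. it tests bit 3 (value 8) of the fscache_debug mask.
+ */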
+
+#define FSCACHE_DEBUG_CACHE 0
+#define FSCACHE_DEBUG_COOKIE 1
+#define FSCACHE_DEBUG_OBJECT 2
+#define FSCACHE_DEBUG_OPERATION 3
+
+#define FSCACHE_POINT_ENTER 1
+#define FSCACHE_POINT_LEAVE 2
+#define FSCACHE_POINT_DEBUG 4
+
+#ifndef FSCACHE_DEBUG_LEVEL
+#define FSCACHE_DEBUG_LEVEL CACHE
+#endif
+
+/*
+ * assertions
+ */
+#if 1 /* defined(__KDEBUGALL) */
+
+#define ASSERT(X) \
+do { \
+ if (unlikely(!(X))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTCMP(X, OP, Y) \
+do { \
+ if (unlikely(!((X) OP (Y)))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ pr_err("%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIF(C, X) \
+do { \
+ if (unlikely((C) && !(X))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIFCMP(C, X, OP, Y) \
+do { \
+ if (unlikely((C) && !((X) OP (Y)))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ pr_err("%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#else
+
+#define ASSERT(X) do {} while (0)
+#define ASSERTCMP(X, OP, Y) do {} while (0)
+#define ASSERTIF(C, X) do {} while (0)
+#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
+
+#endif /* assert or not */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Cache data I/O routines
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#define FSCACHE_DEBUG_LEVEL OPERATION
+#include <linux/fscache-cache.h>
+#include <linux/uio.h>
+#include <linux/bvec.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/**
+ * fscache_wait_for_operation - Wait for an object to become accessible
+ * @cres: The cache resources for the operation being performed
+ * @want_state: The minimum state the object must be at
+ *
+ * See if the target cache object is at the specified minimum state of
+ * accessibility yet, and if not, wait for it.
+ */
+bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state)
+{
+ struct fscache_cookie *cookie = fscache_cres_cookie(cres);
+ enum fscache_cookie_state state;
+
+again:
+ if (!fscache_cache_is_live(cookie->volume->cache)) {
+ _leave(" [broken]");
+ return false;
+ }
+
+ state = fscache_cookie_state(cookie);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_CREATING:
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ if (want_state == FSCACHE_WANT_PARAMS)
+ goto ready; /* There can be no content */
+ fallthrough;
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ wait_var_event(&cookie->state,
+ fscache_cookie_state(cookie) != state);
+ goto again;
+
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ goto ready;
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ default:
+ _leave(" [not live]");
+ return false;
+ }
+
+ready:
+ if (!cres->cache_priv2)
+ return cookie->volume->cache->ops->begin_operation(cres, want_state);
+ return true;
+}
+EXPORT_SYMBOL(fscache_wait_for_operation);
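+
+/* An illustrative sketch (not part of this patch): a cache backend can gate
+ * an I/O helper on the object having reached the state it needs, e.g.:
+ *
+ *	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+ *		return -ENOBUFS;
+ */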
+
+/*
+ * Begin an I/O operation on the cache, waiting till we reach the right state.
+ *
+ * Attaches the resources required to the operation resources record.
+ */
+static int fscache_begin_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie,
+ enum fscache_want_state want_state,
+ enum fscache_access_trace why)
+{
+ enum fscache_cookie_state state;
+ long timeo;
+ bool once_only = false;
+
+ cres->ops = NULL;
+ cres->cache_priv = cookie;
+ cres->cache_priv2 = NULL;
+ cres->debug_id = cookie->debug_id;
+ cres->inval_counter = cookie->inval_counter;
+
+ if (!fscache_begin_cookie_access(cookie, why))
+ return -ENOBUFS;
+
+again:
+ spin_lock(&cookie->lock);
+
+ state = fscache_cookie_state(cookie);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ goto wait_for_file_wrangling;
+ case FSCACHE_COOKIE_STATE_CREATING:
+ if (want_state == FSCACHE_WANT_PARAMS)
+ goto ready; /* There can be no content */
+ goto wait_for_file_wrangling;
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ goto ready;
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ WARN(1, "Can't use cookie in state %u\n", cookie->state);
+ goto not_live;
+ default:
+ goto not_live;
+ }
+
+ready:
+ spin_unlock(&cookie->lock);
+ if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
+ goto failed;
+ return 0;
+
+wait_for_file_wrangling:
+ spin_unlock(&cookie->lock);
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ atomic_read(&cookie->n_accesses),
+ fscache_access_io_wait);
+ timeo = wait_var_event_timeout(&cookie->state,
+ fscache_cookie_state(cookie) != state, 20 * HZ);
+ if (timeo <= 1 && !once_only) {
+ pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u\n",
+ __func__, fscache_cookie_state(cookie), state);
+ fscache_print_cookie(cookie, 'O');
+ once_only = true;
+ }
+ goto again;
+
+not_live:
+ spin_unlock(&cookie->lock);
+failed:
+ cres->cache_priv = NULL;
+ cres->ops = NULL;
+ fscache_end_cookie_access(cookie, fscache_access_io_not_live);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+}
+
+int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
+{
+ return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
+ fscache_access_io_read);
+}
+EXPORT_SYMBOL(__fscache_begin_read_operation);
+
+int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
+{
+ return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
+ fscache_access_io_write);
+}
+EXPORT_SYMBOL(__fscache_begin_write_operation);
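+
+/* An illustrative sketch (not part of this patch): a netfs read path would
+ * typically pair the fscache_begin_read_operation() wrapper with
+ * fscache_end_operation():
+ *
+ *	struct netfs_cache_resources cres = {};
+ *
+ *	ret = fscache_begin_read_operation(&cres, cookie);
+ *	if (ret == 0) {
+ *		ret = fscache_read(&cres, pos, &iter, NETFS_READ_HOLE_IGNORE,
+ *				   term_func, term_func_priv);
+ *		fscache_end_operation(&cres);
+ *	}
+ */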
+
+/**
+ * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback
+ * @mapping: The mapping the folio belongs to.
+ * @folio: The folio being dirtied.
+ * @cookie: The cookie referring to the cache object
+ *
+ * Set the dirty flag on a folio and pin an in-use cache object in memory
+ * so that writeback can later write to it. This is intended
+ * to be called from the filesystem's ->dirty_folio() method.
+ *
+ * Return: true if the dirty flag was set on the folio, false otherwise.
+ */
+bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
+ struct fscache_cookie *cookie)
+{
+ struct inode *inode = mapping->host;
+ bool need_use = false;
+
+ _enter("");
+
+ if (!filemap_dirty_folio(mapping, folio))
+ return false;
+ if (!fscache_cookie_valid(cookie))
+ return true;
+
+ if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
+ inode->i_state |= I_PINNING_FSCACHE_WB;
+ need_use = true;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (need_use)
+ fscache_use_cookie(cookie, true);
+ }
+ return true;
+}
+EXPORT_SYMBOL(fscache_dirty_folio);
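+
+/* An illustrative sketch (not part of this patch): a filesystem would wire
+ * this into its address_space_operations; myfs_inode_cookie() is a
+ * hypothetical helper returning the inode's cookie:
+ *
+ *	static bool myfs_dirty_folio(struct address_space *mapping,
+ *				     struct folio *folio)
+ *	{
+ *		return fscache_dirty_folio(mapping, folio,
+ *					   myfs_inode_cookie(mapping->host));
+ *	}
+ */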
+
+struct fscache_write_request {
+ struct netfs_cache_resources cache_resources;
+ struct address_space *mapping;
+ loff_t start;
+ size_t len;
+ bool set_bits;
+ netfs_io_terminated_t term_func;
+ void *term_func_priv;
+};
+
+void __fscache_clear_page_bits(struct address_space *mapping,
+ loff_t start, size_t len)
+{
+ pgoff_t first = start / PAGE_SIZE;
+ pgoff_t last = (start + len - 1) / PAGE_SIZE;
+ struct page *page;
+
+ if (len) {
+ XA_STATE(xas, &mapping->i_pages, first);
+
+ rcu_read_lock();
+ xas_for_each(&xas, page, last) {
+ end_page_fscache(page);
+ }
+ rcu_read_unlock();
+ }
+}
+EXPORT_SYMBOL(__fscache_clear_page_bits);
+
+/*
+ * Deal with the completion of writing the data to the cache.
+ */
+static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct fscache_write_request *wreq = priv;
+
+ fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
+ wreq->set_bits);
+
+ if (wreq->term_func)
+ wreq->term_func(wreq->term_func_priv, transferred_or_error,
+ was_async);
+ fscache_end_operation(&wreq->cache_resources);
+ kfree(wreq);
+}
+
+void __fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool cond)
+{
+ struct fscache_write_request *wreq;
+ struct netfs_cache_resources *cres;
+ struct iov_iter iter;
+ int ret = -ENOBUFS;
+
+ if (len == 0)
+ goto abandon;
+
+ _enter("%llx,%zx", start, len);
+
+ wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
+ if (!wreq)
+ goto abandon;
+ wreq->mapping = mapping;
+ wreq->start = start;
+ wreq->len = len;
+ wreq->set_bits = cond;
+ wreq->term_func = term_func;
+ wreq->term_func_priv = term_func_priv;
+
+ cres = &wreq->cache_resources;
+ if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
+ fscache_access_io_write) < 0)
+ goto abandon_free;
+
+ ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);
+ if (ret < 0)
+ goto abandon_end;
+
+ /* TODO: Consider clearing page bits now for space the write isn't
+ * covering. This is more complicated than it appears when THPs are
+ * taken into account.
+ */
+
+ iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
+ fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
+ return;
+
+abandon_end:
+ return fscache_wreq_done(wreq, ret, false);
+abandon_free:
+ kfree(wreq);
+abandon:
+ fscache_clear_page_bits(mapping, start, len, cond);
+ if (term_func)
+ term_func(term_func_priv, ret, false);
+}
+EXPORT_SYMBOL(__fscache_write_to_cache);
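+
+/* An illustrative sketch (not part of this patch): a netfs would usually
+ * call the fscache_write_to_cache() wrapper from its writeback path once the
+ * pages have gone to the server, with @caching saying whether the PG_fscache
+ * bits were set. myfs_write_done() is a hypothetical completion handler:
+ *
+ *	fscache_write_to_cache(cookie, inode->i_mapping, start, len,
+ *			       i_size_read(inode), myfs_write_done, wreq,
+ *			       caching);
+ */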
+
+/*
+ * Change the size of a backing object.
+ */
+void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
+{
+ struct netfs_cache_resources cres;
+
+ trace_fscache_resize(cookie, new_size);
+ if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
+ fscache_access_io_resize) == 0) {
+ fscache_stat(&fscache_n_resizes);
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
+
+ /* We cannot defer a resize as we need to do it inside the
+ * netfs's inode lock so that we're serialised with respect to
+ * writes.
+ */
+ cookie->volume->cache->ops->resize_cookie(&cres, new_size);
+ fscache_end_operation(&cres);
+ } else {
+ fscache_stat(&fscache_n_resizes_null);
+ }
+}
+EXPORT_SYMBOL(__fscache_resize_cookie);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* General filesystem local caching manager
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/module.h>
+#include <linux/init.h>
+#define CREATE_TRACE_POINTS
+#include "internal.h"
+
+MODULE_DESCRIPTION("FS Cache Manager");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+unsigned fscache_debug;
+module_param_named(debug, fscache_debug, uint,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug,
+ "FS-Cache debugging mask");
+
+EXPORT_TRACEPOINT_SYMBOL(fscache_access_cache);
+EXPORT_TRACEPOINT_SYMBOL(fscache_access_volume);
+EXPORT_TRACEPOINT_SYMBOL(fscache_access);
+
+struct workqueue_struct *fscache_wq;
+EXPORT_SYMBOL(fscache_wq);
+
+/*
+ * Mixing scores (in bits) for (7,20):
+ * Input delta: 1-bit 2-bit
+ * 1 round: 330.3 9201.6
+ * 2 rounds: 1246.4 25475.4
+ * 3 rounds: 1907.1 31295.1
+ * 4 rounds: 2042.3 31718.6
+ * Perfect: 2048 31744
+ * (32*64) (32*31/2 * 64)
+ */
+#define HASH_MIX(x, y, a) \
+ ( x ^= (a), \
+ y ^= x, x = rol32(x, 7),\
+ x += y, y = rol32(y,20),\
+ y *= 9 )
+
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
+{
+ /* Use arch-optimized multiply if one exists */
+ return __hash_32(y ^ __hash_32(x));
+}
+
+/*
+ * Generate a hash. This is derived from full_name_hash(), but we want to be
+ * sure it is architecture-independent and stable from one kernel to the
+ * next, as bits of the computed hash value might appear on disk. The caller
+ * must guarantee that the source data is a multiple of four bytes in size.
+ */
+unsigned int fscache_hash(unsigned int salt, const void *data, size_t len)
+{
+ const __le32 *p = data;
+ unsigned int a, x = 0, y = salt, n = len / sizeof(__le32);
+
+ for (; n; n--) {
+ a = le32_to_cpu(*p++);
+ HASH_MIX(x, y, a);
+ }
+ return fold_hash(x, y);
+}
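+
+/* For example, cookie.c salts the hash of a cookie's index key with the hash
+ * of its volume's key:
+ *
+ *	cookie->key_hash = fscache_hash(cookie->volume->key_hash,
+ *					buf, buf_size);
+ *
+ * where buf has been zero-padded up to a multiple of four bytes.
+ */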
+
+/*
+ * initialise the fs caching module
+ */
+static int __init fscache_init(void)
+{
+ int ret = -ENOMEM;
+
+ fscache_wq = alloc_workqueue("fscache", WQ_UNBOUND | WQ_FREEZABLE, 0);
+ if (!fscache_wq)
+ goto error_wq;
+
+ ret = fscache_proc_init();
+ if (ret < 0)
+ goto error_proc;
+
+ fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
+ sizeof(struct fscache_cookie),
+ 0, 0, NULL);
+ if (!fscache_cookie_jar) {
+ pr_notice("Failed to allocate a cookie jar\n");
+ ret = -ENOMEM;
+ goto error_cookie_jar;
+ }
+
+ pr_notice("Loaded\n");
+ return 0;
+
+error_cookie_jar:
+ fscache_proc_cleanup();
+error_proc:
+ destroy_workqueue(fscache_wq);
+error_wq:
+ return ret;
+}
+
+fs_initcall(fscache_init);
+
+/*
+ * clean up on module removal
+ */
+static void __exit fscache_exit(void)
+{
+ _enter("");
+
+ kmem_cache_destroy(fscache_cookie_jar);
+ fscache_proc_cleanup();
+ destroy_workqueue(fscache_wq);
+ pr_notice("Unloaded\n");
+}
+
+module_exit(fscache_exit);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* FS-Cache statistics viewing interface
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "internal.h"
+
+/*
+ * initialise the /proc/fs/fscache/ directory
+ */
+int __init fscache_proc_init(void)
+{
+ if (!proc_mkdir("fs/fscache", NULL))
+ goto error_dir;
+
+ if (!proc_create_seq("fs/fscache/caches", S_IFREG | 0444, NULL,
+ &fscache_caches_seq_ops))
+ goto error;
+
+ if (!proc_create_seq("fs/fscache/volumes", S_IFREG | 0444, NULL,
+ &fscache_volumes_seq_ops))
+ goto error;
+
+ if (!proc_create_seq("fs/fscache/cookies", S_IFREG | 0444, NULL,
+ &fscache_cookies_seq_ops))
+ goto error;
+
+#ifdef CONFIG_FSCACHE_STATS
+ if (!proc_create_single("fs/fscache/stats", S_IFREG | 0444, NULL,
+ fscache_stats_show))
+ goto error;
+#endif
+
+ return 0;
+
+error:
+ remove_proc_entry("fs/fscache", NULL);
+error_dir:
+ return -ENOMEM;
+}
+
+/*
+ * clean up the /proc/fs/fscache/ directory
+ */
+void fscache_proc_cleanup(void)
+{
+ remove_proc_subtree("fs/fscache", NULL);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* FS-Cache statistics
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "internal.h"
+
+/*
+ * operation counters
+ */
+atomic_t fscache_n_volumes;
+atomic_t fscache_n_volumes_collision;
+atomic_t fscache_n_volumes_nomem;
+atomic_t fscache_n_cookies;
+atomic_t fscache_n_cookies_lru;
+atomic_t fscache_n_cookies_lru_expired;
+atomic_t fscache_n_cookies_lru_removed;
+atomic_t fscache_n_cookies_lru_dropped;
+
+atomic_t fscache_n_acquires;
+atomic_t fscache_n_acquires_ok;
+atomic_t fscache_n_acquires_oom;
+
+atomic_t fscache_n_invalidates;
+
+atomic_t fscache_n_updates;
+EXPORT_SYMBOL(fscache_n_updates);
+
+atomic_t fscache_n_relinquishes;
+atomic_t fscache_n_relinquishes_retire;
+atomic_t fscache_n_relinquishes_dropped;
+
+atomic_t fscache_n_resizes;
+atomic_t fscache_n_resizes_null;
+
+atomic_t fscache_n_read;
+EXPORT_SYMBOL(fscache_n_read);
+atomic_t fscache_n_write;
+EXPORT_SYMBOL(fscache_n_write);
+atomic_t fscache_n_no_write_space;
+EXPORT_SYMBOL(fscache_n_no_write_space);
+atomic_t fscache_n_no_create_space;
+EXPORT_SYMBOL(fscache_n_no_create_space);
+atomic_t fscache_n_culled;
+EXPORT_SYMBOL(fscache_n_culled);
+
+/*
+ * display the general statistics
+ */
+int fscache_stats_show(struct seq_file *m, void *v)
+{
+ seq_puts(m, "FS-Cache statistics\n");
+ seq_printf(m, "Cookies: n=%d v=%d vcol=%u voom=%u\n",
+ atomic_read(&fscache_n_cookies),
+ atomic_read(&fscache_n_volumes),
+ atomic_read(&fscache_n_volumes_collision),
+ atomic_read(&fscache_n_volumes_nomem)
+ );
+
+ seq_printf(m, "Acquire: n=%u ok=%u oom=%u\n",
+ atomic_read(&fscache_n_acquires),
+ atomic_read(&fscache_n_acquires_ok),
+ atomic_read(&fscache_n_acquires_oom));
+
+ seq_printf(m, "LRU : n=%u exp=%u rmv=%u drp=%u at=%ld\n",
+ atomic_read(&fscache_n_cookies_lru),
+ atomic_read(&fscache_n_cookies_lru_expired),
+ atomic_read(&fscache_n_cookies_lru_removed),
+ atomic_read(&fscache_n_cookies_lru_dropped),
+ timer_pending(&fscache_cookie_lru_timer) ?
+ fscache_cookie_lru_timer.expires - jiffies : 0);
+
+ seq_printf(m, "Invals : n=%u\n",
+ atomic_read(&fscache_n_invalidates));
+
+ seq_printf(m, "Updates: n=%u rsz=%u rsn=%u\n",
+ atomic_read(&fscache_n_updates),
+ atomic_read(&fscache_n_resizes),
+ atomic_read(&fscache_n_resizes_null));
+
+ seq_printf(m, "Relinqs: n=%u rtr=%u drop=%u\n",
+ atomic_read(&fscache_n_relinquishes),
+ atomic_read(&fscache_n_relinquishes_retire),
+ atomic_read(&fscache_n_relinquishes_dropped));
+
+ seq_printf(m, "NoSpace: nwr=%u ncr=%u cull=%u\n",
+ atomic_read(&fscache_n_no_write_space),
+ atomic_read(&fscache_n_no_create_space),
+ atomic_read(&fscache_n_culled));
+
+ seq_printf(m, "IO : rd=%u wr=%u\n",
+ atomic_read(&fscache_n_read),
+ atomic_read(&fscache_n_write));
+
+ netfs_stats_show(m);
+ return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Volume-level cache cookie handling.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/export.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+#define fscache_volume_hash_shift 10
+static struct hlist_bl_head fscache_volume_hash[1 << fscache_volume_hash_shift];
+static atomic_t fscache_volume_debug_id;
+static LIST_HEAD(fscache_volumes);
+
+static void fscache_create_volume_work(struct work_struct *work);
+
+struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ int ref;
+
+ __refcount_inc(&volume->ref, &ref);
+ trace_fscache_volume(volume->debug_id, ref + 1, where);
+ return volume;
+}
+
+static void fscache_see_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ int ref = refcount_read(&volume->ref);
+
+ trace_fscache_volume(volume->debug_id, ref, where);
+}
+
+/*
+ * Pin the cache behind a volume so that we can access it.
+ */
+static void __fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ n_accesses = atomic_inc_return(&volume->n_accesses);
+ smp_mb__after_atomic();
+ trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
+ refcount_read(&volume->ref),
+ n_accesses, why);
+}
+
+/**
+ * fscache_begin_volume_access - Pin a cache so a volume can be accessed
+ * @volume: The volume cookie
+ * @cookie: A datafile cookie for a tracing reference (or NULL)
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing a volume, returning true if successful. This works as follows:
+ *
+ * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
+ * then we return false to indicate access was not permitted.
+ *
+ * (2) If the cache tests as live, then we increment the volume's n_accesses
+ * count and then recheck the cache liveness, ending the access if it
+ * ceased to be live.
+ *
+ * (3) When we end the access, we decrement the volume's n_accesses and wake
+ * up any waiters if it reaches 0.
+ *
+ * (4) Whilst the cache is caching, the volume's n_accesses is kept
+ * artificially incremented to prevent wakeups from happening.
+ *
+ * (5) When the cache is taken offline, the state is changed to prevent new
+ * accesses, the volume's n_accesses is decremented and we wait for it to
+ * become 0.
+ *
+ * The datafile @cookie and the @why indicator are merely provided for tracing
+ * purposes.
+ */
+bool fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ if (!fscache_cache_is_live(volume->cache))
+ return false;
+ __fscache_begin_volume_access(volume, cookie, why);
+ if (!fscache_cache_is_live(volume->cache)) {
+ fscache_end_volume_access(volume, cookie, fscache_access_unlive);
+ return false;
+ }
+ return true;
+}
+
+/**
+ * fscache_end_volume_access - Unpin a cache at the end of an access.
+ * @volume: The volume cookie
+ * @cookie: A datafile cookie for a tracing reference (or NULL)
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache volume after we've accessed it. The datafile @cookie and the
+ * @why indicator are merely provided for tracing purposes.
+ */
+void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ smp_mb__before_atomic();
+ n_accesses = atomic_dec_return(&volume->n_accesses);
+ trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
+ refcount_read(&volume->ref),
+ n_accesses, why);
+ if (n_accesses == 0)
+ wake_up_var(&volume->n_accesses);
+}
+EXPORT_SYMBOL(fscache_end_volume_access);
+
+static bool fscache_volume_same(const struct fscache_volume *a,
+ const struct fscache_volume *b)
+{
+ size_t klen;
+
+ if (a->key_hash != b->key_hash ||
+ a->cache != b->cache ||
+ a->key[0] != b->key[0])
+ return false;
+
+ klen = round_up(a->key[0] + 1, sizeof(__le32));
+ return memcmp(a->key, b->key, klen) == 0;
+}
+
+static bool fscache_is_acquire_pending(struct fscache_volume *volume)
+{
+ return test_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &volume->flags);
+}
+
+static void fscache_wait_on_volume_collision(struct fscache_volume *candidate,
+ unsigned int collidee_debug_id)
+{
+ wait_on_bit_timeout(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
+ TASK_UNINTERRUPTIBLE, 20 * HZ);
+ if (fscache_is_acquire_pending(candidate)) {
+ pr_notice("Potential volume collision new=%08x old=%08x",
+ candidate->debug_id, collidee_debug_id);
+ fscache_stat(&fscache_n_volumes_collision);
+ wait_on_bit(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
+/*
+ * Attempt to insert the new volume into the hash. If there's a collision, we
+ * wait for the old volume to be dropped if it's being relinquished, and fail
+ * otherwise.
+ */
+static bool fscache_hash_volume(struct fscache_volume *candidate)
+{
+ struct fscache_volume *cursor;
+ struct hlist_bl_head *h;
+ struct hlist_bl_node *p;
+ unsigned int bucket, collidee_debug_id = 0;
+
+ bucket = candidate->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
+ h = &fscache_volume_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_for_each_entry(cursor, p, h, hash_link) {
+ if (fscache_volume_same(candidate, cursor)) {
+ if (!test_bit(FSCACHE_VOLUME_RELINQUISHED, &cursor->flags))
+ goto collision;
+ fscache_see_volume(cursor, fscache_volume_get_hash_collision);
+ set_bit(FSCACHE_VOLUME_COLLIDED_WITH, &cursor->flags);
+ set_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &candidate->flags);
+ collidee_debug_id = cursor->debug_id;
+ break;
+ }
+ }
+
+ hlist_bl_add_head(&candidate->hash_link, h);
+ hlist_bl_unlock(h);
+
+ if (fscache_is_acquire_pending(candidate))
+ fscache_wait_on_volume_collision(candidate, collidee_debug_id);
+ return true;
+
+collision:
+ fscache_see_volume(cursor, fscache_volume_collision);
+ hlist_bl_unlock(h);
+ return false;
+}
+
+/*
+ * Allocate and initialise a volume representation cookie.
+ */
+static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
+{
+ struct fscache_volume *volume;
+ struct fscache_cache *cache;
+ size_t klen, hlen;
+ u8 *key;
+
+ klen = strlen(volume_key);
+ if (klen > NAME_MAX)
+ return NULL;
+
+ if (!coherency_data)
+ coherency_len = 0;
+
+ cache = fscache_lookup_cache(cache_name, false);
+ if (IS_ERR(cache))
+ return NULL;
+
+ volume = kzalloc(struct_size(volume, coherency, coherency_len),
+ GFP_KERNEL);
+ if (!volume)
+ goto err_cache;
+
+ volume->cache = cache;
+ volume->coherency_len = coherency_len;
+ if (coherency_data)
+ memcpy(volume->coherency, coherency_data, coherency_len);
+ INIT_LIST_HEAD(&volume->proc_link);
+ INIT_WORK(&volume->work, fscache_create_volume_work);
+ refcount_set(&volume->ref, 1);
+ spin_lock_init(&volume->lock);
+
+ /* Stick the length on the front of the key and pad it out to make
+ * hashing easier.
+ */
+ hlen = round_up(1 + klen + 1, sizeof(__le32));
+ key = kzalloc(hlen, GFP_KERNEL);
+ if (!key)
+ goto err_vol;
+ key[0] = klen;
+ memcpy(key + 1, volume_key, klen);
+
+ volume->key = key;
+ volume->key_hash = fscache_hash(0, key, hlen);
+
+ volume->debug_id = atomic_inc_return(&fscache_volume_debug_id);
+ down_write(&fscache_addremove_sem);
+ atomic_inc(&cache->n_volumes);
+ list_add_tail(&volume->proc_link, &fscache_volumes);
+ fscache_see_volume(volume, fscache_volume_new_acquire);
+ fscache_stat(&fscache_n_volumes);
+ up_write(&fscache_addremove_sem);
+ _leave(" = v=%x", volume->debug_id);
+ return volume;
+
+err_vol:
+ kfree(volume);
+err_cache:
+ fscache_put_cache(cache, fscache_cache_put_alloc_volume);
+ fscache_stat(&fscache_n_volumes_nomem);
+ return NULL;
+}
+
+/*
+ * Create a volume's representation on disk.  We hold a volume ref and a cache
+ * access that must be released when we're done.
+ */
+static void fscache_create_volume_work(struct work_struct *work)
+{
+ const struct fscache_cache_ops *ops;
+ struct fscache_volume *volume =
+ container_of(work, struct fscache_volume, work);
+
+ fscache_see_volume(volume, fscache_volume_see_create_work);
+
+ ops = volume->cache->ops;
+ if (ops->acquire_volume)
+ ops->acquire_volume(volume);
+ fscache_end_cache_access(volume->cache,
+ fscache_access_acquire_volume_end);
+
+ clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
+ fscache_put_volume(volume, fscache_volume_put_create_work);
+}
+
+/*
+ * Dispatch a worker thread to create a volume's representation on disk.
+ */
+void fscache_create_volume(struct fscache_volume *volume, bool wait)
+{
+ if (test_and_set_bit(FSCACHE_VOLUME_CREATING, &volume->flags))
+ goto maybe_wait;
+ if (volume->cache_priv)
+ goto no_wait; /* We raced */
+ if (!fscache_begin_cache_access(volume->cache,
+ fscache_access_acquire_volume))
+ goto no_wait;
+
+ fscache_get_volume(volume, fscache_volume_get_create_work);
+ if (!schedule_work(&volume->work))
+ fscache_put_volume(volume, fscache_volume_put_create_work);
+
+maybe_wait:
+ if (wait) {
+ fscache_see_volume(volume, fscache_volume_wait_create_work);
+ wait_on_bit(&volume->flags, FSCACHE_VOLUME_CREATING,
+ TASK_UNINTERRUPTIBLE);
+ }
+ return;
+no_wait:
+ clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
+ wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
+}
+
+/*
+ * Acquire a volume representation cookie and link it to a (proposed) cache.
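+ *
+ * Note: a netfs normally calls this via the fscache_acquire_volume() wrapper
+ * in include/linux/fscache.h, e.g. (illustrative):
+ *
+ *	volume = fscache_acquire_volume("afs,example.com,1234", NULL, NULL, 0);
+ *
+ * A NULL cache_name selects the default cache; the volume key must be a
+ * printable string with no '/' characters in it.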
+ */
+struct fscache_volume *__fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
+{
+ struct fscache_volume *volume;
+
+ volume = fscache_alloc_volume(volume_key, cache_name,
+ coherency_data, coherency_len);
+ if (!volume)
+ return ERR_PTR(-ENOMEM);
+
+ if (!fscache_hash_volume(volume)) {
+ fscache_put_volume(volume, fscache_volume_put_hash_collision);
+ return ERR_PTR(-EBUSY);
+ }
+
+ fscache_create_volume(volume, false);
+ return volume;
+}
+EXPORT_SYMBOL(__fscache_acquire_volume);
+
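+/*
+ * Wake a candidate volume that's waiting in
+ * fscache_wait_on_volume_collision() for this volume, now being unhashed, to
+ * go away.
+ */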
+static void fscache_wake_pending_volume(struct fscache_volume *volume,
+ struct hlist_bl_head *h)
+{
+ struct fscache_volume *cursor;
+ struct hlist_bl_node *p;
+
+ hlist_bl_for_each_entry(cursor, p, h, hash_link) {
+ if (fscache_volume_same(cursor, volume)) {
+ fscache_see_volume(cursor, fscache_volume_see_hash_wake);
+ clear_and_wake_up_bit(FSCACHE_VOLUME_ACQUIRE_PENDING,
+ &cursor->flags);
+ return;
+ }
+ }
+}
+
+/*
+ * Remove a volume cookie from the hash table.
+ */
+static void fscache_unhash_volume(struct fscache_volume *volume)
+{
+ struct hlist_bl_head *h;
+ unsigned int bucket;
+
+ bucket = volume->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
+ h = &fscache_volume_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_del(&volume->hash_link);
+ if (test_bit(FSCACHE_VOLUME_COLLIDED_WITH, &volume->flags))
+ fscache_wake_pending_volume(volume, h);
+ hlist_bl_unlock(h);
+}
+
+/*
+ * Free a volume cookie, dropping its attachments to the cache.
+ */
+static void fscache_free_volume(struct fscache_volume *volume)
+{
+ struct fscache_cache *cache = volume->cache;
+
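+	/* If the backend created a representation for this volume, pin the
+	 * cache with an access whilst we ask it to free that representation.
+	 */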
+ if (volume->cache_priv) {
+ __fscache_begin_volume_access(volume, NULL,
+ fscache_access_relinquish_volume);
+ if (volume->cache_priv)
+ cache->ops->free_volume(volume);
+ fscache_end_volume_access(volume, NULL,
+ fscache_access_relinquish_volume_end);
+ }
+
+ down_write(&fscache_addremove_sem);
+ list_del_init(&volume->proc_link);
+ atomic_dec(&volume->cache->n_volumes);
+ up_write(&fscache_addremove_sem);
+
+ if (!hlist_bl_unhashed(&volume->hash_link))
+ fscache_unhash_volume(volume);
+
+ trace_fscache_volume(volume->debug_id, 0, fscache_volume_free);
+ kfree(volume->key);
+ kfree(volume);
+ fscache_stat_d(&fscache_n_volumes);
+ fscache_put_cache(cache, fscache_cache_put_volume);
+}
+
+/*
+ * Drop a reference to a volume cookie.
+ */
+void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ if (volume) {
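+		/* Sample the debug ID before dropping the ref: once we've
+		 * released our reference, a concurrent put may free the
+		 * volume.
+		 */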
+ unsigned int debug_id = volume->debug_id;
+ bool zero;
+ int ref;
+
+ zero = __refcount_dec_and_test(&volume->ref, &ref);
+ trace_fscache_volume(debug_id, ref - 1, where);
+ if (zero)
+ fscache_free_volume(volume);
+ }
+}
+
+/*
+ * Relinquish a volume representation cookie.
+ */
+void __fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate)
+{
+ if (WARN_ON(test_and_set_bit(FSCACHE_VOLUME_RELINQUISHED, &volume->flags)))
+ return;
+
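+	/* Invalidation discards the volume's contents.  Otherwise, if the
+	 * netfs supplied updated coherency data, save it so that the cache
+	 * can store it with the volume.
+	 */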
+ if (invalidate) {
+ set_bit(FSCACHE_VOLUME_INVALIDATE, &volume->flags);
+ } else if (coherency_data) {
+ memcpy(volume->coherency, coherency_data, volume->coherency_len);
+ }
+
+ fscache_put_volume(volume, fscache_volume_put_relinquish);
+}
+EXPORT_SYMBOL(__fscache_relinquish_volume);
+
+/**
+ * fscache_withdraw_volume - Withdraw a volume from being cached
+ * @volume: Volume cookie
+ *
+ * Withdraw a cache volume from service, waiting for all accesses to complete
+ * before returning.
+ */
+void fscache_withdraw_volume(struct fscache_volume *volume)
+{
+ int n_accesses;
+
+ _debug("withdraw V=%x", volume->debug_id);
+
+ /* Allow wakeups on dec-to-0 */
+ n_accesses = atomic_dec_return(&volume->n_accesses);
+ trace_fscache_access_volume(volume->debug_id, 0,
+ refcount_read(&volume->ref),
+ n_accesses, fscache_access_cache_unpin);
+
+ wait_var_event(&volume->n_accesses,
+ atomic_read(&volume->n_accesses) == 0);
+}
+EXPORT_SYMBOL(fscache_withdraw_volume);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Generate a list of volumes in /proc/fs/fscache/volumes
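+ *
+ * The output looks something like (values illustrative):
+ *
+ *	VOLUME   REF   nCOOK ACC FL CACHE           KEY
+ *	======== ===== ===== === == =============== ================
+ *	00000001     3     1   1 00 mycache         afs,example.com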
+ */
+static int fscache_volumes_seq_show(struct seq_file *m, void *v)
+{
+ struct fscache_volume *volume;
+
+ if (v == &fscache_volumes) {
+ seq_puts(m,
+ "VOLUME REF nCOOK ACC FL CACHE KEY\n"
+ "======== ===== ===== === == =============== ================\n");
+ return 0;
+ }
+
+ volume = list_entry(v, struct fscache_volume, proc_link);
+ seq_printf(m,
+ "%08x %5d %5d %3d %02lx %-15.15s %s\n",
+ volume->debug_id,
+ refcount_read(&volume->ref),
+ atomic_read(&volume->n_cookies),
+ atomic_read(&volume->n_accesses),
+ volume->flags,
+ volume->cache->name ?: "-",
+ volume->key + 1);
+ return 0;
+}
+
+static void *fscache_volumes_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(&fscache_addremove_sem)
+{
+ down_read(&fscache_addremove_sem);
+ return seq_list_start_head(&fscache_volumes, *_pos);
+}
+
+static void *fscache_volumes_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &fscache_volumes, _pos);
+}
+
+static void fscache_volumes_seq_stop(struct seq_file *m, void *v)
+ __releases(&fscache_addremove_sem)
+{
+ up_read(&fscache_addremove_sem);
+}
+
+const struct seq_operations fscache_volumes_seq_ops = {
+ .start = fscache_volumes_seq_start,
+ .next = fscache_volumes_seq_next,
+ .stop = fscache_volumes_seq_stop,
+ .show = fscache_volumes_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
* Written by David Howells (dhowells@redhat.com)
*/
+#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <trace/events/netfs.h>
+#include "fscache_internal.h"
#ifdef pr_fmt
#undef pr_fmt
/*
* debug tracing
*/
+#if 0
#define dbgprintk(FMT, ...) \
printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
+#endif
#include <linux/module.h>
#include <linux/export.h>
#include "internal.h"
-#define CREATE_TRACE_POINTS
-#include <trace/events/netfs.h>
+//#define CREATE_TRACE_POINTS
+//#include <trace/events/netfs.h>
MODULE_DESCRIPTION("Network fs support");
MODULE_AUTHOR("Red Hat, Inc.");
unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
+