// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfsd/nfscache.c
 *
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct rb_root rb_head;		/* cache entries in this bucket */
	struct list_head lru_head;	/* same entries, in LRU order */
	spinlock_t cache_lock;		/* protects both of the above */
};

static struct kmem_cache	*drc_slab;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers.  Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{}
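
/*
 * Illustrative sketch only, guarded out so it does not collide with the
 * stub above: one way to produce the table in the preceding comment is to
 * scale with the square root of available low memory and clamp to the
 * stated hard cap of 256k entries. The use of totalram_pages(),
 * totalhigh_pages() and int_sqrt() is an assumption of this sketch.
 */
#if 0
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	/* 16 * sqrt(low memory in pages), expressed as a count of entries */
	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT - 10);

	/* Hard cap of 256k entries */
	return min_t(unsigned int, limit, 256 * 1024);
}
#endif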

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{}
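
/*
 * Illustrative sketch only (guarded out): the computation described in the
 * comment above, using roundup_pow_of_two() from <linux/log2.h>.
 */
#if 0
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
#endif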

static struct nfsd_cacherep *
nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
		    struct nfsd_net *nn)
{}

static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
{}

static unsigned long
nfsd_cacherep_dispose(struct list_head *dispose)
{}

static void
nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			    struct nfsd_cacherep *rp)
{}

static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
				struct nfsd_net *nn)
{}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
			struct nfsd_net *nn)
{}

int nfsd_drc_slab_create(void)
{}

void nfsd_drc_slab_free(void)
{}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{}
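
/*
 * Illustrative sketch only (guarded out), covering just the LRU motion:
 * refresh the entry's timestamp and move it to the tail of its bucket's
 * LRU list. The c_timestamp and c_lru field names are assumptions of this
 * sketch; any deferred cleaning is assumed to be driven elsewhere.
 */
#if 0
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}
#endif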

static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{}

/*
 * Remove no more than @max expired entries from bucket @b, collecting
 * them on @dispose for the caller to free. If @max is zero, do not limit
 * the number of removed entries.
 */
static void
nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			 unsigned int max, struct list_head *dispose)
{}
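
/*
 * Illustrative sketch only (guarded out). Walk the bucket's LRU list from
 * oldest to newest, unhash entries that have expired or that exceed the
 * cache-wide limit, and collect them on @dispose so the caller can free
 * them outside the bucket lock. The field names (c_lru, c_state,
 * c_timestamp) and the RC_EXPIRE / RC_INPROG constants from "cache.h"
 * are assumptions of this sketch.
 */
#if 0
static void
nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
			 unsigned int max, struct list_head *dispose)
{
	unsigned long expiry = jiffies - RC_EXPIRE;
	struct nfsd_cacherep *rp, *tmp;
	unsigned int freed = 0;

	lockdep_assert_held(&b->cache_lock);

	/* The bucket LRU is ordered oldest-entry-first */
	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/* Skip entries still being filled in by an in-progress call */
		if (rp->c_state == RC_INPROG)
			continue;

		/*
		 * Stop once we reach entries that are neither expired nor
		 * over the cache-wide limit.
		 */
		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
		    time_before(expiry, rp->c_timestamp))
			break;

		nfsd_cacherep_unlink_locked(nn, b, rp);
		list_add(&rp->c_lru, dispose);

		if (max && ++freed > max)
			break;
	}
}
#endif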

/**
 * nfsd_reply_cache_count - count_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Returns the total number of entries in the duplicate reply cache. To
 * keep things simple and quick, this is not the number of expired entries
 * in the cache (i.e., the number that would be removed by a call to
 * nfsd_reply_cache_scan).
 */
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{}
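
/*
 * Illustrative sketch only (guarded out), assuming the shrinker is
 * embedded in struct nfsd_net as ->nfsd_reply_cache_shrinker and that
 * ->num_drc_entries tracks the current entry count.
 */
#if 0
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return atomic_read(&nn->num_drc_entries);
}
#endif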

/**
 * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Free expired entries on each bucket's LRU list until we've released
 * nr_to_scan objects. Nothing will be released if the cache
 * has not exceeded its max_drc_entries limit.
 *
 * Returns the number of entries released by this call.
 */
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{}
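
/*
 * Illustrative sketch only (guarded out). Prune each bucket in turn under
 * its lock, dispose of the collected entries outside the lock, and stop
 * once nr_to_scan entries have been released. Tracepoints and statistics
 * are omitted; the nfsd_net field names used here are assumptions of this
 * sketch.
 */
#if 0
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);
	unsigned long freed = 0;
	LIST_HEAD(dispose);
	unsigned int i;

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;

		spin_lock(&b->cache_lock);
		nfsd_prune_bucket_locked(nn, b, 0, &dispose);
		spin_unlock(&b->cache_lock);

		freed += nfsd_cacherep_dispose(&dispose);
		if (freed > sc->nr_to_scan)
			break;
	}
	return freed;
}
#endif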

/**
 * nfsd_cache_csum - Checksum incoming NFS Call arguments
 * @buf: buffer containing a whole RPC Call message
 * @start: starting byte of the NFS Call header
 * @remaining: size of the NFS Call header, in bytes
 *
 * Compute a weak checksum of the leading bytes of an NFS procedure
 * call header to help verify that a retransmitted Call matches an
 * entry in the duplicate reply cache.
 *
 * To avoid assumptions about how the RPC message is laid out in
 * @buf and what else it might contain (eg, a GSS MIC suffix), the
 * caller passes us the exact location and length of the NFS Call
 * header.
 *
 * Returns a 32-bit checksum value, as defined in RFC 793.
 */
static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
			      unsigned int remaining)
{}
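
/*
 * Illustrative sketch only (guarded out). Carve out the first RC_CSUMLEN
 * bytes (or fewer) of the Call header with xdr_buf_subsegment(), then run
 * csum_partial() over the head kvec and any page data the header spills
 * into. RC_CSUMLEN is assumed to come from "cache.h".
 */
#if 0
static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
			      unsigned int remaining)
{
	unsigned int base, len;
	struct xdr_buf subbuf;
	__wsum csum = 0;
	void *p;
	int idx;

	if (remaining > RC_CSUMLEN)
		remaining = RC_CSUMLEN;
	if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
		return csum;

	/* rq_arg.head first */
	if (subbuf.head[0].iov_len) {
		len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
		csum = csum_partial(subbuf.head[0].iov_base, len, csum);
		remaining -= len;
	}

	/* Continue into the page array, if the header extends past the head */
	idx = subbuf.page_base / PAGE_SIZE;
	base = subbuf.page_base & ~PAGE_MASK;
	while (remaining) {
		p = page_address(subbuf.pages[idx]) + base;
		len = min_t(unsigned int, PAGE_SIZE - base, remaining);
		csum = csum_partial(p, len, csum);
		remaining -= len;
		base = 0;
		idx++;
	}
	return csum;
}
#endif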

static int
nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
		   const struct nfsd_cacherep *rp, struct nfsd_net *nn)
{}

/*
 * Search the request hash bucket for an entry that matches the given key.
 * Must be called with the bucket's cache_lock held. Returns the matching
 * entry if one is found; otherwise inserts @key and returns it.
 */
static struct nfsd_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
			struct nfsd_net *nn)
{}
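
/*
 * Illustrative sketch only (guarded out): a standard rb-tree walk keyed by
 * nfsd_cache_key_cmp(). On a miss the pre-allocated @key is linked into
 * the tree and returned; on a hit the existing entry is returned instead.
 * Either way the returned entry is bumped to the tail of the bucket's LRU.
 * The c_node field name and the use of an rb-tree per bucket are
 * assumptions of this sketch.
 */
#if 0
static struct nfsd_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
			struct nfsd_net *nn)
{
	struct rb_node **p = &b->rb_head.rb_node, *parent = NULL;
	struct nfsd_cacherep *rp, *ret = key;
	int cmp;

	while (*p != NULL) {
		parent = *p;
		rp = rb_entry(parent, struct nfsd_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp, nn);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			/* Found a matching entry */
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	lru_put_end(b, ret);
	return ret;
}
#endif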

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 * @start: starting byte in @rqstp->rq_arg of the NFS Call header
 * @len: size of the NFS Call header, in bytes
 * @cacherep: OUT: DRC entry for this request
 *
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
		      unsigned int len, struct nfsd_cacherep **cacherep)
{}
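
/*
 * Illustrative sketch only (guarded out), greatly simplified: statistics,
 * tracepoints and the payload-size sanity checks of the real function are
 * omitted, and names such as rq_cachetype, c_state, c_type, c_replstat and
 * c_replvec are assumptions. Following the helper signatures above, this
 * sketch pre-allocates the new entry before searching, then either keeps
 * it (miss) or discards it and acts on the entry already found (hit).
 */
#if 0
int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
		      unsigned int len, struct nfsd_cacherep **cacherep)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfsd_cacherep *rp, *found;
	struct nfsd_drc_bucket *b;
	LIST_HEAD(dispose);
	int rtn = RC_DOIT;
	__wsum csum;

	if (rqstp->rq_cachetype == RC_NOCACHE)
		return RC_DOIT;

	csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);

	/* The common case is a miss, so pre-allocate the new entry */
	rp = nfsd_cacherep_alloc(rqstp, csum, nn);
	if (!rp)
		return RC_DOIT;

	b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp) {
		/* Hit: drop the entry we pre-allocated */
		nfsd_reply_cache_free_locked(NULL, rp, nn);
		rp = found;

		if (rp->c_state == RC_INPROG) {
			/* Original call still in progress: drop the retransmit */
			rtn = RC_DROPIT;
		} else if (rp->c_type == RC_REPLSTAT) {
			/* Cached status word; encode it into the reply (omitted) */
			rtn = RC_REPLY;
		} else if (rp->c_type == RC_REPLBUFF) {
			/* Cached reply body: append it to rq_res */
			rtn = nfsd_cache_append(rqstp, &rp->c_replvec) ?
				RC_REPLY : RC_DROPIT;
		}
		spin_unlock(&b->cache_lock);
		return rtn;
	}

	/* Miss: this thread owns the new entry until nfsd_cache_update() */
	*cacherep = rp;
	rp->c_state = RC_INPROG;
	nfsd_prune_bucket_locked(nn, b, 3, &dispose);
	spin_unlock(&b->cache_lock);

	atomic_inc(&nn->num_drc_entries);
	nfsd_cacherep_dispose(&dispose);
	return rtn;
}
#endif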

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @rp: IN: DRC entry for this request
 * @cachetype: which cache to update
 * @statp: pointer to Reply's NFS status code, or NULL
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
		       int cachetype, __be32 *statp)
{}
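
/*
 * Illustrative sketch only (guarded out), with the length sanity checks
 * and statistics of the real function omitted. Depending on @cachetype,
 * either the single status word or the reply bytes from @statp to the end
 * of the rq_res head are saved in the entry, and the entry is marked
 * RC_DONE. The field names (c_replstat, c_replvec, c_secure, c_type,
 * c_state) are assumptions of this sketch.
 */
#if 0
void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
		       int cachetype, __be32 *statp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct kvec *resv = &rqstp->rq_res.head[0];
	struct nfsd_drc_bucket *b;
	size_t bufsize;

	if (!rp)
		return;

	b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);

	/* Nothing worth caching (e.g. encode failure): just release the entry */
	if (!statp || cachetype == RC_NOCACHE) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		/* Only the NFS status word needs to be remembered */
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		/* Copy everything from the status word to the end of the head */
		bufsize = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
		rp->c_replvec.iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!rp->c_replvec.iov_base) {
			nfsd_reply_cache_free(b, rp, nn);
			return;
		}
		rp->c_replvec.iov_len = bufsize;
		memcpy(rp->c_replvec.iov_base, statp, bufsize);
		break;
	}

	spin_lock(&b->cache_lock);
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
}
#endif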

static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{}