linux/fs/nfsd/nfs4state.c

/*
*  Copyright (c) 2001 The Regents of the University of Michigan.
*  All rights reserved.
*
*  Kendrick Smith <[email protected]>
*  Andy Adamson <[email protected]>
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions
*  are met:
*
*  1. Redistributions of source code must retain the above copyright
*     notice, this list of conditions and the following disclaimer.
*  2. Redistributions in binary form must reproduce the above copyright
*     notice, this list of conditions and the following disclaimer in the
*     documentation and/or other materials provided with the distribution.
*  3. Neither the name of the University nor the names of its
*     contributors may be used to endorse or promote products derived
*     from this software without specific prior written permission.
*
*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>

#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY

#define all_ones
static const stateid_t one_stateid =;
static const stateid_t zero_stateid =;
static const stateid_t currentstateid =;
static const stateid_t close_stateid =;

static u64 current_sessionid =;

#define ZERO_STATEID(stateid)
#define ONE_STATEID(stateid)
#define CURRENT_STATEID(stateid)
#define CLOSE_STATEID(stateid)

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);
static void deleg_reaper(struct nfsd_net *nn);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops;

static struct workqueue_struct *laundry_wq;

int nfsd4_create_laundry_wq(void)
{}

void nfsd4_destroy_laundry_wq(void)
{}

static bool is_session_dead(struct nfsd4_session *ses)
{}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{}

static bool is_client_expired(struct nfs4_client *clp)
{}

static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
					struct nfs4_client *clp)
{}

static __be32 get_client_locked(struct nfs4_client *clp)
{}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{}

static void put_client_renew_locked(struct nfs4_client *clp)
{}

static void put_client_renew(struct nfs4_client *clp)
{}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{}

static void nfsd4_put_session(struct nfsd4_session *ses)
{}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{}

static void
free_nbl(struct kref *kref)
{}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops =;

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation in
 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
 * the server return an error if the client attempts to downgrade to a
 * combination of share bits not explicable by closing some of its
 * previous opens.
 *
 * This enforcement is arguably incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 *
 * But you could also argue that our current code is already overkill,
 * since it only exists to return NFS4ERR_INVAL on incorrect client
 * behavior.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{}
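
/*
 * Illustrative sketch (not part of the original file): one way to keep a
 * per-value bit for every share mode ever used, as the comment above
 * describes.  The "example_" helpers are hypothetical; the real code uses
 * the set/clear/test helpers below on st_access_bmap and st_deny_bmap.
 */
static inline void example_record_share_mode(unsigned long *bmap, u32 share)
{
	/* share is NFS4_SHARE_ACCESS_{READ,WRITE,BOTH}, i.e. 1, 2 or 3 */
	*bmap |= 1UL << share;
}

static inline bool example_share_mode_was_used(unsigned long bmap, u32 share)
{
	return (bmap >> share) & 1;
}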

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{}

static int nfs4_access_to_omode(u32 access)
{}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{}

void
put_nfs4_file(struct nfs4_file *fi)
{}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{}

static struct nfsd_file *
find_rw_file(struct nfs4_file *f)
{}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{}

static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
{}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS
#define OWNER_HASH_SIZE
#define OWNER_HASH_MASK

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{}

static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;

static const struct rhashtable_params nfs4_file_rhash_params =;

/*
 * Check if courtesy clients have conflicting access and resolve it if possible
 *
 * access:  is op_share_access if share_access is true.
 *	    Check if access mode, op_share_access, would conflict with
 *	    the current deny mode of the file 'fp'.
 * access:  is op_share_deny if share_access is false.
 *	    Check if the deny mode, op_share_deny, would conflict with
 *	    current access of the file 'fp'.
 * stp:     skip checking this entry.
 * new_stp: normal open, not open upgrade.
 *
 * Function returns:
 *	false - access/deny mode conflict with normal client.
 *	true  - no conflict or conflict with courtesy client(s) is resolved.
 */
static bool
nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
		struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{}

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char cs_type)
{}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32 bits and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{}

static void block_delegations(struct knfsd_fh *fh)
{}
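
/*
 * Illustrative sketch (not part of the original file) of the two-filter
 * scheme described above: hash the filehandle once, then use each of the
 * three low-order bytes as an index into a 256-bit bitmap.  An entry is
 * blocked if all three bits are set in either the "new" or the "old"
 * filter.  Structure and helper names here are hypothetical.
 */
struct example_bloom_pair {
	DECLARE_BITMAP(new, 256);
	DECLARE_BITMAP(old, 256);
};

static void example_bloom_set(struct example_bloom_pair *bp, u32 hash)
{
	__set_bit(hash & 0xff, bp->new);
	__set_bit((hash >> 8) & 0xff, bp->new);
	__set_bit((hash >> 16) & 0xff, bp->new);
}

static bool example_bloom_test_one(const unsigned long *filter, u32 hash)
{
	return test_bit(hash & 0xff, filter) &&
	       test_bit((hash >> 8) & 0xff, filter) &&
	       test_bit((hash >> 16) & 0xff, filter);
}

static bool example_bloom_test(const struct example_bloom_pair *bp, u32 hash)
{
	return example_bloom_test_one(bp->new, hash) ||
	       example_bloom_test_one(bp->old, hash);
}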

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct nfs4_clnt_odstate *odstate, u32 dl_type)
{}

void
nfs4_put_stid(struct nfs4_stid *s)
{}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{}

static void put_deleg_file(struct nfs4_file *fp)
{}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */

static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{}

static bool delegation_hashed(struct nfs4_delegation *dp)
{}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp, unsigned short statusmask)
{}

static void destroy_delegation(struct nfs4_delegation *dp)
{}

static void revoke_delegation(struct nfs4_delegation *dp)
{}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{}

static void release_openowner(struct nfs4_openowner *oo)
{}

static struct nfs4_stid *find_one_sb_stid(struct nfs4_client *clp,
					  struct super_block *sb,
					  unsigned int sc_types)
{}

/**
 * nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem
 * @net:  used to identify instance of nfsd (there is one per net namespace)
 * @sb:   super_block used to identify target filesystem
 *
 * All nfs4 states (open, lock, delegation, layout) held by the server instance
 * and associated with a file on the given filesystem will be revoked resulting
 * in any files being closed and so all references from nfsd to the filesystem
 * being released.  Thus nfsd will no longer prevent the filesystem from being
 * unmounted.
 *
 * The clients which own the states will subsequently be notified that the
 * states have been "admin-revoked".
 */
void nfsd4_revoke_states(struct net *net, struct super_block *sb)
{}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{}

static void
gen_sessionid(struct nfsd4_session *ses)
{}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ
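
/*
 * Illustrative sketch (not part of the original file): the overhead the
 * comment above adds up, spelled out with hypothetical constant names.
 */
#define EXAMPLE_CACHED_RPC_HDR_BYTES		24	/* xid through accept state, AUTH_NULL verifier */
#define EXAMPLE_CACHED_COMPOUND_HDR_BYTES	12	/* compound header with zero-length tag */
#define EXAMPLE_CACHED_SEQUENCE_RESP_BYTES	44	/* SEQUENCE op response */
#define EXAMPLE_MIN_HDR_SEQ_BYTES \
	(EXAMPLE_CACHED_RPC_HDR_BYTES + \
	 EXAMPLE_CACHED_COMPOUND_HDR_BYTES + \
	 EXAMPLE_CACHED_SEQUENCE_RESP_BYTES)	/* 80 bytes in total */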

static void
free_session_slots(struct nfsd4_session *ses)
{}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{}

static void free_conn(struct nfsd4_conn *c)
{}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{}

static void __free_session(struct nfsd4_session *ses)
{}

static void free_session(struct nfsd4_session *ses)
{}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{}

/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{}

/* 
 * XXX Should we use a slab cache?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name,
				struct nfsd_net *nn)
{}

static void __free_client(struct kref *k)
{}

static void drop_client(struct nfs4_client *clp)
{}

static void
free_client(struct nfs4_client *clp)
{}

/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{}

static void
unhash_client(struct nfs4_client *clp)
{}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{}

static void
__destroy_client(struct nfs4_client *clp)
{}

static void
destroy_client(struct nfs4_client *clp)
{}

static void inc_reclaim_complete(struct nfs4_client *clp)
{}

static void expire_client(struct nfs4_client *clp)
{}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{}

static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{}


static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{}
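
/*
 * Illustrative sketch (not part of the original file): the comparison the
 * comment above describes, reduced to its core.  The helper name is
 * hypothetical; the real checks live in is_gss_cred() and same_creds().
 */
static bool example_same_principal(struct svc_cred *a, struct svc_cred *b)
{
	if (!uid_eq(a->cr_uid, b->cr_uid) || !gid_eq(a->cr_gid, b->cr_gid))
		return false;
	if (!groups_equal(a->cr_group_info, b->cr_group_info))
		return false;
	/* a NULL principal only matches another NULL principal */
	if (!a->cr_principal || !b->cr_principal)
		return a->cr_principal == b->cr_principal;
	return strcmp(a->cr_principal, b->cr_principal) == 0;
}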

static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{}

bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{}

static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{}

static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{}

static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t,
		     unsigned short typemask, unsigned short ok_states)
{}

static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
{}

static void seq_quote_mem(struct seq_file *m, char *data, int len)
{}

static const char *cb_state2str(int state)
{}

static int client_info_show(struct seq_file *m, void *v)
{}

DEFINE_SHOW_ATTRIBUTE();

static void *states_start(struct seq_file *s, loff_t *pos)
	__acquires(&clp->cl_lock)
{}

static void *states_next(struct seq_file *s, void *v, loff_t *pos)
{}

static void states_stop(struct seq_file *s, void *v)
	__releases(&clp->cl_lock)
{}

static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
{}

static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{}

static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
{}

static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
{}

static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{}

static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
{}

static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{}

static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
{}

static int states_show(struct seq_file *s, void *v)
{}

static struct seq_operations states_seq_ops =;

static int client_states_open(struct inode *inode, struct file *file)
{}

static int client_opens_release(struct inode *inode, struct file *file)
{}

static const struct file_operations client_states_fops =;

/*
 * Normally we refuse to destroy clients that are in use, but here the
 * administrator is telling us to just do it.  We also want to wait
 * so the caller has a guarantee that the client's locks are gone by
 * the time the write returns:
 */
static void force_expire_client(struct nfs4_client *clp)
{}

static ssize_t client_ctl_write(struct file *file, const char __user *buf,
				   size_t size, loff_t *pos)
{}

static const struct file_operations client_ctl_fops =;

static const struct tree_descr client_files[] =;

static int
nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
				struct rpc_task *task)
{}

static void
nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
{}

static int
nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
{}

static void
nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
{}

static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops =;

static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops =;

static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
{}

static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{}

static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{}

static void
add_to_unconfirmed(struct nfs4_client *clp)
{}

static void
move_to_confirmed(struct nfs4_client *clp)
{}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{} 

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{}

static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{}

/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE, encode the uncached rep error on the next
 * operation, which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{}

static bool client_has_openowners(struct nfs4_client *clp)
{}

static bool client_has_state(struct nfs4_client *clp)
{}

static __be32 copy_impl_id(struct nfs4_client *clp,
				struct nfsd4_exchange_id *exid)
{}

__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse)
{}

/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{}

#define NFSD_MIN_REQ_HDR_SEQ_SZ

#define NFSD_MIN_RESP_HDR_SEQ_SZ

static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{}

/*
 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
 * These are based on similar macros in linux/sunrpc/msg_prot.h.
 */
#define RPC_MAX_HEADER_WITH_AUTH_SYS

#define RPC_MAX_REPHEADER_WITH_AUTH_SYS

#define NFSD_CB_MAX_REQ_SZ
#define NFSD_CB_MAX_RESP_SZ

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{}

__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{}

static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
		struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     union nfsd4_op_u *u)
{}

static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{}

__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{}

static bool replay_matches_cache(struct svc_rqst *rqstp,
		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{}

__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{}

__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{}

__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{}

__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{}

static struct nfs4_file *nfsd4_alloc_file(void)
{}

/* OPEN Share state helper functions */

static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
{}

void
nfsd4_free_slabs(void)
{}

int
nfsd4_init_slabs(void)
{}

static unsigned long
nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{}

static unsigned long
nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{}

void
nfsd4_init_leases_net(struct nfsd_net *nn)
{}

enum rp_lock {};

static void init_nfs4_replay(struct nfs4_replay *rp)
{}

static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
				      struct nfs4_stateowner *so)
{}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{}

static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{}

static const struct nfs4_stateowner_operations openowner_ops =;

static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{}

static void nfsd4_drop_revoked_stid(struct nfs4_stid *s)
	__releases(&s->sc_client->cl_lock)
{}

static void nfsd40_drop_revoked_stid(struct nfs4_client *cl,
				    stateid_t *stid)
{}

static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{}

/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{}

static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{}

static struct nfs4_openowner *
find_or_alloc_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			      struct nfsd4_compound_state *cstate)
{}

static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{}

/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{}

static noinline_for_stack struct nfs4_file *
nfsd4_file_hash_lookup(const struct svc_fh *fhp)
{}

/*
 * On hash insertion, identify entries with the same inode but
 * distinct filehandles. They will all be on the list returned
 * by rhltable_lookup().
 *
 * inode->i_lock prevents racing insertions from adding an entry
 * for the same inode/fhp pair twice.
 */
static noinline_for_stack struct nfs4_file *
nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
{}

static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
{}

/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{}

static bool nfsd4_deleg_present(const struct inode *inode)
{}

/**
 * nfsd_wait_for_delegreturn - wait for delegations to be returned
 * @rqstp: the RPC transaction being executed
 * @inode: in-core inode of the file being waited for
 *
 * The timeout prevents deadlock if all nfsd threads happen to be
 * tied up waiting for returning delegations.
 *
 * Return values:
 *   %true: delegation was returned
 *   %false: timed out waiting for delegreturn
 */
bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
{}
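
/*
 * Illustrative sketch (not part of the original file): a bounded wait of the
 * kind the kerneldoc above describes.  The two-second timeout and the helper
 * name are placeholders, and the sketch assumes the delegation-return path
 * issues a matching wake_up_var(inode).
 */
static bool example_wait_for_delegreturn(struct inode *inode)
{
	long timeleft = wait_var_event_timeout(inode,
					       !nfsd4_deleg_present(inode),
					       2 * HZ);

	return timeleft > 0;
}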

static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{}

static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{}

static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{}

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops =;

static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{}

/* Called from break_lease() with flc_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lease *fl)
{}

/**
 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
 * @fl: Lock state to check
 *
 * Return values:
 *   %true: Lease conflict was resolved
 *   %false: Lease conflict was not resolved.
 */
static bool nfsd_breaker_owns_lease(struct file_lease *fl)
{}

static int
nfsd_change_deleg_cb(struct file_lease *onlist, int arg,
		     struct list_head *dispose)
{}

static const struct lease_manager_operations nfsd_lease_mng_ops =;

static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{}

static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
						struct nfsd_net *nn)
{}

static __be32 set_client(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{}

__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{}

static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{}

static int share_access_to_flags(u32 share_access)
{}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl,
						  stateid_t *s)
{}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{}

static inline int nfs4_access_to_access(u32 nfs4_access)
{}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{}

static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open, bool new_stp)
{}

static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{}

static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
						int flag)
{}

static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
					 struct nfs4_file *fp)
{}

/*
 * It's possible that, between opening the dentry and setting the delegation,
 * the file has been renamed or unlinked. Redo the lookup to verify that this
 * hasn't happened.
 */
static int
nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
			  struct svc_fh *parent)
{}

/*
 * We avoid breaking delegations held by a client due to its own activity, but
 * clearing setuid/setgid bits on a write is an implicit activity and the client
 * may not notice and continue using the old mode. Avoid giving out a delegation
 * on setuid/setgid files when the client is requesting an open for write.
 */
static int
nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
{}
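
/*
 * Illustrative sketch (not part of the original file): the kind of mode test
 * the comment above calls for -- decline a delegation when a write open
 * targets a setuid/setgid file.  The helper name is hypothetical.
 */
static bool example_deleg_unsafe_for_write(const struct inode *inode, u32 share_access)
{
	return (share_access & NFS4_SHARE_ACCESS_WRITE) &&
	       (inode->i_mode & (S_ISUID | S_ISGID));
}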

static struct nfs4_delegation *
nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
		    struct svc_fh *parent)
{}

static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{}

/*
 * The Linux NFS server does not offer write delegations to NFSv4.0
 * clients in order to avoid conflicts between write delegations and
 * GETATTRs requesting CHANGE or SIZE attributes.
 *
 * With NFSv4.1 and later minorversions, the SEQUENCE operation that
 * begins each COMPOUND contains a client ID. Delegation recall can
 * be avoided when the server recognizes that the client sending a
 * GETATTR also holds the write delegation it conflicts with.
 *
 * However, the NFSv4.0 protocol does not enable a server to
 * determine that a GETATTR originated from the client holding the
 * conflicting delegation versus coming from some other client. Per
 * RFC 7530 Section 16.7.5, the server must recall or send a
 * CB_GETATTR even when the GETATTR originates from the client that
 * holds the conflicting delegation.
 *
 * An NFSv4.0 client can trigger a pathological situation if it
 * always sends a DELEGRETURN preceded by a conflicting GETATTR in
 * the same COMPOUND. COMPOUND execution will always stop at the
 * GETATTR and the DELEGRETURN will never get executed. The server
 * eventually revokes the delegation, which can result in loss of
 * open or lock state.
 */
static void
nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
		     struct svc_fh *currentfh)
{}

static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{}

/**
 * nfsd4_process_open2 - finish open processing
 * @rqstp: the RPC transaction being executed
 * @current_fh: NFSv4 COMPOUND's current filehandle
 * @open: OPEN arguments
 *
 * If successful, (1) truncate the file if open->op_truncate was
 * set, (2) set open->op_stateid, (3) set open->op_delegation.
 *
 * Returns %nfs_ok on success; otherwise an nfs4stat value in
 * network byte order is returned.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{}

void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open)
{}

__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{}

void
nfsd4_end_grace(struct nfsd_net *nn)
{}

/*
 * If we've waited a lease period but there are still clients trying to
 * reclaim, wait a little longer to give them a chance to finish.
 */
static bool clients_still_reclaiming(struct nfsd_net *nn)
{}

struct laundry_time {};

static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
{}

#ifdef CONFIG_NFSD_V4_2_INTER_SSC
void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
{}
EXPORT_SYMBOL_GPL();

/*
 * This is called when nfsd is being shut down, after all inter_ssc
 * cleanup has been done, to destroy the ssc delayed unmount list.
 */
static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
{}

static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
{}
#endif

/* Check if any lock belonging to this lockowner has any blockers */
static bool
nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
{}

static bool
nfs4_anylock_blockers(struct nfs4_client *clp)
{}

static void
nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
				struct laundry_time *lt)
{}

static void
nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
				struct list_head *reaplist)
{}

static void
nfs4_process_client_reaplist(struct list_head *reaplist)
{}

static void nfs40_clean_admin_revoked(struct nfsd_net *nn,
				      struct laundry_time *lt)
{}

static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{}

static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{}

static void
courtesy_client_reaper(struct nfsd_net *nn)
{}

static void
deleg_reaper(struct nfsd_net *nn)
{}

static void
nfsd4_state_shrinker_worker(struct work_struct *work)
{}

static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{}

static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{}

static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{}

static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{}

static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
{}

static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{}

static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{}

__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid,
		     unsigned short typemask, unsigned short statusmask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{}

static struct nfsd_file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{}

static __be32
nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
{}

static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
		struct nfsd_file **nfp, int flags)
{}
static void
_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{}
/*
 * A READ from an inter server-to-server COPY will have a
 * copy stateid. Look up the copy notify stateid from the
 * idr structure and take a reference on it.
 */
__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			  struct nfs4_client *clp,
			  struct nfs4_cpntf_state **cps)
{}

static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			       struct nfs4_stid **stid)
{}

void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{}

/**
 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
 * @rqstp: incoming request from client
 * @cstate: current compound state
 * @fhp: filehandle associated with requested stateid
 * @stateid: stateid (provided by client)
 * @flags: flags describing type of operation to be done
 * @nfp: optional nfsd_file return pointer (may be NULL)
 * @cstid: optional returned nfs4_stid pointer (may be NULL)
 *
 * Given info from the client, look up a nfs4_stid for the operation. On
 * success, it returns a reference to the nfs4_stid and/or the nfsd_file
 * associated with it.
 */
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
		stateid_t *stateid, int flags, struct nfsd_file **nfp,
		struct nfs4_stid **cstid)
{}

/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{}

static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{}

__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{}

static inline int
setlkflg (int type)
{}

static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{}

/**
 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
 * @cstate: compound state
 * @seqid: seqid (provided by client)
 * @stateid: stateid (provided by client)
 * @typemask: mask of allowable types for this operation
 * @statusmask: mask of allowed states: 0 or STID_CLOSED
 * @stpp: return pointer for the stateid found
 * @nn: net namespace for request
 *
 * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
 * return it in @stpp. On a nfs_ok return, the returned stateid will
 * have its st_mutex locked.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid,
			 unsigned short typemask, unsigned short statusmask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{}

static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{}

__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{}

static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{}

__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{}

static bool nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{}

/*
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{}

/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{}
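
/*
 * Illustrative sketch (not part of the original file): clamping an unsigned
 * 64-bit NFS offset into the signed range the VFS can represent, which is
 * the workaround the comment above describes.  The helper name is
 * hypothetical.
 */
static inline loff_t example_clamp_nfs_offset(u64 offset)
{
	return offset > (u64)OFFSET_MAX ? OFFSET_MAX : (loff_t)offset;
}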

static fl_owner_t
nfsd4_lm_get_owner(fl_owner_t owner)
{}

static void
nfsd4_lm_put_owner(fl_owner_t owner)
{}

/* return true if the client holding this lock is expirable */
static bool
nfsd4_lm_lock_expirable(struct file_lock *cfl)
{}

/* schedule laundromat to run immediately and wait for it to complete */
static void
nfsd4_lm_expire_lock(void)
{}

static void
nfsd4_lm_notify(struct file_lock *fl)
{}

static const struct lock_manager_operations nfsd_posix_mng_ops  =;

static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{}

static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{}

static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{}

static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{}

static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{}

static const struct nfs4_stateowner_operations lockowner_ops =;

/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
 * occurred.
 *
 * strhashval = ownerstr_hashval
 */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
			   struct nfs4_ol_stateid *open_stp,
			   struct nfsd4_lock *lock)
{}

static struct nfs4_ol_stateid *
find_lock_stateid(const struct nfs4_lockowner *lo,
		  const struct nfs4_ol_stateid *ost)
{}

static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{}

static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{}

static int
check_lock_length(u64 offset, u64 length)
{}

static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{}

static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **plst, bool *new)
{}

/*
 *  LOCK operation 
 */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   union nfsd4_op_u *u)
{}

void nfsd4_lock_release(union nfsd4_op_u *u)
{}

/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{}

/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{}

void nfsd4_lockt_release(union nfsd4_op_u *u)
{}

__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{}

/*
 * returns
 * 	true:  locks held by lockowner
 * 	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{}

/**
 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
 * @rqstp: RPC transaction
 * @cstate: NFSv4 COMPOUND state
 * @u: RELEASE_LOCKOWNER arguments
 *
 * Check if there are any locks still held and, if not, free the lockowner
 * and any lock state that is owned.
 *
 * Return values:
 *   %nfs_ok: lockowner released or not found
 *   %nfserr_locks_held: lockowner still in use
 *   %nfserr_stale_clientid: clientid no longer active
 *   %nfserr_expired: clientid not recognized
 */
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{}

static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{}

bool
nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
{}

/*
 * failure => all reset bets are off, nfserr_no_grace...
 *
 * The caller is responsible for freeing name.data if NULL is returned (it
 * will be freed in nfs4_remove_reclaim_record in the normal case).
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
		struct nfsd_net *nn)
{}

void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{}

void
nfs4_release_reclaim(struct nfsd_net *nn)
{}

/*
 * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
{}

__be32
nfs4_check_open_reclaim(struct nfs4_client *clp)
{}

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{}
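
/*
 * Illustrative sketch (not part of the original file): one plausible way to
 * scale the delegation cap with available memory, assuming a budget of
 * roughly four delegations per megabyte of reclaimable page cache.  The
 * policy actually used by set_max_delegations() may differ.
 */
static unsigned long example_deleg_limit(void)
{
	/* pages -> MiB is a shift by (20 - PAGE_SHIFT); the extra "- 2" multiplies by 4 */
	return nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}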

static int nfs4_state_create_net(struct net *net)
{}

static void
nfs4_state_destroy_net(struct net *net)
{}

int
nfs4_state_start_net(struct net *net)
{}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{}

void
nfs4_state_shutdown_net(struct net *net)
{}

void
nfs4_state_shutdown(void)
{}

static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{}

/**
 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
 * @rqstp: RPC transaction context
 * @inode: file to be checked for a conflict
 * @modified: return true if file was modified
 * @size: new size of file if modified is true
 *
 * This function is called when there is a conflict between a write
 * delegation and a change/size GETATTR from another client. The server
 * must either use the CB_GETATTR to get the current values of the
 * attributes from the client that holds the delegation or recall the
 * delegation before replying to the GETATTR. See RFC 8881 section
 * 18.7.4.
 *
 * Returns 0 if there is no conflict; otherwise an nfs_stat
 * code is returned.
 */
__be32
nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
				bool *modified, u64 *size)
{}