
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <[email protected]>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#include "trace.h"

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie =;

void nlmclnt_next_cookie(struct nlm_cookie *c)
{}

static struct nlm_lockowner *
nlmclnt_get_lockowner(struct nlm_lockowner *lockowner)
{}

static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner)
{}

/*
 * Return -EBUSY if the given NLM pid is already in use by a lockowner
 * on this host, 0 otherwise.  Caller must hold host->h_lock.
 */
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{}

/*
 * Look up the lockowner for @owner on @host, returning a referenced
 * lockowner or NULL if none exists.  Caller must hold host->h_lock.
 */
static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlmclnt_get_lockowner(lockowner);
	}
	return NULL;
}

/*
 * Find or create the lockowner for @owner on @host.  The allocation is
 * done with h_lock dropped, then the lookup is repeated to close the
 * race against a concurrent insertion.  Returns a referenced lockowner,
 * or NULL on allocation failure.
 */
static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmclnt_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmclnt_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			refcount_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{}
EXPORT_SYMBOL_GPL();

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{}

void nlmclnt_release_call(struct nlm_rqst *call)
{}

/* rpc_call_ops ->rpc_release hook: drop the request's reference. */
static void nlmclnt_rpc_release(void *data)
{
	nlmclnt_release_call(data);
}

/*
 * Sleep (interruptibly, freezable) on the host's grace-period wait
 * queue for up to NLMCLNT_GRACE_WAIT.  Returns 0 if we should retry
 * the call, -EINTR if a signal is pending.
 */
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call.  Performs a synchronous RPC to the server, retrying
 * on transient transport errors and backing off while the server is in
 * its reboot grace period (unless this is itself a reclaim request).
 * Returns 0 with the NLM status in req->a_res.status, or a negative errno.
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
		.rpc_cred	= cred,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled() ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n",
				ntohl(resp->status));
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.  Binds the host's RPC client and
 * kicks off an asynchronous task.  On failure the request reference is
 * released via tk_ops->rpc_release and an ERR_PTR is returned.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

	/* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

/*
 * Fire-and-forget wrapper around __nlm_async_call: start the task and
 * immediately drop our task reference.  Returns 0 or a negative errno.
 */
static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * Asynchronous *_RES reply: only the result structure is sent,
 * no reply is expected.
 */
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
		.rpc_cred	= cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{}

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{}

static const struct file_lock_operations nlmclnt_lock_ops =;

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{}

static int do_vfs_lock(struct file_lock *fl)
{}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
		struct nlm_rqst *req)
{}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{}

static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{}

static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{}

static const struct rpc_call_ops nlmclnt_unlock_ops =;

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{}

static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{}

static const struct rpc_call_ops nlmclnt_cancel_ops =;

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch (ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		 ntohl(status));
	return -ENOLCK;
}