// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/bits.h>
#include <linux/ktime.h>
#include <linux/bitmap.h>
#include <linux/mnt_idmapping.h>

#include "super.h"
#include "mds_client.h"
#include "crypto.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

#define RECONNECT_MAX_SIZE

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);
static void ceph_cap_release_work(struct work_struct *work);
static void ceph_cap_reclaim_work(struct work_struct *work);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */
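
/*
 * All of the parse_reply_info_* helpers below follow libceph's
 * bounds-checked decode pattern: *p is a cursor into the message
 * payload, end points one past the last valid byte, and the *_safe
 * macros from <linux/ceph/decode.h> bail out to a "bad" label if the
 * buffer is too short.  A minimal illustrative sketch follows; the
 * struct and its fields are hypothetical, not part of any real reply.
 */
struct example_reply_blob {
	u32 flags;
	u64 ino;
};

static int __maybe_unused parse_example_blob(void **p, void *end,
					     struct example_reply_blob *b)
{
	ceph_decode_32_safe(p, end, b->flags, bad);	/* advances *p */
	ceph_decode_64_safe(p, end, b->ino, bad);
	return 0;
bad:
	return -EIO;	/* message was truncated or corrupt */
}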

static int parse_reply_info_quota(void **p, void *end,
				  struct ceph_mds_reply_info_in *info)
{}

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{}

static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_dirfrag **dirfrag,
				u64 features)
{}

static int parse_reply_info_lease(void **p, void *end,
				  struct ceph_mds_reply_lease **lease,
				  u64 features, u32 *altname_len, u8 **altname)
{}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{}

/*
 * parse readdir results
 */
static int parse_reply_info_readdir(void **p, void *end,
				    struct ceph_mds_request *req,
				    u64 features)
{}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{}


#if BITS_PER_LONG == 64

#define DELEGATED_INO_AVAILABLE

static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{}
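
/*
 * Illustrative sketch of stashing a delegated inode number in an
 * xarray on 64-bit (hypothetical helper; the real state lives in the
 * session, and the stored entry only needs to be non-NULL):
 */
static int __maybe_unused example_save_deleg_ino(struct xarray *xa, u64 ino)
{
	/* the ino itself is the index; this only works here because
	 * unsigned long is 64 bits wide */
	return xa_insert(xa, ino, xa_mk_value(1), GFP_KERNEL);
}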
#else /* BITS_PER_LONG == 64 */
/*
 * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
 * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
 * and bottom words?
 */
static int ceph_parse_deleg_inos(void **p, void *end,
				 struct ceph_mds_session *s)
{
	u32 sets;

	ceph_decode_32_safe(p, end, sets, bad);
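	/* each set is encoded as a (start, length) pair of __le64s */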
	if (sets)
		ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
	return 0;
bad:
	return -EIO;
}

u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
	return 0;
}

int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
	return 0;
}
#endif /* BITS_PER_LONG == 64 */

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features, struct ceph_mds_session *s)
{}

static int parse_reply_info_getvxattr(void **p, void *end,
				      struct ceph_mds_reply_info_parsed *info,
				      u64 features)
{}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_request *req,
				  u64 features, struct ceph_mds_session *s)
{}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
			    struct ceph_mds_request *req, u64 features)
{}

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{}

/*
 * In the async unlink case the kclient won't wait for the first reply
 * from the MDS; it just drops all the links, unhashes the dentry, and
 * then succeeds immediately.
 *
 * For any new create/link/rename, etc. requests that follow using the
 * same file names, we must wait for the first reply of the inflight
 * unlink request, or the MDS may fail these subsequent requests with
 * -EEXIST if the inflight async unlink request was delayed for some
 * reason.
 *
 * The worst case is that a non-async openc request will successfully
 * open the file if the CDentry hasn't been unlinked yet, but the
 * previously delayed async unlink request will later remove the
 * CDentry.  That means the just-created file may be deleted by
 * accident.
 *
 * We need to wait for any inflight async unlink requests to finish
 * when creating new files/directories with the same file names.
 */
int ceph_wait_on_conflict_unlink(struct dentry *dentry)
{}
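
/*
 * Minimal sketch of the wait/wake pairing described above, using
 * hypothetical names (in the real code the flag bit lives in
 * ceph_dentry_info and entries are hashed by dentry name): the async
 * unlink path sets the bit, and the reply handler clears it and wakes
 * any create that raced with the unlink.
 */
static unsigned long example_unlink_flags;	/* bit 0: unlink in flight */

static int __maybe_unused example_wait_for_async_unlink(void)
{
	/* killable sleep until the reply handler clears bit 0 */
	return wait_on_bit(&example_unlink_flags, 0, TASK_KILLABLE);
}

static void __maybe_unused example_async_unlink_reply(void)
{
	/* clear the bit and wake all waiters in one step */
	clear_and_wake_up_bit(0, &example_unlink_flags);
}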


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{}

struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
{}

void ceph_put_mds_session(struct ceph_mds_session *s)
{}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{}

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *s)
{}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{}

void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
				void (*cb)(struct ceph_mds_session *),
				bool check_state)
{}

void ceph_mdsc_release_request(struct kref *kref)
{}

DEFINE_RB_FUNCS()
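
/*
 * DEFINE_RB_FUNCS() (see <linux/ceph/libceph.h>) expands to static
 * lookup/insert/erase helpers for an rb-tree keyed by a struct field;
 * here it generates the tid-keyed request tree helpers used by
 * lookup_get_request() below.
 */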

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{}

/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req,
			bool *random)
{}


/*
 * session messages
 */
struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
{}

static const unsigned char feature_bits[] =;
#define FEATURE_BYTES(c)
static int encode_supported_features(void **p, void *end)
{}

static const unsigned char metric_bits[] =;
#define METRIC_BYTES(cnt)
static int encode_metric_spec(void **p, void *end)
{}

/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *
create_session_full_msg(struct ceph_mds_client *mdsc, int op, u64 seq)
{}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{}

/*
 * session caps
 */

static void detach_cap_releases(struct ceph_mds_session *session,
				struct list_head *target)
{}

static void dispose_cap_releases(struct ceph_mds_client *mdsc,
				 struct list_head *dispose)
{}

static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
int ceph_iterate_session_caps(struct ceph_mds_session *session,
			      int (*cb)(struct inode *, int mds, void *),
			      void *arg)
{}
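
/*
 * Example of a callback for the iterator above (hypothetical; real
 * callbacks such as remove_session_caps_cb do per-cap work here).
 * A negative return value from the callback stops the walk early.
 */
static int __maybe_unused count_session_caps_cb(struct inode *inode, int mds,
						void *arg)
{
	int *count = arg;

	(*count)++;		/* invoked once per cap in this session */
	return 0;
}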

static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
{}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{}

enum {};

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
{}

static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{}

static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{}


/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_session *session)
{}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session)
{}

static bool drop_negative_children(struct dentry *dentry)
{}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, int mds, void *arg)
{}
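
/*
 * The "prune its aliases" step above amounts to the following sketch
 * (hypothetical helper; the in-tree callback first verifies that no
 * caps on the inode are still issued or wanted):
 */
static void __maybe_unused example_drop_idle_inode(struct inode *inode)
{
	/*
	 * Drop unreferenced dentries pointing at this inode; once the
	 * last alias goes away the inode can be evicted, releasing its
	 * cap with it.
	 */
	d_prune_aliases(inode);
}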

/*
 * Trim session cap count down to some max number.
 */
int ceph_trim_caps(struct ceph_mds_client *mdsc,
		   struct ceph_mds_session *session,
		   int max_caps)
{}

static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{}

/*
 * flush all dirty inode data to disk.
 *
 * returns once we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{}

/*
 * called under s_mutex
 */
static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{}

static void ceph_cap_release_work(struct work_struct *work)
{}

void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{}

/*
 * caller holds session->s_cap_lock
 */
void __ceph_queue_cap_release(struct ceph_mds_session *session,
			      struct ceph_cap *cap)
{}

static void ceph_cap_reclaim_work(struct work_struct *work)
{}

void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
{}

void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
{}

void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc)
{}

static void ceph_cap_unlink_work(struct work_struct *work)
{}

/*
 * requests
 */

int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{}

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{}

/*
 * return the oldest (lowest tid) request in the request tree, or 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{}

#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
{}
#else
static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
{
	*plen = 0;
	return NULL;
}
#endif

/**
 * ceph_mdsc_build_path - build a path string to a given dentry
 * @mdsc: mds client
 * @dentry: dentry to which path should be built
 * @plen: returned length of string
 * @pbase: returned base inode number
 * @for_wire: is this path going to be sent to the MDS?
 *
 * Build a string that represents the path to the dentry. This is mostly called
 * for two different purposes:
 *
 * 1) we need to build a path string to send to the MDS (for_wire == true)
 * 2) we need a path string for local presentation (e.g. debugfs)
 *    (for_wire == false)
 *
 * The path is built in reverse, starting with the dentry. Walk back up toward
 * the root, building the path until the first non-snapped inode is reached
 * (for_wire) or the root inode is reached (!for_wire).
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
			   int *plen, u64 *pbase, int for_wire)
{}
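
/*
 * Generic sketch of the reverse construction described above
 * (hypothetical helper; it ignores snapdirs and the rename retry
 * logic the real function needs):
 */
static __maybe_unused char *example_build_path_reverse(struct dentry *dentry,
						       char *buf, int buflen)
{
	char *pos = buf + buflen;	/* cursor walks backwards */

	*--pos = '\0';
	while (!IS_ROOT(dentry)) {
		int len = dentry->d_name.len;

		if (pos - buf < len + 1)
			return ERR_PTR(-ENAMETOOLONG);
		pos -= len;
		memcpy(pos, dentry->d_name.name, len);
		*--pos = '/';			/* prepend "/<name>" */
		dentry = dentry->d_parent;
	}
	return pos;	/* the path begins here, not at buf */
}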

static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
			     struct inode *dir, const char **ppath, int *ppathlen,
			     u64 *pino, bool *pfreepath, bool parent_locked)
{}

static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    bool *pfreepath)
{}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
				 struct dentry *rdentry, struct inode *rdiri,
				 const char *rpath, u64 rino, const char **ppath,
				 int *pathlen, u64 *ino, bool *freepath,
				 bool parent_locked)
{}

static void encode_mclientrequest_tail(void **p,
				       const struct ceph_mds_request *req)
{}

static inline u16 mds_supported_head_version(struct ceph_mds_session *session)
{}

static struct ceph_mds_request_head_legacy *
find_legacy_request_head(void *p, u64 features)
{}

/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
					       struct ceph_mds_request *req,
					       bool drop_cap_releases)
{}

/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{}

/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_session *session,
				  struct ceph_mds_request *req,
				  bool drop_cap_releases)
{}

/*
 * called under mdsc->mutex
 */
static int __send_request(struct ceph_mds_session *session,
			  struct ceph_mds_request *req,
			  bool drop_cap_releases)
{}

/*
 * send request, or put it on the appropriate wait list.
 */
static void __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{}

/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{}

/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{}

int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
			      struct ceph_mds_request *req)
{}

int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
			   struct ceph_mds_request *req,
			   ceph_mds_request_wait_callback_t wait_func)
{}

/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{}

/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{}

/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{}



/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{}

static int __decode_session_metadata(void **p, void *end,
				     bool *blocklisted)
{}

/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{}

void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
{}

void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req)
{}

/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{}

static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
{}

static struct dentry *d_find_primary(struct inode *inode)
{}

/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
{}

static int encode_snap_realms(struct ceph_mds_client *mdsc,
			      struct ceph_reconnect_state *recon_state)
{}


/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{}


/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{}



/*
 * leases
 */

/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{}

static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{}

void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct dentry *dentry, char action,
			      u32 seq)
{}

/*
 * lock, then unlock the session to wait for ongoing session activity
 * to finish
 */
static void lock_unlock_session(struct ceph_mds_session *s)
{}

static void maybe_recover_session(struct ceph_mds_client *mdsc)
{}

bool check_session_state(struct ceph_mds_session *s)
{}

/*
 * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
 * then we need to retransmit that request.
 */
void inc_session_sequence(struct ceph_mds_session *s)
{}

/*
 * delayed work -- periodically trim expired leases, renew caps with mds.  If
 * the @delay parameter is set to 0 or if it's more than 5 secs, the default
 * workqueue delay value of 5 secs will be used.
 */
static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
{}
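
/*
 * The clamping rule documented above, sketched (hypothetical helper;
 * assumes the work item initialized in ceph_mdsc_init() lives at
 * mdsc->delayed_work):
 */
static void __maybe_unused example_schedule_delayed(struct ceph_mds_client *mdsc,
						    unsigned long delay)
{
	unsigned long max_delay = 5 * HZ;	/* default 5 second cadence */

	if (!delay || delay > max_delay)
		delay = max_delay;
	schedule_delayed_work(&mdsc->delayed_work, round_jiffies_relative(delay));
}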

static void delayed_work(struct work_struct *work)
{}

int ceph_mdsc_init(struct ceph_fs_client *fsc)
{}

/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{}

void send_flush_mdlog(struct ceph_mds_session *s)
{}

static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
			       struct ceph_mds_cap_auth *auth,
			       char *tpath)
{}

int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
{}

/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{}

/*
 * flush the mdlog and wait for all write mds requests to flush.
 */
static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
						 u64 want_tid)
{}

void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{}

/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
{}

/*
 * called after the sb is ro or when metadata is corrupted.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{}

void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{}

static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{}

void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{}

void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{}

/*
 * handle mds map update.
 */
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{}

static struct ceph_connection *mds_get_con(struct ceph_connection *con)
{}

static void mds_put_con(struct ceph_connection *con)
{}

/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void mds_peer_reset(struct ceph_connection *con)
{}

static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *
mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{}

static int mds_add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{}

static int mds_verify_authorizer_reply(struct ceph_connection *con)
{}

static int mds_invalidate_authorizer(struct ceph_connection *con)
{}

static int mds_get_auth_request(struct ceph_connection *con,
				void *buf, int *buf_len,
				void **authorizer, int *authorizer_len)
{}

static int mds_handle_auth_reply_more(struct ceph_connection *con,
				      void *reply, int reply_len,
				      void *buf, int *buf_len,
				      void **authorizer, int *authorizer_len)
{}

static int mds_handle_auth_done(struct ceph_connection *con,
				u64 global_id, void *reply, int reply_len,
				u8 *session_key, int *session_key_len,
				u8 *con_secret, int *con_secret_len)
{}

static int mds_handle_auth_bad_method(struct ceph_connection *con,
				      int used_proto, int result,
				      const int *allowed_protos, int proto_cnt,
				      const int *allowed_modes, int mode_cnt)
{}

static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr, int *skip)
{}

static int mds_sign_message(struct ceph_msg *msg)
{}

static int mds_check_message_signature(struct ceph_msg *msg)
{}

static const struct ceph_connection_operations mds_con_ops =;

/* eof */