/* linux/net/ceph/osd_client.c */

// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

/* Preallocated front size for incoming MOSDOpReply messages. */
#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{}
static inline void verify_osd_locked(struct ceph_osd *osd)
{}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{}

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
				   struct ceph_bio_iter *bio_pos,
				   u32 bio_length)
{}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
				     struct ceph_bvec_iter *bvec_pos,
				     u32 num_bvecs)
{}

static void ceph_osd_iter_init(struct ceph_osd_data *osd_data,
			       struct iov_iter *iter)
{}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{}

/* Accessor for the osd_data of the extent op at index @which (body elided). */
struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

/* Attach a page vector as raw data-in for op @which (body elided). */
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{}
EXPORT_SYMBOL();

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{}
EXPORT_SYMBOL();

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
				    unsigned int which,
				    struct ceph_bio_iter *bio_pos,
				    u32 bio_length)
{}
EXPORT_SYMBOL();
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
				      unsigned int which,
				      struct bio_vec *bvecs, u32 num_bvecs,
				      u32 bytes)
{}
EXPORT_SYMBOL();

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
					 unsigned int which,
					 struct ceph_bvec_iter *bvec_pos)
{}
EXPORT_SYMBOL();

/**
 * osd_req_op_extent_osd_iter - Set up an operation with an iterator buffer
 * @osd_req: The request to set up
 * @which: Index of the operation in which to set the iter
 * @iter: The buffer iterator
 */
void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
				unsigned int which, struct iov_iter *iter)
{}
EXPORT_SYMBOL();

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{}
EXPORT_SYMBOL();

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{}
EXPORT_SYMBOL();

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
				       unsigned int which,
				       struct bio_vec *bvecs, u32 num_bvecs,
				       u32 bytes)
{}
EXPORT_SYMBOL();

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{}
EXPORT_SYMBOL();

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{}

static void target_destroy(struct ceph_osd_request_target *t)
{}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{}

static void ceph_osdc_release_request(struct kref *kref)
{}

/* Take a reference on @req (body elided in this skeleton). */
void ceph_osdc_get_request(struct ceph_osd_request *req)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_get_request);

/* Drop a reference on @req (body elided in this skeleton). */
void ceph_osdc_put_request(struct ceph_osd_request *req)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{}

/* Allocate an OSD request with room for @num_ops ops (body elided). */
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{}

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
				      int num_request_data_items,
				      int num_reply_data_items)
{}

static bool osd_req_opcode_valid(u16 opcode)
{}

static void get_num_data_items(struct ceph_osd_request *req,
			       int *num_request_data_items,
			       int *num_reply_data_items)
{}

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
/* Allocate request/reply messages for @req (body elided). */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
/* Initialize op @which of @osd_req with @opcode/@flags (body elided). */
struct ceph_osd_req_op *
osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
				unsigned int which, u16 opcode,
				u64 offset, u64 length,
				u64 truncate_size, u32 truncate_seq)
{}
EXPORT_SYMBOL();

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
				unsigned int which, u64 length)
{}
EXPORT_SYMBOL();

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{}
EXPORT_SYMBOL();

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			const char *class, const char *method)
{}
EXPORT_SYMBOL();

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{}
EXPORT_SYMBOL();

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u8 watch_opcode, u64 cookie, u32 gen)
{}

/*
 * prot_ver, timeout and notify payload (may be empty) should already be
 * encoded in @request_pl
 */
static void osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				   u64 cookie, struct ceph_pagelist *request_pl)
{}

/*
 * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_*
 */
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size,
				u32 flags)
{}
EXPORT_SYMBOL();

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
/* Build a new request for a file extent, adjusting @plen as needed (body elided). */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_new_request);

int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
{}
EXPORT_SYMBOL();

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS()
DEFINE_RB_FUNCS()

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
			int (*fn)(struct ceph_osd_request *req, void *arg),
			void *arg)
{}

static bool osd_homeless(struct ceph_osd *osd)
{}

static bool osd_registered(struct ceph_osd *osd)
{}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{}

static void ceph_init_sparse_read(struct ceph_sparse_read *sr)
{}

static void osd_cleanup(struct ceph_osd *osd)
{}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{}

static void put_osd(struct ceph_osd *osd)
{}

DEFINE_RB_FUNCS()

static void __move_osd_to_lru(struct ceph_osd *osd)
{}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{}

static bool have_pool_full(struct ceph_osd_client *osdc)
{}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{}

static int pick_random_replica(const struct ceph_osds *acting)
{}

/*
 * Picks the closest replica based on client's location given by
 * crush_location option.  Prefers the primary if the locality is
 * the same.
 */
static int pick_closest_replica(struct ceph_osd_client *osdc,
				const struct ceph_osds *acting)
{}

enum calc_target_result {};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   bool any_change)
{}

static struct ceph_spg_mapping *alloc_spg_mapping(void)
{}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{}

static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{}

/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{}

static void free_hoid(struct ceph_hobject_id *hoid)
{}

static struct ceph_osd_backoff *alloc_backoff(void)
{}

static void free_backoff(struct ceph_osd_backoff *backoff)
{}

/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);

static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					    const struct ceph_hobject_id *hoid)
{}

/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS()

static void clear_backoffs(struct ceph_osd *osd)
{}

/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{}

static bool should_plug_request(struct ceph_osd_request *req)
{}

/*
 * Keep get_num_data_items() in sync with this function.
 */
static void setup_request_data(struct ceph_osd_request *req)
{}

static void encode_pgid(void **p, const struct ceph_pg *pgid)
{}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{}

static void encode_oloc(void **p, void *end,
			const struct ceph_object_locator *oloc)
{}

static void encode_request_partial(struct ceph_osd_request *req,
				   struct ceph_msg *msg)
{}

static void encode_request_finish(struct ceph_msg *msg)
{}

/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{}

static void maybe_request_map(struct ceph_osd_client *osdc)
{}

static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);

static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{}

static void account_request(struct ceph_osd_request *req)
{}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{}

static void finish_request(struct ceph_osd_request *req)
{}

static void __complete_request(struct ceph_osd_request *req)
{}

static void complete_request_workfn(struct work_struct *work)
{}

/*
 * This is open-coded in handle_reply().
 */
static void complete_request(struct ceph_osd_request *req, int err)
{}

static void cancel_map_check(struct ceph_osd_request *req)
{}

static void cancel_request(struct ceph_osd_request *req)
{}

static void abort_request(struct ceph_osd_request *req, int err)
{}

static int abort_fn(struct ceph_osd_request *req, void *arg)
{}

/*
 * Abort all in-flight requests with @err and arrange for all future
 * requests to be failed immediately.
 */
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_abort_requests);

void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
{}
EXPORT_SYMBOL();

static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{}

void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{}
EXPORT_SYMBOL();

/*
 * We can end up releasing caps as a result of abort_request().
 * In that case, we probably want to ensure that the cap release message
 * has an updated epoch barrier in it, so set the epoch barrier prior to
 * aborting the first request.
 */
static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
{}

/*
 * Drop all pending requests that are stalled waiting on a full condition to
 * clear, and complete them with ENOSPC as the return code. Set the
 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
 * cancelled.
 */
static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
{}

static void check_pool_dne(struct ceph_osd_request *req)
{}

static void map_check_cb(struct ceph_mon_generic_request *greq)
{}

static void send_map_check(struct ceph_osd_request *req)
{}

/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{}

static void linger_put(struct ceph_osd_linger_request *lreq)
{}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{}

static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{}

DEFINE_RB_INSDEL_FUNCS()
DEFINE_RB_FUNCS()
DEFINE_RB_FUNCS()

/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{}

static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{}

static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{}

static void linger_register(struct ceph_osd_linger_request *lreq)
{}

static void linger_unregister(struct ceph_osd_linger_request *lreq)
{}

static void cancel_linger_request(struct ceph_osd_request *req)
{}

struct linger_work {};

static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
				       work_func_t workfn)
{}

static void lwork_free(struct linger_work *lwork)
{}

static void lwork_queue(struct linger_work *lwork)
{}

static void do_watch_notify(struct work_struct *w)
{}

static void do_watch_error(struct work_struct *w)
{}

static void queue_watch_error(struct ceph_osd_linger_request *lreq)
{}

static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
				       int result)
{}

static void linger_commit_cb(struct ceph_osd_request *req)
{}

static int normalize_watch_error(int err)
{}

static void linger_reconnect_cb(struct ceph_osd_request *req)
{}

static void send_linger(struct ceph_osd_linger_request *lreq)
{}

static void linger_ping_cb(struct ceph_osd_request *req)
{}

static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{}

static void linger_submit(struct ceph_osd_linger_request *lreq)
{}

static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
{}

/*
 * @lreq has to be both registered and linked.
 */
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{}

static void linger_cancel(struct ceph_osd_linger_request *lreq)
{}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq);

static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
{}

static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
{}

static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
{}

static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{}

static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq,
				     unsigned long timeout)
{}

/*
 * Timeout callback, called every N seconds.  When 1 or more OSD
 * requests has been active for more than N seconds, we send a keepalive
 * (tag + timestamp) to its OSD to ensure any communications channel
 * reset is detected.
 */
static void handle_timeout(struct work_struct *work)
{}

static void handle_osds_timeout(struct work_struct *work)
{}

static int ceph_oloc_decode(void **p, void *end,
			    struct ceph_object_locator *oloc)
{}

static int ceph_redirect_decode(void **p, void *end,
				struct ceph_request_redirect *redir)
{}

struct MOSDOpReply {};

static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{}

/*
 * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
 * specified.
 */
static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
{}

static void set_pool_was_full(struct ceph_osd_client *osdc)
{}

static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
{}

static enum calc_target_result
recalc_linger_target(struct ceph_osd_linger_request *lreq)
{}

/*
 * Requeue requests whose mapping to an OSD has changed.
 */
static void scan_requests(struct ceph_osd *osd,
			  bool force_resend,
			  bool cleared_full,
			  bool check_pool_cleared_full,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{}

static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{}

static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{}

/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{}

struct MOSDBackoff {};

static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
{}

static struct ceph_msg *create_backoff_message(
				const struct ceph_osd_backoff *backoff,
				u32 map_epoch)
{}

static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
{}

static bool target_contained_by(const struct ceph_osd_request_target *t,
				const struct ceph_hobject_id *begin,
				const struct ceph_hobject_id *end)
{}

static void handle_backoff_unblock(struct ceph_osd *osd,
				   const struct MOSDBackoff *m)
{}

static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{}

/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{}

/*
 * Register request, send initial attempt.
 */
void ceph_osdc_start_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *req)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister request.  If @req was registered, it isn't completed:
 * r_result isn't set and __complete_request() isn't invoked.
 *
 * If @req wasn't registered, this call may have raced with
 * handle_reply(), in which case r_result would already be set and
 * __complete_request() would be getting invoked, possibly even
 * concurrently with this call.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_cancel_request);

/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{}
EXPORT_SYMBOL();

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{}
EXPORT_SYMBOL();

/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{}
EXPORT_SYMBOL();

/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_unwatch);

static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      u32 payload_len)
{}

int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 u32 payload_len)
{}
EXPORT_SYMBOL();

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     u32 payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{}
EXPORT_SYMBOL();

/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{}

static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{}

static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{}

/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{}
EXPORT_SYMBOL();

/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{}
EXPORT_SYMBOL();

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{}
EXPORT_SYMBOL();

/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page **resp_pages, size_t *resp_len)
{}
/* EXPORT_SYMBOL requires the symbol name; it was missing. */
EXPORT_SYMBOL(ceph_osdc_call);

/*
 * reset all osd connections
 */
void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
{}

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{}

int osd_req_op_copy_from_init(struct ceph_osd_request *req,
			      u64 src_snapid, u64 src_version,
			      struct ceph_object_id *src_oid,
			      struct ceph_object_locator *src_oloc,
			      u32 src_fadvise_flags,
			      u32 dst_fadvise_flags,
			      u32 truncate_seq, u64 truncate_size,
			      u8 copy_from_flags)
{}
EXPORT_SYMBOL();

int __init ceph_osdc_setup(void)
{}

void ceph_osdc_cleanup(void)
{}

/*
 * handle incoming message
 */
static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{}

/* How much sparse data was requested? */
static u64 sparse_data_requested(struct ceph_osd_request *req)
{}

/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{}

static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{}

static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *osd_get_con(struct ceph_connection *con)
{}

static void osd_put_con(struct ceph_connection *con)
{}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *
osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{}

static int osd_add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{}

static int osd_verify_authorizer_reply(struct ceph_connection *con)
{}

static int osd_invalidate_authorizer(struct ceph_connection *con)
{}

static int osd_get_auth_request(struct ceph_connection *con,
				void *buf, int *buf_len,
				void **authorizer, int *authorizer_len)
{}

static int osd_handle_auth_reply_more(struct ceph_connection *con,
				      void *reply, int reply_len,
				      void *buf, int *buf_len,
				      void **authorizer, int *authorizer_len)
{}

static int osd_handle_auth_done(struct ceph_connection *con,
				u64 global_id, void *reply, int reply_len,
				u8 *session_key, int *session_key_len,
				u8 *con_secret, int *con_secret_len)
{}

static int osd_handle_auth_bad_method(struct ceph_connection *con,
				      int used_proto, int result,
				      const int *allowed_protos, int proto_cnt,
				      const int *allowed_modes, int mode_cnt)
{}

static void osd_reencode_message(struct ceph_msg *msg)
{}

static int osd_sign_message(struct ceph_msg *msg)
{}

static int osd_check_message_signature(struct ceph_msg *msg)
{}

static void advance_cursor(struct ceph_msg_data_cursor *cursor, size_t len,
			   bool zero)
{}

static int prep_next_sparse_read(struct ceph_connection *con,
				 struct ceph_msg_data_cursor *cursor)
{}

#ifdef __BIG_ENDIAN
static inline void convert_extent_map(struct ceph_sparse_read *sr)
{
	int i;

	for (i = 0; i < sr->sr_count; i++) {
		struct ceph_sparse_extent *ext = &sr->sr_extent[i];

		ext->off = le64_to_cpu((__force __le64)ext->off);
		ext->len = le64_to_cpu((__force __le64)ext->len);
	}
}
#else
static inline void convert_extent_map(struct ceph_sparse_read *sr)
{}
#endif

static int osd_sparse_read(struct ceph_connection *con,
			   struct ceph_msg_data_cursor *cursor,
			   char **pbuf)
{}

static const struct ceph_connection_operations osd_con_ops =;