// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dlmglue.c
 *
 * Code which implements an OCFS2 specific interface to our DLM.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/quotaops.h>
#include <linux/sched/signal.h>

#define MLOG_MASK_PREFIX
#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_lockingver.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "stackglue.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "quota.h"
#include "refcounttree.h"
#include "acl.h"

#include "buffer_head_io.h"

struct ocfs2_mask_waiter {};

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);

/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock().
 */
enum ocfs2_unblock_action {};

struct ocfs2_unblock_ctl {};

/* Lockdep class keys */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
#endif

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking);

static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking);

static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres);

static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
					    int new_level);
static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
					 int blocking);

#define mlog_meta_lvb(__level, __lockres)

/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
				     const char *function,
				     unsigned int line,
				     struct ocfs2_lock_res *lockres)
{}


/*
 * OCFS2 Lock Resource Operations
 *
 * These fine tune the behavior of the generic dlmglue locking infrastructure.
 *
 * The most basic of lock types can point ->l_priv to their respective
 * struct ocfs2_super and allow the default actions to manage things.
 *
 * Right now, each lock type also needs to implement an init function,
 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
 */
struct ocfs2_lock_res_ops {};
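
/*
 * The struct body above is elided. As a rough sketch (member names
 * follow mainline dlmglue; the exact layout may differ across kernel
 * versions), the callback table looks something like this:
 */
struct ocfs2_lock_res_ops_sketch {
	/* Translate a lockres into its ocfs2_super when ->l_priv is
	 * not an ocfs2_super pointer itself. */
	struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);

	/* Optionally run in the downconvert thread after a downconvert
	 * has finished (e.g. final dentry lock teardown). */
	void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);

	/* Veto a downconvert until the lock type says it is safe,
	 * e.g. metadata has been checkpointed to disk. */
	int (*check_downconvert)(struct ocfs2_lock_res *, int new_level);

	/* Populate the lock value block before it is handed back to
	 * the DLM on downconvert (LOCK_TYPE_USES_LVB). */
	void (*set_lvb)(struct ocfs2_lock_res *);

	/* Per-type work (data writeback, page invalidation, ...) done
	 * when a lock is blocking another node. */
	int (*downconvert_worker)(struct ocfs2_lock_res *, int blocking);

	/* LOCK_TYPE_* flags, defined below. */
	int flags;
};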

/*
 * Some locks want to "refresh" potentially stale data when a
 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
 * individual lockres l_flags member from the ast function. It is
 * expected that the locking wrapper will clear the
 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
 */
#define LOCK_TYPE_REQUIRES_REFRESH

/*
 * Indicate that a lock type makes use of the lock value block. The
 * ->set_lvb lock type callback must be defined.
 */
#define LOCK_TYPE_USES_LVB
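
/*
 * Illustrative only: how a lock type combines these flags in its ops
 * table. This mirrors the mainline inode metadata lops (the real
 * initializers are elided in the declarations below), but treat it as
 * a sketch rather than a definition to copy.
 */
static const struct ocfs2_lock_res_ops example_meta_lops = {
	.get_osb		= ocfs2_get_inode_osb,
	.check_downconvert	= ocfs2_check_meta_downconvert,
	.set_lvb		= ocfs2_set_meta_lvb,
	.downconvert_worker	= ocfs2_data_convert_worker,
	.flags			= LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
};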

static const struct ocfs2_lock_res_ops ocfs2_inode_rw_lops =;

static const struct ocfs2_lock_res_ops ocfs2_inode_inode_lops =;

static const struct ocfs2_lock_res_ops ocfs2_super_lops =;

static const struct ocfs2_lock_res_ops ocfs2_rename_lops =;

static const struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops =;

static const struct ocfs2_lock_res_ops ocfs2_trim_fs_lops =;

static const struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops =;

static const struct ocfs2_lock_res_ops ocfs2_dentry_lops =;

static const struct ocfs2_lock_res_ops ocfs2_inode_open_lops =;

static const struct ocfs2_lock_res_ops ocfs2_flock_lops =;

static const struct ocfs2_lock_res_ops ocfs2_qinfo_lops =;

static const struct ocfs2_lock_res_ops ocfs2_refcount_block_lops =;

static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{}

static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{}

static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{}

static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{}

static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
{}

static inline struct ocfs2_refcount_tree *
ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
{}

static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{}

static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted);
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
				   struct ocfs2_lock_res *lockres,
				   int level, unsigned long caller_ip);
static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres,
					int level)
{}

static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert);
#define ocfs2_log_dlm_error(_func, _err, _lockres)
static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static int ocfs2_inode_lock_update(struct inode *inode,
				  struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level);
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation);
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
				        struct ocfs2_lock_res *lockres);
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres);


static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
				  u64 blkno,
				  u32 generation,
				  char *name)
{}

static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);

static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
				       struct ocfs2_dlm_debug *dlm_debug)
{}

static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{}

#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{}

static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
				    struct ocfs2_mask_waiter *mw, int ret)
{}

static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{}

static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
{}

static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
}
static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
			   int level, struct ocfs2_mask_waiter *mw, int ret)
{
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
#endif

static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *res,
				       enum ocfs2_lock_type type,
				       const struct ocfs2_lock_res_ops *ops,
				       void *priv)
{}

void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{}

void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
			       enum ocfs2_lock_type type,
			       unsigned int generation,
			       struct inode *inode)
{}

static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{}

static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
{}

static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{}

static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{}

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{}

void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
				u64 parent, struct inode *inode)
{}

static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
				      struct ocfs2_super *osb)
{}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
				       struct ocfs2_super *osb)
{}

static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
					 struct ocfs2_super *osb)
{}

static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
{}

void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
{}

void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb)
{}

static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
					    struct ocfs2_super *osb)
{}

void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
			      struct ocfs2_file_private *fp)
{}

void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
			       struct ocfs2_mem_dqinfo *info)
{}

void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
				  struct ocfs2_super *osb, u64 ref_blkno,
				  unsigned int generation)
{}

void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{}

/*
 * Keep a list of processes who have interest in a lockres.
 * Note: this is now only used for checking recursive cluster locking.
 */
static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
				   struct ocfs2_lock_holder *oh)
{}

static struct ocfs2_lock_holder *
ocfs2_pid_holder(struct ocfs2_lock_res *lockres,
		struct pid *pid)
{}

static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
				       struct ocfs2_lock_holder *oh)
{}


static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
				     int level)
{}

static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
				     int level)
{}

/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
static inline int ocfs2_highest_compat_lock_level(int level)
{}
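
/*
 * A sketch mirroring the mainline mapping: given the level another
 * node is asking for, return the most permissive level we may keep
 * holding that is still compatible with it.
 */
static inline int example_highest_compat_lock_level(int level)
{
	int new_level = DLM_LOCK_EX;	/* nothing blocking: keep EX */

	if (level == DLM_LOCK_EX)
		new_level = DLM_LOCK_NL;	/* EX excludes everything */
	else if (level == DLM_LOCK_PR)
		new_level = DLM_LOCK_PR;	/* readers can share */
	return new_level;
}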

static void lockres_set_flags(struct ocfs2_lock_res *lockres,
			      unsigned long newflags)
{}
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{}
static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
				unsigned long clear)
{}

static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{}

static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{}

static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{}

static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
				     int level)
{}

/*
 * OCFS2_LOCK_PENDING and l_pending_gen.
 *
 * Why does OCFS2_LOCK_PENDING exist?  To close a race between setting
 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock().  See ocfs2_unblock_lock()
 * for more details on the race.
 *
 * OCFS2_LOCK_PENDING closes the race quite nicely.  However, it introduces
 * a race on itself.  In o2dlm, we can get the ast before ocfs2_dlm_lock()
 * returns.  The ast clears OCFS2_LOCK_BUSY, and must therefore clear
 * OCFS2_LOCK_PENDING at the same time.  When ocfs2_dlm_lock() returns,
 * the caller is going to try to clear PENDING again.  If nothing else is
 * happening, __lockres_clear_pending() sees PENDING is unset and does
 * nothing.
 *
 * But what if another path (eg downconvert thread) has just started a
 * new locking action?  The other path has re-set PENDING.  Our path
 * cannot clear PENDING, because that will re-open the original race
 * window.
 *
 * [Example]
 *
 * ocfs2_meta_lock()
 *  ocfs2_cluster_lock()
 *   set BUSY
 *   set PENDING
 *   drop l_lock
 *   ocfs2_dlm_lock()
 *    ocfs2_locking_ast()		ocfs2_downconvert_thread()
 *     clear PENDING			 ocfs2_unblock_lock()
 *					  take_l_lock
 *					  !BUSY
 *					  ocfs2_prepare_downconvert()
 *					   set BUSY
 *					   set PENDING
 *					  drop l_lock
 *   take l_lock
 *   clear PENDING
 *   drop l_lock
 *			<window>
 *					  ocfs2_dlm_lock()
 *
 * So as you can see, we now have a window where l_lock is not held,
 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
 *
 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
 * set by ocfs2_prepare_downconvert().  That wasn't nice.
 *
 * To solve this we introduce l_pending_gen.  A call to
 * lockres_clear_pending() will only do so when it is passed a generation
 * number that matches the lockres.  lockres_set_pending() will return the
 * current generation number.  When ocfs2_cluster_lock() goes to clear
 * PENDING, it passes the generation it got from set_pending().  In our
 * example above, the generation numbers will *not* match.  Thus,
 * ocfs2_cluster_lock() will not clear the PENDING set by
 * ocfs2_prepare_downconvert().
 */
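
/*
 * A condensed sketch of that handshake (hypothetical helper names;
 * the real versions are the stubs below, which additionally assert
 * l_lock is held and wake the downconvert thread when needed):
 */
static inline unsigned int example_set_pending(struct ocfs2_lock_res *lockres)
{
	/* caller holds l_lock and has already set OCFS2_LOCK_BUSY */
	lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
	return lockres->l_pending_gen;
}

static inline void example_clear_pending(struct ocfs2_lock_res *lockres,
					 unsigned int generation)
{
	/* Only the path whose generation still matches may clear
	 * PENDING; anyone else lost the race to a newer locking
	 * action that has bumped the generation and owns the flag. */
	if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
	    lockres->l_pending_gen != generation)
		return;

	lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
	lockres->l_pending_gen++;
}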

/* Unlocked version for ocfs2_locking_ast() */
static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
				    unsigned int generation,
				    struct ocfs2_super *osb)
{}

/* Locked version for callers of ocfs2_dlm_lock() */
static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
				  unsigned int generation,
				  struct ocfs2_super *osb)
{}

static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
{}

static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
{}

static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
{}

static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
{}

/*
 * This is the filesystem locking protocol.  It provides the lock handling
 * hooks for the underlying DLM.  It has a maximum version number.
 * The version number allows interoperability with systems running at
 * the same major number and an equal or smaller minor number.
 *
 * Whenever the filesystem does new things with locks (adds or removes a
 * lock, orders them differently, does different things underneath a lock),
 * the version must be changed.  The protocol is negotiated when joining
 * the dlm domain.  A node may join the domain if its major version is
 * identical to all other nodes and its minor version is greater than
 * or equal to all other nodes.  When its minor version is greater than
 * the other nodes, it will run at the minor version specified by the
 * other nodes.
 *
 * If a locking change is made that will not be compatible with older
 * versions, the major number must be increased and the minor version set
 * to zero.  If a change merely adds a behavior that can be disabled when
 * speaking to older versions, the minor version must be increased.  If a
 * change adds a fully backwards compatible change (eg, LVB changes that
 * are just ignored by older versions), the version does not need to be
 * updated.
 */
static struct ocfs2_locking_protocol lproto =;
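
/*
 * A sketch of the elided initializer, following mainline (member names
 * assumed from stackglue's struct ocfs2_locking_protocol):
 *
 *	static struct ocfs2_locking_protocol lproto = {
 *		.lp_max_version = {
 *			.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
 *			.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
 *		},
 *		.lp_lock_ast		= ocfs2_locking_ast,
 *		.lp_blocking_ast	= ocfs2_blocking_ast,
 *		.lp_unlock_ast		= ocfs2_unlock_ast,
 *	};
 */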

void ocfs2_set_locking_protocol(void)
{}

static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert)
{}

/* Note: If we detect another process working on the lock (i.e.,
 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
 * to do the right thing in that case.
 */
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags)
{}

static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
					int flag)
{}

static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{}

static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{}

/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted)
{}

static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{}

static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{}

static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
				    struct ocfs2_mask_waiter *mw,
				    unsigned long mask,
				    unsigned long goal)
{}

/* returns 0 if the mw that was removed was already satisfied, -EBUSY
 * if the mask still hadn't reached its goal */
static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{}

static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{}

static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
					     struct ocfs2_lock_res *lockres)
{}

static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres,
				int level,
				u32 lkm_flags,
				int arg_flags,
				int l_subclass,
				unsigned long caller_ip)
{}

static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres,
				     int level,
				     u32 lkm_flags,
				     int arg_flags)
{}


static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
				   struct ocfs2_lock_res *lockres,
				   int level,
				   unsigned long caller_ip)
{}

static int ocfs2_create_new_lock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int ex,
				 int local)
{}

/* Grants us an EX lock on the data and metadata resources, skipping
 * the normal cluster directory lookup. Use this ONLY on newly created
 * inodes which other nodes can't possibly see, and which haven't been
 * hashed in the inode hash yet. This can give us a good performance
 * increase as it'll skip the network broadcast normally associated
 * with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{}
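
/*
 * Sketch of the fast path described above, assuming the generic
 * DLM_LKF_LOCAL flag from <linux/dlmconstants.h> (as mainline uses):
 * a lock created "local" is granted without talking to other nodes.
 * The real helper also records OCFS2_LOCK_LOCAL in l_flags, which is
 * omitted here.
 */
static int example_create_new_lock(struct ocfs2_super *osb,
				   struct ocfs2_lock_res *lockres,
				   int ex, int local)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;

	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}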

int ocfs2_rw_lock(struct inode *inode, int write)
{}

int ocfs2_try_rw_lock(struct inode *inode, int write)
{}

void ocfs2_rw_unlock(struct inode *inode, int write)
{}

/*
 * ocfs2_open_lock always takes a PR mode lock.
 */
int ocfs2_open_lock(struct inode *inode)
{}

int ocfs2_try_open_lock(struct inode *inode, int write)
{}

/*
 * ocfs2_open_unlock unlocks both PR and EX mode open locks.
 */
void ocfs2_open_unlock(struct inode *inode)
{}

static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
				     int level)
{}

/*
 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
 * flock() calls. The locking approach this requires is sufficiently
 * different from all other cluster lock types that we implement a
 * separate path to the "low-level" dlm calls. In particular:
 *
 * - No optimization of lock levels is done - we take the lock at
 *   exactly the level that's been requested.
 *
 * - No lock caching is employed. We immediately downconvert to
 *   no-lock at unlock time. (This also means flock locks never go on
 *   the blocking list.)
 *
 * - Since userspace can trivially deadlock itself with flock, we make
 *   sure to allow cancellation of a misbehaving application's flock()
 *   request.
 *
 * - Concurrent access to a given flock lockres is never required, so
 *   we can simplify the code by requiring the caller to guarantee
 *   serialization of dlmglue flock calls.
 */
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{}
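
/*
 * A hedged sketch of the trylock path: flock's LOCK_NB maps onto
 * DLM_LKF_NOQUEUE, so the DLM fails immediately with -EAGAIN instead
 * of queueing us behind other holders. Simplified from mainline --
 * the BUSY-flag bookkeeping, mask waiting, and the cancel path for
 * interrupted blocking requests are all omitted.
 */
static int example_file_trylock(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres, int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;

	return ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb,
			      DLM_LKF_NOQUEUE, lockres->l_name,
			      OCFS2_LOCK_ID_MAX_LEN - 1);
}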

void ocfs2_file_unlock(struct file *file)
{}

static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{}

#define OCFS2_SEC_BITS
#define OCFS2_SEC_SHIFT
#define OCFS2_NSEC_MASK

/* LVB only has room for 64 bits of time here so we pack it for
 * now. */
static u64 ocfs2_pack_timespec(struct timespec64 *spec)
{}
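
/*
 * A sketch with the mainline constants assumed: 34 bits of seconds in
 * the high end of the word, nanoseconds (< 2^30) in the low 30 bits.
 * Mainline additionally clamps out-of-range seconds, omitted here.
 */
static u64 example_pack_timespec(struct timespec64 *spec)
{
	u64 sec = spec->tv_sec;

	return (sec << 30) | ((u64)spec->tv_nsec & ((1ULL << 30) - 1));
}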

/* Call this with the lockres locked. I am reasonably sure we don't
 * need ip_lock in this function as anyone who would be changing those
 * values is supposed to be blocked in ocfs2_inode_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{}

static void ocfs2_unpack_timespec(struct timespec64 *spec,
				  u64 packed_time)
{}

static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
{}

static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
					      struct ocfs2_lock_res *lockres)
{}

/* Determine whether a lock resource needs to be refreshed, and
 * arbitrate who gets to refresh it.
 *
 *   0 means no refresh needed.
 *
 *   > 0 means you need to refresh this and you MUST call
 *   ocfs2_complete_lock_res_refresh afterwards. */
static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{}

/* If status is nonzero, I'll mark the lock as not being in refresh
 * anymore, but I won't clear the needs-refresh flag. */
static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
						   int status)
{}
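
/*
 * A hedged sketch of the expected calling pattern (mirroring how
 * ocfs2_inode_lock_update uses the pair; LVB-trust checks and error
 * handling are trimmed):
 */
static void example_refresh_pattern(struct inode *inode,
				    struct ocfs2_lock_res *lockres)
{
	int status;

	if (ocfs2_should_refresh_lock_res(lockres)) {
		/* We won the arbitration: reload state, then always
		 * signal completion so waiters can proceed. */
		status = ocfs2_refresh_inode_from_lvb(inode);
		ocfs2_complete_lock_res_refresh(lockres, status);
	}
}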

/* may or may not return a bh if it went to disk. */
static int ocfs2_inode_lock_update(struct inode *inode,
				  struct buffer_head **bh)
{}

static int ocfs2_assign_bh(struct inode *inode,
			   struct buffer_head **ret_bh,
			   struct buffer_head *passed_bh)
{}

/*
 * returns < 0 error if the callback will never be called, otherwise
 * the result of the lock will be communicated via the callback.
 */
int ocfs2_inode_lock_full_nested(struct inode *inode,
				 struct buffer_head **ret_bh,
				 int ex,
				 int arg_flags,
				 int subclass)
{}

/*
 * This is working around a lock inversion between tasks acquiring DLM
 * locks while holding a page lock and the downconvert thread which
 * blocks dlm lock acquiry while acquiring page locks.
 *
 * ** These _with_page variants are only intended to be called from aop
 * methods that hold page locks and return a very specific *positive* error
 * code that aop methods pass up to the VFS -- test for errors with != 0. **
 *
 * The DLM is called such that it returns -EAGAIN if it would have
 * blocked waiting for the downconvert thread.  In that case we unlock
 * our page so the downconvert thread can make progress.  Once we've
 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
 * that called us can bubble that back up into the VFS who will then
 * immediately retry the aop call.
 */
int ocfs2_inode_lock_with_page(struct inode *inode,
			      struct buffer_head **ret_bh,
			      int ex,
			      struct page *page)
{}
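
/*
 * Sketch of the nonblocking dance described above. Hedged: mainline
 * additionally takes and drops a blocking lock before returning, so a
 * hot retry loop cannot starve the downconvert thread; that part is
 * omitted here.
 */
static int example_inode_lock_with_page(struct inode *inode,
					struct buffer_head **ret_bh,
					int ex, struct page *page)
{
	int ret;

	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
	if (ret == -EAGAIN) {
		/* Let the downconvert thread at the page, then make
		 * the VFS retry the whole aop call. */
		unlock_page(page);
		ret = AOP_TRUNCATED_PAGE;
	}
	return ret;
}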

int ocfs2_inode_lock_atime(struct inode *inode,
			  struct vfsmount *vfsmnt,
			  int *level, int wait)
{}

void ocfs2_inode_unlock(struct inode *inode,
		       int ex)
{}

/*
 * These _tracker variants are introduced to deal with the recursive cluster
 * locking issue. The idea is to keep track of a lock holder on the stack of
 * the current process. If there's a lock holder on the stack, we know the
 * task context is already protected by cluster locking. Currently, they're
 * used in some VFS entry routines.
 *
 * return < 0 on error, return == 0 if there's no lock holder on the stack
 * before this call, return == 1 if this call would be a recursive locking.
 * return == -1 if this lock attempt will cause an upgrade which is forbidden.
 *
 * When taking lock levels into account, we face some different situations.
 *
 * 1. no lock is held
 *    In this case, just lock the inode as requested and return 0
 *
 * 2. We are holding a lock
 *    For this situation, things diverge into several cases
 *
 *    wanted     holding	     what to do
 *    ex		ex	    see 2.1 below
 *    ex		pr	    see 2.2 below
 *    pr		ex	    see 2.1 below
 *    pr		pr	    see 2.1 below
 *
 *    2.1 The lock level being held is compatible with the wanted
 *    level, so no lock action will be taken.
 *
 *    2.2 Otherwise, an upgrade is needed, but it is forbidden.
 *
 * The reason an upgrade within a process is forbidden is that a lock
 * upgrade may cause deadlock. The following illustrates how it
 * happens.
 *
 *         thread on node1                             thread on node2
 * ocfs2_inode_lock_tracker(ex=0)
 *
 *                                <======   ocfs2_inode_lock_tracker(ex=1)
 *
 * ocfs2_inode_lock_tracker(ex=1)
 */
int ocfs2_inode_lock_tracker(struct inode *inode,
			     struct buffer_head **ret_bh,
			     int ex,
			     struct ocfs2_lock_holder *oh)
{}

void ocfs2_inode_unlock_tracker(struct inode *inode,
				int ex,
				struct ocfs2_lock_holder *oh,
				int had_lock)
{}
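
/*
 * A hedged usage sketch -- the pattern VFS entry points such as
 * ocfs2_permission follow in mainline (names here are illustrative):
 */
static int example_tracked_op(struct inode *inode)
{
	struct ocfs2_lock_holder oh;
	struct buffer_head *bh = NULL;
	int had_lock;

	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 0, &oh);
	if (had_lock < 0)
		return had_lock;

	/* ... work under the PR lock. had_lock == 1 means a caller
	 * up-stack already held it; unlock_tracker uses that to skip
	 * the unlock, so we never drop a lock we didn't take ... */

	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
	brelse(bh);
	return 0;
}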

int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{}

void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
{}

int ocfs2_super_lock(struct ocfs2_super *osb,
		     int ex)
{}

void ocfs2_super_unlock(struct ocfs2_super *osb,
			int ex)
{}

int ocfs2_rename_lock(struct ocfs2_super *osb)
{}

void ocfs2_rename_unlock(struct ocfs2_super *osb)
{}

int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
{}

void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
{}

int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
		       struct ocfs2_trim_fs_info *info, int trylock)
{}

void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
			  struct ocfs2_trim_fs_info *info)
{}

int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{}

void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
{}

/* Reference counting of the dlm debug structure. We want this because
 * open references on the debug inodes can outlive the mount, so
 * we can't rely on the ocfs2_super to always exist. */
static void ocfs2_dlm_debug_free(struct kref *kref)
{}

void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
{}

static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
{}

struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
{}

/* Access to this is arbitrated for us via the seq_file mutex. */
struct ocfs2_dlm_seq_priv {};

static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
						 struct ocfs2_dlm_seq_priv *priv)
{}

static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
{}

static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
{}

static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
{}

/*
 * Version is used by debugfs.ocfs2 to determine the format being used
 *
 * New in version 2
 *	- Lock stats printed
 * New in version 3
 *	- Max time in lock stats is in usecs (instead of nsecs)
 * New in version 4
 *	- Add last pr/ex unlock times and first lock wait time in usecs
 */
#define OCFS2_DLM_DEBUG_STR_VERSION
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{}

static const struct seq_operations ocfs2_dlm_seq_ops =;

static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
{}

static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
{}

static const struct file_operations ocfs2_dlm_debug_fops =;

static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{}

static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
{}

int ocfs2_dlm_init(struct ocfs2_super *osb)
{}

void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
			int hangup_pending)
{}

static int ocfs2_drop_lock(struct ocfs2_super *osb,
			   struct ocfs2_lock_res *lockres)
{}

static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *lockres);

/* Mark the lockres as being dropped. It will no longer be
 * queued if blocking, but we still may have to wait on it
 * being dequeued from the downconvert thread before we can consider
 * it safe to drop.
 *
 * You can *not* attempt to call cluster_lock on this lockres anymore. */
void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres)
{}

void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
			       struct ocfs2_lock_res *lockres)
{}

static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
{}

int ocfs2_drop_inode_locks(struct inode *inode)
{}

static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level)
{}

static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation)
{}

/* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
				        struct ocfs2_lock_res *lockres)
{}

static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres)
{}

static int ocfs2_unblock_lock(struct ocfs2_super *osb,
			      struct ocfs2_lock_res *lockres,
			      struct ocfs2_unblock_ctl *ctl)
{}

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking)
{}

static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
				 struct ocfs2_lock_res *lockres,
				 int new_level)
{}

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level)
{}

static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
{}

/*
 * Does the final reference drop on our dentry lock. Right now this
 * happens in the downconvert thread, but we could choose to simplify the
 * dlmglue API and push these off to the ocfs2_wq in the future.
 */
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres)
{}

/*
 * d_delete() matching dentries before the lock downconvert.
 *
 * At this point, any process waiting to destroy the
 * dentry_lock due to last ref count is stopped by the
 * OCFS2_LOCK_QUEUED flag.
 *
 * We have two potential problems
 *
 * 1) If we do the last reference drop on our dentry_lock (via dput)
 *    we'll wind up in ocfs2_release_dentry_lock(), waiting on
 *    the downconvert to finish. Instead we take an elevated
 *    reference and push the drop until after we've completed our
 *    unblock processing.
 *
 * 2) There might be another process with a final reference,
 *    waiting on us to finish processing. If this is the case, we
 *    detect it and exit out - there are no more dentries anyway.
 */
static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking)
{}

static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
					    int new_level)
{}

static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
					 int blocking)
{}

static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
{}

void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{}

static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
{}

/* Lock quota info; this function expects at least a shared lock on the
 * quota file so that we can safely refresh quota info from disk. */
int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{}

int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
{}

void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
{}

static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *lockres)
{}

static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{}

static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
{}

static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
{}

static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
{}

static int ocfs2_downconvert_thread(void *arg)
{}

void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
{}