// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"
#include "xfs_health.h"

struct kmem_cache	*xfs_log_ticket_cache;

/* Local miscellaneous function prototypes */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(
	struct xlog_in_core	*iclog);
STATIC void xlog_state_do_callback(
	struct xlog		*log);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*logoffsetp);
STATIC void
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	struct xlog_ticket	*ticket);
#if defined(DEBUG)
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
#else
#define xlog_verify_iclog(a,b,c)
#define xlog_verify_tail_lsn(a,b)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);

static int
xfs_log_cover(struct xfs_mount *);

/*
 * We need to make sure the buffer pointer returned is naturally aligned for the
 * biggest basic data type we put into it. We have already accounted for this
 * padding when sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have to
 * track the space used by the log vectors separately to prevent log space hangs
 * due to inaccurate accounting (i.e. a leak) of the used log space through the
 * CIL context ticket.
 *
 * We also add space for the xlog_op_header that describes this region in the
 * log. This prepends the data region we return to the caller to copy their data
 * into, so do all the static initialisation of the ophdr now. Because the ophdr
 * is not 8 byte aligned, we have to be careful to ensure that we align the
 * start of the buffer such that the region we return to the caller is 8 byte
 * aligned and packed against the tail of the ophdr.
 */
void *
xlog_prepare_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			type)
{}
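
/*
 * Illustration only, not built: a minimal userspace-style sketch of the
 * alignment rule described above. The names (sketch_op_header,
 * sketch_prepare_region) are hypothetical stand-ins for xlog_op_header and
 * the lv/vecp bookkeeping, which are not reproduced here.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct sketch_op_header {		/* 12 bytes, like xlog_op_header */
	uint32_t	oh_tid;
	uint32_t	oh_len;
	uint8_t		oh_clientid;
	uint8_t		oh_flags;
	uint16_t	oh_res2;
};

/*
 * Round the data region up to the next 8 byte boundary past the op header,
 * then pack the op header directly against the start of that region.
 */
static void *
sketch_prepare_region(
	char				*buf,
	size_t				*used,
	struct sketch_op_header		**ophdrp)
{
	size_t		data_off;

	data_off = (*used + sizeof(struct sketch_op_header) + 7) & ~(size_t)7;
	*ophdrp = (struct sketch_op_header *)
			(buf + data_off - sizeof(struct sketch_op_header));
	*used = data_off;	/* caller copies its data at buf + data_off */
	return buf + data_off;
}
#endif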

static inline void
xlog_grant_sub_space(
	struct xlog_grant_head	*head,
	int64_t			bytes)
{}

static inline void
xlog_grant_add_space(
	struct xlog_grant_head	*head,
	int64_t			bytes)
{}

static void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{}

void
xlog_grant_return_space(
	struct xlog	*log,
	xfs_lsn_t	old_head,
	xfs_lsn_t	new_head)
{}

/*
 * Return the space in the log between the tail and the head.  In the case where
 * we have overrun available reservation space, return 0. The memory barrier
 * pairs with the smp_wmb() in xlog_cil_ail_insert() to ensure that grant head
 * vs tail space updates are seen in the correct order and hence avoid
 * transients as space is transferred from the grant heads to the AIL on commit
 * completion.
 */
static uint64_t
xlog_grant_space_left(
	struct xlog		*log,
	struct xlog_grant_head	*head)
{}
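
/*
 * Illustration only, not built: the free-space calculation described above,
 * reduced to a plain circular log. sketch_space_left() and its parameters are
 * hypothetical; the memory barrier pairing and the (cycle, block) packing of
 * the grant heads in the real code are not reproduced.
 */
#if 0
#include <stdint.h>

static int64_t
sketch_space_left(
	int64_t		log_size,	/* usable log size in bytes */
	uint32_t	tail_cycle,
	int64_t		tail_offset,	/* byte offset of the tail in the log */
	uint32_t	head_cycle,
	int64_t		head_offset)	/* byte offset of the head in the log */
{
	int64_t		free;

	if (head_cycle == tail_cycle)
		/* head has not wrapped; free space runs around the end */
		free = log_size - (head_offset - tail_offset);
	else
		/* head is one cycle ahead; free space ends at the tail */
		free = tail_offset - head_offset;

	/* an overrun of the reservation space is reported as no space left */
	return free > 0 ? free : 0;
}
#endif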

STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{}

static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{}

STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{}

STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{}

/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{}
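
/*
 * Illustration only, not built: the "check unlocked, lock only to queue and
 * sleep" pattern described above, reduced to a generic byte-count reservation
 * in userspace. All sketch_* names are hypothetical; the real code's fairness
 * towards earlier waiters and its ordering rules are not reproduced.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>

struct sketch_grant_head {		/* lock/wait need explicit init before use */
	atomic_long		free_bytes;
	atomic_int		nr_waiters;	/* only changed under lock */
	pthread_mutex_t		lock;
	pthread_cond_t		wait;
};

static void
sketch_reserve(struct sketch_grant_head *head, long need)
{
	/* fast path: nobody queued ahead of us, grab the space locklessly */
	if (atomic_load(&head->nr_waiters) == 0) {
		if (atomic_fetch_sub(&head->free_bytes, need) >= need)
			return;
		/* overdrew: give the space back and take the slow path */
		atomic_fetch_add(&head->free_bytes, need);
	}

	/* slow path: queue up under the lock and sleep until replenished */
	pthread_mutex_lock(&head->lock);
	atomic_fetch_add(&head->nr_waiters, 1);
	while (atomic_fetch_sub(&head->free_bytes, need) < need) {
		atomic_fetch_add(&head->free_bytes, need);
		pthread_cond_wait(&head->wait, &head->lock);
	}
	atomic_fetch_sub(&head->nr_waiters, 1);
	pthread_mutex_unlock(&head->lock);
}

static void
sketch_replenish(struct sketch_grant_head *head, long bytes)
{
	pthread_mutex_lock(&head->lock);
	atomic_fetch_add(&head->free_bytes, bytes);
	pthread_cond_broadcast(&head->wait);
	pthread_mutex_unlock(&head->lock);
}
#endif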

bool
xfs_log_writable(
	struct xfs_mount	*mp)
{}

/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{}

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation.  By wasting space in each
 * reservation, we prevent over allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	bool			permanent)
{}

/*
 * Run all the pending iclog callbacks and wake log force waiters and iclog
 * space waiters so they can process the newly set shutdown state. We really
 * don't care what order we process callbacks here because the log is shut down
 * and so state cannot change on disk anymore. However, we cannot wake waiters
 * until the callbacks have been processed because we may be in unmount and
 * we must ensure that all AIL operations the callbacks perform have completed
 * before we tear down the AIL.
 *
 * We avoid processing actively referenced iclogs so that we don't run callbacks
 * while the iclog owner might still be preparing the iclog for IO submission.
 * These will be caught by xlog_state_iclog_release() and call this function
 * again to process any callbacks that may have been added to that iclog.
 */
static void
xlog_state_shutdown_callbacks(
	struct xlog		*log)
{}

/*
 * Flush iclog to disk if this is the last reference to the given iclog and
 * it is in the WANT_SYNC state.
 *
 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
 * written to stable storage, and implies that a commit record is contained
 * within the iclog. We need to ensure that the log tail does not move beyond
 * the tail that the first commit record in the iclog ordered against, otherwise
 * correct recovery of that checkpoint becomes dependent on future operations
 * performed on this iclog.
 *
 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
 * current tail into iclog. Once the iclog tail is set, future operations must
 * not modify it, otherwise they potentially violate ordering constraints for
 * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
 * the iclog will get zeroed on activation of the iclog after sync, so we
 * always capture the tail lsn on the iclog on the first NEED_FUA release
 * regardless of the number of active reference counts on this iclog.
 */
int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	struct xlog_ticket	*ticket)
{}
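
/*
 * Illustration only, not built: the "capture the tail once" rule described
 * above, reduced to its control flow. The field and helper names here are
 * hypothetical simplifications of the iclog header and the current tail lsn.
 */
#if 0
#include <stdint.h>

#define SKETCH_NEED_FUA		(1u << 0)

struct sketch_iclog {
	unsigned int	flags;
	uint64_t	tail_lsn;	/* 0 means "not captured yet" */
};

static void
sketch_release_iclog(struct sketch_iclog *iclog, uint64_t current_tail_lsn)
{
	/*
	 * Capture the tail on the first NEED_FUA release only; later releases
	 * must leave it alone so the first commit record's ordering holds.
	 */
	if ((iclog->flags & SKETCH_NEED_FUA) && !iclog->tail_lsn)
		iclog->tail_lsn = current_tail_lsn;
}
#endif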

/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t		*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{}

/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{}

/*
 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
void
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{}

/*
 * Flush out the iclog to disk ensuring that device caches are flushed and
 * the iclog hits stable storage before any completion waiters are woken.
 */
static inline int
xlog_force_iclog(
	struct xlog_in_core	*iclog)
{}

/*
 * Cycle all the iclogbuf locks to make sure all log IO completion
 * is done before we tear down these buffers.
 */
static void
xlog_wait_iclog_completion(struct xlog *log)
{}

/*
 * Wait for the iclog and all prior iclogs to be written to disk as required by
 * log force state machine. Waiting on ic_force_wait ensures iclog completions
 * have been ordered and callbacks run before we are woken here, hence
 * guaranteeing that all the iclogs up to this one are on stable storage.
 */
int
xlog_wait_on_iclog(
	struct xlog_in_core	*iclog)
		__releases(iclog->ic_log->l_icloglock)
{}

/*
 * Write out an unmount record using the ticket provided. We have to account for
 * the data space used in the unmount ticket as this write is not done from a
 * transaction context that has already done the accounting for us.
 */
static int
xlog_write_unmount_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{}

/*
 * Mark the filesystem clean by writing an unmount record to the head of the
 * log.
 */
static void
xlog_unmount_write(
	struct xlog		*log)
{}

static void
xfs_log_unmount_verify_iclog(
	struct xlog		*log)
{}

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */
static void
xfs_log_unmount_write(
	struct xfs_mount	*mp)
{}

/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can cover the log.
 */
int
xfs_log_quiesce(
	struct xfs_mount	*mp)
{}

void
xfs_log_clean(
	struct xfs_mount	*mp)
{}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{}

void
xfs_log_item_init(
	struct xfs_mount	*mp,
	struct xfs_log_item	*item,
	int			type,
	const struct xfs_item_ops *ops)
{}

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{}

/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered. To begin the transition to the idle state firstly the log needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed, the caller is
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL.  Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
static bool
xfs_log_need_covered(
	struct xfs_mount	*mp)
{}

/*
 * Explicitly cover the log. This is similar to background log covering but
 * intended for usage in quiesce codepaths. The caller is responsible for
 * ensuring the log is idle and suitable for covering. The CIL, iclog buffers
 * and AIL must all be empty.
 */
static int
xfs_log_cover(
	struct xfs_mount	*mp)
{}

static void
xlog_ioend_work(
	struct work_struct	*work)
{}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */
STATIC void
xlog_get_iclog_buffer_size(
	struct xfs_mount	*mp,
	struct xlog		*log)
{}
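
/*
 * Illustration only, not built: the default geometry described above (eight
 * 32kB in-core buffers, bumped up when the filesystem block size is larger),
 * with hypothetical names; mount-option overrides are not shown.
 */
#if 0
static void
sketch_iclog_buffer_size(
	unsigned int	fs_blocksize,
	unsigned int	*iclog_size,
	unsigned int	*iclog_count)
{
	*iclog_count = 8;			/* 8 buffers by default */
	*iclog_size = 32 * 1024;		/* of 32kB each */

	/* directory code can log whole fs blocks, so never go smaller */
	if (*iclog_size < fs_blocksize)
		*iclog_size = fs_blocksize;
}
#endif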

void
xfs_log_work_queue(
	struct xfs_mount        *mp)
{}

/*
 * Clear the log incompat flags if we have the opportunity.
 *
 * This only happens if we're about to log the second dummy transaction as part
 * of covering the log.
 */
static inline void
xlog_clear_incompat(
	struct xlog		*log)
{}

/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
static void
xfs_log_worker(
	struct work_struct	*work)
{}

/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough so that recovery can occur.  However,
 * some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{}	/* xlog_alloc_log */

/*
 * Stamp cycle number in every block
 */
STATIC void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			roundoff)
{}
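
/*
 * Illustration only, not built: the cycle-stamping scheme described above and
 * in the comment before xlog_sync(). The first word of every 512 byte block is
 * saved into a header-side array and overwritten with the current cycle
 * number; recovery does the reverse. Names are hypothetical, and the
 * big-endian conversion and extended headers for large iclogs are omitted.
 */
#if 0
#include <stdint.h>

#define SKETCH_BBSIZE	512			/* basic block size */

static void
sketch_pack_data(
	char		*data,			/* iclog data region */
	unsigned int	size,			/* bytes to stamp */
	uint32_t	cycle,			/* current log cycle number */
	uint32_t	*saved_words)		/* header-side save area */
{
	unsigned int	i;

	for (i = 0; i < size / SKETCH_BBSIZE; i++) {
		uint32_t	*blockp = (uint32_t *)(data + i * SKETCH_BBSIZE);

		/* remember the real first word, then tag the block */
		saved_words[i] = *blockp;
		*blockp = cycle;
	}
}
#endif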

/*
 * Calculate the checksum for a log buffer.
 *
 * This is a little more complicated than it should be because the various
 * headers and the actual data are non-contiguous.
 */
__le32
xlog_cksum(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{}
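
/*
 * Illustration only, not built: chaining a single checksum across the
 * non-contiguous header and data regions described above. sketch_* names are
 * hypothetical; the v2-log extended headers and XFS seed/endian conventions
 * are not reproduced, and the bitwise CRC-32C helper merely stands in for a
 * real crc32c() implementation.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

/* slow, bitwise CRC-32C (Castagnoli) */
static uint32_t
sketch_crc32c(uint32_t crc, const void *p, size_t len)
{
	const uint8_t	*b = p;
	int		i;

	while (len--) {
		crc ^= *b++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78 : 0);
	}
	return crc;
}

/*
 * Chain the CRC across the non-contiguous pieces: the record header (with a
 * zero word standing in for the crc field itself) and then the payload.
 */
static uint32_t
sketch_log_record_cksum(
	const char	*rhead,		/* record header */
	size_t		rhead_len,
	size_t		crc_offset,	/* offset of the crc field in rhead */
	const char	*data,		/* payload; not contiguous with rhead */
	size_t		data_len)
{
	uint32_t	zero = 0;
	uint32_t	crc = ~0u;

	crc = sketch_crc32c(crc, rhead, crc_offset);
	crc = sketch_crc32c(crc, &zero, sizeof(zero));
	crc = sketch_crc32c(crc, rhead + crc_offset + sizeof(zero),
			    rhead_len - crc_offset - sizeof(zero));
	crc = sketch_crc32c(crc, data, data_len);
	return ~crc;
}
#endif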

static void
xlog_bio_end_io(
	struct bio		*bio)
{}

static int
xlog_map_iclog_data(
	struct bio		*bio,
	void			*data,
	size_t			count)
{}

STATIC void
xlog_write_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint64_t		bno,
	unsigned int		count)
{}

/*
 * We need to bump cycle number for the part of the iclog that is
 * written to the start of the log. Watch out for the header magic
 * number case, though.
 */
static void
xlog_split_iclog(
	struct xlog		*log,
	void			*data,
	uint64_t		bno,
	unsigned int		count)
{}

static int
xlog_calc_iclog_size(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint32_t		*roundoff)
{}

/*
 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
 * fashion.  By the time this is called, the current iclog pointer in the log
 * should already have been advanced to the next available iclog.  This allows
 * further writes to continue while this code syncs out an iclog ready to go.
 * Before an in-core log can be written out, the data section must be scanned
 * to save away the 1st word of each BBSIZE block into the header.  We replace
 * it with the current cycle count.  Each BBSIZE block is tagged with the
 * cycle count because there is an implicit assumption that drives will
 * guarantee that entire 512 byte blocks get written at once.  In other words,
 * we can't have part of a 512 byte block written and part not written.  By
 * tagging each block, we will know which blocks are valid when recovering
 * after an unclean shutdown.
 *
 * This routine is single threaded on the iclog.  No other thread can be in
 * this routine with the same iclog.  Changing contents of iclog can there-
 * fore be done without grabbing the state machine lock.  Updating the global
 * log will require grabbing the lock though.
 *
 * The entire log manager uses a logical block numbering scheme.  Only
 * xlog_write_iclog knows about the fact that the log may not start with
 * block zero on a given device.
 */
STATIC void
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	struct xlog_ticket	*ticket)
{}

/*
 * Deallocate a log structure
 */
STATIC void
xlog_dealloc_log(
	struct xlog	*log)
{}

/*
 * Update counters atomically now that memcpy is done.
 */
static inline void
xlog_state_finish_copy(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			record_cnt,
	int			copy_bytes)
{}

/*
 * print out info relating to regions written which consume
 * the reservation
 */
void
xlog_print_tic_res(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket)
{}

/*
 * Print a summary of the transaction.
 */
void
xlog_print_trans(
	struct xfs_trans	*tp)
{}

static inline void
xlog_write_iovec(
	struct xlog_in_core	*iclog,
	uint32_t		*log_offset,
	void			*data,
	uint32_t		write_len,
	int			*bytes_left,
	uint32_t		*record_cnt,
	uint32_t		*data_cnt)
{}

/*
 * Write log vectors into a single iclog which is guaranteed by the caller
 * to have enough space to write the entire log vector into.
 */
static void
xlog_write_full(
	struct xfs_log_vec	*lv,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	*iclog,
	uint32_t		*log_offset,
	uint32_t		*len,
	uint32_t		*record_cnt,
	uint32_t		*data_cnt)
{}

static int
xlog_write_get_more_iclog_space(
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclogp,
	uint32_t		*log_offset,
	uint32_t		len,
	uint32_t		*record_cnt,
	uint32_t		*data_cnt)
{}

/*
 * Write log vectors into a single iclog which is smaller than the current chain
 * length. We write until we cannot fit a full record into the remaining space
 * and then stop. We return the log vector that is to be written that cannot
 * wholly fit in the iclog.
 */
static int
xlog_write_partial(
	struct xfs_log_vec	*lv,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclogp,
	uint32_t		*log_offset,
	uint32_t		*len,
	uint32_t		*record_cnt,
	uint32_t		*data_cnt)
{}

/*
 * Write some region out to in-core log
 *
 * This will be called when writing externally provided regions or when
 * writing out a commit record for a given transaction.
 *
 * General algorithm:
 *	1. Find total length of this write.  This may include adding to the
 *		lengths passed in.
 *	2. Check whether we violate the tickets reservation.
 *	3. While writing to this iclog
 *	    A. Reserve as much space in this iclog as we can get
 *	    B. If this is first write, save away start lsn
 *	    C. While writing this region:
 *		1. If first write of transaction, write start record
 *		2. Write log operation header (header per region)
 *		3. Find out if we can fit entire region into this iclog
 *		4. Potentially, verify destination memcpy ptr
 *		5. Memcpy (partial) region
 *		6. If partial copy, release iclog; otherwise, continue
 *			copying more regions into current iclog
 *	4. Mark want sync bit (in simulation mode)
 *	5. Release iclog for potential flush to on-disk log.
 *
 * ERRORS:
 * 1.	Panic if reservation is overrun.  This should never happen since
 *	reservation amounts are generated internal to the filesystem.
 * NOTES:
 * 1. Tickets are single threaded data structures.
 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
 *	syncing routine.  When a single log_write region needs to span
 *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
 *	on all log operation writes which don't contain the end of the
 *	region.  The XLOG_END_TRANS bit is used for the in-core log
 *	operation which contains the end of the continued log_write region.
 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
 *	we don't really know exactly how much space will be used.  As a result,
 *	we don't update ic_offset until the end when we know exactly how many
 *	bytes have been written out.
 */
int
xlog_write(
	struct xlog		*log,
	struct xfs_cil_ctx	*ctx,
	struct list_head	*lv_chain,
	struct xlog_ticket	*ticket,
	uint32_t		len)
{}
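
/*
 * Illustration only, not built: the core copy loop behind the algorithm
 * above, reduced to copying a list of regions into fixed-size buffers. Full
 * regions are copied while they fit; when one does not, the remainder is
 * carried into the next buffer, mirroring the xlog_write_full() /
 * xlog_write_partial() split. All names are hypothetical; op headers,
 * continuation flags and reservation accounting are not reproduced, and the
 * caller is assumed to supply enough buffers for all the data.
 */
#if 0
#include <stddef.h>
#include <string.h>

struct sketch_region {
	const char	*data;
	size_t		len;
};

struct sketch_buf {
	char		*data;
	size_t		space;
};

static void
sketch_write_regions(
	const struct sketch_region	*regions,
	size_t				nregions,
	struct sketch_buf		*bufs,
	size_t				*buf_used)	/* bytes used per buffer */
{
	size_t		b = 0, used = 0, i;

	for (i = 0; i < nregions; i++) {
		const char	*src = regions[i].data;
		size_t		left = regions[i].len;

		while (left) {
			size_t	room = bufs[b].space - used;
			size_t	copy = left < room ? left : room;

			memcpy(bufs[b].data + used, src, copy);
			used += copy;
			src += copy;
			left -= copy;

			/* region did not fit: continue it in the next buffer */
			if (left) {
				buf_used[b] = used;
				b++;
				used = 0;
			}
		}
	}
	buf_used[b] = used;
}
#endif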

static void
xlog_state_activate_iclog(
	struct xlog_in_core	*iclog,
	int			*iclogs_changed)
{}

/*
 * Loop through all iclogs and mark all iclogs currently marked DIRTY as
 * ACTIVE after iclog I/O has completed.
 */
static void
xlog_state_activate_iclogs(
	struct xlog		*log,
	int			*iclogs_changed)
{}

static int
xlog_covered_state(
	int			prev_state,
	int			iclogs_changed)
{}

STATIC void
xlog_state_clean_iclog(
	struct xlog		*log,
	struct xlog_in_core	*dirty_iclog)
{}

STATIC xfs_lsn_t
xlog_get_lowest_lsn(
	struct xlog		*log)
{}

/*
 * Return true if we need to stop processing, false to continue to the next
 * iclog. The caller will need to run callbacks if the iclog is returned in the
 * XLOG_STATE_CALLBACK state.
 */
static bool
xlog_state_iodone_process_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{}

/*
 * Loop over all the iclogs, running attached callbacks on them. Return true if
 * we ran any callbacks, indicating that we dropped the icloglock. We don't need
 * to handle transient shutdown state here at all because
 * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
 * cleanup of the callbacks.
 */
static bool
xlog_state_do_iclog_callbacks(
	struct xlog		*log)
		__releases(&log->l_icloglock)
		__acquires(&log->l_icloglock)
{}


/*
 * Loop running iclog completion callbacks until there are no more iclogs in a
 * state that can run callbacks.
 */
STATIC void
xlog_state_do_callback(
	struct xlog		*log)
{}


/*
 * Finish transitioning this iclog to the dirty state.
 *
 * Callbacks could take time, so they are done outside the scope of the
 * global state machine log lock.
 */
STATIC void
xlog_state_done_syncing(
	struct xlog_in_core	*iclog)
{}

/*
 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
 * sleep.  We wait on the flush queue on the head iclog as that should be
 * the first iclog to complete flushing. Hence if all iclogs are syncing,
 * we will wait here and all new writes will sleep until a sync completes.
 *
 * The in-core logs are used in a circular fashion. They are not used
 * out-of-order even when an iclog past the head is free.
 *
 * return:
 *	* log_offset where xlog_write() can start writing into the in-core
 *		log's data space.
 *	* in-core log pointer to which xlog_write() should write.
 *	* boolean indicating this is a continued write to an in-core log.
 *		If this is the last write, then the in-core log's offset field
 *		needs to be incremented, depending on the amount of data which
 *		is copied.
 */
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclogp,
	struct xlog_ticket	*ticket,
	int			*logoffsetp)
{}

/*
 * The first cnt-1 times a ticket goes through here we don't need to move the
 * grant write head because the permanent reservation has reserved cnt times the
 * unit amount.  Release part of current permanent unit reservation and reset
 * current reservation to be one unit's worth.  Also move the grant
 * reservation head forward.
 */
void
xfs_log_ticket_regrant(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{}

/*
 * Give back the space left from a reservation.
 *
 * All the information we need to make a correct determination of space left
 * is present.  For non-permanent reservations, things are quite easy.  The
 * count should have been decremented to zero.  We only need to deal with the
 * space remaining in the current reservation part of the ticket.  If the
 * ticket contains a permanent reservation, there may be left over space which
 * needs to be released.  A count of N means that N-1 refills of the current
 * reservation can be done before we need to ask for more space.  The first
 * one goes to fill up the first current reservation.  Once we run out of
 * space, the count will stay at zero and the only space remaining will be
 * in the current reservation field.
 */
void
xfs_log_ticket_ungrant(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{}
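
/*
 * Illustration only, not built: the give-back calculation described above.
 * For a permanent reservation with N refills remaining, the unused space is
 * the current reservation plus N whole units. Names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static int64_t
sketch_ungrant_bytes(
	int64_t		curr_res,	/* unused part of the current reservation */
	int64_t		unit_res,	/* size of one reservation unit */
	int		cnt,		/* refills remaining; 0 once used up */
	bool		permanent)
{
	int64_t		bytes = curr_res;

	if (permanent && cnt > 0)
		bytes += (int64_t)cnt * unit_res;
	return bytes;
}
#endif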

/*
 * This routine will mark the current iclog in the ring as WANT_SYNC and move
 * the current iclog pointer to the next iclog in the ring.
 */
void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size)
{}

/*
 * Force the iclog to disk and check if the iclog has been completed before
 * xlog_force_iclog() returns. This can happen on synchronous (e.g.
 * pmem) or fast async storage because we drop the icloglock to issue the IO.
 * If completion has already occurred, tell the caller so that it can avoid an
 * unnecessary wait on the iclog.
 */
static int
xlog_force_and_check_iclog(
	struct xlog_in_core	*iclog,
	bool			*completed)
{}

/*
 * Write out all data in the in-core log as of this exact moment in time.
 *
 * Data may be written to the in-core log during this call.  However,
 * we don't guarantee this data will be written out.  A change from past
 * implementation means this routine will *not* write out zero length LRs.
 *
 * Basically, we try and perform an intelligent scan of the in-core logs.
 * If we determine there is no flushable data, we just return.  There is no
 * flushable data if:
 *
 *	1. the current iclog is active and has no data; the previous iclog
 *		is in the active or dirty state.
 *	2. the current iclog is dirty, and the previous iclog is in the
 *		active or dirty state.
 *
 * We may sleep if:
 *
 *	1. the current iclog is not in the active nor dirty state.
 *	2. the current iclog is dirty, and the previous iclog is not in the
 *		active nor dirty state.
 *	3. the current iclog is active, and there is another thread writing
 *		to this particular iclog.
 *	4. a) the current iclog is active and has no other writers
 *	   b) when we return from flushing out this iclog, it is still
 *		not in the active nor dirty state.
 */
int
xfs_log_force(
	struct xfs_mount	*mp,
	uint			flags)
{}

/*
 * Force the log to a specific LSN.
 *
 * If an iclog with that lsn can be found:
 *	If it is in the DIRTY state, just return.
 *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
 *		state and go to sleep or return.
 *	If it is in any other state, go to sleep or return.
 *
 * Synchronous forces are implemented with a wait queue.  All callers trying
 * to force a given lsn to disk must wait on the queue attached to the
 * specific in-core log.  When given in-core log finally completes its write
 * to disk, that thread will wake up all threads waiting on the queue.
 */
static int
xlog_force_lsn(
	struct xlog		*log,
	xfs_lsn_t		lsn,
	uint			flags,
	int			*log_flushed,
	bool			already_slept)
{}

/*
 * Force the log to a specific checkpoint sequence.
 *
 * First force the CIL so that all the required changes have been flushed to the
 * iclogs. If the CIL force completed it will return a commit LSN that indicates
 * the iclog that needs to be flushed to stable storage. If the caller needs
 * a synchronous log force, we will wait on the iclog with the LSN returned by
 * xlog_cil_force_seq() to be completed.
 */
int
xfs_log_force_seq(
	struct xfs_mount	*mp,
	xfs_csn_t		seq,
	uint			flags,
	int			*log_flushed)
{}

/*
 * Free a used ticket when its refcount falls to zero.
 */
void
xfs_log_ticket_put(
	xlog_ticket_t	*ticket)
{}

xlog_ticket_t *
xfs_log_ticket_get(
	xlog_ticket_t	*ticket)
{}

/*
 * Figure out the total log space unit (in bytes) that would be
 * required for a log ticket.
 */
static int
xlog_calc_unit_res(
	struct xlog		*log,
	int			unit_bytes,
	int			*niclogs)
{}
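
/*
 * Illustration only, not built: a simplified version of the overhead a unit
 * reservation has to carry on top of the caller's payload, as computed by
 * xlog_calc_unit_res(). The real calculation also accounts for operation
 * headers, the commit record, log stripe unit rounding and v2 log headers;
 * the names here are hypothetical.
 */
#if 0
static int
sketch_calc_unit_res(
	int		unit_bytes,	/* caller's worst-case payload */
	int		iclog_size,	/* in-core log buffer size */
	int		rec_hdr_size)	/* per-record (iclog) header size */
{
	int		payload_per_iclog = iclog_size - rec_hdr_size;
	int		num_headers;

	/* one record header for every iclog this payload can spill into */
	num_headers = (unit_bytes + payload_per_iclog - 1) / payload_per_iclog;

	/* reserve the payload plus a header's worth of space per record */
	return unit_bytes + num_headers * rec_hdr_size;
}
#endif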

int
xfs_log_calc_unit_res(
	struct xfs_mount	*mp,
	int			unit_bytes)
{}

/*
 * Allocate and initialise a new log ticket.
 */
struct xlog_ticket *
xlog_ticket_alloc(
	struct xlog		*log,
	int			unit_bytes,
	int			cnt,
	bool			permanent)
{}

#if defined(DEBUG)
static void
xlog_verify_dump_tail(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{}

/* Check if the new iclog will fit in the log. */
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{}

/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *		individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *	log, check the preceding blocks of the physical log to make sure all
 *	the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count)
{}
#endif

/*
 * Perform a forced shutdown on the log.
 *
 * This can be called from low level log code to trigger a shutdown, or from the
 * high level mount shutdown code when the mount shuts down.
 *
 * Our main objectives here are to make sure that:
 *	a. if the shutdown was not due to a log IO error, flush the logs to
 *	   disk. Anything modified after this is ignored.
 *	b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
 *	   parties to find out. Nothing new gets queued after this is done.
 *	c. Tasks sleeping on log reservations, pinned objects and
 *	   other resources get woken up.
 *	d. The mount is also marked as shut down so that log triggered shutdowns
 *	   still behave the same as if they called xfs_forced_shutdown().
 *
 * Return true if the shutdown cause was a log IO error and we actually shut the
 * log down.
 */
bool
xlog_force_shutdown(
	struct xlog	*log,
	uint32_t	shutdown_flags)
{}

STATIC int
xlog_iclogs_empty(
	struct xlog	*log)
{}

/*
 * Verify that an LSN stamped into a piece of metadata is valid. This is
 * intended for use in read verifiers on v5 superblocks.
 */
bool
xfs_log_check_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn)
{}