// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_error.h"

struct kmem_cache	*xfs_buf_item_cache;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{ … }

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec	*iovec)
{ … }

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{ … }

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	int			nbits)
{ … }

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item segment.
 *
 * The count is one iovec for the buf log format structure plus one for each
 * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
 * in a single iovec.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	uint			offset,
	int			*nvecs,
	int			*nbytes)
{ … }

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as
 * though they came from separate buffers, just as would occur if multiple
 * buffers were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures. If the item has previously been logged and has dirty
 * regions, we do not relog them in stale buffers. This has the effect of
 * reducing the size of the relogged item by the amount of dirty data tracked
 * by the log item. This can result in the committing transaction reducing the
 * amount of space being consumed by the CIL.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{ … }

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{ … }

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{ … }

/*
 * This is called to fill in the vector of log iovecs for the given log buf
 * item. It fills the first entry with a buf log format structure, and the
 * rest point to contiguous chunks within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{ … }
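/*
 * Illustrative sketch, not part of the build: one way to derive the iovec
 * count described above from a per-chunk dirty bitmap. One vector covers the
 * buf log format header, plus one vector for each contiguous run of set bits.
 * All names below are hypothetical; the real code walks blf_data_map in
 * xfs_buf_item_size_segment() and additionally splits runs that straddle
 * discontiguous buffer pages.
 */
#if 0
static int
example_count_iovecs(
	const unsigned int	*map,		/* hypothetical dirty bitmap */
	unsigned int		map_bits)	/* number of valid bits in map */
{
	unsigned int		bit;
	int			in_run = 0;
	int			nvecs = 1;	/* the buf log format header */

	for (bit = 0; bit < map_bits; bit++) {
		int	set = (map[bit / 32] >> (bit % 32)) & 1;

		if (set && !in_run)
			nvecs++;		/* a new contiguous run starts */
		in_run = set;
	}
	return nvecs;
}
#endif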
/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We take a reference to the buffer log item here so that the BLI life cycle
 * extends at least until the buffer is unpinned via xfs_buf_item_unpin() and
 * inserted into the AIL.
 *
 * We also need to take a reference to the buffer itself as the BLI unpin
 * processing requires accessing the buffer after the BLI has dropped the final
 * BLI reference. See xfs_buf_item_unpin() for an explanation.
 * If unpins race to drop the final BLI reference and only the BLI owns a
 * reference to the buffer, then the loser of the race can have the buffer
 * freed from under it (e.g. on shutdown). Taking a buffer reference per pin
 * count ensures the life cycle of the buffer extends for as long as we hold
 * the buffer pin reference in xfs_buf_item_unpin().
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{ … }

/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin(). We enter this
 * function with a buffer pin count, a buffer reference and a BLI reference.
 *
 * We must drop the BLI reference before we unpin the buffer because the AIL
 * doesn't acquire a BLI reference whenever it accesses it. Therefore if the
 * refcount drops to zero, the bli could still be AIL resident and the buffer
 * submitted for I/O at any point before we return. This can result in IO
 * completion freeing the buffer while we are still trying to access it here.
 * This race condition can also occur in shutdown situations where we abort and
 * unpin buffers from contexts other than journal IO completion.
 *
 * Hence we have to hold a buffer reference per pin count to ensure that the
 * buffer cannot be freed until we have finished processing the unpin
 * operation. The reference is taken in xfs_buf_item_pin(), and we must hold it
 * until we are done processing the buffer state. In the case of an abort
 * (remove = true), we re-use the current pin reference as the IO reference we
 * hand off to IO failure handling.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{ … }
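/*
 * Illustrative sketch, not part of the build: the reference pairing the two
 * comments above describe. Every pin takes one BLI reference and one buffer
 * reference; unpin drops the BLI reference first (it may be the last one) and
 * relies on the pin-time buffer reference to keep the buffer alive until the
 * unpin processing is finished. All names here are hypothetical stand-ins for
 * the real helpers.
 */
#if 0
struct example_buf;
struct example_bli {
	struct example_buf	*buf;
};

void example_bli_hold(struct example_bli *bli);
void example_bli_rele(struct example_bli *bli);
void example_buf_hold(struct example_buf *buf);
void example_buf_rele(struct example_buf *buf);
void example_buf_pin(struct example_buf *buf);
void example_buf_unpin(struct example_buf *buf);

static void
example_pin(struct example_bli *bli)
{
	example_bli_hold(bli);		/* BLI lives at least until unpin */
	example_buf_hold(bli->buf);	/* buffer outlives the final BLI ref */
	example_buf_pin(bli->buf);	/* no writeback while pinned */
}

static void
example_unpin(struct example_bli *bli)
{
	struct example_buf	*buf = bli->buf;

	example_bli_rele(bli);		/* may free the BLI... */
	example_buf_unpin(buf);		/* ...buffer may now be submitted for IO */
	example_buf_rele(buf);		/* pin-time buffer ref dropped last */
}
#endif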
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{ … }

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{ … }

/*
 * Release the buffer associated with the buf log item. If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count. It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now. This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer. This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{ … }

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_csn_t		seq)
{ … }

/*
 * This is called to find out where the oldest active copy of the buf log item
 * in the on-disk log resides now that the last log write of it completed at
 * the given lsn. We always re-log all the dirty data in a buffer, so usually
 * the latest copy in the on-disk log is the only one that matters. For those
 * cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated inodes.
 * These buffers are only relogged with the XFS_BLI_INODE_BUF flag set,
 * indicating that only the di_next_unlinked fields from the inodes in the
 * buffers will be replayed during recovery. If the original newly allocated
 * inode images have not yet been flushed when the buffer is so relogged, then
 * we need to make sure that we keep the old images in the 'active' portion of
 * the log. We do this by returning the original lsn of that transaction here
 * rather than the current one. A simplified sketch of this policy appears at
 * the end of this file.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{ … }

#ifdef DEBUG_EXPENSIVE
static int
xfs_buf_item_precommit(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{ … }
#else
#define xfs_buf_item_precommit	…
#endif

static const struct xfs_item_ops xfs_buf_item_ops = …;

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{ … }

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{ … }

/*
 * Allocate a new buf log item to go with the given buffer. Set the buffer's
 * b_log_item field to point to the new buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf		*bp,
	struct xfs_mount	*mp)
{ … }

/*
 * Mark bytes first through last inclusive as dirty in the buf item's bitmap.
 * A simplified sketch of the byte-to-chunk-bit conversion appears at the end
 * of this file.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{ … }

/*
 * Mark bytes first through last inclusive as dirty in the buf item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{ … }

/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{ … }

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{ … }

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	struct xfs_buf		*bp)
{ … }

void
xfs_buf_item_done(
	struct xfs_buf		*bp)
{ … }
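/*
 * Illustrative sketch, not part of the build: the xfs_buf_item_committed()
 * special case described above. For buffers of newly allocated inodes the
 * original LSN is returned so the initial inode images stay in the active
 * portion of the log; all other buffers just use the LSN of the latest write.
 * The structure layout and flag name below are hypothetical simplifications.
 */
#if 0
#define EXAMPLE_INODE_ALLOC	(1U << 0)	/* hypothetical stand-in flag */

struct example_item {
	unsigned int	flags;
	long long	lsn;		/* LSN of the first logged copy */
};

static long long
example_committed(struct example_item *item, long long commit_lsn)
{
	if ((item->flags & EXAMPLE_INODE_ALLOC) && item->lsn != 0)
		return item->lsn;	/* keep the original inode images active */
	return commit_lsn;		/* latest relogged copy is the one that matters */
}
#endif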
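/*
 * Illustrative sketch, not part of the build: how a first..last byte range
 * becomes dirty bits, as described for xfs_buf_item_log_segment(). Each bit
 * covers one XFS_BLF_CHUNK (128 byte) chunk of the buffer, so byte offsets
 * convert to bit numbers with XFS_BLF_SHIFT (7) and the range rounds out to
 * whole chunks. The flat word-array map and the helper name are hypothetical;
 * the real code also sets whole bitmap words at a time where it can.
 */
#if 0
static void
example_mark_range(
	unsigned int	*map,		/* hypothetical dirty bitmap */
	unsigned int	first,		/* first dirty byte, inclusive */
	unsigned int	last)		/* last dirty byte, inclusive */
{
	unsigned int	first_bit = first >> 7;	/* first >> XFS_BLF_SHIFT */
	unsigned int	last_bit = last >> 7;	/* rounds out to the chunk end */
	unsigned int	bit;

	for (bit = first_bit; bit <= last_bit; bit++)
		map[bit / 32] |= 1U << (bit % 32);
}
#endif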