linux/fs/btrfs/qgroup.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "tree-checker.h"

enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
{}

bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
{}

bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
{}

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{}
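
/*
 * A minimal illustrative sketch (hypothetical helper, not part of this file)
 * of what the total helper has to do, assuming the per-type reserved bytes
 * live in qgroup->rsv.values[] indexed by enum btrfs_qgroup_rsv_type:
 */
static u64 qgroup_rsv_total_sketch(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	/* Sum the reserved bytes of every reservation type. */
	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];
	return ret;
}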

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     const struct btrfs_qgroup *src)
{}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 const struct btrfs_qgroup *src)
{}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{}

static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{}

static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{}
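
/*
 * A hedged sketch of the lookup above (hypothetical helper, not the exact
 * kernel code), assuming fs_info->qgroup_tree is the rbtree and each
 * btrfs_qgroup embeds its rb_node as @node, keyed by @qgroupid:
 */
static struct btrfs_qgroup *find_qgroup_rb_sketch(
		const struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;

	while (n) {
		struct btrfs_qgroup *qgroup;

		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroupid < qgroup->qgroupid)
			n = n->rb_left;
		else if (qgroupid > qgroup->qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}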

/*
 * Add qgroup to the filesystem's qgroup tree.
 *
 * Must be called with qgroup_lock held and @prealloc preallocated.
 *
 * Ownership of @prealloc is transferred to this function, so the caller
 * must not touch @prealloc afterwards.
 */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  struct btrfs_qgroup *prealloc,
					  u64 qgroupid)
{}
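
/*
 * A hedged sketch of the insertion above (hypothetical helper; list-head and
 * rsv initialization omitted).  Note how @prealloc is consumed on both paths,
 * matching the ownership rule stated in the comment.
 */
static struct btrfs_qgroup *add_qgroup_rb_sketch(struct btrfs_fs_info *fs_info,
						 struct btrfs_qgroup *prealloc,
						 u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct btrfs_qgroup *qgroup;

		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
		if (qgroupid < qgroup->qgroupid) {
			p = &(*p)->rb_left;
		} else if (qgroupid > qgroup->qgroupid) {
			p = &(*p)->rb_right;
		} else {
			/* Already present, so @prealloc is ours to free. */
			kfree(prealloc);
			return qgroup;
		}
	}

	prealloc->qgroupid = qgroupid;
	rb_link_node(&prealloc->node, parent, p);
	rb_insert_color(&prealloc->node, &fs_info->qgroup_tree);
	return prealloc;
}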

static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{}

/*
 * Add relation specified by two qgroups.
 *
 * Must be called with qgroup_lock held.  Ownership of @prealloc is
 * transferred to this function and the caller must not touch it anymore.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the qgroups is NULL
 *         <0       other errors
 */
static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
			     struct btrfs_qgroup *member,
			     struct btrfs_qgroup *parent)
{}

/*
 * Add relation specified by two qgroup ids.
 *
 * Must be called with qgroup_lock held.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the ids does not exist
 *         <0       other errors
 */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup_list *prealloc,
			   u64 memberid, u64 parentid)
{}

/* Must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{}
#endif

static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
{}
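
/*
 * A minimal sketch of the marking (assumption: the status flags live in
 * fs_info->qgroup_flags; the real helper may also set runtime flags that
 * cancel a running rescan and stop further accounting):
 */
static void qgroup_mark_inconsistent_sketch(struct btrfs_fs_info *fs_info)
{
	/* Force a rescan before the numbers can be trusted again. */
	fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
}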

static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf, int slot,
				   struct btrfs_qgroup_status_item *ptr)
{}

/*
 * The full config is read in one go; this is only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{}

/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
{}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable().
 * The first two are single-threaded paths.  For the third one, we have
 * already set quota_root to NULL with qgroup_lock held, so it is safe to
 * clean up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
{}

/*
 * It is possible to have outstanding ordered extents which reserved bytes
 * before we disabled. We need to fully flush delalloc, ordered extents, and a
 * commit to ensure that we don't leak such reservations, only to have them
 * come back if we re-enable.
 *
 * - enable simple quotas
 * - reserve space
 * - release it, store rsv_bytes in OE
 * - disable quotas
 * - enable simple quotas (qgroup rsv are all 0)
 * - OE finishes
 * - run delayed refs
 * - free rsv_bytes, resulting in miscounting or even underflow
 */
static int flush_reservations(struct btrfs_fs_info *fs_info)
{}
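
/*
 * A hedged sketch of the flush sequence described above: flush delalloc,
 * wait for all ordered extents, then commit, so no ordered extent can carry
 * stale rsv_bytes across a disable/enable cycle.  The exact signature of the
 * ordered-extent wait helper varies across kernel versions.
 */
static int flush_reservations_sketch(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	int ret;

	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
	if (ret)
		return ret;
	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
	trans = btrfs_join_transaction(fs_info->tree_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}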

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{}

static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
{}

static void qgroup_iterator_clean(struct list_head *head)
{}

/*
 * The easy accounting case: we're updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to qgroup reservation space, which must also be
 * added/removed for the parent.
 * Otherwise, when the child later releases reservation space, the parent
 * would underflow its reservation (for the relationship-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{}
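
/*
 * A hedged sketch of the propagation above (hypothetical helper; the _cmpr
 * counters and underflow WARNs are trimmed), assuming btrfs_qgroup has an
 * @iterator list head used by qgroup_iterator_add() and a @groups list of
 * btrfs_qgroup_list linking to parents via @next_group:
 */
static void qgroup_excl_accounting_sketch(struct btrfs_fs_info *fs_info,
					  u64 ref_root,
					  struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *cur;
	LIST_HEAD(qgroup_list);
	u64 num_bytes = src->excl;

	cur = find_qgroup_rb(fs_info, ref_root);
	if (!cur)
		return;

	qgroup_iterator_add(&qgroup_list, cur);
	list_for_each_entry(cur, &qgroup_list, iterator) {
		struct btrfs_qgroup_list *glist;

		cur->rfer += sign * num_bytes;
		cur->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, cur, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, cur, src);
		qgroup_dirty(fs_info, cur);

		/* The list grows while we walk it: parents get appended. */
		list_for_each_entry(glist, &cur->groups, next_group)
			qgroup_iterator_add(&qgroup_list, glist->group);
	}
	qgroup_iterator_clean(&qgroup_list);
}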


/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, return >0 if a full rescan is needed
 * (and the INCONSISTENT flag is set).
 * Return <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   u64 src, u64 dst, int sign)
{}
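
/*
 * A hedged sketch of the quick path above, built only on helpers declared
 * earlier in this file: the child has only exclusive extents exactly when
 * excl == rfer, in which case propagating to all parents is sufficient.
 */
static int quick_update_accounting_sketch(struct btrfs_fs_info *fs_info,
					  u64 src, u64 dst, int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		return -ENOENT;

	/* Only exclusive extents: the quick update applies. */
	if (qgroup->excl == qgroup->rfer) {
		ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
		return ret < 0 ? ret : 0;
	}

	/* Shared extents exist: flag the numbers for a full rescan. */
	qgroup_mark_inconsistent(fs_info);
	return 1;
}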

/*
 * Add a relation between the @src and @dst qgroups. @prealloc is allocated by
 * the caller and ownership is transferred here (either used or freed on error).
 */
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
			      struct btrfs_qgroup_list *prealloc)
{}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{}

/*
 * Return 0 if we cannot delete the qgroup (not empty, or has children, etc).
 * Return >0 if we can delete the qgroup.
 * Return <0 for other errors during tree search.
 */
static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
{}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{}

int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
{}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{}

/*
 * Inform qgroup to trace one dirty extent, whose info is recorded in @record,
 * so qgroup can account it at transaction commit time.
 *
 * No-lock version; the caller must hold the delayed ref lock and have
 * allocated the memory, then call btrfs_qgroup_trace_extent_post() after
 * exiting the lock context.
 *
 * Return 0 for a successful insert.
 * Return >0 if the record already exists; the caller can free @record safely.
 * Errors are not possible.
 */
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{}

/*
 * Post handler after qgroup_trace_extent_nolock().
 *
 * NOTE: Currently qgroup does the expensive backref walk at transaction
 * commit time with TRANS_STATE_COMMIT_DOING, which blocks incoming new
 * transactions.
 * This is designed to allow btrfs_find_all_roots() to get a correct
 * new_roots result.
 *
 * However for old_roots there is no need to do the backref walk at that
 * time, since we search commit roots to walk backrefs and the result will
 * always be correct.
 *
 * Since the no-lock version can't do the backref walk under a spinlock, we
 * must call btrfs_qgroup_trace_extent_post() after exiting the spinlock
 * context.
 *
 * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
 * using current root, then we can move all expensive backref walk out of
 * transaction committing, but not now as qgroup accounting will be wrong again.
 */
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup_extent_record *qrecord)
{}

/*
 * Inform qgroup to trace one dirty extent, specified by @bytenr and
 * @num_bytes, so qgroup can account it at transaction commit time.
 *
 * Better encapsulated version, with memory allocation and backref walk for
 * commit roots, so this can sleep.
 *
 * Return 0 if the operation is done.
 * Return <0 for errors, like memory allocation failure or an invalid
 * parameter (NULL trans).
 */
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes)
{}

/*
 * Inform qgroup to trace all data extent items in a leaf.
 *
 * Return 0 for success.
 * Return <0 for error (ENOMEM).
 */
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root node's slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{}
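
/*
 * A simplified sketch of the walk described above (hypothetical helper;
 * locking is elided):
 */
static int adjust_slots_upwards_sketch(struct btrfs_path *path, int root_level)
{
	int level = 0;

	while (level <= root_level) {
		struct extent_buffer *eb = path->nodes[level];

		/* Advance this level; stop if slots remain to be visited. */
		if (++path->slots[level] < btrfs_header_nritems(eb))
			return 0;

		/* The root is never freed; running past it ends the search. */
		if (level == root_level)
			return 1;

		/* Node fully visited: free it and move one level up. */
		free_extent_buffer(eb);
		path->nodes[level] = NULL;
		path->slots[level] = 0;
		level++;
	}
	return 1;
}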

/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen at the highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It should act as a simplified btrfs_search_slot().
 *    The key for search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out new file extents only.
 *    So we still follow the old method of scanning all file extents in
 *    the leaf.
 *
 * This function frees us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in the reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{}

/*
 * Helper function to do recursive generation-aware depth-first search, to
 * locate all new tree blocks in a subtree of reloc tree.
 *
 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *         reloc tree
 * L2         NN (a)
 *          /    \
 * L1    OO        NN (b)
 *      /  \      /  \
 * L0  OO  OO    OO  NN
 *               (c) (d)
 * If we pass:
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * @cur_level = 1
 * @root_level = 1
 *
 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
 * the above tree blocks along with their counterparts in the file tree.
 * During the search, old tree blocks like OO(c) will be skipped, as the tree
 * block swap won't affect OO(c).
 */
static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
					   struct extent_buffer *src_eb,
					   struct btrfs_path *dst_path,
					   int cur_level, int root_level,
					   u64 last_snapshot, bool trace_leaf)
{}

static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
				struct extent_buffer *src_eb,
				struct extent_buffer *dst_eb,
				u64 last_snapshot, bool trace_leaf)
{}

/*
 * Inform qgroup to trace a whole subtree, including all its child tree
 * blocks and data.
 * The root tree block is specified by @root_eb.
 *
 * Normally used by relocation (tree block swap) and subvolume deletion.
 *
 * Return 0 for success.
 * Return <0 for errors (ENOMEM or tree search errors).
 */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)
{}

static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
{}

static void qgroup_iterator_nested_clean(struct list_head *head)
{}

#define UPDATE_NEW
#define UPDATE_OLD
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */
static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				 struct ulist *roots, struct list_head *qgroups,
				 u64 seq, int update_old)
{}
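
/*
 * A hedged sketch of the walk above (hypothetical helper), assuming the
 * @iterator/@groups members described earlier: for every root in @roots,
 * bump the old or new refcnt of its qgroup and of all ancestor qgroups,
 * collecting every touched qgroup in @qgroups for the later counter update.
 */
static void qgroup_update_refcnt_sketch(struct btrfs_fs_info *fs_info,
					struct ulist *roots,
					struct list_head *qgroups,
					u64 seq, int update_old)
{
	struct ulist_iterator uiter;
	struct ulist_node *unode;

	if (!roots)
		return;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		struct btrfs_qgroup *qg = find_qgroup_rb(fs_info, unode->val);
		LIST_HEAD(tmp);

		if (!qg)
			continue;
		qgroup_iterator_nested_add(qgroups, qg);
		qgroup_iterator_add(&tmp, qg);
		/* @tmp grows as ancestors are appended during the walk. */
		list_for_each_entry(qg, &tmp, iterator) {
			struct btrfs_qgroup_list *glist;

			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			list_for_each_entry(glist, &qg->groups, next_group)
				qgroup_iterator_add(&tmp, glist->group);
		}
		qgroup_iterator_clean(&tmp);
	}
}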

/*
 * Update qgroup rfer/excl counters.
 * The rfer update is easy, the code can explain itself.
 *
 * Excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *	|	A	|	!A	|
 *  -------------------------------------
 *  B	|	*	|	-	|
 *  -------------------------------------
 *  !B	|	+	|	**	|
 *  -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possible unchanged.
 *
 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case.
 *
 * To make the logic clear, we first use condition A and B to split
 * combination into 4 results.
 *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
 * those cases only one variant may be 0.
 *
 * Lastly, check result "**"; since there are 2 variants that may be 0, split
 * them again (2x2).
 * But this time we don't need to consider other things; the code and logic
 * are easy to understand now.
 */
static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
				   struct list_head *qgroups, u64 nr_old_roots,
				   u64 nr_new_roots, u64 num_bytes, u64 seq)
{}
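
/*
 * A hedged sketch of the excl decision table above for a single qgroup
 * (hypothetical helper; rfer handling and dirty marking omitted).
 * @cur_old/@cur_new are this qgroup's refcnts retrieved at the current
 * sequence via the get_old/new_refcnt helpers.
 */
static void qgroup_excl_update_sketch(struct btrfs_qgroup *qg,
				      u64 cur_old, u64 cur_new,
				      u64 nr_old_roots, u64 nr_new_roots,
				      u64 num_bytes)
{
	/* "-": possibly exclusive before, shared now. */
	if (cur_old == nr_old_roots && cur_new < nr_new_roots) {
		if (cur_old != 0)	/* the cur_old_roots == 0 exception */
			qg->excl -= num_bytes;
	}
	/* "+": shared before, possibly exclusive now. */
	if (cur_old < nr_old_roots && cur_new == nr_new_roots) {
		if (cur_new != 0)	/* the cur_new_roots == 0 exception */
			qg->excl += num_bytes;
	}
	/* "**": possibly unchanged, resolved by the 2x2 split on zeroes. */
	if (cur_old == nr_old_roots && cur_new == nr_new_roots) {
		if (cur_old == 0 && cur_new != 0)
			qg->excl += num_bytes;	/* untracked -> exclusive */
		else if (cur_old != 0 && cur_new == 0)
			qg->excl -= num_bytes;	/* exclusive -> untracked */
	}
	/* "*" (A && B): definitely unchanged, nothing to do. */
}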

/*
 * Check if @roots could potentially be a list of fs tree roots.
 *
 * Return 0 if it is definitely not an fs/subvol tree roots ulist.
 * Return 1 if there may be fs/subvol tree roots in the list (an empty list
 *          counts as well).
 */
static int maybe_fs_roots(struct ulist *roots)
{}
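
/*
 * A hedged sketch of the check above (assuming ulist exposes @nnodes):
 * an empty ulist may still be fs roots; otherwise roots of different tree
 * types never share an extent, so testing the first entry with is_fstree()
 * is enough.
 */
static int maybe_fs_roots_sketch(struct ulist *roots)
{
	struct ulist_iterator uiter;
	struct ulist_node *unode;

	if (!roots || roots->nnodes == 0)
		return 1;

	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter);
	if (!unode)
		return 1;
	return is_fstree(unode->val);
}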

int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots)
{}

int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{}

/*
 * Writes all changed qgroups to disk.
 * Called by the transaction commit path and the qgroup assign ioctl.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{}

int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup_inherit *inherit,
			       size_t size)
{}

static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
			       u64 inode_rootid,
			       struct btrfs_qgroup_inherit **inherit)
{}

/*
 * Check if we can skip the rescan when inheriting qgroups.  If @src has a
 * single @parent, and that @parent owns all its bytes exclusively, we can
 * skip the full rescan by just adding nodesize to the @parent's excl/rfer.
 *
 * Return <0 for fatal errors (like srcid/parentid has no qgroup).
 * Return 0 if a quick inherit is done.
 * Return >0 if a quick inherit is not possible, and a full rescan is needed.
 */
static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
					 u64 srcid, u64 parentid)
{}

/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, u64 inode_rootid,
			 struct btrfs_qgroup_inherit *inherit)
{}

static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
{}

static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
			  enum btrfs_qgroup_rsv_type type)
{}

/*
 * Free @num_bytes of reserved space with @type for a qgroup (normally a
 * level 0 qgroup).
 *
 * Will handle all higher level qgroups too.
 *
 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
 * This special case is only used for META_PERTRANS type.
 */
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{}

/*
 * Check if the leaf is the last leaf, which means all node pointers
 * are at their last position.
 */
static bool is_last_leaf(struct btrfs_path *path)
{}
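
/*
 * A hedged sketch of the check above: the leaf is the last one iff every
 * node in the path sits at its final slot.
 */
static bool is_last_leaf_sketch(struct btrfs_path *path)
{
	int i;

	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
			return false;
	}
	return true;
}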

/*
 * Returns <0 on error, 0 when more leaves are to be scanned.
 * Returns 1 when done.
 */
static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{}

static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{}

static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{}

/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{}

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{}

int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible)
{}

/*
 * This is only called from open_ctree() where we're still single-threaded,
 * thus locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{}

#define rbtree_iterate_from_safe(node, next, start)

static int qgroup_unreserve_range(struct btrfs_inode *inode,
				  struct extent_changeset *reserved, u64 start,
				  u64 len)
{}

/*
 * Try to free some space for qgroup.
 *
 * For qgroup, there are only 3 ways to free qgroup space:
 * - Flush nodatacow write
 *   Any nodatacow write will free its reserved data space at run_delalloc_range().
 *   In theory, we should only flush nodatacow inodes, but it's not yet
 *   possible, so we need to flush the whole root.
 *
 * - Wait for ordered extents
 *   When ordered extents are finished, their reserved metadata is finally
 *   converted to per_trans status, which can be freed by later commit
 *   transaction.
 *
 * - Commit transaction
 *   This would free the meta_per_trans space.
 *   In theory this shouldn't provide much space, but any extra qgroup space
 *   is better than nothing.
 */
static int try_flush_qgroup(struct btrfs_root *root)
{}
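
/*
 * A hedged sketch of the three flush steps listed above (hypothetical
 * helper; error handling trimmed, and the ordered-extent wait helper's
 * signature varies across kernel versions):
 */
static int try_flush_qgroup_sketch(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/* 1) Flush delalloc, freeing nodatacow data reservations. */
	ret = btrfs_start_delalloc_snapshot(root, true);
	if (ret < 0)
		return ret;

	/* 2) Wait for ordered extents; their meta rsv becomes per-trans. */
	btrfs_wait_ordered_extents(root, U64_MAX, NULL);

	/* 3) Commit, freeing the meta per-trans reservations. */
	trans = btrfs_attach_transaction_barrier(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans) == -ENOENT ? 0 : PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}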

static int qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{}

/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do nothing
 * if the range is already reserved.
 *
 * Return 0 for a successful reservation.
 * Return <0 for errors (including -EDQUOT).
 *
 * NOTE: This function may sleep for memory allocation, dirty page flushing and
 *	 commit transaction. So caller should not hold any dirty page locked.
 */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved_ret, u64 start,
			u64 len)
{}

/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
				     struct extent_changeset *reserved,
				     u64 start, u64 len, u64 *freed_ret)
{}

static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len,
			u64 *released, int free)
{}

/*
 * Free a reserved space range from io_tree and related qgroups
 *
 * Should be called when a range of pages gets invalidated before reaching
 * disk, or for the error cleanup case.
 * If @reserved is given, only the reserved range in [@start, @start + @len)
 * will be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			   struct extent_changeset *reserved,
			   u64 start, u64 len, u64 *freed)
{}

/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages gets written to disk and the
 * corresponding FILE_EXTENT item is inserted into the corresponding root.
 *
 * Since new qgroup accounting framework will only update qgroup numbers at
 * commit_transaction() time, its reserved space shouldn't be freed from
 * related qgroups.
 *
 * But we should release the range from io_tree, to allow further write to be
 * COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
{}

static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{}

static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type)
{}

int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce)
{}

int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce,
				bool noflush)
{}

/*
 * Per-transaction meta reservations should all be freed at transaction
 * commit time.
 */
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{}

static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
				int num_bytes)
{}

/*
 * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS.
 *
 * This is called when preallocated meta reservation needs to be used.
 * Normally after btrfs_join_transaction() call.
 */
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{}

/*
 * Check for qgroup reserved space leaks, normally at inode destruction
 * time.
 */
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
{}

void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{}

/*
 * Delete all swapped block records of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Gets called when committing one transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{}

/*
 * Add subtree roots record into @subvol_root.
 *
 * @subvol_root:	tree root of the subvolume tree that gets swapped
 * @bg:			block group under balance
 * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
 * @reloc_parent/slot:	pointer to the subtree root in reloc tree
 *			BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:	last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{}

/*
 * Check if the tree block is a subtree root, and if so do the needed
 * delayed subtree trace for qgroup.
 *
 * This is called during btrfs_cow_block().
 */
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct extent_buffer *subvol_eb)
{}

void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{}

void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
{}

int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
			      const struct btrfs_squota_delta *delta)
{}