linux/fs/bcachefs/alloc_foreground.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set_trans() allocates one or more buckets from different
 * devices in a given filesystem.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_write.h"
#include "journal.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "trace.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
					   struct mutex *lock)
{}

const char * const bch2_watermarks[] =;

/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
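
/*
 * Illustrative sketch of that rule, using the sector allocator declared later
 * in this file and the open_bucket helpers from alloc_foreground.h
 * (bch2_open_bucket_get(), bch2_open_buckets_put()). This is not the real
 * write path - see io_write.c - just the expected call sequence; c, trans, k,
 * sectors and the allocation parameters are placeholders:
 *
 *	struct write_point *wp;
 *	struct open_buckets obs = { .nr = 0 };
 *
 *	ret = bch2_alloc_sectors_start_trans(trans, target, false, write_point,
 *					     devs_have, nr_replicas,
 *					     nr_replicas_required, watermark,
 *					     flags, cl, &wp);
 *	if (ret)
 *		return ret;
 *
 *	bch2_open_bucket_get(c, wp, &obs);	// take refs that outlive wp
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *	bch2_alloc_sectors_done(c, wp);		// unlock the write point
 *
 *	// ... submit the write, then do the btree update that makes the
 *	// new extent reachable ...
 *
 *	bch2_open_buckets_put(c, &obs);		// only now drop the refs
 */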

void bch2_reset_alloc_cursors(struct bch_fs *c)
{}

static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{}

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{}

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{}

void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{}

static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
{}

/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{}

static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
{}

static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
					      u64 bucket,
					      enum bch_watermark watermark,
					      const struct bch_alloc_v4 *a,
					      struct bucket_alloc_state *s,
					      struct closure *cl)
{}

static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
					    enum bch_watermark watermark, u64 free_entry,
					    struct bucket_alloc_state *s,
					    struct bkey_s_c freespace_k,
					    struct closure *cl)
{}

/*
 * This path is for before the freespace btree is initialized:
 *
 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
 */
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
			struct bch_dev *ca,
			enum bch_watermark watermark,
			struct bucket_alloc_state *s,
			struct closure *cl)
{}
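
/*
 * Sketch of the check implied by the comment above (simplified; the exact
 * placement in the allocator differs): on a filesystem that is still being
 * initialized, buckets below ca->new_fs_bucket_idx hold the superblock &
 * journal and must be skipped:
 *
 *	if (ca->new_fs_bucket_idx && bucket < ca->new_fs_bucket_idx)
 *		continue;
 */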

static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
						   struct bch_dev *ca,
						   enum bch_watermark watermark,
						   struct bucket_alloc_state *s,
						   struct closure *cl)
{}

static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
					 enum bch_watermark watermark,
					 enum bch_data_type data_type,
					 struct closure *cl,
					 struct bch_dev_usage *usage,
					 struct bucket_alloc_state *s,
					 struct open_bucket *ob)
{}

/**
 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
 * @trans:	transaction object
 * @ca:		device to allocate from
 * @watermark:	how important is this allocation?
 * @data_type:	BCH_DATA_journal, btree, user...
 * @cl:		if not NULL, closure to wait on if buckets aren't immediately
 *		available
 * @usage:	for secondarily returning the current device usage
 *
 * Returns:	an open_bucket on success, or an ERR_PTR() on failure.
 */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
				      struct bch_dev *ca,
				      enum bch_watermark watermark,
				      enum bch_data_type data_type,
				      struct closure *cl,
				      struct bch_dev_usage *usage)
{}

struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum bch_watermark watermark,
				      enum bch_data_type data_type,
				      struct closure *cl)
{}
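
/*
 * Example use of the single-bucket allocator above (illustrative only - the
 * real callers are the journal and the sector allocator below; the error code
 * shown is just one possibility):
 *
 *	struct open_bucket *ob =
 *		bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
 *				  BCH_DATA_user, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);	// e.g. -BCH_ERR_freelist_empty
 *
 *	// ... write to the bucket, insert a pointer to the new data ...
 *
 *	bch2_open_bucket_put(c, ob);	// drop the reference when done
 */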

static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{}

#define dev_stripe_cmp(l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{}

static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
			       struct dev_stripe_state *stripe,
			       struct bch_dev_usage *usage)
{}

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{}

static int add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned nr_replicas,
			   unsigned *nr_effective,
			   bool *have_cache,
			   unsigned flags,
			   struct open_bucket *ob)
{}

int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
		      struct open_buckets *ptrs,
		      struct dev_stripe_state *stripe,
		      struct bch_devs_mask *devs_may_alloc,
		      unsigned nr_replicas,
		      unsigned *nr_effective,
		      bool *have_cache,
		      unsigned flags,
		      enum bch_data_type data_type,
		      enum bch_watermark watermark,
		      struct closure *cl)
{}

/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */

static int bucket_alloc_from_stripe(struct btree_trans *trans,
			 struct open_buckets *ptrs,
			 struct write_point *wp,
			 struct bch_devs_mask *devs_may_alloc,
			 u16 target,
			 unsigned nr_replicas,
			 unsigned *nr_effective,
			 bool *have_cache,
			 enum bch_watermark watermark,
			 unsigned flags,
			 struct closure *cl)
{}

/* Sector allocator */

static bool want_bucket(struct bch_fs *c,
			struct write_point *wp,
			struct bch_devs_mask *devs_may_alloc,
			bool *have_cache, bool ec,
			struct open_bucket *ob)
{}

static int bucket_alloc_set_writepoint(struct bch_fs *c,
				       struct open_buckets *ptrs,
				       struct write_point *wp,
				       struct bch_devs_mask *devs_may_alloc,
				       unsigned nr_replicas,
				       unsigned *nr_effective,
				       bool *have_cache,
				       bool ec, unsigned flags)
{}

static int bucket_alloc_set_partial(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache, bool ec,
				    enum bch_watermark watermark,
				    unsigned flags)
{}

static int __open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			bool erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
			unsigned flags,
			struct closure *_cl)
{}

static int open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			unsigned erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
			unsigned flags,
			struct closure *cl)
{}

/**
 * should_drop_bucket - check if this open_bucket should go away
 * @ob:		open_bucket to predicate on
 * @c:		filesystem handle
 * @ca:		if set, we're killing buckets for a particular device
 * @ec:		if true, we're shutting down erasure coding and killing all ec
 *		open_buckets; if neither @ca nor @ec is set, every open_bucket
 *		should be dropped
 * Returns: true if we should kill this open_bucket
 *
 * We're killing open_buckets because we're shutting down a device, erasure
 * coding, or the entire filesystem - check if this open_bucket matches:
 */
static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
			       struct bch_dev *ca, bool ec)
{}
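
/*
 * Simplified sketch of the predicate documented above (the real version also
 * has to look at the devices backing an ec stripe when @ca is set):
 *
 *	if (ec)
 *		return ob->ec != NULL;
 *	if (ca)
 *		return ob->dev == ca->dev_idx;
 *	return true;	// whole-filesystem shutdown: drop everything
 */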

static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
				 bool ec, struct write_point *wp)
{}

void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
			    bool ec)
{}

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{}

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{}

static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{}

static bool try_increase_writepoints(struct bch_fs *c)
{}

static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
{}

static struct write_point *writepoint_find(struct btree_trans *trans,
					   unsigned long write_point)
{}

static noinline void
deallocate_extra_replicas(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct open_buckets *ptrs_no_use,
			  unsigned extra_replicas)
{}

/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
			     unsigned target,
			     unsigned erasure_code,
			     struct write_point_specifier write_point,
			     struct bch_devs_list *devs_have,
			     unsigned nr_replicas,
			     unsigned nr_replicas_required,
			     enum bch_watermark watermark,
			     unsigned flags,
			     struct closure *cl,
			     struct write_point **wp_ret)
{}

struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{}

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of the open buckets in @wp
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
				    bool cached)
{}

/*
 * We're done allocating from @wp for now: drop our references to any open
 * buckets that are completely full, keep the rest for future allocations, and
 * unlock the write point
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{}

static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{}

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{}

void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
{}

void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bch_dev *ca)
{}

void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
{}

static const char * const bch2_write_point_states[] =;

static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
				     struct write_point *wp)
{}

void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
{}

void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{}

void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{}

static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
{}

static inline unsigned allocator_wait_timeout(struct bch_fs *c)
{}

void __bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
{}