/* linux/fs/bcachefs/buckets.h */

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

/* Convert a device sector number to the bucket index containing it. */
static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	/* div_u64: 64/32 divide; bucket_size is a per-device member constant */
	return div_u64(s, ca->mi.bucket_size);
}

/* Convert a bucket index to the sector number of the bucket's start. */
static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	/* widen before multiply so large bucket indices don't overflow size_t math */
	return ((sector_t) b) * ca->mi.bucket_size;
}

/* Offset of sector @s within its containing bucket, in sectors. */
static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

/*
 * Convert sector @s to a bucket index; also return the offset within
 * that bucket via @offset.
 */
static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	/* div_u64_rem returns the quotient and stores the remainder in *offset */
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

/*
 * Iterate @_b over every valid bucket in a struct bucket_array,
 * starting at first_bucket (buckets below it hold the superblock).
 */
#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock in a single byte, because that's what we have left
 * in struct bucket, and we care about the size of these - during fsck, we need
 * in memory state for every single bucket on every device.
 *
 * We used to do
 *   while (xchg(&b->lock, 1) cpu_relax();
 * but, it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in the
 * first byte.
 */

/*
 * Pick the bit number so that, viewed as an unsigned long, the lock bit
 * lands in the *first byte* of struct bucket (see comment above):
 * bit 0 on little endian, the top bit on big endian.
 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

ulong_byte_assert;

static inline void bucket_unlock(struct bucket *b)
{}

static inline void bucket_lock(struct bucket *b)
{}

static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
{}

/*
 * Look up in-memory GC state for bucket @b, or NULL if @b is out of range.
 * NOTE(review): bounds check written per current upstream
 * (nbuckets_minus_first) — confirm against buckets_types.h in this tree.
 */
static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	struct bucket_array *buckets = gc_bucket_array(ca);

	if (b - buckets->first_bucket >= buckets->nbuckets_minus_first)
		return NULL;
	return buckets->b + b;
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{}

/*
 * Pointer to the generation number of bucket @b, or NULL if out of range.
 * Caller must be in an RCU read-side critical section (or hold one of the
 * locks checked in bucket_gens()).
 */
static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

/*
 * Read the generation of bucket @b under RCU.
 * NOTE(review): assumes @b is a valid bucket index (bucket_gen() non-NULL)
 * — confirm callers guarantee this.
 */
static inline u8 bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	u8 gen = *bucket_gen(ca, b);
	rcu_read_unlock();

	return gen;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{}

/*
 * Like PTR_BUCKET_POS(), but also return the sector offset within the
 * bucket via @bucket_offset.
 */
static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

/* In-memory GC bucket state for the bucket an extent pointer points into. */
static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{}

/*
 * On-disk sectors consumed by @sectors of (uncompressed) extent data:
 * scale by the compression ratio when the crc entry is compressed,
 * rounding up.
 */
static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

/*
 * Compare bucket generation numbers with wraparound: the subtraction is
 * done in 8 bits and sign-extended, so e.g. gen_cmp(0, 255) > 0.
 */
static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

/* How far generation @a is ahead of @b (0 if not ahead), with wraparound. */
static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 *
 * Returns: how many generations stale the pointer is (0 = not stale),
 * or negative if the bucket index is out of range.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();

	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev_usage *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{}

static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{}

/* __dev_buckets_available() with a fresh usage snapshot. */
static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned dev_usage_u64s(void)
{}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			  struct bkey_s_c, struct bkey_s,
			  enum btree_iter_update_trigger_flags);

/*
 * Run trigger @_fn for the overwritten key, then (if that succeeded) for
 * the inserted key, masking off the opposite trigger-direction flag for
 * each call; evaluates to the first error.
 * NOTE(review): flag names per upstream bcachefs — confirm against
 * btree_types.h in this tree.
 */
#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})

void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

/*
 * Does bucket @b overlap any superblock copy (or bucket 0, which is always
 * reserved)?  Used to keep the allocator away from superblock ranges.
 */
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		/* overlap test: not (disjoint) */
		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}

static inline const char *bch2_data_type_str(enum bch_data_type type)
{}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{}

/* Flag for disk reservations: dip into reserves rather than fail with -ENOSPC. */
#define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)

int __bch2_disk_reservation_add(struct bch_fs *,
				struct disk_reservation *,
				u64, int);

/*
 * Fast path for adding to a disk reservation: take @sectors from this
 * CPU's percpu sectors_available pool lock-free; fall back to the slow
 * path (which refills pools / checks global capacity) when the percpu
 * pool is too small.
 * NOTE(review): percpu cmpxchg loop written per upstream — confirm the
 * bch_fs_pcpu field names in this tree.
 */
static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, int flags)
{
#ifdef __KERNEL__
	u64 old, new;

	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{}

/*
 * Initialize @res and reserve @sectors * @nr_replicas sectors (each logical
 * sector is stored nr_replicas times).
 */
static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

/* Scale factor used by avail_factor(): reserve ~1/(2^6 + 1) of capacity. */
#define RESERVE_FACTOR	6

/*
 * Usable fraction of raw capacity @r after the internal reserve:
 * r * 2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1).
 */
static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */