#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "data_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "io_misc.h"
#include "io_write.h"
#include "subvolume.h"
#include "trace.h"
#include <linux/sched/mm.h>
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
/*
 * bch2_target_congested() - probabilistically decide whether reads aimed at
 * @target should be steered elsewhere because its devices look congested.
 *
 * Walks the devices in @target (or all BCH_DATA_user rw devices when the
 * target doesn't resolve to a mask), under rcu_read_lock().  Each device
 * carries a congestion score (ca->congested, timestamped in
 * ca->congested_last); the score is aged down by the time elapsed since the
 * stamp (>> 12 — presumably local_clock() nanoseconds, so ~4us units; TODO
 * confirm units), clamped at zero, and summed.
 *
 * Returns true with probability total / (nr * CONGESTED_MAX), so load is
 * shed gradually rather than via a hard threshold.  @target == 0 means "no
 * target" and is never congested.
 *
 * NOTE(review): if the device mask is empty, nr == 0 and we call
 * bch2_rand_range(0) — confirm bch2_rand_range() tolerates a zero max.
 */
static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
	const struct bch_devs_mask *devs;
	unsigned d, nr = 0, total = 0;
	u64 now = local_clock(), last;
	s64 congested;
	struct bch_dev *ca;
	if (!target)
		return false;
	rcu_read_lock();
	/* Fall back to all rw user-data devices if target has no mask */
	devs = bch2_target_to_mask(c, target) ?:
		&c->rw_devs[BCH_DATA_user];
	for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
		ca = rcu_dereference(c->devs[d]);
		if (!ca)
			continue;	/* device slot empty / going away */
		congested = atomic_read(&ca->congested);
		last = READ_ONCE(ca->congested_last);
		/* Age the score by time elapsed since it was last bumped */
		if (time_after64(now, last))
			congested -= (now - last) >> 12;
		total += max(congested, 0LL);
		nr++;
	}
	rcu_read_unlock();
	return bch2_rand_range(nr * CONGESTED_MAX) < total;
}
#else
/*
 * Stub used when CONFIG_BCACHEFS_NO_LATENCY_ACCT is set (body elided in this
 * view) — presumably always reports "not congested"; confirm in full source.
 */
static bool bch2_target_congested(struct bch_fs *c, u16 target)
{ … }
#endif
struct promote_op { … };
static const struct rhashtable_params bch_promote_params = …;
/*
 * Decide whether the extent @k being read at @pos should also be promoted
 * (rewritten to a faster tier).  Body elided in this view — presumably
 * consults @opts, read @flags and prior @failed devices; confirm against
 * full source.  Returns an int, likely 0/err convention — verify.
 */
static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
struct bpos pos,
struct bch_io_opts opts,
unsigned flags,
struct bch_io_failures *failed)
{ … }
/* Tear down and release a promote_op (body elided in this view). */
static void promote_free(struct bch_fs *c, struct promote_op *op)
{ … }
/*
 * Write completion hook for a promote: @wop is the embedded bch_write_op
 * (body elided in this view — presumably recovers the promote_op via
 * container_of and frees it; confirm).
 */
static void promote_done(struct bch_write_op *wop)
{ … }
/* Kick off the promote write once the backing read @rbio has the data (body elided in this view). */
static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{ … }
/*
 * Allocate and initialize a promote_op for @sectors of extent @k at @pos,
 * picked from @pick.  On success *@rbio is presumably set to the read bio
 * that will feed the promote write — confirm against full source (body
 * elided in this view).  Returns the new op or NULL/ERR_PTR on failure —
 * verify error convention.
 */
static struct promote_op *__promote_alloc(struct btree_trans *trans,
enum btree_id btree_id,
struct bkey_s_c k,
struct bpos pos,
struct extent_ptr_decoded *pick,
struct bch_io_opts opts,
unsigned sectors,
struct bch_read_bio **rbio,
struct bch_io_failures *failed)
{ … }
/*
 * Entry point for setting up a promote during a read: decides (via
 * should_promote, presumably) whether to promote, then calls
 * __promote_alloc.  Out-params *@bounce and *@read_full likely tell the
 * caller to bounce-buffer and/or read the whole extent so the promote can
 * use the data — confirm against full source (body elided in this view).
 * noinline: keeps this off the fast read path's stack/icache.
 */
noinline
static struct promote_op *promote_alloc(struct btree_trans *trans,
struct bvec_iter iter,
struct bkey_s_c k,
struct extent_ptr_decoded *pick,
struct bch_io_opts opts,
unsigned flags,
struct bch_read_bio **rbio,
bool *bounce,
bool *read_full,
struct bch_io_failures *failed)
{ … }
/* Read-error disposition codes (values elided in this view): retry avoiding
 * the failed device, plain retry, or hard error — confirm semantics. */
#define READ_RETRY_AVOID …
#define READ_RETRY …
#define READ_ERR …
enum rbio_context { … };
/* Walk up to the parent/original read bio of a split @rbio (body elided in this view). */
static inline struct bch_read_bio *
bch2_rbio_parent(struct bch_read_bio *rbio)
{ … }
/*
 * Queue @fn for @rbio on workqueue @wq in the given @context (body elided in
 * this view — presumably only punts if not already in a suitable context;
 * confirm).
 */
__always_inline
static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
enum rbio_context context,
struct workqueue_struct *wq)
{ … }
/* Free a split read bio; returns its parent — presumably for the caller to
 * continue completion on (body elided in this view; confirm). */
static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
{ … }
/* Complete @rbio back to its submitter (body elided in this view). */
static void bch2_rbio_done(struct bch_read_bio *rbio)
{ … }
/*
 * Retry a BCH_READ_NODECODE read over @bvec_iter, avoiding devices recorded
 * in @failed (body elided in this view — confirm exact retry policy against
 * full source).
 */
static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
struct bvec_iter bvec_iter,
struct bch_io_failures *failed,
unsigned flags)
{ … }
/* Workqueue entry point for retrying a failed read; @work is embedded in the
 * read bio (body elided in this view). */
static void bch2_rbio_retry(struct work_struct *work)
{ … }
/*
 * Record @error on @rbio and act on the @retry disposition (one of
 * READ_RETRY_AVOID / READ_RETRY / READ_ERR, presumably — body elided in
 * this view).
 */
static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
blk_status_t error)
{ … }
/*
 * Transactional helper: rewrite the extent's checksum to cover only the
 * region read ("narrowing" the crc) so future reads avoid a bounce —
 * presumably; body elided in this view, confirm against full source.
 */
static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
struct bch_read_bio *rbio)
{ … }
/* Non-transactional wrapper around __bch2_rbio_narrow_crcs — presumably runs
 * it in a btree_trans with commit/retry; body elided in this view. */
static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
{ … }
/* Heavy half of read completion (checksum verify / decrypt / decompress,
 * presumably), run from process context via punt; body elided in this view. */
static void __bch2_read_endio(struct work_struct *work)
{ … }
/* bio endio callback for reads — presumably checks for device error and
 * punts to __bch2_read_endio; body elided in this view. */
static void bch2_read_endio(struct bio *bio)
{ … }
/*
 * Resolve a reflink pointer in *@orig_k to the indirect extent it points at,
 * adjusting *@offset_into_extent — presumably; body elided in this view.
 * Returns 0 or a negative error — verify convention against full source.
 */
int __bch2_read_indirect_extent(struct btree_trans *trans,
unsigned *offset_into_extent,
struct bkey_buf *orig_k)
{ … }
/*
 * Error-reporting path for reading via a pointer whose bucket generation is
 * stale while still dirty — presumably logs/flags filesystem inconsistency;
 * body elided in this view.
 */
static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
struct bch_dev *ca,
struct bkey_s_c k,
struct bch_extent_ptr ptr)
{ … }
/*
 * Core extent read: pick a replica of @k, set up (possibly bounced/split)
 * bios and submit the read for @iter at @read_pos — presumably also wiring
 * up promote and retry state via @failed/@flags; body elided in this view.
 * Returns 0 or an error/retry code — verify convention against full source.
 */
int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
struct bvec_iter iter, struct bpos read_pos,
enum btree_id data_btree, struct bkey_s_c k,
unsigned offset_into_extent,
struct bch_io_failures *failed, unsigned flags)
{ … }
/*
 * Top-level read loop for @inum: walks extents covering @bvec_iter and
 * issues __bch2_read_extent for each — presumably inside a btree_trans with
 * subvolume snapshot resolution; body elided in this view, confirm.
 */
void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
struct bvec_iter bvec_iter, subvol_inum inum,
struct bch_io_failures *failed, unsigned flags)
{ … }
/* Tear down read-path state on @c (promote table / bioset, presumably);
 * body elided in this view. */
void bch2_fs_io_read_exit(struct bch_fs *c)
{ … }
/* Initialize read-path state on @c; returns 0 or a negative error —
 * presumably -ENOMEM on allocation failure; body elided in this view. */
int bch2_fs_io_read_init(struct bch_fs *c)
{ … }