// linux/fs/bcachefs/rcu_pending.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NOTE(review): pr_fmt() expands to nothing here, which would discard the
 * format string passed to pr_*() entirely; upstream defines it as plain
 * "fmt" (no prefix).  Body looks stripped in this view — confirm upstream.
 */
#define pr_fmt(fmt)

#include <linux/generic-radix-tree.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/vmalloc.h>

#include "rcu_pending.h"
#include "darray.h"
#include "util.h"

/*
 * NOTE(review): macro body is empty in this view — presumably an iterator
 * over a fixed-size array (_i walking _a); looks stripped during
 * extraction, confirm against upstream.
 */
#define static_array_for_each(_a, _i)

/*
 * NOTE(review): enumerator list is empty in this view (an empty enum is a
 * C constraint violation) — the special sentinel values appear to have been
 * stripped; confirm against upstream.
 */
enum rcu_pending_special {};

/*
 * NOTE(review): both macros expand to nothing in this view — presumably
 * sentinel function-pointer values distinguishing kvfree_rcu() and
 * call_rcu() style items; bodies look stripped, confirm upstream.
 */
#define RCU_PENDING_KVFREE_FN
#define RCU_PENDING_CALL_RCU_FN

/*
 * NOTE(review): empty body — implementation appears stripped from this
 * view.  Presumably dispatches to get_state_synchronize_srcu(ssp) when
 * @ssp is non-NULL, else get_state_synchronize_rcu() — confirm upstream.
 * As written, falling off the end of a non-void function is UB if the
 * caller uses the result.
 */
static inline unsigned long __get_state_synchronize_rcu(struct srcu_struct *ssp)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * wraps start_poll_synchronize_srcu()/start_poll_synchronize_rcu()
 * depending on @ssp — confirm upstream.  Non-void function with no return
 * is UB if the result is used.
 */
static inline unsigned long __start_poll_synchronize_rcu(struct srcu_struct *ssp)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * wraps poll_state_synchronize_srcu()/poll_state_synchronize_rcu() to test
 * whether the grace period identified by @cookie has elapsed — confirm
 * upstream.  Non-void function with no return is UB if the result is used.
 */
static inline bool __poll_state_synchronize_rcu(struct srcu_struct *ssp, unsigned long cookie)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * wraps srcu_barrier(ssp)/rcu_barrier() depending on @ssp — confirm
 * upstream.
 */
static inline void __rcu_barrier(struct srcu_struct *ssp)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * wraps call_srcu(ssp, rhp, func)/call_rcu(rhp, func) depending on @ssp —
 * confirm upstream.
 */
static inline void __call_rcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			      rcu_callback_t func)
{}

/* NOTE(review): members stripped in this view — confirm against upstream. */
struct rcu_pending_seq {};

/* NOTE(review): members stripped in this view — confirm against upstream. */
struct rcu_pending_list {};

/* Per-CPU state; NOTE(review): members stripped in this view — confirm upstream. */
struct rcu_pending_pcpu {};

/*
 * NOTE(review): empty body — implementation appears stripped.  Name
 * suggests it tests whether @p has any queued items — confirm upstream.
 * Non-void function with no return is UB if the result is used.
 */
static bool __rcu_pending_has_pending(struct rcu_pending_pcpu *p)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * splices list @l2 onto @l1 — confirm upstream.
 */
static void rcu_pending_list_merge(struct rcu_pending_list *l1,
				   struct rcu_pending_list *l2)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * appends head @n to pending list @l — confirm upstream.
 */
static void rcu_pending_list_add(struct rcu_pending_list *l,
				 struct rcu_head *n)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * merges lists whose grace periods have elapsed into a single list on @p —
 * confirm upstream.
 */
static void merge_expired_lists(struct rcu_pending_pcpu *p)
{}

#ifndef __KERNEL__
/*
 * Userspace fallback for the kernel's kfree_bulk(): free @nr pointers
 * stored in the array @p.
 *
 * Fix: the original body called kfree(*p) without ever advancing @p,
 * which freed p[0] @nr times (double free) and leaked p[1..nr-1].
 */
static inline void kfree_bulk(size_t nr, void ** p)
{
	while (nr--)
		kfree(*p++);
}

/*
 * NOTE(review): no parameter list or expansion — a usable userspace shim
 * for local_irq_save(flags) needs to consume @flags; this looks stripped
 * during extraction, confirm against upstream.
 */
#define local_irq_save
#endif

/*
 * Slow path, kept out of line (noinline).  NOTE(review): empty body —
 * implementation appears stripped.  Presumably invokes pending->process
 * (or kvfree) on items whose grace period has elapsed, with @flags being
 * saved IRQ state — confirm upstream.
 */
static noinline void __process_finished_items(struct rcu_pending *pending,
					      struct rcu_pending_pcpu *p,
					      unsigned long flags)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * the fast-path check that calls __process_finished_items() when there is
 * work, returning whether anything was processed — confirm upstream.
 * Non-void function with no return is UB if the result is used.
 */
static bool process_finished_items(struct rcu_pending *pending,
				   struct rcu_pending_pcpu *p,
				   unsigned long flags)
{}

/*
 * Workqueue callback.  NOTE(review): empty body — implementation appears
 * stripped; presumably recovers the rcu_pending_pcpu from @work and
 * processes finished items — confirm upstream.
 */
static void rcu_pending_work(struct work_struct *work)
{}

/*
 * RCU callback (passed to __call_rcu()).  NOTE(review): empty body —
 * implementation appears stripped; confirm against upstream.
 */
static void rcu_pending_rcu_cb(struct rcu_head *rcu)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Name
 * suggests it looks up (or allocates) the per-sequence radix tree slot for
 * grace-period cookie @seq on @p — confirm upstream.  Non-void function
 * with no return is UB if the result is used.
 */
static __always_inline struct rcu_pending_seq *
get_object_radix(struct rcu_pending_pcpu *p, unsigned long seq)
{}

/*
 * Fallback enqueue path (see __rcu_pending_enqueue(): used when radix-tree
 * allocation fails).  NOTE(review): empty body — implementation appears
 * stripped; confirm against upstream.  Non-void function with no return is
 * UB if the result is used.
 */
static noinline bool
rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq,
			 struct rcu_head *head, void *ptr,
			 unsigned long *flags)
{}

/*
 * __rcu_pending_enqueue: enqueue a pending RCU item, to be processed (via
 * pending->process) once grace period elapses.
 *
 * Attempt to enqueue items onto a radix tree; if memory allocation fails, fall
 * back to a linked list.
 *
 * - If @ptr is NULL, we're enqueuing an item for a generic @pending with a
 *   process callback
 *
 * - If @ptr and @head are both not NULL, we're kvfree_rcu()
 *
 * - If @ptr is not NULL and @head is, we're kvfree_rcu_mightsleep()
 *
 * - If @may_sleep is true, will do GFP_KERNEL memory allocations and process
 *   expired items.
 *
 * NOTE(review): function body is empty in this view — the implementation
 * appears to have been stripped during extraction; confirm upstream.
 */
static __always_inline void
__rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *head,
		      void *ptr, bool may_sleep)
{}

/*
 * Public enqueue entry point.  NOTE(review): empty body — presumably a
 * thin wrapper around __rcu_pending_enqueue(); stripped in this view,
 * confirm upstream.
 */
void rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *obj)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * removes and returns one queued item from @p, or NULL — confirm upstream.
 * Non-void function with no return is UB if the result is used.
 */
static struct rcu_head *rcu_pending_pcpu_dequeue(struct rcu_pending_pcpu *p)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Presumably
 * dequeues from the current CPU's queue only — confirm upstream.  Non-void
 * function with no return is UB if the result is used.
 */
struct rcu_head *rcu_pending_dequeue(struct rcu_pending *pending)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Name
 * suggests it scans every CPU's queue for an item — confirm upstream.
 * Non-void function with no return is UB if the result is used.
 */
struct rcu_head *rcu_pending_dequeue_from_all(struct rcu_pending *pending)
{}

/*
 * NOTE(review): empty body — implementation appears stripped.  Name
 * suggests it checks all CPUs for queued items or armed callbacks (used by
 * rcu_pending_exit() to wait for quiescence) — confirm upstream.  Non-void
 * function with no return is UB if the result is used.
 */
static bool rcu_pending_has_pending_or_armed(struct rcu_pending *pending)
{}

/*
 * Tear down a rcu_pending initialized by rcu_pending_init().
 * NOTE(review): empty body — implementation appears stripped; presumably
 * drains outstanding items and frees per-CPU state — confirm upstream.
 */
void rcu_pending_exit(struct rcu_pending *pending)
{}

/**
 * rcu_pending_init: - initialize a rcu_pending
 *
 * @pending:	Object to init
 * @srcu:	May optionally be used with an srcu_struct; if NULL, uses normal
 *		RCU flavor
 * @process:	Callback function invoked on objects once their RCU barriers
 *		have completed; if NULL, kvfree() is used.
 *
 * NOTE(review): function body is empty in this view — implementation
 * appears stripped; presumably allocates per-CPU state and returns 0 or a
 * negative errno — confirm upstream.  Non-void function with no return is
 * UB if the result is used.
 */
int rcu_pending_init(struct rcu_pending *pending,
		     struct srcu_struct *srcu,
		     rcu_pending_process_fn process)
{}