#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include "bpf_lru_list.h"
#define LOCAL_FREE_TARGET …
#define LOCAL_NR_SCANS …
#define PERCPU_FREE_TARGET …
#define PERCPU_NR_SCANS …
#define LOCAL_LIST_IDX(t) …
#define LOCAL_FREE_LIST_IDX …
#define LOCAL_PENDING_LIST_IDX …
#define IS_LOCAL_LIST_TYPE(t) …
/* Return the CPU id to try after @cpu — body elided; presumably advances
 * through the possible-CPU mask and wraps to the first CPU at the end
 * (TODO confirm against cpumask_next()/cpumask_first() usage). */
static int get_next_cpu(int cpu)
{ … }
/* Accessor for the per-CPU local list's free sub-list — body elided;
 * presumably indexes loc_l->lists with LOCAL_FREE_LIST_IDX (confirm). */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{ … }
/* Accessor for the per-CPU local list's pending sub-list — body elided;
 * presumably indexes loc_l->lists with LOCAL_PENDING_LIST_IDX (confirm). */
static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{ … }
/* Test whether @node has its reference bit set (i.e. was touched since it
 * was last considered for eviction) — body elided; the exact read (plain
 * vs READ_ONCE) should be confirmed against bpf_lru_list.h. */
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{ … }
/* Clear @node's reference bit so it becomes eligible for eviction again —
 * body elided; confirm whether a plain store or WRITE_ONCE is used, since
 * the ref bit may be set concurrently from lookup paths. */
static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
{ … }
/* Bump the element counter kept per list @type on @l — body elided;
 * presumably guarded by IS_LOCAL_LIST_TYPE() so only the global
 * (non-local) list types are counted (TODO confirm). */
static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
enum bpf_lru_list_type type)
{ … }
/* Counterpart of bpf_lru_list_count_inc(): drop the per-type element
 * counter on @l — body elided; expected to mirror the inc path's
 * IS_LOCAL_LIST_TYPE() filtering (TODO confirm). */
static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
enum bpf_lru_list_type type)
{ … }
/* Detach @node from its current list on @l and append it to the caller's
 * @free_list, tagging it with @tgt_free_type — body elided. NOTE(review):
 * caller is presumably expected to hold l's lock; verify, and verify how a
 * node that is the current rotation cursor (next_inactive_rotation) is
 * handled before unlinking. */
static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
struct bpf_lru_node *node,
struct list_head *free_list,
enum bpf_lru_list_type tgt_free_type)
{ … }
/* Move a node that is currently on a LOCAL list back into one of @l's
 * global lists of @tgt_type — body elided; presumably updates node->type
 * and the per-type count, and asserts the local->global direction with
 * IS_LOCAL_LIST_TYPE() (TODO confirm). */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
struct bpf_lru_node *node,
enum bpf_lru_list_type tgt_type)
{ … }
/* Move @node between two global lists of @l (e.g. inactive -> active),
 * adjusting type and counters — body elided. NOTE(review): likely also
 * clears the node's ref bit and avoids disturbing the inactive-rotation
 * cursor; confirm both against the upstream implementation. */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
struct bpf_lru_node *node,
enum bpf_lru_list_type tgt_type)
{ … }
/* Heuristic: does the inactive list hold "too few" nodes relative to the
 * active list, meaning actives should be rotated down? Body elided — the
 * exact ratio (upstream uses inactive < active/2 style math) must be
 * confirmed. */
static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{ … }
/* Scan the active list: demote unreferenced nodes to inactive, keep (and
 * re-rank) referenced ones — body elided; presumably bounded by
 * lru->nr_scans per invocation to cap the time spent under the list lock
 * (TODO confirm). */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
struct bpf_lru_list *l)
{ … }
/* Walk the inactive list from the saved rotation cursor: promote nodes
 * whose ref bit is set back to active — body elided. NOTE(review): the
 * cursor (next_inactive_rotation) persists across calls so successive
 * rotations resume where the last one stopped; confirm cursor maintenance
 * and the nr_scans bound. */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
struct bpf_lru_list *l)
{ … }
/* Try to reclaim up to @tgt_nshrink unreferenced nodes from @l's inactive
 * list into @free_list (tagged @tgt_free_type), returning how many were
 * actually moved — body elided. NOTE(review): nodes found referenced
 * during the scan are presumably promoted instead of freed; reclaim of an
 * in-use node likely requires lru->del_from_htab() to succeed first —
 * confirm both. */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
struct bpf_lru_list *l,
unsigned int tgt_nshrink,
struct list_head *free_list,
enum bpf_lru_list_type tgt_free_type)
{ … }
/* One rotation pass over @l: rotate the active list when the inactive
 * side is low (bpf_lru_list_inactive_low), then rotate the inactive list
 * — body elided; ordering assumed from the helper names, confirm. */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{ … }
/* Outer shrink: rotate @l, then call __bpf_lru_list_shrink_inactive();
 * returns the number of nodes moved to @free_list — body elided.
 * NOTE(review): upstream falls back to force-evicting one node (active or
 * inactive, ignoring the ref bit) when the inactive scan reclaims nothing,
 * so the pop path cannot starve — confirm that fallback exists here. */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
struct bpf_lru_list *l,
unsigned int tgt_nshrink,
struct list_head *free_list,
enum bpf_lru_list_type tgt_free_type)
{ … }
/* Drain @loc_l's local lists back into the global list @l — body elided;
 * presumably pending nodes return as pending/inactive and free nodes as
 * free via __bpf_lru_node_move_in() (TODO confirm which target types). */
static void __local_list_flush(struct bpf_lru_list *l,
struct bpf_lru_locallist *loc_l)
{ … }
/* Return @node to @l's global free list — body elided; expected to take
 * l's lock (irqsave variant, since BPF maps are touched from any context)
 * around the move — confirm locking discipline. */
static void bpf_lru_list_push_free(struct bpf_lru_list *l,
struct bpf_lru_node *node)
{ … }
/* Refill @loc_l's local free list from the global lists — body elided;
 * presumably flushes the local list first, grabs up to
 * lru->target_free / LOCAL_FREE_TARGET nodes from the global free list,
 * and shrinks the global LRU when not enough are available (confirm). */
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
struct bpf_lru_locallist *loc_l)
{ … }
/* Hand a node out to a user: record @cpu and @hash on @node (written via
 * the lru->hash_offset layout, presumably) and move it onto @loc_l's
 * pending list — body elided; confirm which node fields are written and
 * the list type assigned. */
static void __local_list_add_pending(struct bpf_lru *lru,
struct bpf_lru_locallist *loc_l,
int cpu,
struct bpf_lru_node *node,
u32 hash)
{ … }
/* Pop one node off @loc_l's free list, or return NULL when empty — body
 * elided; caller presumably holds loc_l's lock (confirm). */
static struct bpf_lru_node *
__local_list_pop_free(struct bpf_lru_locallist *loc_l)
{ … }
/* Steal a node from @loc_l's pending list for reuse — body elided.
 * NOTE(review): a pending node is still in the hash table, so upstream
 * requires lru->del_from_htab() to succeed (possibly ignoring the ref bit
 * on a forced second pass) before returning it; confirm that two-pass
 * force logic is present. */
static struct bpf_lru_node *
__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
{ … }
/* Pop-free path for the per-CPU LRU flavour: take a node from this CPU's
 * own bpf_lru_list, shrinking it when its free list is empty — body
 * elided; presumably runs under the per-CPU list lock with IRQs saved and
 * tags the node with @hash (TODO confirm). Returns NULL on failure. */
static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
u32 hash)
{ … }
/* Pop-free path for the common (shared) LRU flavour — body elided.
 * NOTE(review): expected flow is: try this CPU's local free list, refill
 * from the global list if empty, then steal from other CPUs' local lists
 * via get_next_cpu() as a last resort; lock-ordering between two CPUs'
 * locallists is subtle — confirm against upstream before touching. */
static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
u32 hash)
{ … }
/* Public entry: get a free node for an element hashing to @hash —
 * dispatches to the per-CPU or common implementation based on
 * lru->percpu (body elided; dispatch inferred from the two helpers
 * above, confirm). Returns NULL when no node can be obtained. */
struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
{ … }
/* Free path for the common flavour — body elided. NOTE(review): a node
 * of LOCAL_PENDING type presumably goes back to the locallist of the CPU
 * recorded in node->cpu (not necessarily the current CPU), others go to
 * the global free list; confirm the type check and cross-CPU handling. */
static void bpf_common_lru_push_free(struct bpf_lru *lru,
struct bpf_lru_node *node)
{ … }
/* Free path for the per-CPU flavour: return @node to the free list of the
 * bpf_lru_list it belongs to — body elided; presumably selects the list
 * by node->cpu and locks it irqsave (TODO confirm). */
static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
struct bpf_lru_node *node)
{ … }
/* Public entry: release @node back to the LRU — dispatches on lru->percpu
 * to the per-CPU or common push-free helper (body elided; dispatch
 * inferred from the pair above, confirm). */
void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
{ … }
/* Seed the common LRU's global free list with @nr_elems nodes embedded in
 * @buf: each element is @elem_size bytes and its bpf_lru_node lives at
 * @node_offset within the element — body elided; presumably also derives
 * lru->target_free from nr_elems (TODO confirm). */
static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
u32 node_offset, u32 elem_size,
u32 nr_elems)
{ … }
/* Per-CPU variant of populate: partition the @nr_elems nodes in @buf
 * across the per-CPU bpf_lru_lists — body elided; presumably records the
 * owning CPU in each node and splits nr_elems roughly evenly over
 * possible CPUs (TODO confirm the split math). */
static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
u32 node_offset, u32 elem_size,
u32 nr_elems)
{ … }
/* Public entry: hand the preallocated element array @buf to the LRU,
 * dispatching on lru->percpu to the common or per-CPU populate helper
 * (body elided; dispatch inferred from the pair above, confirm). */
void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
u32 elem_size, u32 nr_elems)
{ … }
/* Initialize one per-CPU locallist: its free/pending list heads, its
 * next-steal cursor (seeded from @cpu, presumably), and its lock — body
 * elided, confirm field set. */
static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
{ … }
/* Initialize a global bpf_lru_list: all per-type list heads, per-type
 * counters, the inactive-rotation cursor, and the lock — body elided,
 * confirm field set. */
static void bpf_lru_list_init(struct bpf_lru_list *l)
{ … }
/* Set up the LRU. @percpu selects the flavour; @hash_offset locates the
 * hash inside each element; @del_from_htab/@del_arg are the callback used
 * to evict an in-use element from its hash table. Returns 0 on success,
 * negative errno otherwise — body elided; the error is presumably -ENOMEM
 * from alloc_percpu() of the per-CPU lists (TODO confirm). */
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
del_from_htab_func del_from_htab, void *del_arg)
{ … }
/* Tear down what bpf_lru_init() allocated (the per-CPU structures,
 * presumably via free_percpu) — body elided. Nodes themselves live in the
 * caller-owned buffer passed to bpf_lru_populate() and are not freed
 * here; confirm ownership against the map teardown path. */
void bpf_lru_destroy(struct bpf_lru *lru)
{ … }