linux/kernel/bpf/bpf_local_storage.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	/* Hash the selem address into one of the map's bucket lists */
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	/* The owner type (sk, task, ...) knows where its storage pointer lives */
	return smap->map.ops->map_owner_storage_ptr(owner);
}

/* A selem is linked to its owner's storage iff its snode is hashed */
static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

/* A selem is linked to its map iff its map_node is hashed in a bucket */
static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{}

static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{}

static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{}

/* Handle bpf_ma == false */
static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
				     bool vanilla_rcu)
{}

static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_map *smap,
				   bool bpf_ma, bool reuse_now)
{}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{}

/* Handle bpf_ma == false */
static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
			     bool vanilla_rcu)
{}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{}

static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{}

void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now)
{}
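
/* Illustrative sketch, not part of the upstream file: the grace-period
 * chaining that the *_trace_rcu callbacks above implement. Storage can be
 * read under rcu_read_lock_trace() (sleepable progs) as well as plain
 * rcu_read_lock(), so a lazy free must wait for a tasks-trace RCU grace
 * period first and, when that does not already imply a vanilla RCU grace
 * period, chain into call_rcu(). All example_* names are hypothetical.
 */
struct example_obj {
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}

static void example_free_trace_rcu(struct rcu_head *head)
{
	if (rcu_trace_implies_rcu_gp())
		example_free_rcu(head);	/* trace GP implied a vanilla GP */
	else
		call_rcu(head, example_free_rcu);
}

static void example_free(struct example_obj *obj)
{
	/* start with the tasks-trace grace period */
	call_rcu_tasks_trace(&obj->rcu, example_free_trace_rcu);
}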

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must also ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx
 * (see the calling-pattern sketch after this function).
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
					    struct bpf_local_storage_elem *selem,
					    bool uncharge_mem, bool reuse_now)
{}
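
/* Illustrative sketch, not part of the upstream file: the calling pattern
 * the comment above demands. The lock lives in the bpf_local_storage (the
 * owner side), so the caller resolves selem->local_storage, takes the
 * lock, re-checks linkage, and only then calls the nolock helper. The
 * helper name is hypothetical.
 */
static bool selem_unlink_storage_sketch(struct bpf_local_storage_elem *selem,
					bool reuse_now)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;
	unsigned long flags;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage =
			bpf_selem_unlink_storage_nolock(local_storage, selem,
							true, reuse_now);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	/* true: the list went empty, the bpf_local_storage should be freed */
	return free_local_storage;
}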

static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
				 struct bpf_local_storage_map *storage_smap,
				 struct bpf_local_storage_elem *selem)
{}

static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				     bool reuse_now)
{}

void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{}

static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
{}

void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
				      struct bpf_local_storage_map *smap,
				      struct bpf_local_storage_elem *selem)
{}

static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{}

int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{}

/* sk cannot be going away while it is linking a new elem
 * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0),
 * so the caller must hold a reference on the owner.
 * Otherwise, the elem would leak (and cause other memory
 * issues during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{}
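
/* Illustrative sketch, not part of the upstream file: how an owner-specific
 * frontend (sk storage, as in the comment above) keeps the owner pinned
 * across the generic update. The helper name is hypothetical; the cast works
 * because struct bpf_map is the first member of struct bpf_local_storage_map.
 */
static long sk_storage_update_sketch(struct bpf_map *map, struct sock *sk,
				     void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;

	if (!refcount_inc_not_zero(&sk->sk_refcnt))
		return -ENOENT;	/* sk is already being destroyed */

	sdata = bpf_local_storage_update(sk,
					 (struct bpf_local_storage_map *)map,
					 value, map_flags, GFP_ATOMIC);
	sock_put(sk);		/* drop the pin taken above */

	return PTR_ERR_OR_ZERO(sdata);
}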

static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{}

static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
					     u16 idx)
{}

int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{}

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{}

void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
{}

u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
{}

/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
 * A deadlock-free allocator is useful for storage whose owner PTR_TO_BTF_ID
 * a bpf prog can easily get hold of in any context, e.g. via
 * bpf_get_current_task_btf(). The task and cgroup storage fall into this
 * case. The bpf_mem_alloc reuses memory immediately. To be reuse-immediate
 * safe, the owner destruction code path needs to go through an RCU grace
 * period before calling bpf_local_storage_destroy() (see the sketch after
 * this function).
 *
 * When bpf_ma == false, plain kmalloc() and kfree() are used.
 */
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool bpf_ma)
{}
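
/* Illustrative sketch, not part of the upstream file: the owner-side
 * teardown ordering that the comment above requires when bpf_ma == true.
 * Freed selems may be handed out again by bpf_mem_alloc right away, so the
 * owner defers bpf_local_storage_destroy() by one RCU grace period, after
 * which no prog can still be dereferencing this owner's storage. All
 * example_* names are hypothetical.
 */
struct example_owner {
	struct bpf_local_storage __rcu *bpf_storage;
	struct rcu_head rcu;
};

static void example_owner_free_rcu(struct rcu_head *head)
{
	struct example_owner *owner =
		container_of(head, struct example_owner, rcu);
	struct bpf_local_storage *local_storage;

	/* grace period elapsed: exclusive access to owner->bpf_storage */
	local_storage = rcu_dereference_protected(owner->bpf_storage, true);
	if (local_storage)
		bpf_local_storage_destroy(local_storage);
	kfree(owner);
}

static void example_owner_release(struct example_owner *owner)
{
	call_rcu(&owner->rcu, example_owner_free_rcu);
}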

void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter)
{}