linux/kernel/bpf/hashtab.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/rcupdate_wait.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
#include <linux/bpf_mem_alloc.h>

#define HTAB_CREATE_FLAG_MASK

#define BATCH_OPS(_name)

/*
 * The bucket lock has two protection scopes:
 *
 * 1) Serializing concurrent operations from BPF programs on different
 *    CPUs
 *
 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
 *
 * BPF programs can execute in any context, including perf, kprobes and
 * tracing. As there are almost no limits on where perf, kprobes and
 * tracing can be invoked from, the lock operations need to be protected
 * against deadlocks. Deadlocks can be caused by recursion and by an
 * invocation in the lock held section when functions which acquire this
 * lock are invoked from sys_bpf(). BPF recursion is prevented by
 * incrementing the per CPU variable bpf_prog_active, which prevents BPF
 * programs attached to perf events, kprobes and tracing from being invoked
 * before the prior invocation from one of these contexts has completed.
 * sys_bpf() uses the same mechanism by pinning the task to the current CPU
 * and incrementing the recursion protection across the map operation.
 *
 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
 * operations like memory allocations (even with GFP_ATOMIC) from atomic
 * contexts. This is required because even with GFP_ATOMIC the memory
 * allocator calls into code paths which acquire locks with long held lock
 * sections. To ensure deterministic behaviour, these locks are regular
 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
 * true atomic contexts on an RT kernel are the low level hardware
 * handling, scheduling, low level interrupt handling, NMIs etc. None of
 * these contexts should ever do memory allocations.
 *
 * As regular device interrupt handlers and soft interrupts are forced into
 * thread context, the existing code which does
 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
 * just works.
 *
 * In theory the BPF locks could be converted to regular spinlocks as well,
 * but the bucket locks and percpu_freelist locks can be taken from
 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
 * atomic contexts even on RT. Before the introduction of bpf_mem_alloc it
 * was only safe to use a raw spinlock for preallocated hash maps on an RT
 * kernel, because there was no memory allocation within the lock held
 * sections. However, now that the hash map has been fully converted to use
 * bpf_mem_alloc, memory allocation for non-preallocated hash maps is
 * non-synchronous, so it is safe to always use a raw spinlock for the
 * bucket lock.
 */
struct bucket {};

#define HASHTAB_MAP_LOCK_COUNT
#define HASHTAB_MAP_LOCK_MASK

struct bpf_htab {};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {};
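
/*
 * Illustrative sketch only (not taken from the in-tree implementation): how
 * a value pointer can be derived from the "htab_elem + key + value" layout
 * described above. It assumes the elided struct htab_elem ends in a
 * flexible "char key[]" member; the helper name is made up for this
 * example.
 */
static inline void *example_htab_elem_value(struct htab_elem *l, u32 key_size)
{
	/* The value is stored right after the key, rounded up to 8 bytes. */
	return l->key + round_up(key_size, 8);
}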

static inline bool htab_is_prealloc(const struct bpf_htab *htab)
{}

static void htab_init_buckets(struct bpf_htab *htab)
{}

static inline int htab_lock_bucket(const struct bpf_htab *htab,
				   struct bucket *b, u32 hash,
				   unsigned long *pflags)
{}

static inline void htab_unlock_bucket(const struct bpf_htab *htab,
				      struct bucket *b, u32 hash,
				      unsigned long flags)
{}
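
/*
 * Illustrative sketch only of the scheme described in the locking comment
 * above: a raw spinlock per bucket (so it stays a true spinlock on
 * PREEMPT_RT) plus a per-CPU recursion counter to reject re-entrancy from
 * NMI/tracing contexts instead of deadlocking. It assumes the elided
 * struct bucket has a "raw_spinlock_t raw_lock" member and the elided
 * struct bpf_htab has an "int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT]"
 * array; the function names are made up and the hash-to-counter mapping is
 * simplified.
 */
static inline int example_lock_bucket(struct bpf_htab *htab, struct bucket *b,
				      u32 hash, unsigned long *pflags)
{
	unsigned long flags;

	/* Map the bucket hash onto one of the per-CPU recursion counters. */
	hash &= HASHTAB_MAP_LOCK_MASK;

	/* Keep the per-CPU counter stable across the check below. */
	preempt_disable();
	local_irq_save(flags);
	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
		/* Recursion on this CPU: back out instead of deadlocking. */
		__this_cpu_dec(*(htab->map_locked[hash]));
		local_irq_restore(flags);
		preempt_enable();
		return -EBUSY;
	}

	raw_spin_lock(&b->raw_lock);
	*pflags = flags;
	return 0;
}

static inline void example_unlock_bucket(struct bpf_htab *htab, struct bucket *b,
					 u32 hash, unsigned long flags)
{
	hash &= HASHTAB_MAP_LOCK_MASK;
	raw_spin_unlock(&b->raw_lock);
	__this_cpu_dec(*(htab->map_locked[hash]));
	local_irq_restore(flags);
	preempt_enable();
}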

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{}

static bool htab_is_percpu(const struct bpf_htab *htab)
{}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{}

static bool htab_has_extra_elems(struct bpf_htab *htab)
{}

static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
{}

static void htab_free_prealloced_fields(struct bpf_htab *htab)
{}

static void htab_free_elems(struct bpf_htab *htab)
{}

/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 * (bucket_lock). If both locks need to be acquired together, the lock
 * order is always lru_lock -> bucket_lock and this only happens in
 * bpf_lru_list.c logic. For example, certain code paths of
 * bpf_lru_pop_free(), which is called by prealloc_lru_pop(), will
 * acquire lru_lock first and then bucket_lock.
 *
 * In hashtab.c, to avoid deadlock, acquiring bucket_lock followed by
 * lru_lock is not allowed. In such cases, bucket_lock needs to be
 * released first before acquiring lru_lock.
 */
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{}
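
/*
 * Illustrative sketch only of the lock-ordering rule above: a hashtab.c
 * path that holds bucket_lock and wants to hand an element back to the LRU
 * must drop bucket_lock first, because bpf_lru_list.c may take lru_lock and
 * the only allowed order is lru_lock -> bucket_lock. The function name is
 * made up and "htab->lru" / "elem->lru_node" are assumed members of the
 * elided structs.
 */
static void example_release_elem_to_lru(struct bpf_htab *htab, struct bucket *b,
					u32 hash, struct htab_elem *elem)
{
	unsigned long flags;

	if (htab_lock_bucket(htab, b, hash, &flags))
		return;

	/* ... unlink or replace the element under bucket_lock ... */

	/*
	 * Drop bucket_lock before calling into the LRU code, which may take
	 * lru_lock internally. Taking lru_lock while still holding
	 * bucket_lock would invert the required order and risk deadlock.
	 */
	htab_unlock_bucket(htab, b, hash, flags);
	bpf_lru_push_free(&htab->lru, &elem->lru_node);
}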

static int prealloc_init(struct bpf_htab *htab)
{}

static void prealloc_destroy(struct bpf_htab *htab)
{}

static int alloc_extra_elems(struct bpf_htab *htab)
{}

/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{}

static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{}

static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{}

/* This lookup function can only be called with the bucket lock taken. */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{}

/* Can be called without the bucket lock. It will repeat the loop in
 * the unlikely event that elements moved from one bucket into another
 * while the linked list is being walked.
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{}
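
/*
 * Illustrative sketch only of the lockless retry pattern described above:
 * walk the nulls list under RCU and, if the walk ends on a nulls marker
 * that does not encode the bucket we started from, the element we followed
 * was moved to another bucket mid-walk, so restart. It assumes the elided
 * struct htab_elem has "struct hlist_nulls_node hash_node", "u32 hash" and
 * "char key[]" members, and that n_buckets is a power of two.
 */
static struct htab_elem *example_lookup_nulls(struct hlist_nulls_head *head,
					      u32 hash, void *key,
					      u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	/* The nulls value encodes the bucket number the list belongs to. */
	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}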

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{}
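
/*
 * Illustrative sketch only of the kind of instruction sequence a
 * map_gen_lookup callback emits for the inlining described above: call
 * __htab_map_lookup_elem() directly and, when it returns a non-NULL
 * element, advance R0 past the element header and key so the program sees
 * a pointer to the value. The exact offsets depend on the elided
 * struct htab_elem layout (a "key" member is assumed here), so treat this
 * as the shape of the sequence, not the in-tree code.
 */
static int example_htab_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
	/* If R0 == NULL, skip the pointer adjustment and return NULL. */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1);
	/* R0 += offsetof(key) + round_up(key_size, 8) => &elem->value */
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}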

static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
							void *key, const bool mark)
{}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{}

static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
{}

static int htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{}

static void check_and_free_fields(struct bpf_htab *htab,
				  struct htab_elem *elem)
{}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{}

static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
{}

static bool is_map_full(struct bpf_htab *htab)
{}

static void inc_elem_count(struct bpf_htab *htab)
{}

static void dec_elem_count(struct bpf_htab *htab)
{}


static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{}

static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{}

static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{}

/* Called from syscall or from eBPF program */
static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{}

static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
{}

static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				     u64 map_flags)
{}

static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 map_flags,
					  bool onallcpus)
{}

static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					      void *value, u64 map_flags,
					      bool onallcpus)
{}

static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					void *value, u64 map_flags)
{}

static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					    void *value, u64 map_flags)
{}

/* Called from syscall or from eBPF program */
static long htab_map_delete_elem(struct bpf_map *map, void *key)
{}

static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{}

static void delete_all_elements(struct bpf_htab *htab)
{}

static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
{}

static void htab_map_free_timers_and_wq(struct bpf_map *map)
{}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{}

static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
				   struct seq_file *m)
{}

static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					     void *value, bool is_lru_map,
					     bool is_percpu, u64 flags)
{}

static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{}

static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
						  void *key, void *value,
						  u64 flags)
{}

static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
					       void *value, u64 flags)
{}

static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
						      void *key, void *value,
						      u64 flags)
{}

static int
__htab_map_lookup_and_delete_batch(struct bpf_map *map,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr,
				   bool do_delete, bool is_lru_map,
				   bool is_percpu)
{}

static int
htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{}

static int
htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{}

static int
htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
		      union bpf_attr __user *uattr)
{}

static int
htab_map_lookup_and_delete_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{}

static int
htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
				 const union bpf_attr *attr,
				 union bpf_attr __user *uattr)
{}

static int
htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
					    const union bpf_attr *attr,
					    union bpf_attr __user *uattr)
{}

static int
htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{}

static int
htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
				     const union bpf_attr *attr,
				     union bpf_attr __user *uattr)
{}

struct bpf_iter_seq_hash_map_info {};

static struct htab_elem *
bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
			   struct htab_elem *prev_elem)
{}

static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
{}

static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
{}

static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
{}

static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
{}

static int bpf_iter_init_hash_map(void *priv_data,
				  struct bpf_iter_aux_info *aux)
{}

static void bpf_iter_fini_hash_map(void *priv_data)
{}

static const struct seq_operations bpf_hash_map_seq_ops =;

static const struct bpf_iter_seq_info iter_seq_info =;

static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{}

static u64 htab_map_mem_usage(const struct bpf_map *map)
{}

BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
const struct bpf_map_ops htab_map_ops =;

const struct bpf_map_ops htab_lru_map_ops =;

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{}

/* inline bpf_map_lookup_elem() call for per-CPU hashmap */
static int htab_percpu_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{}

static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{}

static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{}

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{}

static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
					  struct seq_file *m)
{}

const struct bpf_map_ops htab_percpu_map_ops =;

const struct bpf_map_ops htab_lru_percpu_map_ops =;

static int fd_htab_map_alloc_check(union bpf_attr *attr)
{}

static void fd_htab_map_free(struct bpf_map *map)
{}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{}

static int htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{}

static void htab_of_map_free(struct bpf_map *map)
{}

const struct bpf_map_ops htab_of_maps_map_ops =;