// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <[email protected]>
 * Copyright (c) 2014-2015 Thomas Graf <[email protected]>
 * Copyright (c) 2008-2014 Patrick McHardy <[email protected]>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U

union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	/* Nested tables and disabled lockdep are always "held". */
	if (!debug_locks || unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static inline union nested_table *nested_table_top(
	const struct bucket_table *tbl)
{
	/* Set at the same time as tbl->nest, so no RCU protection needed. */
	return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
}

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{}

static void bucket_table_free(const struct bucket_table *tbl)
{}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{}

static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head __rcu **bkt,
				 unsigned int old_hash)
{}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{}

static int rhashtable_rehash_table(struct rhashtable *ht)
{}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is however valid to have concurrent insertions and deletions
 * protected by per-bucket locks, or concurrent RCU protected lookups
 * and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{}
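
/*
 * Illustrative sketch (an assumption, not taken verbatim from upstream):
 * the "smallest size" above amounts to sizing the new table at roughly
 * 3/2 of the current element count, rounded up to a power of two and
 * clamped to the configured minimum:
 *
 *	size = max(roundup_pow_of_two(nelems * 3 / 2), ht->p.min_size);
 */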

static void rht_deferred_worker(struct work_struct *work)
{}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head __rcu **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{}

static struct bucket_table *rhashtable_insert_one(
	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
	void *data)
{}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
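
/*
 * Illustrative sketch (not part of the original file): the walk protocol
 * described above, pairing rhashtable_walk_enter/exit with
 * rhashtable_walk_start/stop.  "struct test_obj" and "my_ht" are
 * hypothetical names; the walk calls themselves are the real API.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(resize: iterator was rewound)
 *			break;
 *		}
 *		(... use obj ...)
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */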

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
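
/*
 * Illustrative sketch (not part of the original file): handling the
 * -EAGAIN case explicitly.  "iter" is assumed to have been initialised
 * with rhashtable_walk_enter() above.
 *
 *	if (rhashtable_walk_start_check(&iter) == -EAGAIN) {
 *		(a resize occurred and the iterator was rewound, so the
 *		 walk may now see objects it already returned)
 *	}
 *	(... walk with rhashtable_walk_next(), then ...)
 *	rhashtable_walk_stop(&iter);
 */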

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
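
/*
 * Illustrative sketch (not part of the original file): peeking to decide
 * whether to consume the next object, e.g. when filling a fixed-size
 * batch.  "batch" and "batch_full()" are hypothetical.
 *
 *	obj = rhashtable_walk_peek(&iter);
 *	if (obj && !IS_ERR(obj) && batch_full(batch))
 *		break;		(leave obj for the next batch)
 *	obj = rhashtable_walk_next(&iter);
 */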

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init_noprof(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{}
EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
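
/*
 * Illustrative sketch (not part of the original file): initialising a
 * table with the fixed length key parameters from Example 1 above, then
 * inserting and looking up an element.  "my_ht", "obj", "key" and the
 * surrounding error handling are hypothetical.
 *
 *	struct rhashtable my_ht;
 *	struct test_obj *obj;
 *	int err, key = 1;
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, params);
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&my_ht, &key, params);
 *	rcu_read_unlock();
 */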

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
{}
EXPORT_SYMBOL_GPL(rhltable_init_noprof);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function may sleep while waiting for an async resize to
 * complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
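
/*
 * Illustrative sketch (not part of the original file): destroying a table
 * whose elements were allocated with kmalloc().  "free_obj" and "my_ht"
 * are hypothetical; passing NULL for @arg is fine when free_fn ignores it.
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&my_ht, free_obj, NULL);
 */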

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);