/* linux/kernel/bpf/offload.c */

/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>
#include <net/xdp.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock (lock ordering:
 * RTNL may be acquired first, then bpf_devs_lock — never the reverse).
 */
static DECLARE_RWSEM(bpf_devs_lock);

/* Per-driver offload device. NOTE(review): members are elided in this
 * copy (upstream holds the driver ops, priv pointer and netdev list) —
 * confirm against upstream kernel/bpf/offload.c.
 */
struct bpf_offload_dev {};

/* Per-netdev offload state, keyed by netdev in the offdevs rhashtable.
 * NOTE(review): members are elided in this copy — confirm against
 * upstream kernel/bpf/offload.c.
 */
struct bpf_offload_netdev {};

/* Hash-table parameters for the offdevs rhashtable below.
 * NOTE(review): the original line read "=;" — a syntax error. The real
 * initializer (key length and key/head offsets into
 * struct bpf_offload_netdev) is elided in this copy; a zeroed
 * initializer is the minimal valid placeholder. Restore the upstream
 * values from kernel/bpf/offload.c before use.
 */
static const struct rhashtable_params offdevs_params = { 0 };

/* netdev -> bpf_offload_netdev map; initialized in bpf_offload_init()
 * and guarded by bpf_devs_lock.
 */
static struct rhashtable offdevs;

/* Presumably validates that @netdev is capable of BPF offload (e.g.
 * implements ndo_bpf); returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream
 * kernel/bpf/offload.c.
 */
static int bpf_dev_offload_check(struct net_device *netdev)
{}

/* Presumably looks up @netdev's bpf_offload_netdev in the offdevs
 * rhashtable; returns NULL when not registered. Caller is expected to
 * hold bpf_devs_lock.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{}

/* Lock-held worker for bpf_offload_dev_netdev_register(): presumably
 * allocates a bpf_offload_netdev for @netdev and inserts it into the
 * offdevs table under @offdev. Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{}

/* Presumably tears down @prog's offload state (detaches from the device
 * and frees aux->offload). Caller is expected to hold bpf_devs_lock.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{}

/* Presumably forwards the map command @cmd to the backing netdev's
 * ndo_bpf callback for @offmap. Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{}

/* Presumably frees @offmap's device-side state (BPF_OFFLOAD_MAP_FREE)
 * and unlinks it. Caller is expected to hold bpf_devs_lock.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{}

/* Lock-held worker for bpf_offload_dev_netdev_unregister(): presumably
 * orphans or destroys all progs/maps bound to @netdev and removes its
 * entry from the offdevs table.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{}

/* Lock-held worker: presumably allocates @prog's offload structure and
 * binds it to @netdev. Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{}

/* Syscall-path entry: presumably resolves attr->prog_ifindex to a
 * netdev and device-binds @prog via __bpf_prog_dev_bound_init() under
 * bpf_devs_lock. Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{}

/* Presumably binds @new_prog to the same device @old_prog is bound to
 * (used when a program is replaced/extended). Returns 0 or a negative
 * errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{}

/* Verifier hook: presumably calls the offload driver's ->prepare()
 * before verification of @prog starts. Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{}

/* Verifier hook: presumably forwards per-instruction verification
 * (@insn_idx, @prev_insn_idx) to the offload driver. Returns 0 or a
 * negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{}

/* Verifier hook: presumably calls the driver's ->finalize() after
 * verification completes. Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{}

/* Verifier hook: presumably notifies the offload driver that the
 * instruction at @off is being replaced with @insn during rewriting.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{}

/* Verifier hook: presumably notifies the offload driver that @cnt
 * instructions starting at @off are being removed (dead code elim).
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{}

/* Presumably releases @prog's device binding (via
 * __bpf_prog_offload_destroy()) under bpf_devs_lock when the prog is
 * freed.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{}

/* Presumably asks the offload driver to translate the verified @prog
 * into device form (->translate()). Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{}

/* Stand-in bpf_func for offloaded progs: presumably warns if the
 * kernel ever tries to execute an offloaded program on the host CPU.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{}

/* Presumably installs bpf_prog_warn_on_exec as @prog->bpf_func and
 * triggers device translation. Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_prog_offload_compile(struct bpf_prog *prog)
{}

/* Callback context for ns_get_path_cb() in
 * bpf_prog_offload_info_fill_ns(). NOTE(review): members are elided in
 * this copy — confirm against upstream kernel/bpf/offload.c.
 */
struct ns_get_path_bpf_prog_args {};

/* ns_get_path_cb() helper: presumably resolves the net namespace of
 * the netdev a prog is offloaded to, for reporting dev/inode to
 * userspace.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{}

/* Presumably fills the offload-related fields of @info (ifindex,
 * netns dev/inode, JITed image) for BPF_OBJ_GET_INFO_BY_FD. Returns 0
 * or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{}

/* prog_ops for offloaded programs.
 * NOTE(review): the original line read "=;" — a syntax error. An empty
 * initializer is the minimal valid fix (offloaded progs run on the
 * device, so no host callbacks are set) — confirm against upstream
 * kernel/bpf/offload.c.
 */
const struct bpf_prog_ops bpf_offload_prog_ops = {};

/* Presumably allocates a bpf_offloaded_map per @attr, binds it to the
 * target netdev and issues BPF_OFFLOAD_MAP_ALLOC. Returns the map or
 * an ERR_PTR().
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{}

/* Presumably destroys the device side of @map (under bpf_devs_lock)
 * and frees the offloaded-map container.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
void bpf_map_offload_map_free(struct bpf_map *map)
{}

/* Presumably reports host-side memory charged for the offloaded @map.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{}

/* Presumably forwards an element lookup on @map to the device ops.
 * Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{}

/* Presumably forwards an element update on @map to the device ops.
 * Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{}

/* Presumably forwards an element delete on @map to the device ops.
 * Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{}

/* Presumably forwards key iteration on @map to the device ops.
 * Returns 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{}

/* Callback context for ns_get_path_cb() in
 * bpf_map_offload_info_fill_ns(). NOTE(review): members are elided in
 * this copy — confirm against upstream kernel/bpf/offload.c.
 */
struct ns_get_path_bpf_map_args {};

/* ns_get_path_cb() helper: presumably resolves the net namespace of
 * the netdev a map is offloaded to, for reporting dev/inode to
 * userspace.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{}

/* Presumably fills the offload-related fields of @info (ifindex, netns
 * dev/inode) for BPF_OBJ_GET_INFO_BY_FD on an offloaded @map. Returns
 * 0 or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{}

/* Lock-held worker: presumably reports whether @prog's bound device
 * matches @netdev (same netdev or same offload_dev). Caller is
 * expected to hold bpf_devs_lock.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{}

/* Report whether @prog is offloaded to a device matching @netdev.
 * NOTE(review): body is elided in this copy — presumably takes
 * bpf_devs_lock and delegates to __bpf_offload_dev_match(); confirm
 * against upstream kernel/bpf/offload.c.
 *
 * Fix: EXPORT_SYMBOL_GPL() was emitted with no symbol argument, which
 * does not compile; kernel convention exports the immediately
 * preceding function, so the argument is restored here.
 */
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

/* Presumably reports whether @lhs and @rhs are bound to the same
 * device (used when attaching/replacing dev-bound programs).
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{}

/* Presumably reports whether offloaded @prog and offloaded @map live
 * on the same device (a prog may only use maps on its own device).
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{}

/* Driver API: register @netdev under offload device @offdev. Returns 0
 * or a negative errno.
 * NOTE(review): body is elided in this copy — presumably takes
 * bpf_devs_lock and delegates to __bpf_offload_dev_netdev_register();
 * confirm against upstream kernel/bpf/offload.c.
 *
 * Fix: EXPORT_SYMBOL_GPL() was emitted with no symbol argument, which
 * does not compile; the conventional argument (the preceding function)
 * is restored here.
 */
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

/* Driver API: unregister @netdev from offload device @offdev.
 * NOTE(review): body is elided in this copy — presumably takes
 * bpf_devs_lock and delegates to __bpf_offload_dev_netdev_unregister();
 * confirm against upstream kernel/bpf/offload.c.
 *
 * Fix: EXPORT_SYMBOL_GPL() was emitted with no symbol argument, which
 * does not compile; the conventional argument (the preceding function)
 * is restored here.
 */
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

/* Driver API: allocate an offload device with callbacks @ops and
 * driver-private data @priv. Returns the device or an ERR_PTR().
 * NOTE(review): body is elided in this copy — confirm against upstream
 * kernel/bpf/offload.c.
 *
 * Fix: EXPORT_SYMBOL_GPL() was emitted with no symbol argument, which
 * does not compile; the conventional argument (the preceding function)
 * is restored here.
 */
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

/* Driver API: free @offdev. Presumably all netdevs must have been
 * unregistered first.
 * NOTE(review): body is elided in this copy — confirm against upstream
 * kernel/bpf/offload.c.
 *
 * Fix: EXPORT_SYMBOL_GPL() was emitted with no symbol argument, which
 * does not compile; the conventional argument (the preceding function)
 * is restored here.
 */
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

/* Driver API: return the driver-private pointer stored in @offdev at
 * bpf_offload_dev_create() time.
 * NOTE(review): body is elided in this copy — confirm against upstream
 * kernel/bpf/offload.c.
 *
 * Fix: EXPORT_SYMBOL_GPL() was emitted with no symbol argument, which
 * does not compile; the conventional argument (the preceding function)
 * is restored here.
 */
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

/* netdev-notifier path: presumably tears down all dev-bound state for
 * @dev when the netdevice is unregistered.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{}

/* Verifier helper: presumably checks that a dev-bound-only kfunc is
 * used by a dev-bound program, logging to @log on failure. Returns 0
 * or a negative errno.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{}

/* Presumably resolves @func_id to the bound netdev's implementation of
 * a dev-bound kfunc for @prog; NULL when unavailable.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{}

/* Boot-time init: presumably initializes the offdevs rhashtable with
 * offdevs_params. Registered below as a core initcall.
 * NOTE(review): body is elided in this copy — confirm against upstream.
 */
static int __init bpf_offload_init(void)
{}

core_initcall(bpf_offload_init);