/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__
…

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../ccm.h"
#include "../nfp_asm.h"
#include "fw.h"

#define cmsg_warn(bpf, msg...) …

/* For relocation logic use up-most byte of branch instruction as scratch
 * area.  Remember to clear this before sending instructions to HW!
 */
#define OP_RELO_TYPE …

enum nfp_relo_type { … };

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO …

enum static_regs { … };

enum pkt_vec { … };

#define PKT_VEL_QSEL_SET_BIT …

#define pv_len(np) …
#define pv_ctm_ptr(np) …
#define pv_qsel_set(np) …
#define pv_qsel_val(np) …

#define stack_reg(np) …
#define stack_imm(np) …
#define plen_reg(np) …
#define pptr_reg(np) …
#define imm_a(np) …
#define imm_b(np) …
#define imma_a(np) …
#define imma_b(np) …
#define imm_both(np) …
#define ret_reg(np) …

#define NFP_BPF_ABI_FLAGS …
#define NFP_BPF_ABI_FLAG_MARK …

/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app: backpointer to the app
 * @ccm: common control message handler data
 *
 * @bpf_dev: BPF offload device handle
 *
 * @cmsg_key_sz: size of key in cmsg element array
 * @cmsg_val_sz: size of value in cmsg element array
 *
 * @map_list: list of offloaded maps
 * @maps_in_use: number of currently offloaded maps
 * @map_elems_in_use: number of elements allocated to offloaded maps
 *
 * @maps_neutral: hash table of offload-neutral maps (on pointer)
 *
 * @abi_version: global BPF ABI version
 * @cmsg_cache_cnt: number of entries to read for caching
 *
 * @adjust_head: adjust head capability
 * @adjust_head.flags: extra flags for adjust head
 * @adjust_head.off_min: minimal packet offset within buffer required
 * @adjust_head.off_max: maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub: negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add: positive adjustment guaranteed possible
 *
 * @maps: map capability
 * @maps.types: supported map types
 * @maps.max_maps: max number of maps supported
 * @maps.max_elems: max number of entries in each map
 * @maps.max_key_sz: max size of map key
 * @maps.max_val_sz: max size of map value
 * @maps.max_elem_sz: max size of map entry (key + value)
 *
 * @helpers: helper addresses for various calls
 * @helpers.map_lookup: map lookup helper address
 * @helpers.map_update: map update helper address
 * @helpers.map_delete: map delete helper address
 * @helpers.perf_event_output: output perf event to a ring buffer
 *
 * @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
 * @queue_select: BPF can set the RX queue ID in packet vector
 * @adjust_tail: BPF can simply truncate the packet size for adjust tail
 * @cmsg_multi_ent: FW can pack multiple map entries in a single cmsg
 */
struct nfp_app_bpf { … };

enum nfp_bpf_map_use { … };

struct nfp_bpf_map_word { … };

#define NFP_BPF_MAP_CACHE_CNT …
#define NFP_BPF_MAP_CACHE_TIME_NS …
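/* Illustrative sketch only, not part of the driver API: how a cached map
 * response could be checked against its expiry time, given that cached
 * entries live for NFP_BPF_MAP_CACHE_TIME_NS and struct nfp_bpf_map
 * (below) records the deadline in @cache_to.  The helper name is
 * hypothetical, and ktime_get_ns() assumes <linux/ktime.h>.
 */
static inline bool nfp_bpf_map_example_cache_valid(u64 cache_to_ns)
{
	/* A cached response is usable only before its expiry stamp. */
	return ktime_get_ns() < cache_to_ns;
}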
/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap: pointer to the offloaded BPF map
 * @bpf: back pointer to bpf app private structure
 * @tid: table id identifying map on datapath
 *
 * @cache_lock: protects @cache_blockers, @cache_to, @cache, @cache_gen
 * @cache_blockers: number of ops in flight which block caching
 * @cache_gen: counter incremented by every blocker on exit
 * @cache_to: time when cache will no longer be valid (ns)
 * @cache: skb with cached response
 *
 * @l: link on the nfp_app_bpf->map_list list
 * @use_map: map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map { … };

struct nfp_bpf_neutral_map { … };

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog) …
#define nfp_prog_last_meta(nfp_prog) …
#define nfp_meta_next(meta) …
#define nfp_meta_prev(meta) …

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state { … };

#define FLAG_INSN_IS_JUMP_DST …
#define FLAG_INSN_IS_SUBPROG_START …
#define FLAG_INSN_PTR_CALLER_STACK_FRAME …
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP …
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT …
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT …
/* Instruction needs to zero extend to high 32-bit */
#define FLAG_INSN_DO_ZEXT …

#define FLAG_INSN_SKIP_MASK …
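/* Illustrative sketch only (hypothetical helper): how translation and
 * optimization passes typically consume the FLAG_INSN_SKIP_* bits above --
 * an instruction whose @flags (see struct nfp_insn_meta below) intersect
 * FLAG_INSN_SKIP_MASK emits no machine code, whatever the skip reason.
 */
static inline bool nfp_bpf_example_insn_skipped(u32 insn_flags)
{
	/* Any of the skip reasons suppresses code generation. */
	return insn_flags & FLAG_INSN_SKIP_MASK;
}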
/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @num_insns_after_br: number of insns following a branch jump, used for fixup
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand
 * @umax_src: copy of core verifier umax_value for src operand
 * @umin_dst: copy of core verifier umin_value for dst operand
 * @umax_dst: copy of core verifier umax_value for dst operand
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @subprog_idx: index of subprogram to which the instruction belongs
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta { … };

#define BPF_SIZE_MASK …

static inline u8 mbpf_class(const struct nfp_insn_meta *meta) { … }
static inline u8 mbpf_src(const struct nfp_insn_meta *meta) { … }
static inline u8 mbpf_op(const struct nfp_insn_meta *meta) { … }
static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) { … }

static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_load(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_store(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta) { … }
static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta) { … }

#define STACK_FRAME_ALIGN …

/**
 * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
 * @stack_depth: maximum stack depth used by this sub-program
 * @needs_reg_push: whether sub-program uses callee-saved registers
 */
struct nfp_bpf_subprog_info { … };
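/* Illustrative sketch only, compiled out: nfp_bpf_example_count_loads is a
 * hypothetical pass showing the usual pattern of walking the instruction
 * wrappers via their @l list links and classifying them with the
 * is_mbpf_*() accessors above.  It references members of struct nfp_prog,
 * whose definition (with kernel-doc) only follows below, hence the #if 0
 * guard; real passes of this shape live in the driver's .c files.
 */
#if 0
static unsigned int nfp_bpf_example_count_loads(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	unsigned int cnt = 0;

	/* @l links each wrapper on the nfp_prog->insns list. */
	list_for_each_entry(meta, &nfp_prog->insns, l)
		if (is_mbpf_load(meta))
			cnt++;

	return cnt;
}
#endif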
/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @stack_size: total amount of stack used
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack
 * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_frame_depth: max stack depth for current frame
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @subprog_cnt: number of sub-programs, including main function
 * @map_records: the map record pointers from bpf->maps_neutral
 * @subprog: pointer to an array of objects holding info about sub-programs
 * @n_insns: number of instructions on @insns list
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog { … };

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog: currently loaded cls_bpf program
 * @start_off: address of the first instruction in the memory
 * @tgt_done: jump target to get the next packet
 */
struct nfp_bpf_vnic { … };

bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
			       unsigned int mtu);

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			     unsigned int len);

#endif