linux/drivers/net/ethernet/netronome/nfp/bpf/jit.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* Foreach "multiple" entries macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)
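
/* A minimal sketch of what such a multi-cursor walk can look like on a
 * plain doubly linked list. This is a hypothetical illustration, not the
 * elided driver macros above; it assumes <linux/list.h> is available.
 * "next" is recomputed from "pos" on every step, which is why the loop
 * body may safely replace the element "next" points at, but not "pos".
 */
#define sketch_walk2(head, pos, next, member)				\
	for (pos = list_first_entry(head, typeof(*pos), member),	\
	     next = list_next_entry(pos, member);			\
	     &pos->member != (head) && &next->member != (head);		\
	     pos = list_next_entry(pos, member),			\
	     next = list_next_entry(pos, member))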

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{}

static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{}

static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{}

static void
__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	      u8 defer, bool dst_lmextn, bool src_lmextn)
{}

static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
{}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{}

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{}

static void
__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
	   enum mul_type type, enum mul_step step, u16 breg, bool swap,
	   bool wr_both, bool dst_lmextn, bool src_lmextn)
{}

static void
emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
	 enum mul_step step, swreg rreg)
{}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{}

/* The CSR value is read by a following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{}

static void emit_nop(struct nfp_prog *nfp_prog)
{}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{}

static void
wrp_zext(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst)
{}

static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return it;
 * otherwise load @imm into the spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return it;
 * otherwise load @imm into the spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{}
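
/* A host-side sketch of the encode-or-spill decision both helpers above
 * document. SKETCH_IMM_MAX and the plain u32 "register" are hypothetical
 * stand-ins for the real ISA operand limit and the swreg machinery.
 */
#define SKETCH_IMM_MAX	0xff	/* assumed operand-encodable maximum */

static inline u32 sketch_load_imm_any(u32 imm, u32 *tmp_reg, bool *used_tmp)
{
	if (imm <= SKETCH_IMM_MAX) {
		/* Small enough - encode directly in the operand. */
		*used_tmp = false;
		return imm;
	}
	/* Too big - materialize into the spare register and use that. */
	*tmp_reg = imm;
	*used_tmp = true;
	return *tmp_reg;
}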

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src and write
 * the result to @dst, aligned to the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src and
 * OR the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{}
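
/* Host-side reference models (illustrative only) of the two subpart helpers
 * above, with plain u32 values standing in for NFP registers.
 */
static inline u32 ref_reg_subpart(u32 src, u8 field_len, u8 offset)
{
	u32 mask = field_len >= 4 ? ~0U : (1U << (field_len * 8)) - 1;

	/* Take @field_len bytes starting at byte @offset, low-aligned. */
	return (src >> (offset * 8)) & mask;
}

static inline u32 ref_reg_or_subpart(u32 dst, u32 src, u8 field_len, u8 offset)
{
	u32 mask = field_len >= 4 ? ~0U : (1U << (field_len * 8)) - 1;

	/* OR @field_len low bytes of @src into @dst at byte @offset,
	 * leaving all other bits of @dst untouched.
	 */
	return dst | ((src & mask) << (offset * 8));
}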

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{}

/* NFP has a Command Push Pull (CPP) bus which supports bulk memory
 * operations.
 */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, swreg offset,
	u8 dst_gpr, int size)
{}

static int
data_ld_host_order(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		   u8 dst_gpr, swreg lreg, swreg rreg, int size,
		   enum cmd_mode mode)
{}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
			  u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
			  u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		      u16 offset, u16 src, u8 size)
{}

static int
construct_data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  u16 offset, u8 size)
{}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{}

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{}

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{}

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{}

static const struct jmp_code_map {} jmp_code_map[] = {};

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{}

static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{}

static void
wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg, bool gen_high_half)
{}

static void
wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg)
{}

static int
wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	bool gen_high_half, bool ropnd_from_reg)
{}

static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
{}
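
/* Division by a constant is typically strength-reduced to a multiply and
 * shift with a precomputed reciprocal; <linux/reciprocal_div.h>, included
 * above, implements that technique. A host-side illustration only - the
 * JIT's actual lowering into NFP multiply steps is elided here.
 */
static inline u32 sketch_div_by_const(u32 dividend, u32 divisor)
{
	/* @divisor must be non-zero and known ahead of time. */
	struct reciprocal_value rv = reciprocal_value(divisor);

	return reciprocal_divide(dividend, rv);
}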

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

/* Pseudo code:
 *   if shift_amt >= 32
 *     dst_high = dst_low << shift_amt[4:0]
 *     dst_low = 0
 *   else
 *     dst_high = (dst_high, dst_low) >> (32 - shift_amt)
 *     dst_low = dst_low << shift_amt
 *
 * Here (dst_high, dst_low) denotes the 64-bit value fed to the double
 * shift. The indirect shift will use the same logic at runtime.
 */
static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{}
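
/* A host-side reference model (illustrative only) of the pseudo code above,
 * operating on the two 32-bit halves the way the emitted code does.
 */
static inline void ref_shl_imm64(u32 *dst_hi, u32 *dst_lo, u8 shift_amt)
{
	if (shift_amt >= 32) {
		/* shift_amt & 31 == shift_amt[4:0]; a plain move for 32. */
		*dst_hi = *dst_lo << (shift_amt & 31);
		*dst_lo = 0;
	} else if (shift_amt) {
		/* Double-shift: top bits of dst_lo spill into dst_hi. */
		*dst_hi = (*dst_hi << shift_amt) |
			  (*dst_lo >> (32 - shift_amt));
		*dst_lo <<= shift_amt;
	}
}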

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

/* NOTE: for an indirect left shift, the HIGH part must be calculated first. */
static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

/* Pseudo code:
 *   if shift_amt >= 32
 *     dst_low = dst_high >> shift_amt[4:0]
 *     dst_high = 0
 *   else
 *     dst_low = (dst_high, dst_low) >> shift_amt
 *     dst_high = dst_high >> shift_amt
 *
 * dst_low is written first because it consumes the original dst_high.
 * The indirect shift will use the same logic at runtime.
 */
static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{}
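
/* A host-side reference model (illustrative only) of the pseudo code above.
 * Note dst_lo is computed first since it consumes the original dst_hi.
 */
static inline void ref_shr_imm64(u32 *dst_hi, u32 *dst_lo, u8 shift_amt)
{
	if (shift_amt >= 32) {
		*dst_lo = *dst_hi >> (shift_amt & 31);
		*dst_hi = 0;
	} else if (shift_amt) {
		*dst_lo = (*dst_hi << (32 - shift_amt)) |
			  (*dst_lo >> shift_amt);
		*dst_hi >>= shift_amt;
	}
}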

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

/* NOTE: for an indirect right shift, the LOW part must be calculated first. */
static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

/* Code logic is the same as __shr_imm64, except that ashr requires the
 * signedness bit, conveyed through the PREV_ALU result.
 */
static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{}
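
/* A host-side reference model (illustrative only) of the arithmetic
 * variant: the same structure as ref_shr_imm64 above, but the vacated
 * high bits replicate the sign bit. Assumes the usual kernel-toolchain
 * behaviour that >> on a signed value is an arithmetic shift.
 */
static inline void ref_ashr_imm64(u32 *dst_hi, u32 *dst_lo, u8 shift_amt)
{
	s32 hi = (s32)*dst_hi;

	if (shift_amt >= 32) {
		*dst_lo = (u32)(hi >> (shift_amt & 31));
		*dst_hi = (u32)(hi >> 31);	/* all sign bits */
	} else if (shift_amt) {
		*dst_lo = ((u32)hi << (32 - shift_amt)) |
			  (*dst_lo >> shift_amt);
		*dst_hi = (u32)(hi >> shift_amt);
	}
}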

static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{}

/* Like ashr_imm64, but needs to use an indirect shift. */
static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
__ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
	   u8 shift_amt)
{}

static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
__shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
	  u8 shift_amt)
{}

static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
__shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
	  u8 shift_amt)
{}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{}

static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{}

static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{}

static int
mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{}

static int
mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{}

static void
mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta)
{}

static int
mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
				     struct nfp_insn_meta *meta,
				     unsigned int size)
{}

static int
mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
				   struct nfp_insn_meta *meta,
				   unsigned int size)
{}

static int
mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta, unsigned int size)
{}

static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{}

static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    unsigned int size)
{}

static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int size)
{}

static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{}

static int
mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      unsigned int size, unsigned int ptr_off)
{}

static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{}

static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
{}

static int mem_atomic4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int mem_atomic8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static bool nfp_is_main_function(struct nfp_insn_meta *meta)
{}

static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int
nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static const instr_cb_t instr_cb[256] = {};

/* --- Assembler logic --- */
static int
nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		     struct nfp_insn_meta *jmp_dst, u32 br_idx)
{}

static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{}

static void nfp_intro(struct nfp_prog *nfp_prog)
{}

static void
nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

static void
nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{}

bool nfp_is_subprog_start(struct nfp_insn_meta *meta)
{}

static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{}

static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{}

static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog)
{}

static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
{}

static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
{}

static void nfp_outro(struct nfp_prog *nfp_prog)
{}

static int nfp_translate(struct nfp_prog *nfp_prog)
{}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{}

/* abs(insn.imm) will fit better into an unrestricted reg immediate -
 * convert add/sub of a negative number into a sub/add of a positive one.
 */
static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
{}
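
/* An illustrative sketch of the rewrite this pass performs on a single
 * BPF ALU instruction; the real pass walks the instruction list and
 * applies extra eligibility checks that are elided here.
 */
static inline void sketch_neg_add_sub(struct bpf_insn *insn)
{
	/* Caller ensures this is an ADD/SUB with a BPF_K (immediate)
	 * operand; S32_MIN is excluded as it has no positive negation.
	 */
	if (insn->imm >= 0 || insn->imm == S32_MIN)
		return;

	insn->code ^= BPF_ADD ^ BPF_SUB;	/* flip ADD <-> SUB */
	insn->imm = -insn->imm;
}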

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{}

static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{}

/* A load/store pair that forms a memory copy should look like the following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the store
 * must be the same, and load and store must also use the same access width.
 * If either addr_src or addr_dest is the stack pointer, we don't do the
 * CPP optimization, as the stack is modelled by registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{}
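
/* An illustrative sketch of the pattern check described above, on the raw
 * BPF instructions only; the real helper also consults per-instruction
 * metadata (e.g. pointer types), which is omitted here.
 */
static inline bool sketch_pair_is_memcpy(const struct bpf_insn *ld,
					 const struct bpf_insn *st)
{
	/* A copy is a memory LDX feeding a memory STX ... */
	if (BPF_CLASS(ld->code) != BPF_LDX || BPF_MODE(ld->code) != BPF_MEM)
		return false;
	if (BPF_CLASS(st->code) != BPF_STX || BPF_MODE(st->code) != BPF_MEM)
		return false;
	/* ... through the same register, at the same access width. */
	return ld->dst_reg == st->src_reg &&
	       BPF_SIZE(ld->code) == BPF_SIZE(st->code);
}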

/* Currently, we only support chaining load/store pairs if:
 *
 *  - Their address base registers are the same.
 *  - Their address offsets are in the same order.
 *  - They operate at the same memory width.
 *  - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{}
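
/* An illustrative sketch of the chaining conditions listed above; the
 * "no jump into the middle" condition needs pass-level metadata and is
 * omitted here.
 */
static inline bool sketch_pair_chains(const struct bpf_insn *ld,
				      const struct bpf_insn *st,
				      const struct bpf_insn *prev_ld,
				      const struct bpf_insn *prev_st)
{
	int stride = ld->off - prev_ld->off;

	/* Same base registers on both sides of the copy. */
	if (ld->src_reg != prev_ld->src_reg || st->dst_reg != prev_st->dst_reg)
		return false;
	/* Same access width as the previous pair. */
	if (BPF_SIZE(ld->code) != BPF_SIZE(prev_ld->code))
		return false;
	/* Offsets advance in the same order on both sides. */
	return stride == st->off - prev_st->off;
}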

/* Return TRUE if a cross memory access happens. A cross memory access means
 * the store area overlaps the load area such that a later load might read a
 * value written by a previous store; in that case we can't treat the
 * sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{}
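
/* An illustrative sketch of the underlying interval test: once the access
 * width and the number of chained pairs are known, the load and store areas
 * are contiguous byte ranges, and they overlap iff neither half-open range
 * ends before the other begins.
 */
static inline bool sketch_mem_ranges_overlap(s32 ld_start, s32 ld_end,
					     s32 st_start, s32 st_end)
{
	/* [ld_start, ld_end) vs [st_start, st_end) */
	return ld_start < st_end && st_start < ld_end;
}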

/* This pass tries to identify the following instruction sequence:
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * Such a sequence is typically generated by the compiler when lowering
 * memcpy; the NFP prefers using CPP instructions to accelerate it.
 */
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{}

static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
{}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{}

static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
{}

static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{}

static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
{}

int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{}

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
{}

bool nfp_bpf_supported_opcode(u8 code)
{}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{}