/* linux/arch/x86/include/asm/xor_avx.h */

/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_XOR_AVX_H
#define _ASM_X86_XOR_AVX_H

/*
 * Optimized RAID-5 checksumming functions for AVX
 *
 * Copyright (C) 2012 Intel Corporation
 * Author: Jim Kukunas <[email protected]>
 *
 * Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines
 */

#include <linux/compiler.h>
#include <asm/fpu/api.h>

/*
 * Helper macros for the unrolled XOR inner loops.
 *
 * NOTE(review): both macros expand to nothing in this copy. In the
 * upstream kernel header they expand to blocks of AVX inline asm
 * (BLOCK4 processes four 32-byte YMM-register lines, BLOCK16 sixteen).
 * The expansions appear to have been stripped during extraction —
 * restore from upstream before use; as written the xor routines below
 * would be no-ops even if their loop bodies were present.
 */
#define BLOCK4(i)

#define BLOCK16()

/*
 * XOR the contents of @p1 into @p0, @bytes bytes total (2-source variant).
 *
 * NOTE(review): the body is empty in this copy. The upstream kernel
 * version loops over the buffers with AVX YMM loads/XORs/stores inside
 * a kernel_fpu_begin()/kernel_fpu_end() section — looks like the body
 * was stripped during extraction; restore from upstream before use.
 */
static void xor_avx_2(unsigned long bytes, unsigned long * __restrict p0,
		      const unsigned long * __restrict p1)
{}

/*
 * XOR @p1 and @p2 into @p0, @bytes bytes total (3-source variant).
 *
 * NOTE(review): empty stub in this copy — upstream implements this with
 * AVX YMM registers under kernel_fpu_begin()/kernel_fpu_end(). Body
 * appears stripped during extraction; restore from upstream before use.
 */
static void xor_avx_3(unsigned long bytes, unsigned long * __restrict p0,
		      const unsigned long * __restrict p1,
		      const unsigned long * __restrict p2)
{}

/*
 * XOR @p1, @p2 and @p3 into @p0, @bytes bytes total (4-source variant).
 *
 * NOTE(review): empty stub in this copy — upstream implements this with
 * AVX YMM registers under kernel_fpu_begin()/kernel_fpu_end(). Body
 * appears stripped during extraction; restore from upstream before use.
 */
static void xor_avx_4(unsigned long bytes, unsigned long * __restrict p0,
		      const unsigned long * __restrict p1,
		      const unsigned long * __restrict p2,
		      const unsigned long * __restrict p3)
{}

/*
 * XOR @p1..@p4 into @p0, @bytes bytes total (5-source variant).
 *
 * NOTE(review): empty stub in this copy — upstream implements this with
 * AVX YMM registers under kernel_fpu_begin()/kernel_fpu_end(). Body
 * appears stripped during extraction; restore from upstream before use.
 * Also note the continuation-line indentation differs from xor_avx_2..4
 * above (kept byte-identical here).
 */
static void xor_avx_5(unsigned long bytes, unsigned long * __restrict p0,
	     const unsigned long * __restrict p1,
	     const unsigned long * __restrict p2,
	     const unsigned long * __restrict p3,
	     const unsigned long * __restrict p4)
{}

static struct xor_block_template xor_block_avx =;

/*
 * Hooks consumed by the generic xor template selection code.
 *
 * NOTE(review): both macros expand to nothing in this copy. Upstream,
 * AVX_XOR_SPEED benchmarks and registers xor_block_avx (guarded by an
 * AVX CPU-feature check), and AVX_SELECT(FASTEST) prefers the AVX
 * template over FASTEST when AVX is available. Expansions appear
 * stripped during extraction — restore from upstream before relying on
 * template selection.
 */
#define AVX_XOR_SPEED

#define AVX_SELECT(FASTEST)

#endif