linux/crypto/vmac.c

/*
 * VMAC: Message Authentication Code using Universal Hashing
 *
 * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
 *
 * Copyright (c) 2009, Intel Corporation.
 * Copyright (c) 2018, Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/*
 * Derived from:
 *	VMAC and VHASH Implementation by Ted Krovetz ([email protected]) and Wei Dai.
 *	This implementation is hereby placed in the public domain.
 *	The authors offer no warranty. Use at your own risk.
 *	Last modified: 17 APR 08, 1700 PDT
 */

#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>

/*
 * User definable settings.
 */
#define VMAC_TAG_LEN	64
#define VMAC_KEY_SIZE	128	/* must be 128, 192 or 256 */
#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES	128	/* must be 2^i for some 3 < i < 13; standard is 128 */
#define VMAC_NONCEBYTES	16

/* per-transform (per-key) context */
struct vmac_tfm_ctx {
	struct crypto_cipher *cipher;
	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
	u64 polykey[2*VMAC_TAG_LEN/64];
	u64 l3key[2*VMAC_TAG_LEN/64];
};

/* per-request context */
struct vmac_desc_ctx {
	union {
		u8 partial[VMAC_NHBYTES];	/* partial block */
		__le64 partial_words[VMAC_NHBYTES / 8];
	};
	unsigned int partial_size;	/* size of the partial block */
	bool first_block_processed;
	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
	union {
		u8 bytes[VMAC_NONCEBYTES];
		__be64 pads[VMAC_NONCEBYTES / 8];
	} nonce;
	unsigned int nonce_size;	/* nonce bytes filled so far */
};

/*
 * Constants and masks
 */
#define UINT64_C(x) x##ULL
static const u64 p64   = UINT64_C(0xfffffffffffffeff);	/* 2^64 - 257 prime  */
static const u64 m62   = UINT64_C(0x3fffffffffffffff);	/* 62-bit mask       */
static const u64 m63   = UINT64_C(0x7fffffffffffffff);	/* 63-bit mask       */
static const u64 m64   = UINT64_C(0xffffffffffffffff);	/* 64-bit mask       */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff);	/* Poly key mask     */

#define pe64_to_cpup le64_to_cpup	/* prefer little endian */

#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */

#define ADD128(rh, rl, ih, il)						\
	do {								\
		u64 _il = (il);						\
		(rl) += (_il);						\
		if ((rl) < (_il))					\
			(rh)++;						\
		(rh) += (ih);						\
	} while (0)

#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))

#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);	\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m >> 32), (m << 32));			\
	} while (0)

#define MUL64(rh, rl, i1, i2)						\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m1 = MUL32(_i1, _i2>>32);				\
		u64 m2 = MUL32(_i1>>32, _i2);				\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
	} while (0)
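
/*
 * Illustrative only (not part of the upstream file): how the macros above
 * compose.  mul_acc_128() accumulates the full 128-bit product a*b into a
 * (high, low) pair, which is the pattern the NH hash below is built on.
 */
static inline void mul_acc_128(u64 *hi, u64 *lo, u64 a, u64 b)
{
	u64 th, tl;

	MUL64(th, tl, a, b);		/* (th:tl) = a * b */
	ADD128(*hi, *lo, th, tl);	/* 128-bit accumulate with carry */
}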

/*
 * For highest performance the L1 NH and L2 polynomial hashes should be
 * carefully implemented to take advantage of one's target architecture.
 * Here these two hash functions are defined multiple times; once for
 * 64-bit architectures, once for 32-bit SSE2 architectures, and once
 * for all other (32-bit) architectures.
 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
 * NH computations at once).
 */
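
/*
 * Illustrative only (not part of the upstream file): the computation that
 * nh_16() performs, written as a plain function.  NH adds each 64-bit
 * message word to a key word, multiplies adjacent pairs into 128-bit
 * products, and sums the products in 128 bits; the macro forms below exist
 * to avoid any call overhead on the hot path.
 */
static inline void nh_16_sketch(const __le64 *m, const u64 *k,
				unsigned int nw, u64 *rh, u64 *rl)
{
	unsigned int i;
	u64 th, tl;

	*rh = *rl = 0;
	for (i = 0; i < nw; i += 2) {
		MUL64(th, tl, pe64_to_cpup(m + i) + k[i],
		      pe64_to_cpup(m + i + 1) + k[i + 1]);
		ADD128(*rh, *rl, th, tl);
	}
}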

#ifdef CONFIG_64BIT

#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)

#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

/*
 * Upstream unrolls the two-key variant the same way; composing two
 * nh_vmac_nhbytes passes is a simpler, functionally equivalent sketch.
 */
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)			\
	do {								\
		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh1, rl1);		\
	} while (0)
#endif

#define poly_step(ah, al, kh, kl, mh, ml)				\
	do {								\
		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;		\
		/* compute ab*cd, put bd into result registers */	\
		PMUL64(t3h, t3l, al, kh);				\
		PMUL64(t2h, t2l, ah, kl);				\
		PMUL64(t1h, t1l, ah, 2*kh);				\
		PMUL64(ah, al, al, kl);					\
		/* add 2 * ac to result */				\
		ADD128(ah, al, t1h, t1l);				\
		/* add together ad + bc */				\
		ADD128(t2h, t2l, t3h, t3l);				\
		/* now (ah,al), (t2l,2*t2h) need summing */		\
		/* first add the high registers, carrying into t2h */	\
		ADD128(t2h, ah, z, t2l);				\
		/* double t2h and add top bit of ah */			\
		t2h = 2 * t2h + (ah >> 63);				\
		ah &= m63;						\
		/* now add the low registers */				\
		ADD128(ah, al, mh, ml);					\
		ADD128(ah, al, z, t2h);					\
	} while (0)

#else /* ! CONFIG_64BIT */

#ifndef nh_16
/* Portable fallback; upstream provides a carefully tuned 32-bit version. */
#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)
#endif

static void poly_step_func(u64 *ahi, u64 *alo,
			const u64 *kh, const u64 *kl,
			const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))

	u64 p, q, t;
	u32 t2;

	p = MUL32(a3, k3);
	p += p;
	p += *(u64 *)mh;
	p += MUL32(a0, k2);
	p += MUL32(a1, k1);
	p += MUL32(a2, k0);
	t = (u32)(p);
	p >>= 32;
	p += MUL32(a0, k3);
	p += MUL32(a1, k2);
	p += MUL32(a2, k1);
	p += MUL32(a3, k0);
	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
	p >>= 31;
	p += (u64)(((u32 *)ml)[INDEX_LOW]);
	p += MUL32(a0, k0);
	q =  MUL32(a1, k3);
	q += MUL32(a2, k2);
	q += MUL32(a3, k1);
	q += q;
	p += q;
	t2 = (u32)(p);
	p >>= 32;
	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
	p += MUL32(a0, k1);
	p += MUL32(a1, k0);
	q =  MUL32(a2, k3);
	q += MUL32(a3, k2);
	q += q;
	p += q;
	*(u64 *)(alo) = (p << 32) | t2;
	p >>= 32;
	*(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}

#define poly_step(ah, al, kh, kl, mh, ml)				\
	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))

#endif  /* end of specialized NH and poly definitions */

/* At least nh_16 is defined. Define the others as needed here. */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
	do {								\
		nh_16(mp, kp, nw, rh, rl);				\
		nh_16(mp, ((kp)+2), nw, rh2, rl2);			\
	} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)			\
	do {								\
		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);		\
	} while (0)
#endif

static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
	u64 rh, rl, t, z = 0;

	/* fully reduce (p1,p2)+(len,0) mod p127 */
	t = p1 >> 63;
	p1 &= m63;
	ADD128(p1, p2, len, t);
	/* At this point, (p1,p2) is at most 2^127+(len<<64) */
	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
	ADD128(p1, p2, z, t);
	p1 &= m63;

	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
	t = p1 + (p2 >> 32);
	t += (t >> 32);
	t += (u32)t > 0xfffffffeu;
	p1 += (t >> 32);
	p2 += (p1 << 32);

	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
	p1 += k1;
	p1 += (0 - (p1 < k1)) & 257;
	p2 += k2;
	p2 += (0 - (p2 < k2)) & 257;

	/* compute (p1+k1)*(p2+k2)%p64 */
	MUL64(rh, rl, p1, p2);
	t = rh >> 56;
	ADD128(t, rl, z, rh);
	rh <<= 8;
	ADD128(t, rl, z, rh);
	t += t << 8;
	rl += t;
	rl += (0 - (rl < t)) & 257;
	rl += (0 - (rl > p64-1)) & 257;
	return rl;
}

/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
			 struct vmac_desc_ctx *dctx,
			 const __le64 *mptr, unsigned int blocks)
{
	const u64 *kptr = tctx->nhkey;
	const u64 pkh = tctx->polykey[0];
	const u64 pkl = tctx->polykey[1];
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];
	u64 rh, rl;

	if (!dctx->first_block_processed) {
		dctx->first_block_processed = true;
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		ADD128(ch, cl, rh, rl);
		mptr += (VMAC_NHBYTES/8);
		blocks--;
	}

	while (blocks--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/8);
	}

	dctx->polytmp[0] = ch;
	dctx->polytmp[1] = cl;
}

static int vmac_setkey(struct crypto_shash *tfm,
		       const u8 *key, unsigned int keylen)
{
	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
	__be64 out[2];
	u8 in[16] = { 0 };
	unsigned int i;
	int err;

	if (keylen != VMAC_KEY_LEN)
		return -EINVAL;

	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
	if (err)
		return err;

	/* Fill nh key */
	in[0] = 0x80;
	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->nhkey[i] = be64_to_cpu(out[0]);
		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
		in[15]++;
	}

	/* Fill poly key */
	in[0] = 0xC0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
		in[15]++;
	}

	/* Fill l3 key; must be smaller than p64, so retry until it is */
	in[0] = 0xE0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
		do {
			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
			tctx->l3key[i] = be64_to_cpu(out[0]);
			tctx->l3key[i+1] = be64_to_cpu(out[1]);
			in[15]++;
		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
	}

	return 0;
}

static int vmac_init(struct shash_desc *desc)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);

	dctx->partial_size = 0;
	dctx->first_block_processed = false;
	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
	dctx->nonce_size = 0;
	return 0;
}

static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int n;

	/* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
	if (dctx->nonce_size < VMAC_NONCEBYTES) {
		n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
		dctx->nonce_size += n;
		p += n;
		len -= n;
	}

	if (dctx->partial_size) {
		n = min(len, VMAC_NHBYTES - dctx->partial_size);
		memcpy(&dctx->partial[dctx->partial_size], p, n);
		dctx->partial_size += n;
		p += n;
		len -= n;
		if (dctx->partial_size == VMAC_NHBYTES) {
			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
			dctx->partial_size = 0;
		}
	}

	if (len >= VMAC_NHBYTES) {
		n = round_down(len, VMAC_NHBYTES);
		/* TODO: 'p' may be misaligned here */
		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
		p += n;
		len -= n;
	}

	if (len) {
		memcpy(dctx->partial, p, len);
		dctx->partial_size = len;
	}

	return 0;
}

static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
		       struct vmac_desc_ctx *dctx)
{
	unsigned int partial = dctx->partial_size;
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];

	/* L1 and L2-hash the final block if needed */
	if (partial) {
		/* Zero-pad to next 128-bit boundary */
		unsigned int n = round_up(partial, 16);
		u64 rh, rl;

		memset(&dctx->partial[partial], 0, n - partial);
		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
		rh &= m62;
		if (dctx->first_block_processed)
			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
				  rh, rl);
		else
			ADD128(ch, cl, rh, rl);
	}

	/* L3-hash the 128-bit output of L2-hash */
	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}

static int vmac_final(struct shash_desc *desc, u8 *out)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	int index;
	u64 hash, pad;

	if (dctx->nonce_size != VMAC_NONCEBYTES)
		return -EINVAL;

	/*
	 * The VMAC specification requires a nonce at least 1 bit shorter than
	 * the block cipher's block length, so we actually only accept a
	 * 127-bit nonce.  We define the unused bit to be the first one and
	 * require that it be 0, so the needed prepending of a 0 bit is
	 * implicit.
	 */
	if (dctx->nonce.bytes[0] & 0x80)
		return -EINVAL;

	/* Finish calculating the VHASH of the message */
	hash = vhash_final(tctx, dctx);

	/* Generate pseudorandom pad by encrypting the nonce */
	BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
	index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
	dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
	crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
				  dctx->nonce.bytes);
	pad = be64_to_cpu(dctx->nonce.pads[index]);

	/* The VMAC is the sum of VHASH and the pseudorandom pad */
	put_unaligned_be64(hash + pad, out);
	return 0;
}

static int vmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tctx->cipher = cipher;
	return 0;
}

static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->cipher);
}

static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *alg;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = shash_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_cipher_alg(spawn);

	err = -EINVAL;
	if (alg->cra_blocksize != VMAC_NONCEBYTES)
		goto err_free_inst;

	err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	inst->alg.base.cra_alignmask = alg->cra_alignmask;

	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
	inst->alg.base.cra_init = vmac_init_tfm;
	inst->alg.base.cra_exit = vmac_exit_tfm;

	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
	inst->alg.digestsize = VMAC_TAG_LEN / 8;
	inst->alg.init = vmac_init;
	inst->alg.update = vmac_update;
	inst->alg.final = vmac_final;
	inst->alg.setkey = vmac_setkey;

	inst->free = shash_free_singlespawn_instance;

	err = shash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		shash_free_singlespawn_instance(inst);
	}
	return err;
}

static struct crypto_template vmac64_tmpl = {
	.name = "vmac64",
	.create = vmac_create,
	.module = THIS_MODULE,
};

static int __init vmac_module_init(void)
{
	return crypto_register_template(&vmac64_tmpl);
}

static void __exit vmac_module_exit(void)
{
	crypto_unregister_template(&vmac64_tmpl);
}

subsys_initcall(vmac_module_init);
module_exit(vmac_module_exit);
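
/*
 * Illustrative only (not part of the upstream file): computing a VMAC tag
 * through the shash API.  "vmac64(aes)" instantiates vmac_create() above
 * with AES as the underlying cipher; per vmac_update(), the first
 * VMAC_NONCEBYTES bytes passed in are consumed as the (127-bit) nonce.
 * The function name and the all-zero demo key/nonce are ours, not the
 * kernel's.
 */
static int __maybe_unused vmac64_usage_sketch(void)
{
	static const u8 msg[] = "example message";
	u8 key[VMAC_KEY_LEN] = { 0 };	/* demo key only; use a real key */
	u8 nonce[VMAC_NONCEBYTES] = { 0 }; /* top bit of byte 0 must be 0 */
	u8 tag[VMAC_TAG_LEN / 8];
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, sizeof(key));
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_init(desc);
		if (!err)	/* the nonce goes in first... */
			err = crypto_shash_update(desc, nonce, sizeof(nonce));
		if (!err)	/* ...then the message, then read the tag */
			err = crypto_shash_finup(desc, msg, sizeof(msg) - 1,
						 tag);
	}

	crypto_free_shash(tfm);
	return err;
}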

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);