// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (C) 2016-2017 INRIA and Microsoft Corporation.
 * Copyright (C) 2018-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
 *
 * This is a machine-generated formally verified implementation of Curve25519
 * ECDH from: <https://github.com/mitls/hacl-star>. Though originally machine
 * generated, it has been tweaked to be suitable for use in the kernel. It is
 * optimized for 64-bit machines that can efficiently work with 128-bit
 * integer types.
 */

#include <asm/unaligned.h>
#include <crypto/curve25519.h>
#include <linux/string.h>

typedef __uint128_t u128;
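
/*
 * Field elements of GF(2^255 - 19) are represented in radix 2^51: five
 * u64 limbs, each nominally holding 51 bits with headroom for delayed
 * carries. Products of two limbs are accumulated in u128, which is why
 * this implementation wants a 64-bit machine with efficient 128-bit
 * arithmetic.
 */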

static __always_inline u64 u64_eq_mask(u64 a, u64 b)
{
	/* Constant-time equality: returns all-ones if a == b, else zero. */
	u64 x = a ^ b;
	u64 minus_x = ~x + (u64)1U;
	u64 x_or_minus_x = x | minus_x;
	u64 xnx = x_or_minus_x >> (u32)63U;

	return xnx - (u64)1U;
}

static __always_inline u64 u64_gte_mask(u64 a, u64 b)
{
	/* Constant-time comparison: returns all-ones if a >= b, else zero. */
	u64 x = a;
	u64 y = b;
	u64 x_xor_y = x ^ y;
	u64 x_sub_y = x - y;
	u64 x_sub_y_xor_y = x_sub_y ^ y;
	u64 q = x_xor_y | x_sub_y_xor_y;
	u64 x_xor_q = x ^ q;

	return (x_xor_q >> (u32)63U) - (u64)1U;
}

static __always_inline void modulo_carry_top(u64 *b)
{
	/* Fold the carry out of the top limb back into the bottom one,
	 * using 2^255 = 19 (mod p). */
	u64 b4 = b[4];
	u64 b0 = b[0];

	b[4] = b4 & 0x7ffffffffffffLLU;
	b[0] = b0 + 19 * (b4 >> 51);
}

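/*
 * The fproduct_* and fmul_* helpers implement schoolbook multiplication
 * over the five-limb representation: partial products accumulate in
 * wide (u128) limbs, carries propagate 51 bits at a time, and anything
 * overflowing 2^255 is folded back in multiplied by 19.
 */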
static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input)
{
	u32 i;

	/* Narrow each 128-bit limb back down to 64 bits. */
	for (i = 0; i < 5; ++i)
		output[i] = (u64)input[i];
}

static __always_inline void
fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s)
{
	u32 i;

	/* output[i] += input[i] * s, accumulating in 128 bits. */
	for (i = 0; i < 5; ++i)
		output[i] += (u128)input[i] * s;
}

static __always_inline void fproduct_carry_wide_(u128 *tmp)
{
	u32 i;

	/* Propagate carries between adjacent wide limbs, keeping 51 bits
	 * in each position. */
	for (i = 0; i < 4; ++i) {
		u128 tctr = tmp[i];
		u128 tctrp1 = tmp[i + 1];
		u64 r0 = (u64)tctr & 0x7ffffffffffffLLU;
		u128 c = tctr >> 51;

		tmp[i] = (u128)r0;
		tmp[i + 1] = tctrp1 + c;
	}
}

static __always_inline void fmul_shift_reduce(u64 *output)
{
	/* Multiply by 2^51 (shift limbs up by one); the limb that falls
	 * off the top wraps around multiplied by 19, since 2^255 = 19
	 * (mod p). */
	u64 tmp = output[4];

	output[4] = output[3];
	output[3] = output[2];
	output[2] = output[1];
	output[1] = output[0];
	output[0] = 19 * tmp;
}

static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input,
						   u64 *input21)
{
	u32 i;

	/* For each limb of input21, accumulate input * input21[i], then
	 * multiply input by 2^51 (mod p); input is clobbered, so callers
	 * pass a scratch copy. */
	for (i = 0; i < 4; ++i) {
		fproduct_sum_scalar_multiplication_(output, input, input21[i]);
		fmul_shift_reduce(input);
	}
	fproduct_sum_scalar_multiplication_(output, input, input21[4]);
}

static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21)
{
	u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] };
	u128 t[5] = { 0 };
	u128 b4;
	u128 b0;
	u64 i0;
	u64 i1;

	/* Multiply on a scratch copy so that input may alias output. */
	fmul_mul_shift_reduce_(t, tmp, input21);
	fproduct_carry_wide_(t);
	b4 = t[4];
	b0 = t[0];
	t[4] = b4 & (u128)0x7ffffffffffffLLU;
	t[0] = b0 + (u128)19 * (u64)(b4 >> 51);
	fproduct_copy_from_wide_(output, t);
	/* One extra carry from limb 0 into limb 1 keeps limbs bounded. */
	i0 = output[0];
	i1 = output[1];
	output[0] = i0 & 0x7ffffffffffffLLU;
	output[1] = i1 + (i0 >> 51);
}

static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output)
{
	u64 r0 = output[0];
	u64 r1 = output[1];
	u64 r2 = output[2];
	u64 r3 = output[3];
	u64 r4 = output[4];
	u64 d0 = r0 * 2;
	u64 d1 = r1 * 2;
	u64 d2 = r2 * 2 * 19;
	u64 d419 = r4 * 19;
	u64 d4 = d419 * 2;

	/* Wide limbs of the square, with coefficients above 2^255
	 * already folded back in via the factor 19. */
	tmp[0] = (u128)r0 * r0 + (u128)d4 * r1 + (u128)d2 * r3;
	tmp[1] = (u128)d0 * r1 + (u128)d4 * r2 + (u128)(r3 * 19) * r3;
	tmp[2] = (u128)d0 * r2 + (u128)r1 * r1 + (u128)d4 * r3;
	tmp[3] = (u128)d0 * r3 + (u128)d1 * r2 + (u128)r4 * d419;
	tmp[4] = (u128)d0 * r4 + (u128)d1 * r3 + (u128)r2 * r2;
}

static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output)
{
	u128 b4;
	u128 b0;
	u64 i0;
	u64 i1;

	fsquare_fsquare__(tmp, output);
	fproduct_carry_wide_(tmp);
	b4 = tmp[4];
	b0 = tmp[0];
	tmp[4] = b4 & (u128)0x7ffffffffffffLLU;
	tmp[0] = b0 + (u128)19 * (u64)(b4 >> 51);
	fproduct_copy_from_wide_(output, tmp);
	i0 = output[0];
	i1 = output[1];
	output[0] = i0 & 0x7ffffffffffffLLU;
	output[1] = i1 + (i0 >> 51);
}

static __always_inline void fsquare_fsquare_times_(u64 *output, u128 *tmp,
						   u32 count1)
{
	u32 i;

	/* Square the accumulator count1 times in a row. */
	for (i = 0; i < count1; ++i)
		fsquare_fsquare_(tmp, output);
}

static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input,
						  u32 count1)
{
	u128 t[5];

	memcpy(output, input, 5 * sizeof(*input));
	fsquare_fsquare_times_(output, t, count1);
}

static __always_inline void fsquare_fsquare_times_inplace(u64 *output,
							  u32 count1)
{
	u128 t[5];

	fsquare_fsquare_times_(output, t, count1);
}

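/*
 * Modular inversion via Fermat's little theorem: z^-1 = z^(p - 2) mod p.
 * The exponent 2^255 - 21 is computed with a fixed addition chain of
 * squarings and multiplications, so the running time is independent of
 * the operand.
 */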
static __always_inline void crecip_crecip(u64 *out, u64 *z)
{
	u64 buf[20] = { 0 };
	u64 *a = buf;
	u64 *t0 = buf + 5;
	u64 *b = buf + 10;
	u64 *c = buf + 15;

	fsquare_fsquare_times(a, z, 1);		/* a  = z^2 */
	fsquare_fsquare_times(t0, a, 2);	/* t0 = z^8 */
	fmul_fmul(b, t0, z);			/* b  = z^9 */
	fmul_fmul(a, b, a);			/* a  = z^11 */
	fsquare_fsquare_times(t0, a, 1);	/* t0 = z^22 */
	fmul_fmul(b, t0, b);			/* b  = z^(2^5 - 1) */
	fsquare_fsquare_times(t0, b, 5);
	fmul_fmul(b, t0, b);			/* b  = z^(2^10 - 1) */
	fsquare_fsquare_times(t0, b, 10);
	fmul_fmul(c, t0, b);			/* c  = z^(2^20 - 1) */
	fsquare_fsquare_times(t0, c, 20);
	fmul_fmul(t0, t0, c);			/* t0 = z^(2^40 - 1) */
	fsquare_fsquare_times_inplace(t0, 10);
	fmul_fmul(b, t0, b);			/* b  = z^(2^50 - 1) */
	fsquare_fsquare_times(t0, b, 50);
	fmul_fmul(c, t0, b);			/* c  = z^(2^100 - 1) */
	fsquare_fsquare_times(t0, c, 100);
	fmul_fmul(t0, t0, c);			/* t0 = z^(2^200 - 1) */
	fsquare_fsquare_times_inplace(t0, 50);
	fmul_fmul(t0, t0, b);			/* t0 = z^(2^250 - 1) */
	fsquare_fsquare_times_inplace(t0, 5);
	fmul_fmul(out, t0, a);			/* out = z^(2^255 - 21) */
}

static __always_inline void fsum(u64 *a, u64 *b)
{
	u32 i;

	/* a += b, limb-wise; the limb headroom absorbs the carries. */
	for (i = 0; i < 5; ++i)
		a[i] += b[i];
}

static __always_inline void fdifference(u64 *a, u64 *b)
{
	/* a = b - a. A multiple of p is added to b first so that every
	 * limb subtraction stays non-negative. */
	u64 tmp[5];
	u32 i;

	memcpy(tmp, b, 5 * sizeof(*b));
	tmp[0] += 0x3fffffffffff68LLU;
	tmp[1] += 0x3ffffffffffff8LLU;
	tmp[2] += 0x3ffffffffffff8LLU;
	tmp[3] += 0x3ffffffffffff8LLU;
	tmp[4] += 0x3ffffffffffff8LLU;
	for (i = 0; i < 5; ++i)
		a[i] = tmp[i] - a[i];
}

static __always_inline void fscalar(u64 *output, u64 *b, u64 s)
{
	u128 tmp[5];
	u128 b4;
	u128 b0;
	u32 i;

	/* output = b * s mod p, with a single carry-and-reduce pass. */
	for (i = 0; i < 5; ++i)
		tmp[i] = (u128)b[i] * s;
	fproduct_carry_wide_(tmp);
	b4 = tmp[4];
	b0 = tmp[0];
	tmp[4] = b4 & (u128)0x7ffffffffffffLLU;
	tmp[0] = b0 + (u128)19 * (u64)(b4 >> 51);
	fproduct_copy_from_wide_(output, tmp);
}

static __always_inline void fmul(u64 *output, u64 *a, u64 *b)
{
	fmul_fmul(output, a, b);
}

static __always_inline void crecip(u64 *output, u64 *input)
{
	crecip_crecip(output, input);
}

static __always_inline void point_swap_conditional_step(u64 *a, u64 *b,
							u64 swap1, u32 ctr)
{
	u32 i = ctr - 1;
	u64 ai = a[i];
	u64 bi = b[i];
	/* swap1 is all-ones or zero, so x is (ai ^ bi) or 0. */
	u64 x = swap1 & (ai ^ bi);

	a[i] = ai ^ x;
	b[i] = bi ^ x;
}

static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1)
{
	point_swap_conditional_step(a, b, swap1, 5);
	point_swap_conditional_step(a, b, swap1, 4);
	point_swap_conditional_step(a, b, swap1, 3);
	point_swap_conditional_step(a, b, swap1, 2);
	point_swap_conditional_step(a, b, swap1, 1);
}

static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap)
{
	/* Expand the swap bit into a full mask, then conditionally swap
	 * both coordinates of the two points. */
	u64 swap1 = 0 - iswap;

	point_swap_conditional5(a, b, swap1);
	point_swap_conditional5(a + 5, b + 5, swap1);
}

static __always_inline void point_copy(u64 *output, u64 *input)
{
	/* A point is (x, z): two field elements of five limbs each. */
	memcpy(output, input, 5 * sizeof(*input));
	memcpy(output + 5, input + 5, 5 * sizeof(*input));
}

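/*
 * fmonty performs one combined differential addition and doubling on
 * the Montgomery curve in projective (x, z) coordinates, following the
 * standard X25519 formulas (cf. RFC 7748): given P, Q and Q - P, it
 * produces 2P and P + Q without ever using the y coordinate.
 */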
static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p,
						u64 *pq, u64 *qmqp)
{
	u64 *qx = qmqp;
	u64 *x2 = pp;
	u64 *z2 = pp + 5;
	u64 *x3 = ppq;
	u64 *z3 = ppq + 5;
	u64 *x = p;
	u64 *z = p + 5;
	u64 *xprime = pq;
	u64 *zprime = pq + 5;
	u64 buf[40] = { 0 };
	u64 *origx = buf;
	u64 *origxprime = buf + 5;
	u64 *zzz = buf + 10;
	u64 *xx = buf + 15;
	u64 *zz = buf + 20;
	u64 *xxprime = buf + 25;
	u64 *zzprime = buf + 30;
	u64 *zzzprime = buf + 35;

	memcpy(origx, x, 5 * sizeof(*x));
	fsum(x, z);			/* x = x + z */
	fdifference(z, origx);		/* z = x - z */

	memcpy(origxprime, xprime, 5 * sizeof(*xprime));
	fsum(xprime, zprime);		/* xprime = x' + z' */
	fdifference(zprime, origxprime);/* zprime = x' - z' */
	fmul_fmul(xxprime, xprime, z);	/* (x' + z')(x - z) */
	fmul_fmul(zzprime, x, zprime);	/* (x + z)(x' - z') */
	memcpy(origxprime, xxprime, 5 * sizeof(*xxprime));
	fsum(xxprime, zzprime);
	fdifference(zzprime, origxprime);
	fsquare_fsquare_times(x3, xxprime, 1);
	fsquare_fsquare_times(zzzprime, zzprime, 1);
	fmul_fmul(z3, zzzprime, qx);

	/* Doubling: x2 = AA*BB, z2 = E*(AA + 121665*E), E = AA - BB. */
	fsquare_fsquare_times(xx, x, 1);
	fsquare_fsquare_times(zz, z, 1);
	fmul_fmul(x2, xx, zz);
	fdifference(zz, xx);
	fscalar(zzz, zz, 121665);
	fsum(zzz, xx);
	fmul_fmul(z2, zzz, zz);
}

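/*
 * The ladder_* functions walk the clamped scalar from its most
 * significant bit to its least, performing one conditional swap and one
 * fmonty step per bit, so that the operation sequence and memory access
 * pattern are independent of the scalar.
 */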
static __always_inline void
ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
				       u64 *q, u8 byt)
{
	/* The top bit of byt is the current scalar bit. */
	u64 bit = (u64)(byt >> 7);

	point_swap_conditional(nq, nqpq, bit);
	addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);
	point_swap_conditional(nq2, nqpq2, bit);
}

static __always_inline void
ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2,
					      u64 *nqpq2, u64 *q, u8 byt)
{
	/* Two ladder steps, ping-ponging between the two point buffers. */
	ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
	ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q,
					       (u8)(byt << 1));
}

static __always_inline void
ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
				  u64 *q, u8 byt, u32 i)
{
	/* Consume 2 * i bits of byt, most significant first. */
	while (i--) {
		ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2,
							      nqpq2, q, byt);
		byt <<= 2;
	}
}

static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq,
							  u64 *nqpq, u64 *nq2,
							  u64 *nqpq2, u64 *q,
							  u32 i)
{
	/* Walk the scalar bytes from most significant to least. */
	while (i--) {
		u8 byte = n1[i];

		ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q,
						  byte, 4);
	}
}

static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
{
	u64 point_buf[40] = { 0 };
	u64 *nq = point_buf;
	u64 *nqpq = point_buf + 10;
	u64 *nq2 = point_buf + 20;
	u64 *nqpq2 = point_buf + 30;

	point_copy(nqpq, q);
	nq[0] = 1;	/* nq = (1, 0), the point at infinity */
	ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32);
	point_copy(result, nq);
}

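/*
 * The format_* helpers convert between the 32-byte little-endian wire
 * encoding and the five-limb internal representation: fexpand unpacks
 * bytes into limbs, and fcontract carries, reduces to the canonical
 * representative below p, and repacks.
 */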
static __always_inline void format_fexpand(u64 *output, const u8 *input)
{
	/* Unpack 32 little-endian bytes into five 51-bit limbs. */
	output[0] = get_unaligned_le64(input) & 0x7ffffffffffffLLU;
	output[1] = (get_unaligned_le64(input + 6) >> 3) & 0x7ffffffffffffLLU;
	output[2] = (get_unaligned_le64(input + 12) >> 6) & 0x7ffffffffffffLLU;
	output[3] = (get_unaligned_le64(input + 19) >> 1) & 0x7ffffffffffffLLU;
	output[4] = (get_unaligned_le64(input + 24) >> 12) & 0x7ffffffffffffLLU;
}

static __always_inline void format_fcontract_first_carry_pass(u64 *input)
{
	u32 i;

	/* One carry chain from limb 0 up to limb 4. */
	for (i = 0; i < 4; ++i) {
		input[i + 1] += input[i] >> 51;
		input[i] &= 0x7ffffffffffffLLU;
	}
}

static __always_inline void format_fcontract_first_carry_full(u64 *input)
{
	format_fcontract_first_carry_pass(input);
	modulo_carry_top(input);
}

static __always_inline void format_fcontract_second_carry_pass(u64 *input)
{
	u32 i;

	/* The same carry chain, run again after the top-limb fold. */
	for (i = 0; i < 4; ++i) {
		input[i + 1] += input[i] >> 51;
		input[i] &= 0x7ffffffffffffLLU;
	}
}

static __always_inline void format_fcontract_second_carry_full(u64 *input)
{
	format_fcontract_second_carry_pass(input);
	modulo_carry_top(input);
	/* The fold may set bit 51 of limb 0 again; carry it once more. */
	input[1] += input[0] >> 51;
	input[0] &= 0x7ffffffffffffLLU;
}

static __always_inline void format_fcontract_trim(u64 *input)
{
	/* Constant-time final reduction: subtract p iff input >= p, i.e.
	 * iff limb 0 >= 2^51 - 19 and all higher limbs are 2^51 - 1. */
	u64 mask0 = u64_gte_mask(input[0], 0x7ffffffffffedLLU);
	u64 mask1 = u64_eq_mask(input[1], 0x7ffffffffffffLLU);
	u64 mask2 = u64_eq_mask(input[2], 0x7ffffffffffffLLU);
	u64 mask3 = u64_eq_mask(input[3], 0x7ffffffffffffLLU);
	u64 mask4 = u64_eq_mask(input[4], 0x7ffffffffffffLLU);
	u64 mask = mask0 & mask1 & mask2 & mask3 & mask4;

	input[0] -= 0x7ffffffffffedLLU & mask;
	input[1] -= 0x7ffffffffffffLLU & mask;
	input[2] -= 0x7ffffffffffffLLU & mask;
	input[3] -= 0x7ffffffffffffLLU & mask;
	input[4] -= 0x7ffffffffffffLLU & mask;
}

static __always_inline void format_fcontract_store(u8 *output, u64 *input)
{
	/* Repack five 51-bit limbs into 32 little-endian bytes. */
	u64 t0 = input[0];
	u64 t1 = input[1];
	u64 t2 = input[2];
	u64 t3 = input[3];
	u64 t4 = input[4];

	put_unaligned_le64(t1 << 51 | t0, output);
	put_unaligned_le64(t2 << 38 | t1 >> 13, output + 8);
	put_unaligned_le64(t3 << 25 | t2 >> 26, output + 16);
	put_unaligned_le64(t4 << 12 | t3 >> 39, output + 24);
}

static __always_inline void format_fcontract(u8 *output, u64 *input)
{
	format_fcontract_first_carry_full(input);
	format_fcontract_second_carry_full(input);
	format_fcontract_trim(input);
	format_fcontract_store(output, input);
}

static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point)
{
	u64 buf[10] __aligned(32) = { 0 };
	u64 *x = point;
	u64 *z = point + 5;
	u64 *zmone = buf;
	u64 *sc = buf + 5;

	/* Convert projective (x, z) to affine x/z and serialize it. */
	crecip(zmone, z);
	fmul(sc, x, zmone);
	format_fcontract(scalar, sc);
}

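/*
 * Top-level X25519 scalar multiplication: expand the peer's point,
 * clamp the secret, run the Montgomery ladder, and contract the result
 * back to bytes, wiping intermediate state afterwards.
 */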
void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE],
			const u8 secret[CURVE25519_KEY_SIZE],
			const u8 basepoint[CURVE25519_KEY_SIZE])
{
	u64 buf0[10] __aligned(32) = { 0 };
	u64 *x0 = buf0;
	u64 *z = buf0 + 5;
	u64 nq[10] = { 0 };
	u8 e[32] __aligned(32) = { 0 };

	/* Decode the peer's point and set z = 1 (projective form). */
	format_fexpand(x0, basepoint);
	z[0] = 1;

	/* Clamp the secret scalar as required for X25519. */
	memcpy(e, secret, 32);
	curve25519_clamp_secret(e);

	/* Run the ladder, then serialize the resulting x/z. */
	ladder_cmult(nq, e, buf0);
	format_scalar_of_point(mypublic, nq);

	memzero_explicit(nq, sizeof(nq));
	memzero_explicit(e, sizeof(e));
}