#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>
struct tls_rec;
#define TLS_MAX_PAYLOAD_SIZE …
#define TLS_HEADER_SIZE …
#define TLS_NONCE_OFFSET …
#define TLS_CRYPTO_INFO_READY(info) …
#define TLS_AAD_SPACE_SIZE …
#define TLS_MAX_IV_SIZE …
#define TLS_MAX_SALT_SIZE …
#define TLS_TAG_SIZE …
#define TLS_MAX_REC_SEQ_SIZE …
#define TLS_MAX_AAD_SIZE …
#define TLS_AES_CCM_IV_B0_BYTE …
#define TLS_SM4_CCM_IV_B0_BYTE …
enum { … };
struct tx_work { … };
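/* Per-socket state for the software (kTLS) transmit path. */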
struct tls_sw_context_tx { … };
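/* Stream parser state used to delineate TLS records on the receive path. */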
struct tls_strparser { … };
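/* Per-socket state for the software (kTLS) receive path. */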
struct tls_sw_context_rx { … };
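/* Bookkeeping for a single TLS record on the device-offloaded TX path. */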
struct tls_record_info { … };
#define TLS_DRIVER_STATE_SIZE_TX …
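/* TX device-offload context: tracks in-flight records for retransmission
 * handling and reserves TLS_DRIVER_STATE_SIZE_TX bytes of driver-private
 * state.
 */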
struct tls_offload_context_tx { … };
enum tls_context_flags { … };
struct cipher_context { … };
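/* Crypto parameters as supplied by user space via setsockopt(), one variant
 * per supported cipher.
 */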
union tls_crypto_context { … };
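/* Sizes and offsets derived from the negotiated protocol version and cipher
 * (IV, tag, record sequence number, AAD, ...).
 */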
struct tls_prot_info { … };
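/* Core per-socket TLS state shared by the software and device-offload paths. */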
struct tls_context { … };
enum tls_offload_ctx_dir { … };
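/* Callbacks a NIC driver implements to install, tear down and resynchronize
 * per-connection TLS offload state.
 */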
struct tlsdev_ops { … };
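/* How RX resynchronization is driven: by explicit driver requests or by the
 * core scanning for the next record header.
 */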
enum tls_offload_sync_type { … };
#define TLS_DEVICE_RESYNC_NH_START_IVAL …
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL …
#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX …
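/* State for driver-requested asynchronous RX resync. */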
struct tls_offload_resync_async { … };
#define TLS_DRIVER_STATE_SIZE_RX …
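/* RX device-offload context; embeds the software RX context so decryption
 * can fall back to software when needed.
 */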
struct tls_offload_context_rx { … };
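/* Look up the offload record containing TCP sequence number @seq; the record
 * sequence number is returned through @p_record_sn. Used by drivers when
 * handling retransmissions.
 */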
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);
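/* The start marker is the zero-length record queued when offload is
 * installed; it only marks the TCP sequence number where offload begins.
 */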
static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{ … }
static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{ … }
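/* sk_validate_xmit_skb() hooks: encrypt in software any skb that cannot be
 * handled by the offload device.
 */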
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);
static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
{ … }
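/* The TLS ULP context is stored in icsk->icsk_ulp_data. */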
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{ … }
static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{ … }
static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{ … }
static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{ … }
static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{ … }
static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{ … }
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{ … }
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{ … }
static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{ … }
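/* RX resync requests are packed into a single atomic word: the TCP sequence
 * number in the upper 32 bits plus the request flags below.
 */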
#define RESYNC_REQ …
#define RESYNC_REQ_ASYNC …
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{ … }
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{ … }
static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{ … }
static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{ … }
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{ … }
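/* Software fallback encryption of an skb that the device cannot handle. */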
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
#ifdef CONFIG_TLS_DEVICE
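/* Driver-visible entry points: the destructor installed on offloaded sockets,
 * and the TX resync request a driver raises when it saw @got_seq where it
 * expected @exp_seq.
 */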
void tls_device_sk_destruct(struct sock *sk);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{ … }
#endif /* CONFIG_TLS_DEVICE */
#endif /* _TLS_OFFLOAD_H */