// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2020 Chelsio Communications. All rights reserved. */ #define pr_fmt(fmt) … #include <linux/skbuff.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/ip.h> #include <net/ipv6.h> #include <linux/netdevice.h> #include <crypto/aes.h> #include <linux/skbuff_ref.h> #include "chcr_ktls.h" static LIST_HEAD(uld_ctx_list); static DEFINE_MUTEX(dev_mutex); /* chcr_get_nfrags_to_send: get the remaining nfrags after start offset * @skb: skb * @start: start offset. * @len: how much data to send after @start */ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len) { … } static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info); static void clear_conn_resources(struct chcr_ktls_info *tx_info); /* * chcr_ktls_save_keys: calculate and save crypto keys. * @tx_info - driver specific tls info. * @crypto_info - tls crypto information. * @direction - TX/RX direction. * return - SUCCESS/FAILURE. */ static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info, struct tls_crypto_info *crypto_info, enum tls_offload_ctx_dir direction) { … } /* * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection. * @sk - tcp socket. * @tx_info - driver specific tls info. * @atid - connection active tid. * return - send success/failure. */ static int chcr_ktls_act_open_req(struct sock *sk, struct chcr_ktls_info *tx_info, int atid) { … } #if IS_ENABLED(CONFIG_IPV6) /* * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection. * @sk - tcp socket. * @tx_info - driver specific tls info. * @atid - connection active tid. * return - send success/failure. */ static int chcr_ktls_act_open_req6(struct sock *sk, struct chcr_ktls_info *tx_info, int atid) { … } #endif /* #if IS_ENABLED(CONFIG_IPV6) */ /* * chcr_setup_connection: create a TCB entry so that TP will form tcp packets. * @sk - tcp socket. * @tx_info - driver specific tls info. 
* return: NET_TX_OK/NET_XMIT_DROP */ static int chcr_setup_connection(struct sock *sk, struct chcr_ktls_info *tx_info) { … } /* * chcr_set_tcb_field: update tcb fields. * @tx_info - driver specific tls info. * @word - TCB word. * @mask - TCB word related mask. * @val - TCB word related value. * @no_reply - set 1 if not looking for TP response. */ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word, u64 mask, u64 val, int no_reply) { … } /* * chcr_ktls_dev_del: call back for tls_dev_del. * Remove the tid and l2t entry and close the connection, * on a per-connection basis. * @netdev - net device. * @tls_ctx - tls context. * @direction - TX/RX crypto direction */ static void chcr_ktls_dev_del(struct net_device *netdev, struct tls_context *tls_ctx, enum tls_offload_ctx_dir direction) { … } /* * chcr_ktls_dev_add: call back for tls_dev_add. * Create a tcb entry for TP. Also add l2t entry for the connection. And * generate keys & save those keys locally. * @netdev - net device. * @tls_ctx - tls context. * @direction - TX/RX crypto direction * return: SUCCESS/FAILURE. */ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, enum tls_offload_ctx_dir direction, struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn) { … } /* * chcr_init_tcb_fields: Initialize tcb fields to handle TCP seq number * handling. * @tx_info - driver specific tls info. * return: NET_TX_OK/NET_XMIT_DROP */ static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info) { … } /* * chcr_ktls_cpl_act_open_rpl: connection reply received from TP. */ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input) { … } /* * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP. 
*/ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input) { … } static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, u32 tid, void *pos, u16 word, struct sge_eth_txq *q, u64 mask, u64 val, u32 reply) { … } /* * chcr_write_cpl_set_tcb_ulp: update tcb values. * TCB is responsible to create tcp headers, so all the related values * should be correctly updated. * @tx_info - driver specific tls info. * @q - tx queue on which packet is going out. * @tid - TCB identifier. * @pos - current index where should we start writing. * @word - TCB word. * @mask - TCB word related mask. * @val - TCB word related value. * @reply - set 1 if looking for TP response. * return - next position to write. */ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, struct sge_eth_txq *q, u32 tid, void *pos, u16 word, u64 mask, u64 val, u32 reply) { … } /* * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header * with updated values like tcp seq, ack, window etc. * @tx_info - driver specific tls info. * @q - TX queue. * @tcp_seq * @tcp_ack * @tcp_win * return: NETDEV_TX_BUSY/NET_TX_OK. */ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info, struct sge_eth_txq *q, u64 tcp_seq, u64 tcp_ack, u64 tcp_win, bool offset) { … } /* * chcr_ktls_get_tx_flits * returns number of flits to be sent out, it includes key context length, WR * size and skb fragments. */ static unsigned int chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len) { … } /* * chcr_ktls_check_tcp_options: To check if there is any TCP option available * other than timestamp. * @skb - skb contains partial record.. * return: 1 / 0 */ static int chcr_ktls_check_tcp_options(struct tcphdr *tcp) { … } /* * chcr_ktls_write_tcp_options : TP can't send out all the options, we need to * send out separately. * @tx_info - driver specific tls info. * @skb - skb contains partial record.. * @q - TX queue. * @tx_chan - channel number. 
* return: NETDEV_TX_OK/NETDEV_TX_BUSY. */ static int chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb, struct sge_eth_txq *q, uint32_t tx_chan) { … } /* * chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb * received has partial end part of the record, send out the complete record, so * that crypto block will be able to generate TAG/HASH. * @skb - segment which has complete or partial end part. * @tx_info - driver specific tls info. * @q - TX queue. * @tcp_seq * @tcp_push - tcp push bit. * @mss - segment size. * return: NETDEV_TX_BUSY/NET_TX_OK. */ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb, struct chcr_ktls_info *tx_info, struct sge_eth_txq *q, u32 tcp_seq, bool is_last_wr, u32 data_len, u32 skb_offset, u32 nfrags, bool tcp_push, u32 mss) { … } /* * chcr_ktls_xmit_wr_short: This is to send out partial records. If it's * a middle part of a record, fetch the prior data to make it 16 byte aligned * and then only send it out. * * @skb - skb contains partial record. * @tx_info - driver specific tls info. * @q - TX queue. * @tcp_seq * @tcp_push - tcp push bit. * @mss - segment size. * @tls_rec_offset - offset from start of the tls record. * @prior_data - data before the current segment, required to make this record * 16 byte aligned. * @prior_data_len - prior_data length (less than 16) * return: NETDEV_TX_BUSY/NET_TX_OK. */ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb, struct chcr_ktls_info *tx_info, struct sge_eth_txq *q, u32 tcp_seq, bool tcp_push, u32 mss, u32 tls_rec_offset, u8 *prior_data, u32 prior_data_len, u32 data_len, u32 skb_offset) { … } /* * chcr_ktls_tx_plaintxt: This handler will take care of the records which have * only plain text (only tls header and iv) * @tx_info - driver specific tls info. * @skb - skb contains partial record. * @tcp_seq * @mss - segment size. * @tcp_push - tcp push bit. * @q - TX queue. 
* @port_id : port number * @prior_data - data before the current segment, required to make this record * 16 byte aligned. * @prior_data_len - prior_data length (less than 16) * return: NETDEV_TX_BUSY/NET_TX_OK. */ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info, struct sk_buff *skb, u32 tcp_seq, u32 mss, bool tcp_push, struct sge_eth_txq *q, u32 port_id, u8 *prior_data, u32 data_len, u32 skb_offset, u32 prior_data_len) { … } static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info, struct sk_buff *skb, struct sge_eth_txq *q) { … } /* * chcr_ktls_copy_record_in_skb * @nskb - new skb where the frags are to be added. * @skb - old skb, to copy socket and destructor details. * @record - specific record which has complete 16k record in frags. */ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb, struct sk_buff *skb, struct tls_record_info *record) { … } /* * chcr_end_part_handler: This handler will handle the record which * is complete or if record's end part is received. T6 adapter has an issue that * it can't send out TAG with partial record so if it's an end part then we have * to send TAG as well and for which we need to fetch the complete record and * send it to crypto module. * @tx_info - driver specific tls info. * @skb - skb contains partial record. * @record - complete record of 16K size. * @tcp_seq * @mss - segment size in which TP needs to chop a packet. * @tcp_push_no_fin - tcp push if fin is not set. * @q - TX queue. * @tls_end_offset - offset from end of the record. * @last wr : check if this is the last part of the skb going out. * return: NETDEV_TX_OK/NETDEV_TX_BUSY. 
*/ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info, struct sk_buff *skb, struct tls_record_info *record, u32 tcp_seq, int mss, bool tcp_push_no_fin, struct sge_eth_txq *q, u32 skb_offset, u32 tls_end_offset, bool last_wr) { … } /* * chcr_short_record_handler: This handler will take care of the records which * don't have an end part (1st part or the middle part(s) of a record). In such * cases, AES CTR will be used in place of AES GCM to send out partial packet. * This partial record might be the first part of the record, or the middle * part. In case of middle record we should fetch the prior data to make it 16 * byte aligned. If it has a partial tls header or iv then get to the start of * tls header. And if it has partial TAG, then remove the complete TAG and send * only the payload. * There is one more possibility that it gets a partial header, send that * portion as a plaintext. * @tx_info - driver specific tls info. * @skb - skb contains partial record. * @record - complete record of 16K size. * @tcp_seq * @mss - segment size in which TP needs to chop a packet. * @tcp_push_no_fin - tcp push if fin is not set. * @q - TX queue. * @tls_end_offset - offset from end of the record. * return: NETDEV_TX_OK/NETDEV_TX_BUSY. 
*/ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info, struct sk_buff *skb, struct tls_record_info *record, u32 tcp_seq, int mss, bool tcp_push_no_fin, u32 data_len, u32 skb_offset, struct sge_eth_txq *q, u32 tls_end_offset) { … } static int chcr_ktls_sw_fallback(struct sk_buff *skb, struct chcr_ktls_info *tx_info, struct sge_eth_txq *q) { … } /* nic tls TX handler */ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) { … } static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi) { … } static const struct tlsdev_ops chcr_ktls_ops = …; static chcr_handler_func work_handlers[NUM_CPL_CMDS] = …; static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *pgl) { … } static void clear_conn_resources(struct chcr_ktls_info *tx_info) { … } static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx) { … } static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state) { … } static struct cxgb4_uld_info chcr_ktls_uld_info = …; static int __init chcr_ktls_init(void) { … } static void __exit chcr_ktls_exit(void) { … } module_init(…) …; module_exit(chcr_ktls_exit); MODULE_DESCRIPTION(…) …; MODULE_LICENSE(…) …; MODULE_AUTHOR(…) …; MODULE_VERSION(…);