Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aea06eb2 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'TLS-offload-rx-netdev-and-mlx5'

Boris Pismenny says:

====================
TLS offload rx, netdev & mlx5

The following series provides TLS RX inline crypto offload.

v4->v5:
    - Remove the Kconfig to mutually exclude both IPsec and TLS

v3->v4:
    - Remove the iov revert for zero copy send flow

v2->v3:
    - Fix typo
    - Adjust cover letter
    - Fix bug in zero copy flows
    - Use network byte order for the record number in resync
    - Adjust the sequence provided in resync

v1->v2:
    - Fix bisectability problems due to variable name changes
    - Fix potential uninitialized return value

This series completes the generic infrastructure to offload TLS crypto to
a network device. It enables the kernel TLS socket to skip decryption and
authentication operations for SKBs marked as decrypted on the receive
side of the data path, leaving those computationally expensive operations
to the NIC.

This infrastructure doesn't require a TCP offload engine. Instead, the
NIC decrypts a packet's payload if the packet contains the expected TCP
sequence number. The TLS record authentication tag remains unmodified
regardless of decryption. If the packet is decrypted successfully and it
contains an authentication tag, then the authentication check has passed.
Otherwise, if the authentication fails, then the packet is provided
unmodified and the KTLS layer is responsible for handling it.
Out-Of-Order TCP packets are provided unmodified. As a result,
in the slow path some of the SKBs are decrypted while others remain as
ciphertext.

The GRO and TCP layers must not coalesce decrypted and non-decrypted SKBs.
In the worst case a received TLS record consists of both plaintext
and ciphertext packets. These partially decrypted records must be
re-encrypted in software, only to be decrypted again.

The notable differences between SW KTLS and NIC offloaded TLS
implementations are as follows:
1. Partial decryption - Software must handle the case of a TLS record
that was only partially decrypted by HW. This can happen due to packet
reordering.
2. Resynchronization - tls_read_size calls the device driver to
resynchronize HW whenever it lost track of the TLS record framing in
the TCP stream.

The infrastructure should be extendable to support various NIC offload
implementations.  However it is currently written with the
implementation below in mind:
The NIC identifies packets that should be offloaded according to
the 5-tuple and the TCP sequence number. If these match and the
packet is decrypted and authenticated successfully, then a syndrome
is provided to software. Otherwise, the packet is unmodified.
Decrypted and non-decrypted packets aren't coalesced by the network stack,
and the KTLS layer decrypts and authenticates partially decrypted records.
The NIC provides an indication whenever a resync is required. The resync
operation is triggered by the KTLS layer while parsing TLS record headers.

Finally, we measure the performance obtained by running single stream
iperf with two Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz machines connected
back-to-back with Innova TLS (40Gbps) NICs. We compare TCP (upper bound)
and KTLS-Offload running both in Tx and Rx. The results show that the
performance of offload is comparable to TCP.

                          | Bandwidth (Gbps) | CPU Tx (%) | CPU Rx (%)
TCP                       | 28.8             | 5          | 12
KTLS-Offload-Tx-Rx        | 28.6             | 7          | 14

Paper: https://netdevconf.org/2.2/papers/pismenny-tlscrypto-talk.pdf


====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents cc98419a b3ccf978
Loading
Loading
Loading
Loading
+37 −0
Original line number Diff line number Diff line
#ifndef __MLX5E_ACCEL_H__
#define __MLX5E_ACCEL_H__

#ifdef CONFIG_MLX5_ACCEL

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en.h"

/* Check whether the SKB carries the driver's inline metadata header:
 * it must be long enough to hold an Ethernet header plus the metadata,
 * and the ethertype slot must contain the special metadata ethertype.
 */
static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
{
	__be16 *etype_p;

	/* Too short to contain both the Ethernet header and metadata? */
	if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
		return false;

	/* Ethertype sits right after the two 6-byte MAC addresses */
	etype_p = (__be16 *)(skb->data + ETH_ALEN * 2);
	return *etype_p == cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
}

/* Strip the inline metadata header from the SKB by sliding the two MAC
 * addresses forward over it and pulling the SKB head pointer past it.
 */
static inline void remove_metadata_hdr(struct sk_buff *skb)
{
	struct ethhdr *src_eth = (struct ethhdr *)skb->data;
	struct ethhdr *dst_eth =
		(struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);

	/* Move dest+source MACs (2 * ETH_ALEN bytes) over the metadata;
	 * the ethertype is already in its new place.
	 */
	memmove(dst_eth, src_eth, 2 * ETH_ALEN);
	skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
}

#endif /* CONFIG_MLX5_ACCEL */

#endif /* __MLX5E_ACCEL_H__ */
+16 −7
Original line number Diff line number Diff line
@@ -37,17 +37,26 @@
#include "mlx5_core.h"
#include "fpga/tls.h"

int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			    struct tls_crypto_info *crypto_info,
			       u32 start_offload_tcp_sn, u32 *p_swid)
			    u32 start_offload_tcp_sn, u32 *p_swid,
			    bool direction_sx)
{
	return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info,
					 start_offload_tcp_sn, p_swid);
	return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
				      start_offload_tcp_sn, p_swid,
				      direction_sx);
}

void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid)
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
			     bool direction_sx)
{
	mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL);
	mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
}

int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
			     u64 rcd_sn)
{
	return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}

bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
+17 −9
Original line number Diff line number Diff line
@@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits {
	u8         reserved_at_2[0x1e];
};

int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			    struct tls_crypto_info *crypto_info,
			       u32 start_offload_tcp_sn, u32 *p_swid);
void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid);
			    u32 start_offload_tcp_sn, u32 *p_swid,
			    bool direction_sx);
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
			     bool direction_sx);
int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
			     u64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
@@ -71,11 +75,15 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);

#else

static inline int
mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
static int
mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn, u32 *p_swid) { return 0; }
static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { }
			u32 start_offload_tcp_sn, u32 *p_swid,
			bool direction_sx) { return -ENOTSUPP; }
static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
					   bool direction_sx) { }
static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
					   u32 seq, u64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
+5 −15
Original line number Diff line number Diff line
@@ -37,6 +37,7 @@

#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "accel/accel.h"
#include "en.h"

enum {
@@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
}

struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
					  struct sk_buff *skb)
					  struct sk_buff *skb, u32 *cqe_bcnt)
{
	struct mlx5e_ipsec_metadata *mdata;
	struct ethhdr *old_eth;
	struct ethhdr *new_eth;
	struct xfrm_state *xs;
	__be16 *ethtype;

	/* Detect inline metadata */
	if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
		return skb;
	ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
	if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
	if (!is_metadata_hdr_valid(skb))
		return skb;

	/* Use the metadata */
@@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
		return NULL;
	}

	/* Remove the metadata from the buffer */
	old_eth = (struct ethhdr *)skb->data;
	new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
	memmove(new_eth, old_eth, 2 * ETH_ALEN);
	/* Ethertype is already in its new place */
	skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
	remove_metadata_hdr(skb);
	*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;

	return skb;
}
+1 −1
Original line number Diff line number Diff line
@@ -41,7 +41,7 @@
#include "en.h"

struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
					  struct sk_buff *skb);
					  struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

void mlx5e_ipsec_inverse_table_init(void);
Loading