Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9f30e5c5 authored by David S. Miller's avatar David S. Miller
Browse files


Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2017-12-22

1) Separate ESP handling from segmentation for GRO packets.
   This unifies the IPsec GSO and non-GSO codepaths.

2) Add asynchronous callbacks for xfrm on layer 2. This
   adds the necessary infrastructure to core networking.

3) Allow use of the layer 2 IPsec GSO codepath for software
   crypto; all the infrastructure is there now.

4) Also allow IPsec GSO with software crypto for local sockets.

5) Don't require a synchronous crypto fallback on IPsec offloading;
   it is no longer needed.

6) Check for xdo_dev_state_free and only call it if implemented.
   From Shannon Nelson.

7) Check for the required add and delete functions when a driver
   registers xdo_dev_ops. From Shannon Nelson.

8) Define xfrmdev_ops only with offload config.
   From Shannon Nelson.

9) Update the xfrm stats documentation.
   From Shannon Nelson.

Please pull or let me know if there are problems.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 04f629f7 1a4bb1d1
Loading
Loading
Loading
Loading
+14 −6
Original line number Original line Diff line number Diff line
@@ -5,13 +5,15 @@ Masahide NAKAMURA <nakam@linux-ipv6.org>


Transformation Statistics
Transformation Statistics
-------------------------
-------------------------
xfrm_proc is a statistics shown factor dropped by transformation
for developer.
It is a counter designed from current transformation source code
and defined like linux private MIB.


Inbound statistics
The xfrm_proc code is a set of statistics showing numbers of packets
~~~~~~~~~~~~~~~~~~
dropped by the transformation code and why.  These counters are defined
as part of the linux private MIB.  These counters can be viewed in
/proc/net/xfrm_stat.


Inbound errors
~~~~~~~~~~~~~~
XfrmInError:
XfrmInError:
	All errors which is not matched others
	All errors which is not matched others
XfrmInBufferError:
XfrmInBufferError:
@@ -46,6 +48,10 @@ XfrmInPolBlock:
	Policy discards
	Policy discards
XfrmInPolError:
XfrmInPolError:
	Policy error
	Policy error
XfrmAcquireError:
	State hasn't been fully acquired before use
XfrmFwdHdrError:
	Forward routing of a packet is not allowed


Outbound errors
Outbound errors
~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~
@@ -72,3 +78,5 @@ XfrmOutPolDead:
	Policy is dead
	Policy is dead
XfrmOutPolError:
XfrmOutPolError:
	Policy error
	Policy error
XfrmOutStateInvalid:
	State is invalid, perhaps expired
+5 −3
Original line number Original line Diff line number Diff line
@@ -1726,7 +1726,7 @@ struct net_device {
	const struct ndisc_ops *ndisc_ops;
	const struct ndisc_ops *ndisc_ops;
#endif
#endif


#ifdef CONFIG_XFRM
#ifdef CONFIG_XFRM_OFFLOAD
	const struct xfrmdev_ops *xfrmdev_ops;
	const struct xfrmdev_ops *xfrmdev_ops;
#endif
#endif


@@ -2793,7 +2793,9 @@ struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct Qdisc		**output_queue_tailp;
	struct sk_buff		*completion_queue;
	struct sk_buff		*completion_queue;

#ifdef CONFIG_XFRM_OFFLOAD
	struct sk_buff_head	xfrm_backlog;
#endif
#ifdef CONFIG_RPS
#ifdef CONFIG_RPS
	/* input_queue_head should be written by cpu owning this struct,
	/* input_queue_head should be written by cpu owning this struct,
	 * and only read by other cpus. Worth using a cache line.
	 * and only read by other cpus. Worth using a cache line.
@@ -3325,7 +3327,7 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);
			   char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq, int *ret);
				    struct netdev_queue *txq, int *ret);


+24 −5
Original line number Original line Diff line number Diff line
@@ -1051,6 +1051,7 @@ struct xfrm_offload {
#define	XFRM_GSO_SEGMENT	16
#define	XFRM_GSO_SEGMENT	16
#define	XFRM_GRO		32
#define	XFRM_GRO		32
#define	XFRM_ESP_NO_TRAILER	64
#define	XFRM_ESP_NO_TRAILER	64
#define	XFRM_DEV_RESUME		128


	__u32			status;
	__u32			status;
#define CRYPTO_SUCCESS				1
#define CRYPTO_SUCCESS				1
@@ -1874,21 +1875,28 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
{
	return skb->sp->xvec[skb->sp->len - 1];
	return skb->sp->xvec[skb->sp->len - 1];
}
}
#endif

static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
{
#ifdef CONFIG_XFRM
	struct sec_path *sp = skb->sp;
	struct sec_path *sp = skb->sp;


	if (!sp || !sp->olen || sp->len != sp->olen)
	if (!sp || !sp->olen || sp->len != sp->olen)
		return NULL;
		return NULL;


	return &sp->ovec[sp->olen - 1];
	return &sp->ovec[sp->olen - 1];
}
#else
	return NULL;
#endif
#endif
}


void __net_init xfrm_dev_init(void);
void __net_init xfrm_dev_init(void);


#ifdef CONFIG_XFRM_OFFLOAD
#ifdef CONFIG_XFRM_OFFLOAD
int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
void xfrm_dev_resume(struct sk_buff *skb);
void xfrm_dev_backlog(struct softnet_data *sd);
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo);
		       struct xfrm_user_offload *xuo);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1902,6 +1910,8 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
		return false;
		return false;


	xdst = (struct xfrm_dst *) dst;
	xdst = (struct xfrm_dst *) dst;
	if (!x->xso.offload_handle && !xdst->child->xfrm)
		return true;
	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
	    !xdst->child->xfrm)
	    !xdst->child->xfrm)
		return true;
		return true;
@@ -1923,15 +1933,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
	 struct net_device *dev = xso->dev;
	 struct net_device *dev = xso->dev;


	if (dev && dev->xfrmdev_ops) {
	if (dev && dev->xfrmdev_ops) {
		if (dev->xfrmdev_ops->xdo_dev_state_free)
			dev->xfrmdev_ops->xdo_dev_state_free(x);
			dev->xfrmdev_ops->xdo_dev_state_free(x);
		xso->dev = NULL;
		xso->dev = NULL;
		dev_put(dev);
		dev_put(dev);
	}
	}
}
}
#else
#else
static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
static inline void xfrm_dev_resume(struct sk_buff *skb)
{
{
	return 0;
}

static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
}

static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	return skb;
}
}


static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
+12 −7
Original line number Original line Diff line number Diff line
@@ -3059,7 +3059,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
}
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
EXPORT_SYMBOL(skb_csum_hwoffload_help);


static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
{
	netdev_features_t features;
	netdev_features_t features;


@@ -3083,9 +3083,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
		    __skb_linearize(skb))
		    __skb_linearize(skb))
			goto out_kfree_skb;
			goto out_kfree_skb;


		if (validate_xmit_xfrm(skb, features))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 * checksumming here.
@@ -3102,6 +3099,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
		}
		}
	}
	}


	skb = validate_xmit_xfrm(skb, features, again);

	return skb;
	return skb;


out_kfree_skb:
out_kfree_skb:
@@ -3111,7 +3110,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
	return NULL;
	return NULL;
}
}


struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
{
	struct sk_buff *next, *head = NULL, *tail;
	struct sk_buff *next, *head = NULL, *tail;


@@ -3122,7 +3121,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
		/* in case skb wont be segmented, point to itself */
		/* in case skb wont be segmented, point to itself */
		skb->prev = skb;
		skb->prev = skb;


		skb = validate_xmit_skb(skb, dev);
		skb = validate_xmit_skb(skb, dev, again);
		if (!skb)
		if (!skb)
			continue;
			continue;


@@ -3449,6 +3448,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
	struct netdev_queue *txq;
	struct netdev_queue *txq;
	struct Qdisc *q;
	struct Qdisc *q;
	int rc = -ENOMEM;
	int rc = -ENOMEM;
	bool again = false;


	skb_reset_mac_header(skb);
	skb_reset_mac_header(skb);


@@ -3510,7 +3510,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
				     XMIT_RECURSION_LIMIT))
				     XMIT_RECURSION_LIMIT))
				goto recursion_alert;
				goto recursion_alert;


			skb = validate_xmit_skb(skb, dev);
			skb = validate_xmit_skb(skb, dev, &again);
			if (!skb)
			if (!skb)
				goto out;
				goto out;


@@ -4194,6 +4194,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
				spin_unlock(root_lock);
				spin_unlock(root_lock);
		}
		}
	}
	}

	xfrm_dev_backlog(sd);
}
}


#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@ -8875,6 +8877,9 @@ static int __init net_dev_init(void)


		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
#ifdef CONFIG_RPS
+23 −13
Original line number Original line Diff line number Diff line
@@ -121,15 +121,33 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
static void esp_output_done(struct crypto_async_request *base, int err)
static void esp_output_done(struct crypto_async_request *base, int err)
{
{
	struct sk_buff *skb = base->data;
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	void *tmp;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x;
	struct xfrm_state *x = dst->xfrm;

	if (xo && (xo->flags & XFRM_DEV_RESUME))
		x = skb->sp->xvec[skb->sp->len - 1];
	else
		x = skb_dst(skb)->xfrm;


	tmp = ESP_SKB_CB(skb)->tmp;
	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	esp_ssg_unref(x, tmp);
	kfree(tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
		xfrm_output_resume(skb, err);
	}
	}
}


/* Move ESP header back into place. */
/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
@@ -825,17 +843,13 @@ static int esp_init_aead(struct xfrm_state *x)
	char aead_name[CRYPTO_MAX_ALG_NAME];
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	struct crypto_aead *aead;
	int err;
	int err;
	u32 mask = 0;


	err = -ENAMETOOLONG;
	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;
		goto error;


	if (x->xso.offload_handle)
	aead = crypto_alloc_aead(aead_name, 0, 0);
		mask |= CRYPTO_ALG_ASYNC;

	aead = crypto_alloc_aead(aead_name, 0, mask);
	err = PTR_ERR(aead);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
	if (IS_ERR(aead))
		goto error;
		goto error;
@@ -865,7 +879,6 @@ static int esp_init_authenc(struct xfrm_state *x)
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	unsigned int keylen;
	int err;
	int err;
	u32 mask = 0;


	err = -EINVAL;
	err = -EINVAL;
	if (!x->ealg)
	if (!x->ealg)
@@ -891,10 +904,7 @@ static int esp_init_authenc(struct xfrm_state *x)
			goto error;
			goto error;
	}
	}


	if (x->xso.offload_handle)
	aead = crypto_alloc_aead(authenc_name, 0, 0);
		mask |= CRYPTO_ALG_ASYNC;

	aead = crypto_alloc_aead(authenc_name, 0, mask);
	err = PTR_ERR(aead);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
	if (IS_ERR(aead))
		goto error;
		goto error;
Loading