xfrm: Separate ESP handling from segmentation for GRO packets.

We change the ESP GSO handlers to only segment the packets.
The ESP handling and encryption are deferred to validate_xmit_xfrm(),
where this is done for non-GRO packets too. This makes the code
more robust and prepares for asynchronous crypto handling.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Author: Steffen Klassert, 2017-12-20 10:41:31 +01:00
Parent: f39a5c01c3
Commit: 3dca3f38cf
7 changed files with 129 additions and 132 deletions
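In rough outline, the new division of labor looks like the sketch below. This is illustrative only: esp_gso_path_sketch() is a hypothetical helper, not part of this patch; the real entry points are esp4_gso_segment()/esp6_gso_segment() and validate_xmit_xfrm() in the diff that follows, and all error handling is elided here.

/* Illustrative sketch only -- esp_gso_path_sketch() is a hypothetical
 * helper showing the new split; error handling is elided.
 */
static struct sk_buff *esp_gso_path_sketch(struct xfrm_state *x,
					   struct sk_buff *skb,
					   netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	/* The ESP GSO handler now only marks the packet ... */
	xo->flags |= XFRM_GSO_SEGMENT;

	/* ... and segments it. */
	skb = x->outer_mode->gso_segment(x, skb, features);

	/* ESP handling and encryption happen later, in
	 * validate_xmit_xfrm(), which walks the segment list and
	 * applies the ESP transform per segment -- the same path
	 * that handles non-GRO packets.
	 */
	return validate_xmit_xfrm(skb, features);
}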

diff --git a/include/net/xfrm.h b/include/net/xfrm.h
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h

@@ -1888,7 +1888,7 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 void __net_init xfrm_dev_init(void);
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 		       struct xfrm_user_offload *xuo);
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1929,9 +1929,9 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
 	}
 }
 #else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
-	return 0;
+	return skb;
 }
 
 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)

diff --git a/net/core/dev.c b/net/core/dev.c
--- a/net/core/dev.c
+++ b/net/core/dev.c

@@ -3083,9 +3083,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 		    __skb_linearize(skb))
 			goto out_kfree_skb;
 
-		if (validate_xmit_xfrm(skb, features))
-			goto out_kfree_skb;
-
 		/* If packet is not checksummed and device does not
 		 * support checksumming for this protocol, complete
 		 * checksumming here.
@@ -3102,6 +3099,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 		}
 	}
 
+	skb = validate_xmit_xfrm(skb, features);
+
 	return skb;
 
 out_kfree_skb:

diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c

@@ -108,75 +108,36 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
 					netdev_features_t features)
 {
-	__u32 seq;
-	int err = 0;
-	struct sk_buff *skb2;
 	struct xfrm_state *x;
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
-	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	netdev_features_t esp_features = features;
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
 	if (!xo)
-		goto out;
-
-	seq = xo->seq.low;
+		return ERR_PTR(-EINVAL);
 
 	x = skb->sp->xvec[skb->sp->len - 1];
 	aead = x->data;
 	esph = ip_esp_hdr(skb);
 
 	if (esph->spi != x->id.spi)
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
 	skb->encap_hdr_csum = 1;
 
-	if (!(features & NETIF_F_HW_ESP))
+	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+	    (x->xso.dev != skb->dev))
 		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-	segs = x->outer_mode->gso_segment(x, skb, esp_features);
-	if (IS_ERR_OR_NULL(segs))
-		goto out;
+	xo->flags |= XFRM_GSO_SEGMENT;
 
-	__skb_pull(skb, skb->data - skb_mac_header(skb));
-
-	skb2 = segs;
-	do {
-		struct sk_buff *nskb = skb2->next;
-
-		xo = xfrm_offload(skb2);
-		xo->flags |= XFRM_GSO_SEGMENT;
-		xo->seq.low = seq;
-		xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-		if(!(features & NETIF_F_HW_ESP))
-			xo->flags |= CRYPTO_FALLBACK;
-
-		x->outer_mode->xmit(x, skb2);
-
-		err = x->type_offload->xmit(x, skb2, esp_features);
-		if (err) {
-			kfree_skb_list(segs);
-			return ERR_PTR(err);
-		}
-
-		if (!skb_is_gso(skb2))
-			seq++;
-		else
-			seq += skb_shinfo(skb2)->gso_segs;
-
-		skb_push(skb2, skb2->mac_len);
-		skb2 = nskb;
-	} while (skb2);
-
-out:
-	return segs;
+	return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -203,6 +164,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
 	struct crypto_aead *aead;
 	struct esp_info esp;
 	bool hw_offload = true;
+	__u32 seq;
 
 	esp.inplace = true;
@@ -241,23 +203,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
 		return esp.nfrags;
 	}
 
+	seq = xo->seq.low;
+
 	esph = esp.esph;
 	esph->spi = x->id.spi;
 
 	skb_push(skb, -skb_network_offset(skb));
 
 	if (xo->flags & XFRM_GSO_SEGMENT) {
-		esph->seq_no = htonl(xo->seq.low);
-	} else {
-		ip_hdr(skb)->tot_len = htons(skb->len);
-		ip_send_check(ip_hdr(skb));
+		esph->seq_no = htonl(seq);
+
+		if (!skb_is_gso(skb))
+			xo->seq.low++;
+		else
+			xo->seq.low += skb_shinfo(skb)->gso_segs;
 	}
 
+	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+
+	ip_hdr(skb)->tot_len = htons(skb->len);
+	ip_send_check(ip_hdr(skb));
+
 	if (hw_offload)
 		return 0;
 
-	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
 	err = esp_output_tail(x, skb, &esp);
 	if (err)
 		return err;

diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c

@@ -105,18 +105,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
 	__skb_push(skb, skb->mac_len);
 	return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
-	if (xo->flags & XFRM_GSO_SEGMENT) {
-		skb->network_header = skb->network_header - x->props.header_len;
+	if (xo->flags & XFRM_GSO_SEGMENT)
 		skb->transport_header = skb->network_header +
 					sizeof(struct iphdr);
-	}
 
 	skb_reset_mac_len(skb);
 	pskb_pull(skb, skb->mac_len + x->props.header_len);

diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c

@@ -135,75 +135,36 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
 					netdev_features_t features)
 {
-	__u32 seq;
-	int err = 0;
-	struct sk_buff *skb2;
 	struct xfrm_state *x;
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
-	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	netdev_features_t esp_features = features;
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
 	if (!xo)
-		goto out;
-
-	seq = xo->seq.low;
+		return ERR_PTR(-EINVAL);
 
 	x = skb->sp->xvec[skb->sp->len - 1];
 	aead = x->data;
 	esph = ip_esp_hdr(skb);
 
 	if (esph->spi != x->id.spi)
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
 	skb->encap_hdr_csum = 1;
 
-	if (!(features & NETIF_F_HW_ESP))
+	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+	    (x->xso.dev != skb->dev))
 		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-	segs = x->outer_mode->gso_segment(x, skb, esp_features);
-	if (IS_ERR_OR_NULL(segs))
-		goto out;
+	xo->flags |= XFRM_GSO_SEGMENT;
 
-	__skb_pull(skb, skb->data - skb_mac_header(skb));
-
-	skb2 = segs;
-	do {
-		struct sk_buff *nskb = skb2->next;
-
-		xo = xfrm_offload(skb2);
-		xo->flags |= XFRM_GSO_SEGMENT;
-		xo->seq.low = seq;
-		xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-		if(!(features & NETIF_F_HW_ESP))
-			xo->flags |= CRYPTO_FALLBACK;
-
-		x->outer_mode->xmit(x, skb2);
-
-		err = x->type_offload->xmit(x, skb2, esp_features);
-		if (err) {
-			kfree_skb_list(segs);
-			return ERR_PTR(err);
-		}
-
-		if (!skb_is_gso(skb2))
-			seq++;
-		else
-			seq += skb_shinfo(skb2)->gso_segs;
-
-		skb_push(skb2, skb2->mac_len);
-		skb2 = nskb;
-	} while (skb2);
-
-out:
-	return segs;
+	return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -222,6 +183,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
 
 static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
 {
+	int len;
 	int err;
 	int alen;
 	int blksize;
@@ -230,6 +192,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
 	struct crypto_aead *aead;
 	struct esp_info esp;
 	bool hw_offload = true;
+	__u32 seq;
 
 	esp.inplace = true;
@@ -265,28 +228,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
 		return esp.nfrags;
 	}
 
+	seq = xo->seq.low;
+
 	esph = ip_esp_hdr(skb);
 	esph->spi = x->id.spi;
 
 	skb_push(skb, -skb_network_offset(skb));
 
 	if (xo->flags & XFRM_GSO_SEGMENT) {
-		esph->seq_no = htonl(xo->seq.low);
-	} else {
-		int len;
-
-		len = skb->len - sizeof(struct ipv6hdr);
-		if (len > IPV6_MAXPLEN)
-			len = 0;
-		ipv6_hdr(skb)->payload_len = htons(len);
+		esph->seq_no = htonl(seq);
+
+		if (!skb_is_gso(skb))
+			xo->seq.low++;
+		else
+			xo->seq.low += skb_shinfo(skb)->gso_segs;
 	}
 
+	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+	len = skb->len - sizeof(struct ipv6hdr);
+	if (len > IPV6_MAXPLEN)
+		len = 0;
+	ipv6_hdr(skb)->payload_len = htons(len);
+
 	if (hw_offload)
 		return 0;
 
-	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
 	err = esp6_output_tail(x, skb, &esp);
 	if (err)
 		return err;

diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c

@@ -105,17 +105,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
 	__skb_push(skb, skb->mac_len);
 	return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
-	if (xo->flags & XFRM_GSO_SEGMENT) {
-		skb->network_header = skb->network_header - x->props.header_len;
+	if (xo->flags & XFRM_GSO_SEGMENT)
 		skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
-	}
 
 	skb_reset_mac_len(skb);
 	pskb_pull(skb, skb->mac_len + x->props.header_len);

diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c

@@ -23,32 +23,99 @@
 #include <linux/notifier.h>
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
 	int err;
+	__u32 seq;
 	struct xfrm_state *x;
+	struct sk_buff *skb2;
+	netdev_features_t esp_features = features;
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
-	if (skb_is_gso(skb))
-		return 0;
+	if (!xo)
+		return skb;
 
-	if (xo) {
-		x = skb->sp->xvec[skb->sp->len - 1];
-		if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
-			return 0;
+	if (!(features & NETIF_F_HW_ESP))
+		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+	x = skb->sp->xvec[skb->sp->len - 1];
+	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+		return skb;
+
+	if (skb_is_gso(skb)) {
+		struct net_device *dev = skb->dev;
+
+		if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+			struct sk_buff *segs;
+
+			/* Packet got rerouted, fixup features and segment it. */
+			esp_features = esp_features & ~(NETIF_F_HW_ESP
+							| NETIF_F_GSO_ESP);
+
+			segs = skb_gso_segment(skb, esp_features);
+			if (IS_ERR(segs)) {
+				XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+				kfree_skb(skb);
+				return NULL;
+			} else {
+				consume_skb(skb);
+				skb = segs;
+			}
+		} else {
+			return skb;
+		}
+	}
 
+	if (!skb->next) {
 		x->outer_mode->xmit(x, skb);
 
-		err = x->type_offload->xmit(x, skb, features);
+		err = x->type_offload->xmit(x, skb, esp_features);
 		if (err) {
 			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
-			return err;
+			kfree_skb(skb);
+			return NULL;
 		}
 
 		skb_push(skb, skb->data - skb_mac_header(skb));
+
+		return skb;
 	}
 
-	return 0;
+	skb2 = skb;
+	seq = xo->seq.low;
+
+	do {
+		struct sk_buff *nskb = skb2->next;
+
+		xo = xfrm_offload(skb2);
+		xo->flags |= XFRM_GSO_SEGMENT;
+		xo->seq.low = seq;
+		xo->seq.hi = xfrm_replay_seqhi(x, seq);
+
+		if(!(features & NETIF_F_HW_ESP))
+			xo->flags |= CRYPTO_FALLBACK;
+
+		x->outer_mode->xmit(x, skb2);
+
+		err = x->type_offload->xmit(x, skb2, esp_features);
+		if (err) {
+			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+			skb2->next = nskb;
+			kfree_skb_list(skb2);
+			return NULL;
+		}
+
+		if (!skb_is_gso(skb2))
+			seq++;
+		else
+			seq += skb_shinfo(skb2)->gso_segs;
+
+		skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+		skb2 = nskb;
+	} while (skb2);
+
+	return skb;
 }
 EXPORT_SYMBOL_GPL(validate_xmit_xfrm);