Pravin Shelar mentioned that GSO could potentially generate
a wrong TX checksum if the skb has fragments that are overwritten
by the user between the checksum computation and transmit.
He suggested linearizing skbs, but this extra copy can be
avoided for normal TCP skbs cooked by tcp_sendmsg().
This patch introduces a new SKB_GSO_SHARED_FRAG flag, set
in skb_shinfo(skb)->gso_type if at least one frag can be
modified by the user.
Typical sources of such possible overwrites are {vm}splice(),
sendfile(), and macvtap/tun/virtio_net drivers.
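In outline, every producer that maps user-owned pages into skb frags
ORs the new flag into gso_type, and skb_checksum_help() makes a private
copy before checksumming when the flag is present. A condensed sketch
of the two halves (the real hunks follow below):

/* Producer side (e.g. tun/macvtap zero-copy, tcp sendpage):
 * after attaching a user-owned page, mark the skb.
 */
skb_fill_page_desc(skb, i, page, offset, len);
skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;

/* Consumer side (skb_checksum_help()): linearize first, so a later
 * user write to those pages cannot corrupt the checksum.
 */
if (skb_has_shared_frag(skb) && __skb_linearize(skb))
	goto out;	/* allocation failed */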
Tested:
$ netperf -H 7.7.8.84
MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to
7.7.8.84 () port 0 AF_INET
Recv   Send    Send
Socket Socket  Message  Elapsed
Size   Size    Size     Time     Throughput
bytes  bytes   bytes    secs.    10^6bits/sec

87380  16384  16384    10.00    3959.52
$ netperf -H 7.7.8.84 -t TCP_SENDFILE
TCP SENDFILE TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 7.7.8.84 ()
port 0 AF_INET
Recv   Send    Send
Socket Socket  Message  Elapsed
Size   Size    Size     Time     Throughput
bytes  bytes   bytes    secs.    10^6bits/sec

87380  16384  16384    10.00    3216.80
Performance of TCP_SENDFILE is impacted by the extra allocation and
copy, and because sendfile() hands us order-0 pages, while TCP_STREAM
uses bigger pages.
Reported-by: Pravin Shelar <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
skb->data_len += len;
skb->len += len;
skb->truesize += truesize;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
atomic_add(truesize, &skb->sk->sk_wmem_alloc);
while (len) {
int off = base & ~PAGE_MASK;
if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
- skb_shinfo(skb)->gso_type = gso_type;
+ skb_shinfo(skb)->gso_type |= gso_type;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb->data_len += len;
skb->len += len;
skb->truesize += truesize;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
atomic_add(truesize, &skb->sk->sk_wmem_alloc);
while (len) {
int off = base & ~PAGE_MASK;
}
if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ unsigned short gso_type = 0;
+
pr_debug("GSO!\n");
switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ gso_type = SKB_GSO_TCPV6;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ gso_type = SKB_GSO_UDP;
break;
default:
tun->dev->stats.rx_frame_errors++;
}
if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+ gso_type |= SKB_GSO_TCP_ECN;
skb_shinfo(skb)->gso_size = gso.gso_size;
+ skb_shinfo(skb)->gso_type |= gso_type;
if (skb_shinfo(skb)->gso_size == 0) {
tun->dev->stats.rx_frame_errors++;
kfree_skb(skb);
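The tun change above deserves a note: the old code assigned gso_type
directly, which would have wiped the SKB_GSO_SHARED_FRAG bit already
set by the zero-copy path. Accumulating the type in a local and OR-ing
it in preserves the flag; a minimal sketch of the pattern (ecn is a
hypothetical condition standing in for the VIRTIO_NET_HDR_GSO_ECN test):

/* Wrong: direct assignment loses bits set earlier, e.g. by the
 * zero-copy receive path:
 *	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 *
 * Right: build the type locally, then merge it:
 */
unsigned short gso_type = SKB_GSO_TCPV4;	/* example value */
if (ecn)					/* hypothetical */
	gso_type |= SKB_GSO_TCP_ECN;
skb_shinfo(skb)->gso_type |= gso_type;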
skb->len += size;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
*len -= size;
}
ntohs(skb->protocol), skb->len, skb->pkt_type);
if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ unsigned short gso_type = 0;
+
pr_debug("GSO!\n");
switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ gso_type = SKB_GSO_TCPV4;
break;
case VIRTIO_NET_HDR_GSO_UDP:
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ gso_type = SKB_GSO_UDP;
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ gso_type = SKB_GSO_TCPV6;
break;
default:
net_warn_ratelimited("%s: bad gso type %u.\n",
}
if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+ gso_type |= SKB_GSO_TCP_ECN;
skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
goto frame_err;
}
+ skb_shinfo(skb)->gso_type |= gso_type;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0;
SKB_GSO_TCPV6 = 1 << 4,
SKB_GSO_FCOE = 1 << 5,
+
+ /* This indicates at least one fragment might be overwritten
+ * (as in vmsplice(), sendfile() ...)
+ * If we need to compute a TX checksum, we'll need to copy
+ * all frags to avoid possible bad checksum
+ */
+ SKB_GSO_SHARED_FRAG = 1 << 6,
};
#if BITS_PER_LONG > 32
return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
+/**
+ * skb_has_shared_frag - can any frag be overwritten
+ * @skb: buffer to test
+ *
+ * Return true if the skb has at least one frag that might be modified
+ * by an external entity (as in vmsplice()/sendfile())
+ */
+static inline bool skb_has_shared_frag(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_type & SKB_GSO_SHARED_FRAG;
+}
+
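A hypothetical helper, to illustrate the intended use of the new
accessor (the in-tree caller is skb_checksum_help(), patched below;
assumes <linux/skbuff.h>):

/* Hypothetical: make frag data safe to read before checksumming. */
static int make_frags_private(struct sk_buff *skb)
{
	if (skb_has_shared_frag(skb))
		return __skb_linearize(skb);	/* copies frags into head */
	return 0;
}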
/**
* skb_linearize_cow - make sure skb is linear and writable
* @skb: buffer to process
return -EINVAL;
}
+ /* Before computing a checksum, we should make sure no frag could
+ * be modified by an external entity: the checksum could be wrong.
+ */
+ if (skb_has_shared_frag(skb)) {
+ ret = __skb_linearize(skb);
+ if (ret)
+ goto out;
+ }
+
offset = skb_checksum_start_offset(skb);
BUG_ON(offset >= skb_headlen(skb));
csum = skb_checksum(skb, offset, skb->len - offset, 0);
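For reference, __skb_linearize() (already in include/linux/skbuff.h)
pulls all paged data into the linear area, so once it succeeds the skb
owns a private copy of every byte the checksum will cover:

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}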
{
int pos = skb_headlen(skb);
+ skb_shinfo(skb1)->gso_type = skb_shinfo(skb)->gso_type;
+
if (len < pos) /* Split line is inside header. */
skb_split_inside_header(skb, skb1, len, pos);
else /* Second chunk has no header, nothing to copy. */
skb_copy_from_linear_data_offset(skb, offset,
skb_put(nskb, hsize), hsize);
+ skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+
while (pos < offset + len && i < nfrags) {
*frag = skb_shinfo(skb)->frags[i];
__skb_frag_ref(frag);
SKB_GSO_UDP |
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
+ SKB_GSO_SHARED_FRAG |
0)))
goto out;
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- const struct iphdr *old_iph = ip_hdr(skb);
+ const struct iphdr *old_iph;
const struct iphdr *tiph;
struct flowi4 fl4;
u8 tos;
skb_checksum_help(skb))
goto tx_error;
+ old_iph = ip_hdr(skb);
+
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
__be16 df = tiph->frag_off;
struct rtable *rt; /* Route to the other host */
struct net_device *tdev; /* Device to other host */
- const struct iphdr *old_iph = ip_hdr(skb);
+ const struct iphdr *old_iph;
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
__be32 dst = tiph->daddr;
skb_checksum_help(skb))
goto tx_error;
+ old_iph = ip_hdr(skb);
+
if (tos & 1)
tos = old_iph->tos;
skb_fill_page_desc(skb, i, page, offset, copy);
}
+ skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
+
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
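This is the path sendfile(2) hits: page-cache pages are attached to the
skb by reference, and the underlying file may be rewritten while they
sit in the write queue. A minimal user-space trigger, assuming in_fd is
an open file and out_fd a connected TCP socket:

#include <sys/sendfile.h>

/* Pages are handed to the socket by reference; a concurrent
 * write() to in_fd can race with transmit, hence the flag.
 */
ssize_t n = sendfile(out_fd, in_fd, NULL, count);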
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
SKB_GSO_TCPV6 |
+ SKB_GSO_SHARED_FRAG |
0) ||
!(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
goto out;
*/
if (!skb_shinfo(prev)->gso_size) {
skb_shinfo(prev)->gso_size = mss;
- skb_shinfo(prev)->gso_type = sk->sk_gso_type;
+ skb_shinfo(prev)->gso_type |= sk->sk_gso_type;
}
/* CHECKME: To clear or not to clear? Mimics normal skb currently */
if (skb_shinfo(skb)->gso_segs <= 1) {
skb_shinfo(skb)->gso_size = 0;
- skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_type &= SKB_GSO_SHARED_FRAG;
}
/* Difference in this won't matter, both ACKed by the same cumul. ACK */
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
+ skb_shinfo(skb)->gso_type &= SKB_GSO_SHARED_FRAG;
if (skb->len <= mss_now || !sk_can_gso(sk) ||
skb->ip_summed == CHECKSUM_NONE) {
/* Avoid the costly divide in the normal
*/
skb_shinfo(skb)->gso_segs = 1;
skb_shinfo(skb)->gso_size = 0;
- skb_shinfo(skb)->gso_type = 0;
} else {
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
skb_shinfo(skb)->gso_size = mss_now;
- skb_shinfo(skb)->gso_type = sk->sk_gso_type;
+ skb_shinfo(skb)->gso_type |= sk->sk_gso_type;
}
}
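Note the masking idiom in tcp_shifted_skb() and tcp_set_skb_tso_segs()
above: AND-ing with SKB_GSO_SHARED_FRAG clears every GSO type bit
except the shared-frag one, so resetting the segmentation state never
forgets that the frags are shared:

/* Drop stale type bits but remember frag sharing ... */
skb_shinfo(skb)->gso_type &= SKB_GSO_SHARED_FRAG;
/* ... then OR the real type back in for large skbs. */
skb_shinfo(skb)->gso_type |= sk->sk_gso_type;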
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
SKB_GSO_TCPV6 |
+ SKB_GSO_SHARED_FRAG |
0)))
goto out;