diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de574982477b02c63b3bd07998d5ea8ce34fa..da1f121203466c10bc668f84d1f511486273e292 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
        unsigned long rx_pfn_array[NET_RX_RING_SIZE];
        struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
        struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+       /* Statistics */
+       int rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
        return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
        struct iphdr *iph;
        unsigned char *th;
        int err = -EPROTO;
+       int recalculate_partial_csum = 0;
+
+       /*
+        * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+        * peers can fail to set NETRXF_csum_blank when sending a GSO
+        * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+        * recalculate the partial checksum.
+        */
+       if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+               struct netfront_info *np = netdev_priv(dev);
+               np->rx_gso_checksum_fixup++;
+               skb->ip_summed = CHECKSUM_PARTIAL;
+               recalculate_partial_csum = 1;
+       }
+
+       /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
 
        if (skb->protocol != htons(ETH_P_IP))
                goto out;
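
Background for the checksum_setup() changes in this patch: a CHECKSUM_PARTIAL skb carries two cursor fields, skb->csum_start (offset from skb->head at which checksumming begins) and skb->csum_offset (where, relative to that point, the finished 16-bit checksum must be written). That is why the function only has to select the right csum_offset per protocol and, for the buggy-GSO case, re-seed the value stored there. A minimal illustrative helper locating that field (not part of the patch; assumes a linear skb and <linux/skbuff.h>):

#include <linux/skbuff.h>

/* Sketch only: where a CHECKSUM_PARTIAL skb expects its checksum to live. */
static inline __sum16 *partial_csum_field(struct sk_buff *skb)
{
	/* csum_start is relative to skb->head, csum_offset to csum_start */
	return (__sum16 *)(skb->head + skb->csum_start + skb->csum_offset);
}
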
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
        switch (iph->protocol) {
        case IPPROTO_TCP:
                skb->csum_offset = offsetof(struct tcphdr, check);
+
+               if (recalculate_partial_csum) {
+                       struct tcphdr *tcph = (struct tcphdr *)th;
+                       tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                        skb->len - iph->ihl*4,
+                                                        IPPROTO_TCP, 0);
+               }
                break;
        case IPPROTO_UDP:
                skb->csum_offset = offsetof(struct udphdr, check);
+
+               if (recalculate_partial_csum) {
+                       struct udphdr *udph = (struct udphdr *)th;
+                       udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                        skb->len - iph->ihl*4,
+                                                        IPPROTO_UDP, 0);
+               }
                break;
        default:
                if (net_ratelimit())
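
The csum_tcpudp_magic() calls above re-seed the transport checksum with the IPv4 pseudo-header sum (source address, destination address, protocol, and L4 length), which is what a CHECKSUM_PARTIAL seed is expected to hold before the remaining payload sum is folded on top. A freestanding sketch of that pseudo-header fold, shown only to illustrate the arithmetic; all inputs are taken in host order here, while the kernel helper in <net/checksum.h> handles the real byte-order details:

#include <stdint.h>

/*
 * Illustration only (not kernel code): 16-bit one's-complement sum over
 * the IPv4 pseudo-header, the same quantity csum_tcpudp_magic() produces
 * in complemented form.
 */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t l4_len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);	/* source address     */
	sum += (daddr >> 16) + (daddr & 0xffff);	/* destination address */
	sum += proto;					/* zero byte + protocol */
	sum += l4_len;					/* TCP/UDP length       */

	while (sum >> 16)				/* fold carries to 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;
}

The patch stores ~csum_tcpudp_magic(...) rather than the helper's return value directly because csum_tcpudp_magic() returns the complemented sum, whereas the CHECKSUM_PARTIAL seed must hold the un-complemented pseudo-header sum; skb->len - iph->ihl*4 is the L4 length used for that seed.
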
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, dev);
 
-               if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       if (skb_checksum_setup(skb)) {
-                               kfree_skb(skb);
-                               packets_dropped++;
-                               dev->stats.rx_errors++;
-                               continue;
-                       }
+               if (checksum_setup(dev, skb)) {
+                       kfree_skb(skb);
+                       packets_dropped++;
+                       dev->stats.rx_errors++;
+                       continue;
                }
 
                dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
        }
 }
 
+static const struct xennet_stat {
+       char name[ETH_GSTRING_LEN];
+       u16 offset;
+} xennet_stats[] = {
+       {
+               "rx_gso_checksum_fixup",
+               offsetof(struct netfront_info, rx_gso_checksum_fixup)
+       },
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+       switch (string_set) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(xennet_stats);
+       default:
+               return -EINVAL;
+       }
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+                                    struct ethtool_stats *stats, u64 * data)
+{
+       void *np = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+               data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+                       memcpy(data + i * ETH_GSTRING_LEN,
+                              xennet_stats[i].name, ETH_GSTRING_LEN);
+               break;
+       }
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
        .set_tx_csum = ethtool_op_set_tx_csum,
        .set_sg = xennet_set_sg,
        .set_tso = xennet_set_tso,
        .get_link = ethtool_op_get_link,
+
+       .get_sset_count = xennet_get_sset_count,
+       .get_ethtool_stats = xennet_get_ethtool_stats,
+       .get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS
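
The three callbacks registered above follow ethtool's usual contract: .get_sset_count(dev, ETH_SS_STATS) tells the core how many statistics exist, .get_strings() emits one ETH_GSTRING_LEN-sized name per statistic, and .get_ethtool_stats() emits one u64 per statistic in the same order. A simplified, hypothetical sketch of a caller driving that contract (the real logic lives in net/core/ethtool.c; passing NULL for the ethtool_stats argument is a simplification this driver happens to ignore):

/* Hypothetical sketch of the ethtool stats calling pattern; not kernel code. */
static void dump_xennet_stats(struct net_device *dev)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;
	int i, n = ops->get_sset_count(dev, ETH_SS_STATS);
	u8 *names;
	u64 *vals;

	if (n <= 0)
		return;

	names = kcalloc(n, ETH_GSTRING_LEN, GFP_KERNEL);
	vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);
	if (!names || !vals)
		goto out;

	ops->get_strings(dev, ETH_SS_STATS, names);
	ops->get_ethtool_stats(dev, NULL, vals);

	for (i = 0; i < n; i++)
		pr_info("%.*s: %llu\n", ETH_GSTRING_LEN,
			(char *)(names + i * ETH_GSTRING_LEN),
			(unsigned long long)vals[i]);
out:
	kfree(names);
	kfree(vals);
}

From user space the same path is what `ethtool -S <iface>` exercises, so the new rx_gso_checksum_fixup counter becomes visible there.
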