net: Switch to using the new packet offload infrastructure
author     Vlad Yasevich <vyasevic@redhat.com>
           Thu, 15 Nov 2012 08:49:11 +0000 (08:49 +0000)
committer  David S. Miller <davem@davemloft.net>
           Thu, 15 Nov 2012 22:36:17 +0000 (17:36 -0500)
Convert to using the new GSO/GRO registration mechanism and new
packet offload structure.

Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
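
For reference: the "packet offload structure" named in the message does not appear in this diff; it was introduced by the preceding generic packet offload infrastructure patch. The sketch below shows that structure and its registration helpers as inferred from the callbacks and calls used in the hunks that follow, so treat the exact field layout and prototypes as an approximation rather than authoritative definitions.

struct packet_offload {
	__be16			 type;	/* really htons(ether_type) */
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	struct list_head	 list;	/* linked onto the global offload_base */
};

/* Registration helpers used below in af_inet.c and af_inet6.c. */
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
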
include/linux/netdevice.h
net/core/dev.c
net/ipv4/af_inet.c
net/ipv6/af_inet6.c

include/linux/netdevice.h
index d45a58db4ba3a8bf317b6810650d981926661e69..61bc8483031f180fd1bcb00972411cba97890750 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1509,12 +1509,6 @@ struct packet_type {
                                         struct net_device *,
                                         struct packet_type *,
                                         struct net_device *);
-       struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
-                                               netdev_features_t features);
-       int                     (*gso_send_check)(struct sk_buff *skb);
-       struct sk_buff          **(*gro_receive)(struct sk_buff **head,
-                                              struct sk_buff *skb);
-       int                     (*gro_complete)(struct sk_buff *skb);
        bool                    (*id_match)(struct packet_type *ptype,
                                            struct sock *sk);
        void                    *af_packet_priv;
net/core/dev.c
index 6884f8783bdd058e004cba477284cddc6214509f..cf843a256cc6feeeabdd20dd0b37332e00dc505c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2072,7 +2072,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
        netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
-       struct packet_type *ptype;
+       struct packet_offload *ptype;
        __be16 type = skb->protocol;
        int vlan_depth = ETH_HLEN;
        int err;
@@ -2101,9 +2101,8 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
        }
 
        rcu_read_lock();
-       list_for_each_entry_rcu(ptype,
-                       &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-               if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+       list_for_each_entry_rcu(ptype, &offload_base, list) {
+               if (ptype->type == type && ptype->gso_segment) {
                        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                                err = ptype->gso_send_check(skb);
                                segs = ERR_PTR(err);
@@ -3522,9 +3521,9 @@ static void flush_backlog(void *arg)
 
 static int napi_gro_complete(struct sk_buff *skb)
 {
-       struct packet_type *ptype;
+       struct packet_offload *ptype;
        __be16 type = skb->protocol;
-       struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
+       struct list_head *head = &offload_base;
        int err = -ENOENT;
 
        if (NAPI_GRO_CB(skb)->count == 1) {
@@ -3534,7 +3533,7 @@ static int napi_gro_complete(struct sk_buff *skb)
 
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
-               if (ptype->type != type || ptype->dev || !ptype->gro_complete)
+               if (ptype->type != type || !ptype->gro_complete)
                        continue;
 
                err = ptype->gro_complete(skb);
@@ -3584,9 +3583,9 @@ EXPORT_SYMBOL(napi_gro_flush);
 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct sk_buff **pp = NULL;
-       struct packet_type *ptype;
+       struct packet_offload *ptype;
        __be16 type = skb->protocol;
-       struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
+       struct list_head *head = &offload_base;
        int same_flow;
        int mac_len;
        enum gro_result ret;
@@ -3599,7 +3598,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
-               if (ptype->type != type || ptype->dev || !ptype->gro_receive)
+               if (ptype->type != type || !ptype->gro_receive)
                        continue;
 
                skb_set_network_header(skb, skb_gro_offset(skb));
net/ipv4/af_inet.c
index 766c596585631e1001bdd64c80639554c6ab7831..4c99c5fdba3f1b814b77bcde3ff0d24aef84dfbe 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1662,6 +1662,10 @@ static int ipv4_proc_init(void);
 static struct packet_type ip_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),
        .func = ip_rcv,
+};
+
+static struct packet_offload ip_packet_offload __read_mostly = {
+       .type = cpu_to_be16(ETH_P_IP),
        .gso_send_check = inet_gso_send_check,
        .gso_segment = inet_gso_segment,
        .gro_receive = inet_gro_receive,
@@ -1781,6 +1785,7 @@ static int __init inet_init(void)
 
        ipfrag_init();
 
+       dev_add_offload(&ip_packet_offload);
        dev_add_pack(&ip_packet_type);
 
        rc = 0;
net/ipv6/af_inet6.c
index a974247a9ae40bdb6c83cf5a60a6432761bd84e7..6e245177608c98d268e4420c857166b78ea95906 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -938,6 +938,10 @@ out_unlock:
 static struct packet_type ipv6_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_IPV6),
        .func = ipv6_rcv,
+};
+
+static struct packet_offload ipv6_packet_offload __read_mostly = {
+       .type = cpu_to_be16(ETH_P_IPV6),
        .gso_send_check = ipv6_gso_send_check,
        .gso_segment = ipv6_gso_segment,
        .gro_receive = ipv6_gro_receive,
@@ -946,6 +950,7 @@ static struct packet_type ipv6_packet_type __read_mostly = {
 
 static int __init ipv6_packet_init(void)
 {
+       dev_add_offload(&ipv6_packet_offload);
        dev_add_pack(&ipv6_packet_type);
        return 0;
 }
@@ -953,6 +958,7 @@ static int __init ipv6_packet_init(void)
 static void ipv6_packet_cleanup(void)
 {
        dev_remove_pack(&ipv6_packet_type);
+       dev_remove_offload(&ipv6_packet_offload);
 }
 
 static int __net_init ipv6_init_mibs(struct net *net)
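
As a usage note, any protocol that previously hung its GSO/GRO callbacks off struct packet_type now registers a separate struct packet_offload, typically from its module init/exit paths, mirroring the af_inet.c and af_inet6.c changes above. Below is a minimal sketch of that pattern; the foo_* handlers are hypothetical stand-ins for a protocol's real GSO/GRO code, and ETH_P_IP is used only as an illustrative ethertype; this is not code from this commit.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/err.h>

static int foo_gso_send_check(struct sk_buff *skb)
{
	/* Hypothetical: validate/prepare the skb before segmentation. */
	return 0;
}

static struct sk_buff *foo_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	/* Hypothetical: split a GSO skb into MTU-sized segments. */
	return ERR_PTR(-EPROTONOSUPPORT);
}

static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	/* Hypothetical: try to merge skb into a held GRO flow. */
	return NULL;
}

static int foo_gro_complete(struct sk_buff *skb)
{
	/* Hypothetical: finalize headers of a merged GRO skb. */
	return 0;
}

static struct packet_offload foo_packet_offload __read_mostly = {
	.type		= cpu_to_be16(ETH_P_IP),	/* illustrative ethertype */
	.gso_send_check	= foo_gso_send_check,
	.gso_segment	= foo_gso_segment,
	.gro_receive	= foo_gro_receive,
	.gro_complete	= foo_gro_complete,
};

static int __init foo_offload_init(void)
{
	/* Add to the global offload list walked by skb_gso_segment(),
	 * dev_gro_receive() and napi_gro_complete(). */
	dev_add_offload(&foo_packet_offload);
	return 0;
}

static void __exit foo_offload_exit(void)
{
	dev_remove_offload(&foo_packet_offload);
}

module_init(foo_offload_init);
module_exit(foo_offload_exit);
MODULE_LICENSE("GPL");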