Pileus Git - ~andy/linux/blobdiff - net/netfilter/nfnetlink_queue_core.c
netfilter: nfnetlink_queue: allow to attach expectations to conntracks
[~andy/linux] / net / netfilter / nfnetlink_queue_core.c
index 5352b2d2d5bf644cffd04ed5a571924f623f65d5..e8c9f3bb779cd6c108409911f348f73f5a1689f7 100644 (file)
 
 #define NFQNL_QMAX_DEFAULT 1024
 
+/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
+ * includes the header length. Thus, the maximum packet length that we
+ * support is 65531 bytes. We send truncated packets if the specified length
+ * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
+ * attribute to detect truncation.
+ */
+#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
+
 struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;
@@ -122,7 +130,7 @@ instance_create(struct nfnl_queue_net *q, u_int16_t queue_num,
        inst->queue_num = queue_num;
        inst->peer_portid = portid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
-       inst->copy_range = 0xffff;
+       inst->copy_range = NFQNL_MAX_COPY_RANGE;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);
@@ -272,12 +280,17 @@ nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
        skb_shinfo(to)->nr_frags = j;
 }
 
-static int nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet)
+static int
+nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
+                     bool csum_verify)
 {
        __u32 flags = 0;
 
        if (packet->ip_summed == CHECKSUM_PARTIAL)
                flags = NFQA_SKB_CSUMNOTREADY;
+       else if (csum_verify)
+               flags = NFQA_SKB_CSUM_NOTVERIFIED;
+
        if (skb_is_gso(packet))
                flags |= NFQA_SKB_GSO;
 
@@ -302,6 +315,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        struct net_device *outdev;
        struct nf_conn *ct = NULL;
        enum ip_conntrack_info uninitialized_var(ctinfo);
+       bool csum_verify;
 
        size =    nlmsg_total_size(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
@@ -319,6 +333,12 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (entskb->tstamp.tv64)
                size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 
+       if (entry->hook <= NF_INET_FORWARD ||
+          (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
+               csum_verify = !skb_csum_unnecessary(entskb);
+       else
+               csum_verify = false;
+
        outdev = entry->outdev;
 
        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
@@ -333,10 +353,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                        return NULL;
 
                data_len = ACCESS_ONCE(queue->copy_range);
-               if (data_len == 0 || data_len > entskb->len)
+               if (data_len > entskb->len)
                        data_len = entskb->len;
 
-
                if (!entskb->head_frag ||
                    skb_headlen(entskb) < L1_CACHE_BYTES ||
                    skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS)
@@ -465,10 +484,11 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
                goto nla_put_failure;
 
-       if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
+       if (cap_len > data_len &&
+           nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
                goto nla_put_failure;
 
-       if (nfqnl_put_packet_info(skb, entskb))
+       if (nfqnl_put_packet_info(skb, entskb, csum_verify))
                goto nla_put_failure;
 
        if (data_len) {
@@ -509,10 +529,6 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
        }
        spin_lock_bh(&queue->lock);
 
-       if (!queue->peer_portid) {
-               err = -EINVAL;
-               goto err_out_free_nskb;
-       }
        if (queue->queue_total >= queue->queue_maxlen) {
                if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
                        failopen = 1;
@@ -731,13 +747,8 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
 
        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
-               /* We're using struct nlattr which has 16bit nla_len. Note that
-                * nla_len includes the header length. Thus, the maximum packet
-                * length that we support is 65531 bytes. We send truncated
-                * packets if the specified length is larger than that.
-                */
-               if (range > 0xffff - NLA_HDRLEN)
-                       queue->copy_range = 0xffff - NLA_HDRLEN;
+               if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
+                       queue->copy_range = NFQNL_MAX_COPY_RANGE;
                else
                        queue->copy_range = range;
                break;
@@ -800,7 +811,7 @@ static int
 nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
 {
-       struct net_device *dev = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
@@ -848,6 +859,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
        [NFQA_CT]               = { .type = NLA_UNSPEC },
+       [NFQA_EXP]              = { .type = NLA_UNSPEC },
 };
 
 static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -976,9 +988,14 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
        if (entry == NULL)
                return -ENOENT;
 
-       rcu_read_lock();
-       if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
+       if (nfqa[NFQA_CT]) {
                ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
+               if (ct && nfqa[NFQA_EXP]) {
+                       nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
+                                           NETLINK_CB(skb).portid,
+                                           nlmsg_report(nlh));
+               }
+       }
 
        if (nfqa[NFQA_PAYLOAD]) {
                u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
@@ -991,7 +1008,6 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                if (ct)
                        nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
        }
-       rcu_read_unlock();
 
        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));