net: Unmap fragment page once iterator is done
author     Wedson Almeida Filho <wedsonaf@gmail.com>
           Mon, 24 Jun 2013 06:33:48 +0000 (23:33 -0700)
committer  David S. Miller <davem@davemloft.net>
           Mon, 24 Jun 2013 08:46:01 +0000 (01:46 -0700)
Callers of skb_seq_read() are currently forced to call skb_abort_seq_read()
even when they consume all the data, because the last call to skb_seq_read()
(the one that returns 0 to indicate the end) fails to unmap the last fragment
page.

With this patch, callers can traverse the SKB data by calling
skb_prepare_seq_read() once and then calling skb_seq_read() repeatedly, as
originally intended (and documented in the original commit 677e90eda); they
only need to call skb_abort_seq_read() if the sequential read is actually
aborted.

Signed-off-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
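
The calling pattern this enables, as a minimal sketch (illustrative code, not
part of the patch; example_walk_skb() and must_stop_early() are hypothetical
names, and the helper merely stands in for caller-specific logic): after this
change, the final 0 return from skb_seq_read() releases the last mapped
fragment, so skb_abort_seq_read() is only needed on early exit.

#include <linux/skbuff.h>

/* Hypothetical predicate standing in for caller-specific logic. */
static bool must_stop_early(const u8 *data, unsigned int len)
{
	return false;
}

static void example_walk_skb(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);

	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		if (must_stop_early(data, len)) {
			/* Aborting mid-read: the iterator may still hold a mapping. */
			skb_abort_seq_read(&st);
			return;
		}
		consumed += len;
	}
	/* len == 0: with this patch the last fragment is already unmapped. */
}
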
drivers/scsi/libiscsi_tcp.c
net/batman-adv/main.c
net/core/skbuff.c

diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 552e8a2b6f5f35da246bd06f44fb55fbda5f6ce4..448eae850b9c9c3d12ba2302ac2a18c843c70f87 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -906,7 +906,6 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
                        ISCSI_DBG_TCP(conn, "no more data avail. Consumed %d\n",
                                      consumed);
                        *status = ISCSI_TCP_SKB_DONE;
-                       skb_abort_seq_read(&seq);
                        goto skb_done;
                }
                BUG_ON(segment->copied >= segment->size);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 51aafd669cbbd6a7714f9ab075f647313c8234d5..08125f3f6064ddf42e63c872a43b8f2e68a9f941 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -473,7 +473,6 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
                crc = crc32c(crc, data, len);
                consumed += len;
        }
-       skb_abort_seq_read(&st);
 
        return htonl(crc);
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index edf37578e21e61aedeefd6052cd5096b25e3a6d4..9f73eca29fbe5156303f340664324bc1940a3c0c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2541,8 +2541,13 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
        unsigned int block_limit, abs_offset = consumed + st->lower_offset;
        skb_frag_t *frag;
 
-       if (unlikely(abs_offset >= st->upper_offset))
+       if (unlikely(abs_offset >= st->upper_offset)) {
+               if (st->frag_data) {
+                       kunmap_atomic(st->frag_data);
+                       st->frag_data = NULL;
+               }
                return 0;
+       }
 
 next_skb:
        block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
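
For context (not part of the patch): the unmap added above on the end-of-data
path mirrors the cleanup that skb_abort_seq_read() performs, which is why
callers could previously compensate by always calling it. Shown approximately,
that helper is essentially:

void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_atomic(st->frag_data);
}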