Pileus Git - ~andy/linux/commitdiff
ixgbevf: add BP_EXTENDED_STATS for CONFIG_NET_RX_BUSY_POLL
authorJacob Keller <jacob.e.keller@intel.com>
Sat, 21 Sep 2013 06:24:25 +0000 (06:24 +0000)
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>
Tue, 29 Oct 2013 11:15:11 +0000 (04:15 -0700)
This patch adds extended statistics similar to those in the ixgbe driver. These
statistics track how often the busy-poll routine yields, as well as how many
packets it cleans or misses.

Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

index 21adb1bc1706b4e80a59f1f40e98be44b238b05e..44f7c42d8f58b322165acd8317abc94d10e08fbf 100644 (file)
@@ -74,6 +74,14 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
                                                zero_base)},
        {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
                                              zero_base)},
+#ifdef BP_EXTENDED_STATS
+       {"rx_bp_poll_yield", IXGBEVF_STAT(bp_rx_yields, zero_base, zero_base)},
+       {"rx_bp_cleaned", IXGBEVF_STAT(bp_rx_cleaned, zero_base, zero_base)},
+       {"rx_bp_misses", IXGBEVF_STAT(bp_rx_missed, zero_base, zero_base)},
+       {"tx_bp_napi_yield", IXGBEVF_STAT(bp_tx_yields, zero_base, zero_base)},
+       {"tx_bp_cleaned", IXGBEVF_STAT(bp_tx_cleaned, zero_base, zero_base)},
+       {"tx_bp_misses", IXGBEVF_STAT(bp_tx_missed, zero_base, zero_base)},
+#endif
 };
 
 #define IXGBE_QUEUE_STATS_LEN 0
@@ -391,6 +399,30 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        int i;
+#ifdef BP_EXTENDED_STATS
+       u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+           tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               rx_yields += adapter->rx_ring[i].bp_yields;
+               rx_cleaned += adapter->rx_ring[i].bp_cleaned;
+               rx_missed += adapter->rx_ring[i].bp_misses;
+       }
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               tx_yields += adapter->tx_ring[i].bp_yields;
+               tx_cleaned += adapter->tx_ring[i].bp_cleaned;
+               tx_missed += adapter->tx_ring[i].bp_misses;
+       }
+
+       /* aggregate per-ring busy-poll counters into the adapter totals
+        * exposed through ethtool (see ixgbe_gstrings_stats[])
+        */
+       adapter->bp_rx_yields = rx_yields;
+       adapter->bp_rx_cleaned = rx_cleaned;
+       adapter->bp_rx_missed = rx_missed;
+
+       adapter->bp_tx_yields = tx_yields;
+       adapter->bp_tx_cleaned = tx_cleaned;
+       adapter->bp_tx_missed = tx_missed;
+#endif
 
        ixgbevf_update_stats(adapter);
        for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
index 85c7560e237b54c16f1b8c7ac992c1d7a0c48232..78e4c510bd75ed8c9a5839ae170578ee3cb1136b 100644 (file)
@@ -40,6 +40,7 @@
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 #include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
 #endif
 
 /* wrapper around a pointer to a socket buffer,
@@ -80,6 +81,11 @@ struct ixgbevf_ring {
        struct u64_stats_sync   syncp;
        u64 hw_csum_rx_error;
        u64 hw_csum_rx_good;
+#ifdef BP_EXTENDED_STATS
+       u64 bp_yields;
+       u64 bp_misses;
+       u64 bp_cleaned;
+#endif
 
        u16 head;
        u16 tail;
@@ -181,6 +187,9 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
                WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
                q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
                rc = false;
+#ifdef BP_EXTENDED_STATS
+               q_vector->tx.ring->bp_yields++;
+#endif
        } else {
                /* we don't care if someone yielded */
                q_vector->state = IXGBEVF_QV_STATE_NAPI;
@@ -213,6 +222,9 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
        if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
                q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
                rc = false;
+#ifdef BP_EXTENDED_STATS
+               q_vector->rx.ring->bp_yields++;
+#endif
        } else {
                /* preserve yield marks */
                q_vector->state |= IXGBEVF_QV_STATE_POLL;
@@ -358,6 +370,16 @@ struct ixgbevf_adapter {
        unsigned int tx_ring_count;
        unsigned int rx_ring_count;
 
+#ifdef BP_EXTENDED_STATS
+       u64 bp_rx_yields;
+       u64 bp_rx_cleaned;
+       u64 bp_rx_missed;
+
+       u64 bp_tx_yields;
+       u64 bp_tx_cleaned;
+       u64 bp_tx_missed;
+#endif
+
        u32 link_speed;
        bool link_up;
 
index bc03a2ec5c0aef221147301ddba591e0b2346b32..b16b694951b842743c30e88f5218ce879589161f 100644 (file)
@@ -648,6 +648,12 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
 
        ixgbevf_for_each_ring(ring, q_vector->rx) {
                found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+               if (found)
+                       ring->bp_cleaned += found;
+               else
+                       ring->bp_misses++;
+#endif
                if (found)
                        break;
        }