Merge remote branch 'gcl/next' into next
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index bb7d306fb44656c3070fa3c20e2ef7b0759c73fc..b95f087cd5a9c4a51525718e90ba65b56cf81b99 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
        struct ehea_port *port = netdev_priv(dev);
        struct net_device_stats *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
-       u64 hret, rx_packets, tx_packets;
+       u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
        int i;
 
        memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
 
        rx_packets = 0;
-       for (i = 0; i < port->num_def_qps; i++)
+       for (i = 0; i < port->num_def_qps; i++) {
                rx_packets += port->port_res[i].rx_packets;
+               rx_bytes   += port->port_res[i].rx_bytes;
+       }
 
        tx_packets = 0;
-       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                tx_packets += port->port_res[i].tx_packets;
+               tx_bytes   += port->port_res[i].tx_bytes;
+       }
 
        stats->tx_packets = tx_packets;
        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;
-       stats->rx_bytes = cb2->rxo;
-       stats->tx_bytes = cb2->txo;
+       stats->rx_bytes = rx_bytes;
+       stats->tx_bytes = tx_bytes;
        stats->rx_packets = rx_packets;
 
 out_herr:
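
This hunk replaces the firmware byte counters (cb2->rxo, cb2->txo) with software totals summed across the port's queues, so the reported byte counts match what the driver actually processed. A minimal userspace sketch of the same aggregation pattern, with a hypothetical queue_res standing in for struct ehea_port_res:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct ehea_port_res: one set of
 * software counters per receive/transmit queue. */
struct queue_res {
	uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes;
};

int main(void)
{
	struct queue_res q[3] = {
		{ 10, 15000, 4, 2500 },
		{  7,  9000, 2, 1200 },
		{  3,  4000, 1,  600 },
	};
	uint64_t rx_bytes = 0, tx_bytes = 0;
	int i;

	/* Same pattern as the reworked ehea_get_stats(): sum the
	 * per-queue software counters instead of reading a single
	 * firmware total. */
	for (i = 0; i < 3; i++) {
		rx_bytes += q[i].rx_bytes;
		tx_bytes += q[i].tx_bytes;
	}
	printf("rx_bytes=%llu tx_bytes=%llu\n",
	       (unsigned long long)rx_bytes, (unsigned long long)tx_bytes);
	return 0;
}
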
@@ -396,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
+                               ehea_info("Unable to allocate enough skbs for the array");
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                break;
                        }
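
The added ehea_info() makes RQ1 refill failures visible. The shortfall handling already exists: os_skbs records how many buffers are still owed so a later refill pass can retry. A sketch of that bookkeeping, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical sketch of the deferred-refill bookkeeping around the
 * new message: on allocation failure, log once and remember the
 * shortfall (like pr->rq1_skba.os_skbs) so the next pass can retry. */
static int refill(void **slots, int fill_wqes, int *os_skbs)
{
	int i;

	for (i = 0; i < fill_wqes; i++) {
		slots[i] = malloc(2048);      /* stands in for netdev_alloc_skb() */
		if (!slots[i]) {
			fprintf(stderr, "refill: unable to allocate skb\n");
			*os_skbs = fill_wqes - i; /* buffers still owed */
			break;
		}
	}
	return i;                             /* buffers actually filled */
}

int main(void)
{
	void *ring[8] = { 0 };
	int owed = 0;
	int n = refill(ring, 8, &owed);
	printf("filled=%d owed=%d\n", n, owed);
	return 0;
}
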
@@ -418,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
        struct net_device *dev = pr->port->netdev;
        int i;
 
-       for (i = 0; i < pr->rq1_skba.len; i++) {
+       if (nr_rq1a > pr->rq1_skba.len) {
+               ehea_error("nr_rq1a exceeds the skb array length");
+               return;
+       }
+
+       for (i = 0; i < nr_rq1a; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
-               if (!skb_arr_rq1[i])
+               if (!skb_arr_rq1[i]) {
+                       ehea_info("Not enough memory to allocate skb array");
                        break;
+               }
        }
        /* Ring doorbell */
-       ehea_update_rq1a(pr->qp, nr_rq1a);
+       ehea_update_rq1a(pr->qp, i);
 }
 
 static int ehea_refill_rq_def(struct ehea_port_res *pr,
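
Two fixes land in ehea_init_fill_rq1(): the loop now allocates nr_rq1a buffers (bounded by the array length) instead of always walking the whole array, and the doorbell is rung with i, the count that actually succeeded, rather than the requested nr_rq1a. Advertising buffers that were never allocated would hand the hardware uninitialised ring slots. A compilable sketch of the corrected flow, with simplified stand-in types:

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the fixed ehea_init_fill_rq1(): bound the request,
 * stop at the first failed allocation, and advertise only the
 * buffers that really exist. */
static void init_fill(void **arr, int len, int nr_rq1a)
{
	int i;

	if (nr_rq1a > len) {
		fprintf(stderr, "nr_rq1a exceeds skb array length\n");
		return;
	}
	for (i = 0; i < nr_rq1a; i++) {
		arr[i] = malloc(2048);  /* stands in for netdev_alloc_skb() */
		if (!arr[i])
			break;
	}
	/* Like ehea_update_rq1a(pr->qp, i): ring the doorbell with
	 * the achieved count, not the requested one. */
	printf("doorbell: %d buffers posted\n", i);
}

int main(void)
{
	void *ring[16] = { 0 };
	init_fill(ring, 16, 8);
	return 0;
}
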
@@ -671,7 +683,7 @@ static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
        int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
                              pr->port->vgrp);
 
-       if (use_lro) {
+       if (skb->dev->features & NETIF_F_LRO) {
                if (vlan_extracted)
                        lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
                                                     pr->port->vgrp,
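
Here the LRO decision moves from the load-time use_lro module parameter to the per-device NETIF_F_LRO feature flag, which is what ethtool -K toggles at runtime. A small userspace model of the gating change (the flag value and types are simplified stand-ins):

#include <stdio.h>
#include <stdint.h>

#define NETIF_F_LRO (1u << 15)          /* illustrative bit value only */

struct net_device { uint32_t features; };

/* Models the ehea_proc_skb() change: consult the device's runtime
 * feature flags rather than a module-wide variable. */
static void deliver(const struct net_device *dev)
{
	if (dev->features & NETIF_F_LRO)
		puts("aggregate via LRO");
	else
		puts("deliver skb directly");
}

int main(void)
{
	struct net_device dev = { .features = NETIF_F_LRO };
	deliver(&dev);
	dev.features &= ~NETIF_F_LRO;   /* what `ethtool -K ethX lro off` does */
	deliver(&dev);
	return 0;
}
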
@@ -703,6 +715,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
+       u64 processed_bytes = 0;
        int wqe_index, last_wqe_index, rq, port_reset;
 
        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -730,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev,
 
                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
-                                       if (!skb)
+                                       if (!skb) {
+                                               ehea_info("Not enough memory to allocate skb");
                                                break;
+                                       }
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
@@ -760,6 +775,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
                                processed_rq3++;
                        }
 
+                       processed_bytes += skb->len;
                        ehea_proc_skb(pr, cqe, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
@@ -771,10 +787,11 @@ static int ehea_proc_rwqes(struct net_device *dev,
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }
-       if (use_lro)
+       if (dev->features & NETIF_F_LRO)
                lro_flush_all(&pr->lro_mgr);
 
        pr->rx_packets += processed;
+       pr->rx_bytes += processed_bytes;
 
        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
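
The receive path now counts bytes as well as packets: processed_bytes is totalled in a local inside the poll loop and folded into pr->rx_bytes once per poll, so the hot loop touches the shared counter a single time. A sketch of the pattern:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int skb_len[] = { 60, 1514, 1514, 42 };  /* stand-in lengths */
	uint64_t rx_bytes = 0;                            /* like pr->rx_bytes */
	uint64_t processed_bytes = 0;
	int processed = 0, i;

	for (i = 0; i < 4; i++) {       /* the NAPI poll loop */
		processed_bytes += skb_len[i];
		processed++;
	}
	rx_bytes += processed_bytes;    /* one shared-counter update per poll */
	printf("processed=%d rx_bytes=%llu\n",
	       processed, (unsigned long long)rx_bytes);
	return 0;
}
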
@@ -1509,9 +1526,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
        enum ehea_eq_type eq_type = EHEA_EQ;
        struct ehea_qp_init_attr *init_attr = NULL;
        int ret = -EIO;
+       u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+       tx_bytes = pr->tx_bytes;
+       tx_packets = pr->tx_packets;
+       rx_bytes = pr->rx_bytes;
+       rx_packets = pr->rx_packets;
 
        memset(pr, 0, sizeof(struct ehea_port_res));
 
+       pr->tx_bytes = tx_bytes;
+       pr->tx_packets = tx_packets;
+       pr->rx_bytes = rx_bytes;
+       pr->rx_packets = rx_packets;
+
        pr->port = port;
        spin_lock_init(&pr->xmit_lock);
        spin_lock_init(&pr->netif_queue);
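
ehea_init_port_res() zeroes the whole ehea_port_res on every port reset, which used to wipe the traffic counters; this hunk saves the four counters before the memset and restores them afterwards (note the tx_bytes restore must use the saved tx value, not the rx one). A compilable sketch of the save/restore pattern:

#include <string.h>
#include <stdint.h>
#include <stdio.h>

struct port_res {            /* trimmed stand-in for ehea_port_res */
	uint64_t rx_bytes, rx_packets, tx_bytes, tx_packets;
	/* ... many other fields that the reset must zero ... */
};

static void reinit(struct port_res *pr)
{
	uint64_t rxb = pr->rx_bytes, rxp = pr->rx_packets;
	uint64_t txb = pr->tx_bytes, txp = pr->tx_packets;

	memset(pr, 0, sizeof(*pr));  /* reset everything... */

	pr->rx_bytes = rxb;          /* ...then put the counters back */
	pr->rx_packets = rxp;
	pr->tx_bytes = txb;          /* tx restored from tx, not rx */
	pr->tx_packets = txp;
}

int main(void)
{
	struct port_res pr = { .rx_bytes = 123, .tx_bytes = 456 };
	reinit(&pr);
	printf("rx=%llu tx=%llu\n", (unsigned long long)pr.rx_bytes,
	       (unsigned long long)pr.tx_bytes);
	return 0;
}
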
@@ -2249,6 +2277,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        memset(swqe, 0, SWQE_HEADER_SIZE);
        atomic_dec(&pr->swqe_avail);
 
+       if (vlan_tx_tag_present(skb)) {
+               swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+               swqe->vlan_tag = vlan_tx_tag_get(skb);
+       }
+
+       pr->tx_packets++;
+       pr->tx_bytes += skb->len;
+
        if (skb->len <= SWQE3_MAX_IMM) {
                u32 sig_iv = port->sig_comp_iv;
                u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2315,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        pr->swqe_id_counter += 1;
 
-       if (vlan_tx_tag_present(skb)) {
-               swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
-               swqe->vlan_tag = vlan_tx_tag_get(skb);
-       }
-
        if (netif_msg_tx_queued(port)) {
                ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
                ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2326,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        ehea_post_swqe(pr->qp, swqe);
-       pr->tx_packets++;
 
        if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
                spin_lock_irqsave(&pr->netif_queue, flags);
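
These three hunks reorder ehea_start_xmit(): the VLAN tag is written into the swqe right after it is zeroed, and the tx counters are bumped before the descriptor-building branch rather than after the post. A plausible reading (an assumption, the diff doesn't state it) is that the small-packet immediate-data path can copy the payload into the descriptor and free the skb, so skb->len has to be read before that branch runs. A toy illustration:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct sk_buff { unsigned int len; };

static uint64_t tx_packets, tx_bytes;

/* Toy model of the new ordering in ehea_start_xmit(): account the
 * packet while the skb is certainly alive, because the small-packet
 * path may copy the payload into the descriptor and free the skb. */
static void xmit(struct sk_buff *skb)
{
	tx_packets++;
	tx_bytes += skb->len;   /* must happen before the skb can be freed */

	if (skb->len <= 224) {  /* illustrative threshold, like SWQE3_MAX_IMM */
		/* payload copied into the work queue entry... */
		free(skb);      /* ...and the skb released immediately */
	}
	/* larger packets stay referenced until hardware completion;
	 * descriptor posted to hardware here */
}

int main(void)
{
	struct sk_buff *skb = malloc(sizeof(*skb));
	skb->len = 60;
	xmit(skb);
	printf("tx_packets=%llu tx_bytes=%llu\n",
	       (unsigned long long)tx_packets, (unsigned long long)tx_bytes);
	return 0;
}
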
@@ -3248,6 +3278,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
                      | NETIF_F_LLTX;
        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
+       if (use_lro)
+               dev->features |= NETIF_F_LRO;
+
        INIT_WORK(&port->reset_task, ehea_reset_port);
 
        ret = register_netdev(dev);
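
Finally, the probe path seeds dev->features with NETIF_F_LRO when the use_lro module parameter asks for it. From here on the flag, not the parameter, is the source of truth, which is what lets the per-device checks earlier in this diff (and ethtool) control LRO at runtime. The probe-time half, in the same simplified model as the NETIF_F_LRO sketch above:

#include <stdint.h>

#define NETIF_F_LRO (1u << 15)          /* illustrative bit, as above */

struct net_device { uint32_t features; };

static int use_lro = 1;                 /* module_param in the real driver */

/* The parameter seeds the feature flag once, before registration;
 * thereafter the flag is the only thing the fast path consults. */
static void setup_port(struct net_device *dev)
{
	if (use_lro)
		dev->features |= NETIF_F_LRO;
	/* register_netdev(dev) would follow here */
}

int main(void)
{
	struct net_device dev = { 0 };
	setup_port(&dev);
	return (dev.features & NETIF_F_LRO) ? 0 : 1;
}
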