1 /*
2  * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33
34 #include <linux/etherdevice.h>
35 #include <linux/tcp.h>
36 #include <linux/if_vlan.h>
37 #include <linux/delay.h>
38 #include <linux/slab.h>
39 #include <linux/hash.h>
40 #include <net/ip.h>
41
42 #include <linux/mlx4/driver.h>
43 #include <linux/mlx4/device.h>
44 #include <linux/mlx4/cmd.h>
45 #include <linux/mlx4/cq.h>
46
47 #include "mlx4_en.h"
48 #include "en_port.h"
49
50 int mlx4_en_setup_tc(struct net_device *dev, u8 up)
51 {
52         struct mlx4_en_priv *priv = netdev_priv(dev);
53         int i;
54         unsigned int offset = 0;
55
56         if (up && up != MLX4_EN_NUM_UP)
57                 return -EINVAL;
58
59         netdev_set_num_tc(dev, up);
60
61         /* Partition Tx queues evenly among UPs */
62         for (i = 0; i < up; i++) {
63                 netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
64                 offset += priv->num_tx_rings_p_up;
65         }
66
67         return 0;
68 }
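/* Illustrative mapping (example values, not taken from this file): with
 * priv->num_tx_rings_p_up == 8 and up == 8, the loop above assigns TX
 * queues 0-7 to UP 0, 8-15 to UP 1, ... 56-63 to UP 7, i.e. each user
 * priority gets an equal, contiguous block of TX rings.
 */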
69
70 #ifdef CONFIG_RFS_ACCEL
71
72 struct mlx4_en_filter {
73         struct list_head next;
74         struct work_struct work;
75
76         __be32 src_ip;
77         __be32 dst_ip;
78         __be16 src_port;
79         __be16 dst_port;
80
81         int rxq_index;
82         struct mlx4_en_priv *priv;
83         u32 flow_id;                    /* RFS infrastructure id */
84         int id;                         /* mlx4_en driver id */
85         u64 reg_id;                     /* Flow steering API id */
86         u8 activated;                   /* Used to prevent expiry before filter
87                                          * is attached
88                                          */
89         struct hlist_node filter_chain;
90 };
91
92 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
93
94 static void mlx4_en_filter_work(struct work_struct *work)
95 {
96         struct mlx4_en_filter *filter = container_of(work,
97                                                      struct mlx4_en_filter,
98                                                      work);
99         struct mlx4_en_priv *priv = filter->priv;
100         struct mlx4_spec_list spec_tcp = {
101                 .id = MLX4_NET_TRANS_RULE_ID_TCP,
102                 {
103                         .tcp_udp = {
104                                 .dst_port = filter->dst_port,
105                                 .dst_port_msk = (__force __be16)-1,
106                                 .src_port = filter->src_port,
107                                 .src_port_msk = (__force __be16)-1,
108                         },
109                 },
110         };
111         struct mlx4_spec_list spec_ip = {
112                 .id = MLX4_NET_TRANS_RULE_ID_IPV4,
113                 {
114                         .ipv4 = {
115                                 .dst_ip = filter->dst_ip,
116                                 .dst_ip_msk = (__force __be32)-1,
117                                 .src_ip = filter->src_ip,
118                                 .src_ip_msk = (__force __be32)-1,
119                         },
120                 },
121         };
122         struct mlx4_spec_list spec_eth = {
123                 .id = MLX4_NET_TRANS_RULE_ID_ETH,
124         };
125         struct mlx4_net_trans_rule rule = {
126                 .list = LIST_HEAD_INIT(rule.list),
127                 .queue_mode = MLX4_NET_TRANS_Q_LIFO,
128                 .exclusive = 1,
129                 .allow_loopback = 1,
130                 .promisc_mode = MLX4_FS_PROMISC_NONE,
131                 .port = priv->port,
132                 .priority = MLX4_DOMAIN_RFS,
133         };
134         int rc;
135         __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
136
137         list_add_tail(&spec_eth.list, &rule.list);
138         list_add_tail(&spec_ip.list, &rule.list);
139         list_add_tail(&spec_tcp.list, &rule.list);
140
141         rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
142         memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
143         memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
144
145         filter->activated = 0;
146
147         if (filter->reg_id) {
148                 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
149                 if (rc && rc != -ENOENT)
150                         en_err(priv, "Error detaching flow. rc = %d\n", rc);
151         }
152
153         rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
154         if (rc)
155                 en_err(priv, "Error attaching flow. err = %d\n", rc);
156
157         mlx4_en_filter_rfs_expire(priv);
158
159         filter->activated = 1;
160 }
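/* The rule assembled above stacks three specs: L2 (the port's DMAC),
 * IPv4 (exact source/destination address) and TCP (exact source/destination
 * port), all steered to the RX queue's QP. Any previously registered rule
 * for this filter is detached first; -ENOENT on detach is tolerated because
 * the old rule may already be gone.
 */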
161
162 static inline struct hlist_head *
163 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
164                    __be16 src_port, __be16 dst_port)
165 {
166         unsigned long l;
167         int bucket_idx;
168
169         l = (__force unsigned long)src_port |
170             ((__force unsigned long)dst_port << 2);
171         l ^= (__force unsigned long)(src_ip ^ dst_ip);
172
173         bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
174
175         return &priv->filter_hash[bucket_idx];
176 }
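/* The 4-tuple is folded into a single unsigned long (ports OR'ed together,
 * then XOR'ed with src_ip ^ dst_ip) and passed to hash_long(), which keeps
 * MLX4_EN_FILTER_HASH_SHIFT bits for the bucket index. Collisions are
 * resolved by the per-bucket hlist walk in mlx4_en_filter_find() below.
 */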
177
178 static struct mlx4_en_filter *
179 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
180                      __be32 dst_ip, __be16 src_port, __be16 dst_port,
181                      u32 flow_id)
182 {
183         struct mlx4_en_filter *filter = NULL;
184
185         filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
186         if (!filter)
187                 return NULL;
188
189         filter->priv = priv;
190         filter->rxq_index = rxq_index;
191         INIT_WORK(&filter->work, mlx4_en_filter_work);
192
193         filter->src_ip = src_ip;
194         filter->dst_ip = dst_ip;
195         filter->src_port = src_port;
196         filter->dst_port = dst_port;
197
198         filter->flow_id = flow_id;
199
200         filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
201
202         list_add_tail(&filter->next, &priv->filters);
203         hlist_add_head(&filter->filter_chain,
204                        filter_hash_bucket(priv, src_ip, dst_ip, src_port,
205                                           dst_port));
206
207         return filter;
208 }
209
210 static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
211 {
212         struct mlx4_en_priv *priv = filter->priv;
213         int rc;
214
215         list_del(&filter->next);
216
217         rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
218         if (rc && rc != -ENOENT)
219                 en_err(priv, "Error detaching flow. rc = %d\n", rc);
220
221         kfree(filter);
222 }
223
224 static inline struct mlx4_en_filter *
225 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
226                     __be16 src_port, __be16 dst_port)
227 {
228         struct hlist_node *elem;
229         struct mlx4_en_filter *filter;
230         struct mlx4_en_filter *ret = NULL;
231
232         hlist_for_each_entry(filter, elem,
233                              filter_hash_bucket(priv, src_ip, dst_ip,
234                                                 src_port, dst_port),
235                              filter_chain) {
236                 if (filter->src_ip == src_ip &&
237                     filter->dst_ip == dst_ip &&
238                     filter->src_port == src_port &&
239                     filter->dst_port == dst_port) {
240                         ret = filter;
241                         break;
242                 }
243         }
244
245         return ret;
246 }
247
248 static int
249 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
250                    u16 rxq_index, u32 flow_id)
251 {
252         struct mlx4_en_priv *priv = netdev_priv(net_dev);
253         struct mlx4_en_filter *filter;
254         const struct iphdr *ip;
255         const __be16 *ports;
256         __be32 src_ip;
257         __be32 dst_ip;
258         __be16 src_port;
259         __be16 dst_port;
260         int nhoff = skb_network_offset(skb);
261         int ret = 0;
262
263         if (skb->protocol != htons(ETH_P_IP))
264                 return -EPROTONOSUPPORT;
265
266         ip = (const struct iphdr *)(skb->data + nhoff);
267         if (ip_is_fragment(ip))
268                 return -EPROTONOSUPPORT;
269
270         ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
271
272         src_ip = ip->saddr;
273         dst_ip = ip->daddr;
274         src_port = ports[0];
275         dst_port = ports[1];
276
277         if (ip->protocol != IPPROTO_TCP)
278                 return -EPROTONOSUPPORT;
279
280         spin_lock_bh(&priv->filters_lock);
281         filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
282         if (filter) {
283                 if (filter->rxq_index == rxq_index)
284                         goto out;
285
286                 filter->rxq_index = rxq_index;
287         } else {
288                 filter = mlx4_en_filter_alloc(priv, rxq_index,
289                                               src_ip, dst_ip,
290                                               src_port, dst_port, flow_id);
291                 if (!filter) {
292                         ret = -ENOMEM;
293                         goto err;
294                 }
295         }
296
297         queue_work(priv->mdev->workqueue, &filter->work);
298
299 out:
300         ret = filter->id;
301 err:
302         spin_unlock_bh(&priv->filters_lock);
303
304         return ret;
305 }
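/* Sketch of the RFS contract this routine is written against (assuming it
 * is wired up as the netdev's ndo_rx_flow_steer callback): it returns the
 * driver-assigned filter id on success, which the stack later echoes back
 * through rps_may_expire_flow(), or a negative errno such as
 * -EPROTONOSUPPORT for non-TCP/IPv4 traffic. The hardware rule itself is
 * programmed asynchronously from mlx4_en_filter_work().
 */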
306
307 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
308                              struct mlx4_en_rx_ring *rx_ring)
309 {
310         struct mlx4_en_filter *filter, *tmp;
311         LIST_HEAD(del_list);
312
313         spin_lock_bh(&priv->filters_lock);
314         list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
315                 list_move(&filter->next, &del_list);
316                 hlist_del(&filter->filter_chain);
317         }
318         spin_unlock_bh(&priv->filters_lock);
319
320         list_for_each_entry_safe(filter, tmp, &del_list, next) {
321                 cancel_work_sync(&filter->work);
322                 mlx4_en_filter_free(filter);
323         }
324 }
325
326 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
327 {
328         struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
329         LIST_HEAD(del_list);
330         int i = 0;
331
332         spin_lock_bh(&priv->filters_lock);
333         list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
334                 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
335                         break;
336
337                 if (filter->activated &&
338                     !work_pending(&filter->work) &&
339                     rps_may_expire_flow(priv->dev,
340                                         filter->rxq_index, filter->flow_id,
341                                         filter->id)) {
342                         list_move(&filter->next, &del_list);
343                         hlist_del(&filter->filter_chain);
344                 } else
345                         last_filter = filter;
346
347                 i++;
348         }
349
350         if (last_filter && (&last_filter->next != priv->filters.next))
351                 list_move(&priv->filters, &last_filter->next);
352
353         spin_unlock_bh(&priv->filters_lock);
354
355         list_for_each_entry_safe(filter, tmp, &del_list, next)
356                 mlx4_en_filter_free(filter);
357 }
358 #endif
359
360 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
361 {
362         struct mlx4_en_priv *priv = netdev_priv(dev);
363         struct mlx4_en_dev *mdev = priv->mdev;
364         int err;
365         int idx;
366
367         en_dbg(HW, priv, "adding VLAN:%d\n", vid);
368
369         set_bit(vid, priv->active_vlans);
370
371         /* Add VID to port VLAN filter */
372         mutex_lock(&mdev->state_lock);
373         if (mdev->device_up && priv->port_up) {
374                 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
375                 if (err)
376                         en_err(priv, "Failed configuring VLAN filter\n");
377         }
378         if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
379                 en_err(priv, "failed adding vlan %d\n", vid);
380         mutex_unlock(&mdev->state_lock);
381
382         return 0;
383 }
384
385 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
386 {
387         struct mlx4_en_priv *priv = netdev_priv(dev);
388         struct mlx4_en_dev *mdev = priv->mdev;
389         int err;
390         int idx;
391
392         en_dbg(HW, priv, "Killing VID:%d\n", vid);
393
394         clear_bit(vid, priv->active_vlans);
395
396         /* Remove VID from port VLAN filter */
397         mutex_lock(&mdev->state_lock);
398         if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
399                 mlx4_unregister_vlan(mdev->dev, priv->port, idx);
400         else
401                 en_err(priv, "could not find vid %d in cache\n", vid);
402
403         if (mdev->device_up && priv->port_up) {
404                 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
405                 if (err)
406                         en_err(priv, "Failed configuring VLAN filter\n");
407         }
408         mutex_unlock(&mdev->state_lock);
409
410         return 0;
411 }
412
413 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
414 {
415         int i;
416         for (i = ETH_ALEN - 1; i >= 0; --i) {
417                 dst_mac[i] = src_mac & 0xff;
418                 src_mac >>= 8;
419         }
420         memset(&dst_mac[ETH_ALEN], 0, 2);
421 }
422
423 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
424                                 unsigned char *mac, int *qpn, u64 *reg_id)
425 {
426         struct mlx4_en_dev *mdev = priv->mdev;
427         struct mlx4_dev *dev = mdev->dev;
428         int err;
429
430         switch (dev->caps.steering_mode) {
431         case MLX4_STEERING_MODE_B0: {
432                 struct mlx4_qp qp;
433                 u8 gid[16] = {0};
434
435                 qp.qpn = *qpn;
436                 memcpy(&gid[10], mac, ETH_ALEN);
437                 gid[5] = priv->port;
438
439                 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
440                 break;
441         }
442         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
443                 struct mlx4_spec_list spec_eth = { {NULL} };
444                 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
445
446                 struct mlx4_net_trans_rule rule = {
447                         .queue_mode = MLX4_NET_TRANS_Q_FIFO,
448                         .exclusive = 0,
449                         .allow_loopback = 1,
450                         .promisc_mode = MLX4_FS_PROMISC_NONE,
451                         .priority = MLX4_DOMAIN_NIC,
452                 };
453
454                 rule.port = priv->port;
455                 rule.qpn = *qpn;
456                 INIT_LIST_HEAD(&rule.list);
457
458                 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
459                 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
460                 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
461                 list_add_tail(&spec_eth.list, &rule.list);
462
463                 err = mlx4_flow_attach(dev, &rule, reg_id);
464                 break;
465         }
466         default:
467                 return -EINVAL;
468         }
469         if (err)
470                 en_warn(priv, "Failed Attaching Unicast\n");
471
472         return err;
473 }
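/* Illustrative GID layout used in B0 mode above: the unicast MAC occupies
 * gid[10..15] and gid[5] carries the port number, with the remaining bytes
 * left zero. In device-managed mode the same MAC is expressed instead as
 * an L2 spec on a mlx4_net_trans_rule.
 */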
474
475 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
476                                      unsigned char *mac, int qpn, u64 reg_id)
477 {
478         struct mlx4_en_dev *mdev = priv->mdev;
479         struct mlx4_dev *dev = mdev->dev;
480
481         switch (dev->caps.steering_mode) {
482         case MLX4_STEERING_MODE_B0: {
483                 struct mlx4_qp qp;
484                 u8 gid[16] = {0};
485
486                 qp.qpn = qpn;
487                 memcpy(&gid[10], mac, ETH_ALEN);
488                 gid[5] = priv->port;
489
490                 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
491                 break;
492         }
493         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
494                 mlx4_flow_detach(dev, reg_id);
495                 break;
496         }
497         default:
498                 en_err(priv, "Invalid steering mode.\n");
499         }
500 }
501
502 static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
503 {
504         struct mlx4_en_dev *mdev = priv->mdev;
505         struct mlx4_dev *dev = mdev->dev;
506         struct mlx4_mac_entry *entry;
507         int index = 0;
508         int err = 0;
509         u64 reg_id;
510         int *qpn = &priv->base_qpn;
511         u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
512
513         en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
514                priv->dev->dev_addr);
515         index = mlx4_register_mac(dev, priv->port, mac);
516         if (index < 0) {
517                 err = index;
518                 en_err(priv, "Failed adding MAC: %pM\n",
519                        priv->dev->dev_addr);
520                 return err;
521         }
522
523         if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
524                 int base_qpn = mlx4_get_base_qpn(dev, priv->port);
525                 *qpn = base_qpn + index;
526                 return 0;
527         }
528
529         err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
530         en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
531         if (err) {
532                 en_err(priv, "Failed to reserve qp for mac registration\n");
533                 goto qp_err;
534         }
535
536         err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
537         if (err)
538                 goto steer_err;
539
540         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
541         if (!entry) {
542                 err = -ENOMEM;
543                 goto alloc_err;
544         }
545         memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
546         entry->reg_id = reg_id;
547
548         hlist_add_head_rcu(&entry->hlist,
549                            &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
550
551         return 0;
552
553 alloc_err:
554         mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
555
556 steer_err:
557         mlx4_qp_release_range(dev, *qpn, 1);
558
559 qp_err:
560         mlx4_unregister_mac(dev, priv->port, mac);
561         return err;
562 }
563
564 static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
565 {
566         struct mlx4_en_dev *mdev = priv->mdev;
567         struct mlx4_dev *dev = mdev->dev;
568         int qpn = priv->base_qpn;
569         u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
570
571         en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
572                priv->dev->dev_addr);
573         mlx4_unregister_mac(dev, priv->port, mac);
574
575         if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
576                 struct mlx4_mac_entry *entry;
577                 struct hlist_node *n, *tmp;
578                 struct hlist_head *bucket;
579                 unsigned int mac_hash;
580
581                 mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
582                 bucket = &priv->mac_hash[mac_hash];
583                 hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
584                         if (ether_addr_equal_64bits(entry->mac,
585                                                     priv->dev->dev_addr)) {
586                                 en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
587                                        priv->port, priv->dev->dev_addr, qpn);
588                                 mlx4_en_uc_steer_release(priv, entry->mac,
589                                                          qpn, entry->reg_id);
590                                 mlx4_qp_release_range(dev, qpn, 1);
591
592                                 hlist_del_rcu(&entry->hlist);
593                                 kfree_rcu(entry, rcu);
594                                 break;
595                         }
596                 }
597         }
598 }
599
600 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
601                                unsigned char *new_mac, unsigned char *prev_mac)
602 {
603         struct mlx4_en_dev *mdev = priv->mdev;
604         struct mlx4_dev *dev = mdev->dev;
605         int err = 0;
606         u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
607
608         if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
609                 struct hlist_head *bucket;
610                 unsigned int mac_hash;
611                 struct mlx4_mac_entry *entry;
612                 struct hlist_node *n, *tmp;
613                 u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
614
615                 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
616                 hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
617                         if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
618                                 mlx4_en_uc_steer_release(priv, entry->mac,
619                                                          qpn, entry->reg_id);
620                                 mlx4_unregister_mac(dev, priv->port,
621                                                     prev_mac_u64);
622                                 hlist_del_rcu(&entry->hlist);
623                                 synchronize_rcu();
624                                 memcpy(entry->mac, new_mac, ETH_ALEN);
625                                 entry->reg_id = 0;
626                                 mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
627                                 hlist_add_head_rcu(&entry->hlist,
628                                                    &priv->mac_hash[mac_hash]);
629                                 mlx4_register_mac(dev, priv->port, new_mac_u64);
630                                 err = mlx4_en_uc_steer_add(priv, new_mac,
631                                                            &qpn,
632                                                            &entry->reg_id);
633                                 return err;
634                         }
635                 }
636                 return -EINVAL;
637         }
638
639         return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
640 }
641
642 u64 mlx4_en_mac_to_u64(u8 *addr)
643 {
644         u64 mac = 0;
645         int i;
646
647         for (i = 0; i < ETH_ALEN; i++) {
648                 mac <<= 8;
649                 mac |= addr[i];
650         }
651         return mac;
652 }
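/* Example (illustrative): 00:11:22:33:44:55 is returned as the u64
 * 0x001122334455, with addr[0] in the most significant used byte.
 * mlx4_en_u64_to_mac() above performs the inverse conversion.
 */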
653
654 static int mlx4_en_set_mac(struct net_device *dev, void *addr)
655 {
656         struct mlx4_en_priv *priv = netdev_priv(dev);
657         struct mlx4_en_dev *mdev = priv->mdev;
658         struct sockaddr *saddr = addr;
659
660         if (!is_valid_ether_addr(saddr->sa_data))
661                 return -EADDRNOTAVAIL;
662
663         memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
664         queue_work(mdev->workqueue, &priv->mac_task);
665         return 0;
666 }
667
668 static void mlx4_en_do_set_mac(struct work_struct *work)
669 {
670         struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
671                                                  mac_task);
672         struct mlx4_en_dev *mdev = priv->mdev;
673         int err = 0;
674
675         mutex_lock(&mdev->state_lock);
676         if (priv->port_up) {
677                 /* Remove old MAC and insert the new one */
678                 err = mlx4_en_replace_mac(priv, priv->base_qpn,
679                                           priv->dev->dev_addr, priv->prev_mac);
680                 if (err)
681                         en_err(priv, "Failed changing HW MAC address\n");
682                 memcpy(priv->prev_mac, priv->dev->dev_addr,
683                        sizeof(priv->prev_mac));
684         } else
685                 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
686
687         mutex_unlock(&mdev->state_lock);
688 }
689
690 static void mlx4_en_clear_list(struct net_device *dev)
691 {
692         struct mlx4_en_priv *priv = netdev_priv(dev);
693         struct mlx4_en_mc_list *tmp, *mc_to_del;
694
695         list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
696                 list_del(&mc_to_del->list);
697                 kfree(mc_to_del);
698         }
699 }
700
701 static void mlx4_en_cache_mclist(struct net_device *dev)
702 {
703         struct mlx4_en_priv *priv = netdev_priv(dev);
704         struct netdev_hw_addr *ha;
705         struct mlx4_en_mc_list *tmp;
706
707         mlx4_en_clear_list(dev);
708         netdev_for_each_mc_addr(ha, dev) {
709                 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
710                 if (!tmp) {
711                         en_err(priv, "failed to allocate multicast list\n");
712                         mlx4_en_clear_list(dev);
713                         return;
714                 }
715                 memcpy(tmp->addr, ha->addr, ETH_ALEN);
716                 list_add_tail(&tmp->list, &priv->mc_list);
717         }
718 }
719
720 static void update_mclist_flags(struct mlx4_en_priv *priv,
721                                 struct list_head *dst,
722                                 struct list_head *src)
723 {
724         struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
725         bool found;
726
727         /* Find all the entries that should be removed from dst;
728          * these are the entries that are not found in src.
729          */
730         list_for_each_entry(dst_tmp, dst, list) {
731                 found = false;
732                 list_for_each_entry(src_tmp, src, list) {
733                         if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
734                                 found = true;
735                                 break;
736                         }
737                 }
738                 if (!found)
739                         dst_tmp->action = MCLIST_REM;
740         }
741
742         /* Add entries that exist in src but not in dst
743          * and mark them as needing to be added
744          */
745         list_for_each_entry(src_tmp, src, list) {
746                 found = false;
747                 list_for_each_entry(dst_tmp, dst, list) {
748                         if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
749                                 dst_tmp->action = MCLIST_NONE;
750                                 found = true;
751                                 break;
752                         }
753                 }
754                 if (!found) {
755                         new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
756                                          GFP_KERNEL);
757                         if (!new_mc) {
758                                 en_err(priv, "Failed to allocate current multicast list\n");
759                                 return;
760                         }
761                         memcpy(new_mc, src_tmp,
762                                sizeof(struct mlx4_en_mc_list));
763                         new_mc->action = MCLIST_ADD;
764                         list_add_tail(&new_mc->list, dst);
765                 }
766         }
767 }
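/* Net effect of the two passes above: entries found only in dst are tagged
 * MCLIST_REM, entries present in both lists are tagged MCLIST_NONE, and
 * entries found only in src are copied into dst and tagged MCLIST_ADD.
 * mlx4_en_do_multicast() consumes these tags to attach/detach addresses.
 */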
768
769 static void mlx4_en_set_rx_mode(struct net_device *dev)
770 {
771         struct mlx4_en_priv *priv = netdev_priv(dev);
772
773         if (!priv->port_up)
774                 return;
775
776         queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
777 }
778
779 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
780                                      struct mlx4_en_dev *mdev)
781 {
782         int err = 0;
783
784         if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
785                 if (netif_msg_rx_status(priv))
786                         en_warn(priv, "Entering promiscuous mode\n");
787                 priv->flags |= MLX4_EN_FLAG_PROMISC;
788
789                 /* Enable promiscuous mode */
790                 switch (mdev->dev->caps.steering_mode) {
791                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
792                         err = mlx4_flow_steer_promisc_add(mdev->dev,
793                                                           priv->port,
794                                                           priv->base_qpn,
795                                                           MLX4_FS_PROMISC_UPLINK);
796                         if (err)
797                                 en_err(priv, "Failed enabling promiscuous mode\n");
798                         priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
799                         break;
800
801                 case MLX4_STEERING_MODE_B0:
802                         err = mlx4_unicast_promisc_add(mdev->dev,
803                                                        priv->base_qpn,
804                                                        priv->port);
805                         if (err)
806                                 en_err(priv, "Failed enabling unicast promiscuous mode\n");
807
808                         /* Add the default qp number as multicast
809                          * promisc
810                          */
811                         if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
812                                 err = mlx4_multicast_promisc_add(mdev->dev,
813                                                                  priv->base_qpn,
814                                                                  priv->port);
815                                 if (err)
816                                         en_err(priv, "Failed enabling multicast promiscuous mode\n");
817                                 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
818                         }
819                         break;
820
821                 case MLX4_STEERING_MODE_A0:
822                         err = mlx4_SET_PORT_qpn_calc(mdev->dev,
823                                                      priv->port,
824                                                      priv->base_qpn,
825                                                      1);
826                         if (err)
827                                 en_err(priv, "Failed enabling promiscuous mode\n");
828                         break;
829                 }
830
831                 /* Disable port multicast filter (unconditionally) */
832                 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
833                                           0, MLX4_MCAST_DISABLE);
834                 if (err)
835                         en_err(priv, "Failed disabling multicast filter\n");
836
837                 /* Disable port VLAN filter */
838                 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
839                 if (err)
840                         en_err(priv, "Failed disabling VLAN filter\n");
841         }
842 }
843
844 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
845                                        struct mlx4_en_dev *mdev)
846 {
847         int err = 0;
848
849         if (netif_msg_rx_status(priv))
850                 en_warn(priv, "Leaving promiscuous mode\n");
851         priv->flags &= ~MLX4_EN_FLAG_PROMISC;
852
853         /* Disable promiscuous mode */
854         switch (mdev->dev->caps.steering_mode) {
855         case MLX4_STEERING_MODE_DEVICE_MANAGED:
856                 err = mlx4_flow_steer_promisc_remove(mdev->dev,
857                                                      priv->port,
858                                                      MLX4_FS_PROMISC_UPLINK);
859                 if (err)
860                         en_err(priv, "Failed disabling promiscuous mode\n");
861                 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
862                 break;
863
864         case MLX4_STEERING_MODE_B0:
865                 err = mlx4_unicast_promisc_remove(mdev->dev,
866                                                   priv->base_qpn,
867                                                   priv->port);
868                 if (err)
869                         en_err(priv, "Failed disabling unicast promiscuous mode\n");
870                 /* Disable Multicast promisc */
871                 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
872                         err = mlx4_multicast_promisc_remove(mdev->dev,
873                                                             priv->base_qpn,
874                                                             priv->port);
875                         if (err)
876                                 en_err(priv, "Failed disabling multicast promiscuous mode\n");
877                         priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
878                 }
879                 break;
880
881         case MLX4_STEERING_MODE_A0:
882                 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
883                                              priv->port,
884                                              priv->base_qpn, 0);
885                 if (err)
886                         en_err(priv, "Failed disabling promiscuous mode\n");
887                 break;
888         }
889
890         /* Enable port VLAN filter */
891         err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
892         if (err)
893                 en_err(priv, "Failed enabling VLAN filter\n");
894 }
895
896 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
897                                  struct net_device *dev,
898                                  struct mlx4_en_dev *mdev)
899 {
900         struct mlx4_en_mc_list *mclist, *tmp;
901         u64 mcast_addr = 0;
902         u8 mc_list[16] = {0};
903         int err = 0;
904
905         /* Enable/disable the multicast filter according to IFF_ALLMULTI */
906         if (dev->flags & IFF_ALLMULTI) {
907                 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
908                                           0, MLX4_MCAST_DISABLE);
909                 if (err)
910                         en_err(priv, "Failed disabling multicast filter\n");
911
912                 /* Add the default qp number as multicast promisc */
913                 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
914                         switch (mdev->dev->caps.steering_mode) {
915                         case MLX4_STEERING_MODE_DEVICE_MANAGED:
916                                 err = mlx4_flow_steer_promisc_add(mdev->dev,
917                                                                   priv->port,
918                                                                   priv->base_qpn,
919                                                                   MLX4_FS_PROMISC_ALL_MULTI);
920                                 break;
921
922                         case MLX4_STEERING_MODE_B0:
923                                 err = mlx4_multicast_promisc_add(mdev->dev,
924                                                                  priv->base_qpn,
925                                                                  priv->port);
926                                 break;
927
928                         case MLX4_STEERING_MODE_A0:
929                                 break;
930                         }
931                         if (err)
932                                 en_err(priv, "Failed entering multicast promisc mode\n");
933                         priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
934                 }
935         } else {
936                 /* Disable Multicast promisc */
937                 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
938                         switch (mdev->dev->caps.steering_mode) {
939                         case MLX4_STEERING_MODE_DEVICE_MANAGED:
940                                 err = mlx4_flow_steer_promisc_remove(mdev->dev,
941                                                                      priv->port,
942                                                                      MLX4_FS_PROMISC_ALL_MULTI);
943                                 break;
944
945                         case MLX4_STEERING_MODE_B0:
946                                 err = mlx4_multicast_promisc_remove(mdev->dev,
947                                                                     priv->base_qpn,
948                                                                     priv->port);
949                                 break;
950
951                         case MLX4_STEERING_MODE_A0:
952                                 break;
953                         }
954                         if (err)
955                                 en_err(priv, "Failed disabling multicast promiscuous mode\n");
956                         priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
957                 }
958
959                 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
960                                           0, MLX4_MCAST_DISABLE);
961                 if (err)
962                         en_err(priv, "Failed disabling multicast filter\n");
963
964                 /* Flush mcast filter and init it with broadcast address */
965                 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
966                                     1, MLX4_MCAST_CONFIG);
967
968                 /* Update multicast list - we cache all addresses so they won't
969                  * change while HW is updated holding the command semaphore */
970                 netif_addr_lock_bh(dev);
971                 mlx4_en_cache_mclist(dev);
972                 netif_addr_unlock_bh(dev);
973                 list_for_each_entry(mclist, &priv->mc_list, list) {
974                         mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
975                         mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
976                                             mcast_addr, 0, MLX4_MCAST_CONFIG);
977                 }
978                 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
979                                           0, MLX4_MCAST_ENABLE);
980                 if (err)
981                         en_err(priv, "Failed enabling multicast filter\n");
982
983                 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
984                 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
985                         if (mclist->action == MCLIST_REM) {
986                                 /* detach this address and delete from list */
987                                 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
988                                 mc_list[5] = priv->port;
989                                 err = mlx4_multicast_detach(mdev->dev,
990                                                             &priv->rss_map.indir_qp,
991                                                             mc_list,
992                                                             MLX4_PROT_ETH,
993                                                             mclist->reg_id);
994                                 if (err)
995                                         en_err(priv, "Failed to detach multicast address\n");
996
997                                 /* remove from list */
998                                 list_del(&mclist->list);
999                                 kfree(mclist);
1000                         } else if (mclist->action == MCLIST_ADD) {
1001                                 /* attach the address */
1002                                 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1003                                 /* needed for B0 steering support */
1004                                 mc_list[5] = priv->port;
1005                                 err = mlx4_multicast_attach(mdev->dev,
1006                                                             &priv->rss_map.indir_qp,
1007                                                             mc_list,
1008                                                             priv->port, 0,
1009                                                             MLX4_PROT_ETH,
1010                                                             &mclist->reg_id);
1011                                 if (err)
1012                                         en_err(priv, "Failed to attach multicast address\n");
1013
1014                         }
1015                 }
1016         }
1017 }
1018
1019 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1020                                  struct net_device *dev,
1021                                  struct mlx4_en_dev *mdev)
1022 {
1023         struct netdev_hw_addr *ha;
1024         struct mlx4_mac_entry *entry;
1025         struct hlist_node *n, *tmp;
1026         bool found;
1027         u64 mac;
1028         int err = 0;
1029         struct hlist_head *bucket;
1030         unsigned int i;
1031         int removed = 0;
1032         u32 prev_flags;
1033
1034         /* Note that we do not need to protect our mac_hash traversal with rcu,
1035          * since all modification code is protected by mdev->state_lock
1036          */
1037
1038         /* find what to remove */
1039         for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
1040                 bucket = &priv->mac_hash[i];
1041                 hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
1042                         found = false;
1043                         netdev_for_each_uc_addr(ha, dev) {
1044                                 if (ether_addr_equal_64bits(entry->mac,
1045                                                             ha->addr)) {
1046                                         found = true;
1047                                         break;
1048                                 }
1049                         }
1050
1051                         /* Never remove the port's own MAC, even though it is not in the uc list */
1052                         if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
1053                                 found = true;
1054
1055                         if (!found) {
1056                                 mac = mlx4_en_mac_to_u64(entry->mac);
1057                                 mlx4_en_uc_steer_release(priv, entry->mac,
1058                                                          priv->base_qpn,
1059                                                          entry->reg_id);
1060                                 mlx4_unregister_mac(mdev->dev, priv->port, mac);
1061
1062                                 hlist_del_rcu(&entry->hlist);
1063                                 kfree_rcu(entry, rcu);
1064                                 en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
1065                                        entry->mac, priv->port);
1066                                 ++removed;
1067                         }
1068                 }
1069         }
1070
1071         /* if we didn't remove anything, there is no use in trying to add
1072          * again once we are in a forced promisc mode state
1073          */
1074         if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
1075                 return;
1076
1077         prev_flags = priv->flags;
1078         priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
1079
1080         /* find what to add */
1081         netdev_for_each_uc_addr(ha, dev) {
1082                 found = false;
1083                 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
1084                 hlist_for_each_entry(entry, n, bucket, hlist) {
1085                         if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
1086                                 found = true;
1087                                 break;
1088                         }
1089                 }
1090
1091                 if (!found) {
1092                         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1093                         if (!entry) {
1094                                 en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
1095                                        ha->addr, priv->port);
1096                                 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1097                                 break;
1098                         }
1099                         mac = mlx4_en_mac_to_u64(ha->addr);
1100                         memcpy(entry->mac, ha->addr, ETH_ALEN);
1101                         err = mlx4_register_mac(mdev->dev, priv->port, mac);
1102                         if (err < 0) {
1103                                 en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
1104                                        ha->addr, priv->port, err);
1105                                 kfree(entry);
1106                                 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1107                                 break;
1108                         }
1109                         err = mlx4_en_uc_steer_add(priv, ha->addr,
1110                                                    &priv->base_qpn,
1111                                                    &entry->reg_id);
1112                         if (err) {
1113                                 en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
1114                                        ha->addr, priv->port, err);
1115                                 mlx4_unregister_mac(mdev->dev, priv->port, mac);
1116                                 kfree(entry);
1117                                 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1118                                 break;
1119                         } else {
1120                                 unsigned int mac_hash;
1121                                 en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
1122                                        ha->addr, priv->port);
1123                                 mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
1124                                 bucket = &priv->mac_hash[mac_hash];
1125                                 hlist_add_head_rcu(&entry->hlist, bucket);
1126                         }
1127                 }
1128         }
1129
1130         if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
1131                 en_warn(priv, "Forcing promiscuous mode on port:%d\n",
1132                         priv->port);
1133         } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
1134                 en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
1135                         priv->port);
1136         }
1137 }
1138
1139 static void mlx4_en_do_set_rx_mode(struct work_struct *work)
1140 {
1141         struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1142                                                  rx_mode_task);
1143         struct mlx4_en_dev *mdev = priv->mdev;
1144         struct net_device *dev = priv->dev;
1145
1146         mutex_lock(&mdev->state_lock);
1147         if (!mdev->device_up) {
1148                 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
1149                 goto out;
1150         }
1151         if (!priv->port_up) {
1152                 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
1153                 goto out;
1154         }
1155
1156         if (!netif_carrier_ok(dev)) {
1157                 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
1158                         if (priv->port_state.link_state) {
1159                                 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
1160                                 netif_carrier_on(dev);
1161                                 en_dbg(LINK, priv, "Link Up\n");
1162                         }
1163                 }
1164         }
1165
1166         if (dev->priv_flags & IFF_UNICAST_FLT)
1167                 mlx4_en_do_uc_filter(priv, dev, mdev);
1168
1169         /* Promiscuous mode: disable all filters */
1170         if ((dev->flags & IFF_PROMISC) ||
1171             (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
1172                 mlx4_en_set_promisc_mode(priv, mdev);
1173                 goto out;
1174         }
1175
1176         /* Not in promiscuous mode */
1177         if (priv->flags & MLX4_EN_FLAG_PROMISC)
1178                 mlx4_en_clear_promisc_mode(priv, mdev);
1179
1180         mlx4_en_do_multicast(priv, dev, mdev);
1181 out:
1182         mutex_unlock(&mdev->state_lock);
1183 }
1184
1185 #ifdef CONFIG_NET_POLL_CONTROLLER
1186 static void mlx4_en_netpoll(struct net_device *dev)
1187 {
1188         struct mlx4_en_priv *priv = netdev_priv(dev);
1189         struct mlx4_en_cq *cq;
1190         unsigned long flags;
1191         int i;
1192
1193         for (i = 0; i < priv->rx_ring_num; i++) {
1194                 cq = &priv->rx_cq[i];
1195                 spin_lock_irqsave(&cq->lock, flags);
1196                 napi_synchronize(&cq->napi);
1197                 mlx4_en_process_rx_cq(dev, cq, 0);
1198                 spin_unlock_irqrestore(&cq->lock, flags);
1199         }
1200 }
1201 #endif
1202
1203 static void mlx4_en_tx_timeout(struct net_device *dev)
1204 {
1205         struct mlx4_en_priv *priv = netdev_priv(dev);
1206         struct mlx4_en_dev *mdev = priv->mdev;
1207
1208         if (netif_msg_timer(priv))
1209                 en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
1210
1211         priv->port_stats.tx_timeout++;
1212         en_dbg(DRV, priv, "Scheduling watchdog\n");
1213         queue_work(mdev->workqueue, &priv->watchdog_task);
1214 }
1215
1216
1217 static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
1218 {
1219         struct mlx4_en_priv *priv = netdev_priv(dev);
1220
1221         spin_lock_bh(&priv->stats_lock);
1222         memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
1223         spin_unlock_bh(&priv->stats_lock);
1224
1225         return &priv->ret_stats;
1226 }
1227
1228 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
1229 {
1230         struct mlx4_en_cq *cq;
1231         int i;
1232
1233         /* If we haven't received a specific coalescing setting
1234          * (module param), we set the moderation parameters as follows:
1235          * - moder_cnt is set to the number of mtu sized packets to
1236          *   satisfy our coalescing target.
1237          * - moder_time is set to a fixed value.
1238          */
1239         priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
1240         priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
1241         priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
1242         priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
1243         en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
1244                priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
1245
1246         /* Setup cq moderation params */
1247         for (i = 0; i < priv->rx_ring_num; i++) {
1248                 cq = &priv->rx_cq[i];
1249                 cq->moder_cnt = priv->rx_frames;
1250                 cq->moder_time = priv->rx_usecs;
1251                 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1252                 priv->last_moder_packets[i] = 0;
1253                 priv->last_moder_bytes[i] = 0;
1254         }
1255
1256         for (i = 0; i < priv->tx_ring_num; i++) {
1257                 cq = &priv->tx_cq[i];
1258                 cq->moder_cnt = priv->tx_frames;
1259                 cq->moder_time = priv->tx_usecs;
1260         }
1261
1262         /* Reset auto-moderation params */
1263         priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
1264         priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
1265         priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
1266         priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
1267         priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
1268         priv->adaptive_rx_coal = 1;
1269         priv->last_moder_jiffies = 0;
1270         priv->last_moder_tx_packets = 0;
1271 }
1272
1273 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1274 {
1275         unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
1276         struct mlx4_en_cq *cq;
1277         unsigned long packets;
1278         unsigned long rate;
1279         unsigned long avg_pkt_size;
1280         unsigned long rx_packets;
1281         unsigned long rx_bytes;
1282         unsigned long rx_pkt_diff;
1283         int moder_time;
1284         int ring, err;
1285
1286         if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1287                 return;
1288
1289         for (ring = 0; ring < priv->rx_ring_num; ring++) {
1290                 spin_lock_bh(&priv->stats_lock);
1291                 rx_packets = priv->rx_ring[ring].packets;
1292                 rx_bytes = priv->rx_ring[ring].bytes;
1293                 spin_unlock_bh(&priv->stats_lock);
1294
1295                 rx_pkt_diff = ((unsigned long) (rx_packets -
1296                                 priv->last_moder_packets[ring]));
1297                 packets = rx_pkt_diff;
1298                 rate = packets * HZ / period;
1299                 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
1300                                 priv->last_moder_bytes[ring])) / packets : 0;
1301
1302                 /* Apply auto-moderation only when packet rate
1303                  * exceeds a rate at which it matters */
1304                 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1305                     avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
1306                         if (rate < priv->pkt_rate_low)
1307                                 moder_time = priv->rx_usecs_low;
1308                         else if (rate > priv->pkt_rate_high)
1309                                 moder_time = priv->rx_usecs_high;
1310                         else
1311                                 moder_time = (rate - priv->pkt_rate_low) *
1312                                         (priv->rx_usecs_high - priv->rx_usecs_low) /
1313                                         (priv->pkt_rate_high - priv->pkt_rate_low) +
1314                                         priv->rx_usecs_low;
1315                 } else {
1316                         moder_time = priv->rx_usecs_low;
1317                 }
1318
1319                 if (moder_time != priv->last_moder_time[ring]) {
1320                         priv->last_moder_time[ring] = moder_time;
1321                         cq = &priv->rx_cq[ring];
1322                         cq->moder_time = moder_time;
1323                         err = mlx4_en_set_cq_moder(priv, cq);
1324                         if (err)
1325                                 en_err(priv, "Failed modifying moderation for cq:%d\n",
1326                                        ring);
1327                 }
1328                 priv->last_moder_packets[ring] = rx_packets;
1329                 priv->last_moder_bytes[ring] = rx_bytes;
1330         }
1331
1332         priv->last_moder_jiffies = jiffies;
1333 }
1334
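     /*
      * Periodic statistics worker: while the device is up it dumps the port
      * counters from firmware, runs auto-moderation when the port is up, and
      * re-arms itself every STATS_DELAY on the mdev workqueue.
      */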
1335 static void mlx4_en_do_get_stats(struct work_struct *work)
1336 {
1337         struct delayed_work *delay = to_delayed_work(work);
1338         struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1339                                                  stats_task);
1340         struct mlx4_en_dev *mdev = priv->mdev;
1341         int err;
1342
1343         mutex_lock(&mdev->state_lock);
1344         if (mdev->device_up) {
1345                 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1346                 if (err)
1347                         en_dbg(HW, priv, "Could not update stats\n");
1348
1349                 if (priv->port_up)
1350                         mlx4_en_auto_moderation(priv);
1351
1352                 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1353         }
1354         if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
1355                 queue_work(mdev->workqueue, &priv->mac_task);
1356                 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1357         }
1358         mutex_unlock(&mdev->state_lock);
1359 }
1360
1361 static void mlx4_en_linkstate(struct work_struct *work)
1362 {
1363         struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1364                                                  linkstate_task);
1365         struct mlx4_en_dev *mdev = priv->mdev;
1366         int linkstate = priv->link_state;
1367
1368         mutex_lock(&mdev->state_lock);
1369         /* If the observable port state changed, set the carrier state and
1370          * report it to the system log */
1371         if (priv->last_link_state != linkstate) {
1372                 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
1373                         en_info(priv, "Link Down\n");
1374                         netif_carrier_off(priv->dev);
1375                 } else {
1376                         en_info(priv, "Link Up\n");
1377                         netif_carrier_on(priv->dev);
1378                 }
1379         }
1380         priv->last_link_state = linkstate;
1381         mutex_unlock(&mdev->state_lock);
1382 }
1383
1384
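     /*
      * Port bring-up sequence (unwound on error via the cq_err/mac_err/rss_err/
      * tx_err labels): activate RX rings and CQs, get the ETH QP, set up RSS
      * steering and the drop QP, activate TX CQs and rings, program the port
      * (SET_PORT / INIT_PORT), attach the broadcast address and finally start
      * the TX queues.
      */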
1385 int mlx4_en_start_port(struct net_device *dev)
1386 {
1387         struct mlx4_en_priv *priv = netdev_priv(dev);
1388         struct mlx4_en_dev *mdev = priv->mdev;
1389         struct mlx4_en_cq *cq;
1390         struct mlx4_en_tx_ring *tx_ring;
1391         int rx_index = 0;
1392         int tx_index = 0;
1393         int err = 0;
1394         int i;
1395         int j;
1396         u8 mc_list[16] = {0};
1397
1398         if (priv->port_up) {
1399                 en_dbg(DRV, priv, "start port called while port already up\n");
1400                 return 0;
1401         }
1402
1403         INIT_LIST_HEAD(&priv->mc_list);
1404         INIT_LIST_HEAD(&priv->curr_list);
1405         INIT_LIST_HEAD(&priv->ethtool_list);
1406         memset(&priv->ethtool_rules[0], 0,
1407                sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
1408
1409         /* Calculate Rx buf size */
1410         dev->mtu = min(dev->mtu, priv->max_mtu);
1411         mlx4_en_calc_rx_buf(dev);
1412         en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
1413
1414         /* Configure rx cq's and rings */
1415         err = mlx4_en_activate_rx_rings(priv);
1416         if (err) {
1417                 en_err(priv, "Failed to activate RX rings\n");
1418                 return err;
1419         }
1420         for (i = 0; i < priv->rx_ring_num; i++) {
1421                 cq = &priv->rx_cq[i];
1422
1423                 err = mlx4_en_activate_cq(priv, cq, i);
1424                 if (err) {
1425                         en_err(priv, "Failed activating Rx CQ\n");
1426                         goto cq_err;
1427                 }
1428                 for (j = 0; j < cq->size; j++)
1429                         cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1430                 err = mlx4_en_set_cq_moder(priv, cq);
1431                 if (err) {
1432                         en_err(priv, "Failed setting cq moderation parameters\n");
1433                         mlx4_en_deactivate_cq(priv, cq);
1434                         goto cq_err;
1435                 }
1436                 mlx4_en_arm_cq(priv, cq);
1437                 priv->rx_ring[i].cqn = cq->mcq.cqn;
1438                 ++rx_index;
1439         }
1440
1441         /* Set qp number */
1442         en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
1443         err = mlx4_en_get_qp(priv);
1444         if (err) {
1445                 en_err(priv, "Failed getting eth qp\n");
1446                 goto cq_err;
1447         }
1448         mdev->mac_removed[priv->port] = 0;
1449
1450         err = mlx4_en_config_rss_steer(priv);
1451         if (err) {
1452                 en_err(priv, "Failed configuring rss steering\n");
1453                 goto mac_err;
1454         }
1455
1456         err = mlx4_en_create_drop_qp(priv);
1457         if (err)
1458                 goto rss_err;
1459
1460         /* Configure tx cq's and rings */
1461         for (i = 0; i < priv->tx_ring_num; i++) {
1462                 /* Configure cq */
1463                 cq = &priv->tx_cq[i];
1464                 err = mlx4_en_activate_cq(priv, cq, i);
1465                 if (err) {
1466                         en_err(priv, "Failed allocating Tx CQ\n");
1467                         goto tx_err;
1468                 }
1469                 err = mlx4_en_set_cq_moder(priv, cq);
1470                 if (err) {
1471                         en_err(priv, "Failed setting cq moderation parameters\n");
1472                         mlx4_en_deactivate_cq(priv, cq);
1473                         goto tx_err;
1474                 }
1475                 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
1476                 cq->buf->wqe_index = cpu_to_be16(0xffff);
1477
1478                 /* Configure ring */
1479                 tx_ring = &priv->tx_ring[i];
1480                 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
1481                         i / priv->num_tx_rings_p_up);
1482                 if (err) {
1483                         en_err(priv, "Failed allocating Tx ring\n");
1484                         mlx4_en_deactivate_cq(priv, cq);
1485                         goto tx_err;
1486                 }
1487                 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
1488
1489                 /* Arm CQ for TX completions */
1490                 mlx4_en_arm_cq(priv, cq);
1491
1492                 /* Set initial ownership of all Tx TXBBs to SW (1) */
1493                 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1494                         *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
1495                 ++tx_index;
1496         }
1497
1498         /* Configure port */
1499         err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1500                                     priv->rx_skb_size + ETH_FCS_LEN,
1501                                     priv->prof->tx_pause,
1502                                     priv->prof->tx_ppp,
1503                                     priv->prof->rx_pause,
1504                                     priv->prof->rx_ppp);
1505         if (err) {
1506                 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1507                        priv->port, err);
1508                 goto tx_err;
1509         }
1510         /* Set default qp number */
1511         err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1512         if (err) {
1513                 en_err(priv, "Failed setting default qp numbers\n");
1514                 goto tx_err;
1515         }
1516
1517         /* Init port */
1518         en_dbg(HW, priv, "Initializing port\n");
1519         err = mlx4_INIT_PORT(mdev->dev, priv->port);
1520         if (err) {
1521                 en_err(priv, "Failed Initializing port\n");
1522                 goto tx_err;
1523         }
1524
1525         /* Attach rx QP to broadcast address */
1526         memset(&mc_list[10], 0xff, ETH_ALEN);
1527         mc_list[5] = priv->port; /* needed for B0 steering support */
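             /* mc_list is passed to the steering API as a 16-byte GID-like key:
              * the destination MAC occupies bytes 10..15 (all-ff here for
              * broadcast) and byte 5 carries the port number for B0 steering.
              */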
1528         if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1529                                   priv->port, 0, MLX4_PROT_ETH,
1530                                   &priv->broadcast_id))
1531                 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1532
1533         /* Must redo promiscuous mode setup. */
1534         priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1535
1536         /* Schedule multicast task to populate multicast list */
1537         queue_work(mdev->workqueue, &priv->rx_mode_task);
1538
1539         mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
1540
1541         priv->port_up = true;
1542         netif_tx_start_all_queues(dev);
1543         netif_device_attach(dev);
1544
1545         return 0;
1546
1547 tx_err:
1548         while (tx_index--) {
1549                 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
1550                 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
1551         }
1552         mlx4_en_destroy_drop_qp(priv);
1553 rss_err:
1554         mlx4_en_release_rss_steer(priv);
1555 mac_err:
1556         mlx4_en_put_qp(priv);
1557 cq_err:
1558         while (rx_index--)
1559                 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
1560         for (i = 0; i < priv->rx_ring_num; i++)
1561                 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
1562
1563         return err; /* need to close devices */
1564 }
1565
1566
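     /*
      * Port teardown, mirroring mlx4_en_start_port(): stop the TX queues, clear
      * promiscuous state, detach broadcast/multicast addresses, flush the
      * multicast filter, drain and free the TX rings, release the RSS QPs and
      * the ETH QP, remove any device-managed flow steering rules, deactivate
      * the RX rings and CQs, and finally CLOSE_PORT.
      */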
1567 void mlx4_en_stop_port(struct net_device *dev, int detach)
1568 {
1569         struct mlx4_en_priv *priv = netdev_priv(dev);
1570         struct mlx4_en_dev *mdev = priv->mdev;
1571         struct mlx4_en_mc_list *mclist, *tmp;
1572         struct ethtool_flow_id *flow, *tmp_flow;
1573         int i;
1574         u8 mc_list[16] = {0};
1575
1576         if (!priv->port_up) {
1577                 en_dbg(DRV, priv, "stop port called while port already down\n");
1578                 return;
1579         }
1580
1581         /* Synchronize with tx routine */
1582         netif_tx_lock_bh(dev);
1583         if (detach)
1584                 netif_device_detach(dev);
1585         netif_tx_stop_all_queues(dev);
1586         netif_tx_unlock_bh(dev);
1587
1588         netif_tx_disable(dev);
1589
1590         /* Set port as not active */
1591         priv->port_up = false;
1592
1593         /* Promiscuous mode */
1594         if (mdev->dev->caps.steering_mode ==
1595             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1596                 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1597                                  MLX4_EN_FLAG_MC_PROMISC);
1598                 mlx4_flow_steer_promisc_remove(mdev->dev,
1599                                                priv->port,
1600                                                MLX4_FS_PROMISC_UPLINK);
1601                 mlx4_flow_steer_promisc_remove(mdev->dev,
1602                                                priv->port,
1603                                                MLX4_FS_PROMISC_ALL_MULTI);
1604         } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1605                 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1606
1607                 /* Disable promiscuous mode */
1608                 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1609                                             priv->port);
1610
1611                 /* Disable Multicast promisc */
1612                 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1613                         mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1614                                                       priv->port);
1615                         priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1616                 }
1617         }
1618
1619         /* Detach All multicasts */
1620         memset(&mc_list[10], 0xff, ETH_ALEN);
1621         mc_list[5] = priv->port; /* needed for B0 steering support */
1622         mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1623                               MLX4_PROT_ETH, priv->broadcast_id);
1624         list_for_each_entry(mclist, &priv->curr_list, list) {
1625                 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1626                 mc_list[5] = priv->port;
1627                 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
1628                                       mc_list, MLX4_PROT_ETH, mclist->reg_id);
1629         }
1630         mlx4_en_clear_list(dev);
1631         list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1632                 list_del(&mclist->list);
1633                 kfree(mclist);
1634         }
1635
1636         /* Flush multicast filter */
1637         mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1638
1639         mlx4_en_destroy_drop_qp(priv);
1640
1641         /* Free TX Rings */
1642         for (i = 0; i < priv->tx_ring_num; i++) {
1643                 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
1644                 mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
1645         }
1646         msleep(10);
1647
1648         for (i = 0; i < priv->tx_ring_num; i++)
1649                 mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
1650
1651         /* Free RSS qps */
1652         mlx4_en_release_rss_steer(priv);
1653
1654         /* Unregister Mac address for the port */
1655         mlx4_en_put_qp(priv);
1656         if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
1657                 mdev->mac_removed[priv->port] = 1;
1658
1659         /* Remove flow steering rules for the port */
1660         if (mdev->dev->caps.steering_mode ==
1661             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1662                 ASSERT_RTNL();
1663                 list_for_each_entry_safe(flow, tmp_flow,
1664                                          &priv->ethtool_list, list) {
1665                         mlx4_flow_detach(mdev->dev, flow->id);
1666                         list_del(&flow->list);
1667                 }
1668         }
1669
1670         /* Free RX Rings */
1671         for (i = 0; i < priv->rx_ring_num; i++) {
1672                 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
1673                 while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
1674                         msleep(1);
1675                 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
1676         }
1677
1678         /* Close port */
1679         mlx4_CLOSE_PORT(mdev->dev, priv->port);
1680 }
1681
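     /*
      * Watchdog worker: restarts the port (stop followed by start) under the
      * device state lock.  It is queued whenever a restart is needed, e.g. from
      * the failure path of mlx4_en_change_mtu() below.
      */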
1682 static void mlx4_en_restart(struct work_struct *work)
1683 {
1684         struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1685                                                  watchdog_task);
1686         struct mlx4_en_dev *mdev = priv->mdev;
1687         struct net_device *dev = priv->dev;
1688
1689         en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1690
1691         mutex_lock(&mdev->state_lock);
1692         if (priv->port_up) {
1693                 mlx4_en_stop_port(dev, 1);
1694                 if (mlx4_en_start_port(dev))
1695                         en_err(priv, "Failed restarting port %d\n", priv->port);
1696         }
1697         mutex_unlock(&mdev->state_lock);
1698 }
1699
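     /*
      * Reset both the firmware port counters (DUMP_ETH_STATS with the reset
      * flag set) and the software per-ring byte/packet/checksum counters;
      * called when the interface is opened.
      */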
1700 static void mlx4_en_clear_stats(struct net_device *dev)
1701 {
1702         struct mlx4_en_priv *priv = netdev_priv(dev);
1703         struct mlx4_en_dev *mdev = priv->mdev;
1704         int i;
1705
1706         if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1707                 en_dbg(HW, priv, "Failed dumping statistics\n");
1708
1709         memset(&priv->stats, 0, sizeof(priv->stats));
1710         memset(&priv->pstats, 0, sizeof(priv->pstats));
1711         memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1712         memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1713
1714         for (i = 0; i < priv->tx_ring_num; i++) {
1715                 priv->tx_ring[i].bytes = 0;
1716                 priv->tx_ring[i].packets = 0;
1717                 priv->tx_ring[i].tx_csum = 0;
1718         }
1719         for (i = 0; i < priv->rx_ring_num; i++) {
1720                 priv->rx_ring[i].bytes = 0;
1721                 priv->rx_ring[i].packets = 0;
1722                 priv->rx_ring[i].csum_ok = 0;
1723                 priv->rx_ring[i].csum_none = 0;
1724         }
1725 }
1726
1727 static int mlx4_en_open(struct net_device *dev)
1728 {
1729         struct mlx4_en_priv *priv = netdev_priv(dev);
1730         struct mlx4_en_dev *mdev = priv->mdev;
1731         int err = 0;
1732
1733         mutex_lock(&mdev->state_lock);
1734
1735         if (!mdev->device_up) {
1736                 en_err(priv, "Cannot open - device down/disabled\n");
1737                 err = -EBUSY;
1738                 goto out;
1739         }
1740
1741         /* Reset HW statistics and SW counters */
1742         mlx4_en_clear_stats(dev);
1743
1744         err = mlx4_en_start_port(dev);
1745         if (err)
1746                 en_err(priv, "Failed starting port:%d\n", priv->port);
1747
1748 out:
1749         mutex_unlock(&mdev->state_lock);
1750         return err;
1751 }
1752
1753
1754 static int mlx4_en_close(struct net_device *dev)
1755 {
1756         struct mlx4_en_priv *priv = netdev_priv(dev);
1757         struct mlx4_en_dev *mdev = priv->mdev;
1758
1759         en_dbg(IFDOWN, priv, "Close port called\n");
1760
1761         mutex_lock(&mdev->state_lock);
1762
1763         mlx4_en_stop_port(dev, 0);
1764         netif_carrier_off(dev);
1765
1766         mutex_unlock(&mdev->state_lock);
1767         return 0;
1768 }
1769
1770 void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1771 {
1772         int i;
1773
1774 #ifdef CONFIG_RFS_ACCEL
1775         free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
1776         priv->dev->rx_cpu_rmap = NULL;
1777 #endif
1778
1779         for (i = 0; i < priv->tx_ring_num; i++) {
1780                 if (priv->tx_ring[i].tx_info)
1781                         mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1782                 if (priv->tx_cq[i].buf)
1783                         mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1784         }
1785
1786         for (i = 0; i < priv->rx_ring_num; i++) {
1787                 if (priv->rx_ring[i].rx_info)
1788                         mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1789                                 priv->prof->rx_ring_size, priv->stride);
1790                 if (priv->rx_cq[i].buf)
1791                         mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1792         }
1793
1794         if (priv->base_tx_qpn) {
1795                 mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
1796                 priv->base_tx_qpn = 0;
1797         }
1798 }
1799
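     /*
      * Allocate the per-port datapath resources: reserve a range of
      * tx_ring_num QPs (aligned to 256), create one CQ and one ring per TX and
      * per RX queue and, with RFS enabled, the IRQ CPU affinity reverse map.
      * Released by mlx4_en_free_resources() above.
      */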
1800 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1801 {
1802         struct mlx4_en_port_profile *prof = priv->prof;
1803         int i;
1804         int err;
1805
1806         err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
1807         if (err) {
1808                 en_err(priv, "failed reserving range for TX rings\n");
1809                 return err;
1810         }
1811
1812         /* Create tx Rings */
1813         for (i = 0; i < priv->tx_ring_num; i++) {
1814                 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
1815                                       prof->tx_ring_size, i, TX))
1816                         goto err;
1817
1818                 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
1819                                            prof->tx_ring_size, TXBB_SIZE))
1820                         goto err;
1821         }
1822
1823         /* Create rx Rings */
1824         for (i = 0; i < priv->rx_ring_num; i++) {
1825                 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
1826                                       prof->rx_ring_size, i, RX))
1827                         goto err;
1828
1829                 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
1830                                            prof->rx_ring_size, priv->stride))
1831                         goto err;
1832         }
1833
1834 #ifdef CONFIG_RFS_ACCEL
1835         priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
1836         if (!priv->dev->rx_cpu_rmap)
1837                 goto err;
1838 #endif
1839
1840         return 0;
1841
1842 err:
1843         en_err(priv, "Failed to allocate NIC resources\n");
1844         return -ENOMEM;
1845 }
1846
1847
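     /*
      * Final netdev teardown: unregister the device (closing the port if it was
      * up), free the HW queue page, cancel and flush pending work, detach the
      * netdev from mdev->pndev and release all ring and CQ resources.
      */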
1848 void mlx4_en_destroy_netdev(struct net_device *dev)
1849 {
1850         struct mlx4_en_priv *priv = netdev_priv(dev);
1851         struct mlx4_en_dev *mdev = priv->mdev;
1852
1853         en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
1854
1855         /* Unregister device - this will close the port if it was up */
1856         if (priv->registered)
1857                 unregister_netdev(dev);
1858
1859         if (priv->allocated)
1860                 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
1861
1862         cancel_delayed_work(&priv->stats_task);
1863         /* flush any pending task for this netdev */
1864         flush_workqueue(mdev->workqueue);
1865
1866         /* Detach the netdev so that tasks will not attempt to access it */
1867         mutex_lock(&mdev->state_lock);
1868         mdev->pndev[priv->port] = NULL;
1869         mutex_unlock(&mdev->state_lock);
1870
1871         mlx4_en_free_resources(priv);
1872
1873         kfree(priv->tx_ring);
1874         kfree(priv->tx_cq);
1875
1876         free_netdev(dev);
1877 }
1878
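     /*
      * MTU change: values outside [MLX4_EN_MIN_MTU, max_mtu] are rejected.  If
      * the interface is running, the port is restarted with the new MTU; should
      * that restart fail, the watchdog task is queued to recover the port.
      */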
1879 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
1880 {
1881         struct mlx4_en_priv *priv = netdev_priv(dev);
1882         struct mlx4_en_dev *mdev = priv->mdev;
1883         int err = 0;
1884
1885         en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
1886                  dev->mtu, new_mtu);
1887
1888         if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
1889                 en_err(priv, "Bad MTU size:%d.\n", new_mtu);
1890                 return -EPERM;
1891         }
1892         dev->mtu = new_mtu;
1893
1894         if (netif_running(dev)) {
1895                 mutex_lock(&mdev->state_lock);
1896                 if (!mdev->device_up) {
1897                         /* NIC is probably restarting - let watchdog task reset
1898                          * the port */
1899                         en_dbg(DRV, priv, "Change MTU called with card down!?\n");
1900                 } else {
1901                         mlx4_en_stop_port(dev, 1);
1902                         err = mlx4_en_start_port(dev);
1903                         if (err) {
1904                                 en_err(priv, "Failed restarting port:%d\n",
1905                                          priv->port);
1906                                 queue_work(mdev->workqueue, &priv->watchdog_task);
1907                         }
1908                 }
1909                 mutex_unlock(&mdev->state_lock);
1910         }
1911         return 0;
1912 }
1913
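     /*
      * Only NETIF_F_LOOPBACK is acted upon here: it toggles the FORCE_LOOPBACK
      * bit in the cached WQE control flags and updates the loopback state.
      */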
1914 static int mlx4_en_set_features(struct net_device *netdev,
1915                 netdev_features_t features)
1916 {
1917         struct mlx4_en_priv *priv = netdev_priv(netdev);
1918
1919         if (features & NETIF_F_LOOPBACK)
1920                 priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
1921         else
1922                 priv->ctrl_flags &=
1923                         cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
1924
1925         mlx4_en_update_loopback_state(netdev, features);
1926
1927         return 0;
1928
1929 }
1930
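     /*
      * FDB (bridge) ndo handlers: only supported on multi-function devices, and
      * since the hardware cannot age out entries only static (NUD_PERMANENT)
      * addresses are accepted.
      */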
1931 static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1932                            struct net_device *dev,
1933                            const unsigned char *addr, u16 flags)
1934 {
1935         struct mlx4_en_priv *priv = netdev_priv(dev);
1936         struct mlx4_dev *mdev = priv->mdev->dev;
1937         int err;
1938
1939         if (!mlx4_is_mfunc(mdev))
1940                 return -EOPNOTSUPP;
1941
1942         /* Hardware does not support aging addresses; allow only
1943          * permanent addresses if ndm_state is given
1944          */
1945         if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
1946                 en_info(priv, "Add FDB only supports static addresses\n");
1947                 return -EINVAL;
1948         }
1949
1950         if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
1951                 err = dev_uc_add_excl(dev, addr);
1952         else if (is_multicast_ether_addr(addr))
1953                 err = dev_mc_add_excl(dev, addr);
1954         else
1955                 err = -EINVAL;
1956
1957         /* Only return duplicate errors if NLM_F_EXCL is set */
1958         if (err == -EEXIST && !(flags & NLM_F_EXCL))
1959                 err = 0;
1960
1961         return err;
1962 }
1963
1964 static int mlx4_en_fdb_del(struct ndmsg *ndm,
1965                            struct net_device *dev,
1966                            const unsigned char *addr)
1967 {
1968         struct mlx4_en_priv *priv = netdev_priv(dev);
1969         struct mlx4_dev *mdev = priv->mdev->dev;
1970         int err;
1971
1972         if (!mlx4_is_mfunc(mdev))
1973                 return -EOPNOTSUPP;
1974
1975         if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
1976                 en_info(priv, "Del FDB only supports static addresses\n");
1977                 return -EINVAL;
1978         }
1979
1980         if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
1981                 err = dev_uc_del(dev, addr);
1982         else if (is_multicast_ether_addr(addr))
1983                 err = dev_mc_del(dev, addr);
1984         else
1985                 err = -EINVAL;
1986
1987         return err;
1988 }
1989
1990 static int mlx4_en_fdb_dump(struct sk_buff *skb,
1991                             struct netlink_callback *cb,
1992                             struct net_device *dev, int idx)
1993 {
1994         struct mlx4_en_priv *priv = netdev_priv(dev);
1995         struct mlx4_dev *mdev = priv->mdev->dev;
1996
1997         if (mlx4_is_mfunc(mdev))
1998                 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
1999
2000         return idx;
2001 }
2002
2003 static const struct net_device_ops mlx4_netdev_ops = {
2004         .ndo_open               = mlx4_en_open,
2005         .ndo_stop               = mlx4_en_close,
2006         .ndo_start_xmit         = mlx4_en_xmit,
2007         .ndo_select_queue       = mlx4_en_select_queue,
2008         .ndo_get_stats          = mlx4_en_get_stats,
2009         .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
2010         .ndo_set_mac_address    = mlx4_en_set_mac,
2011         .ndo_validate_addr      = eth_validate_addr,
2012         .ndo_change_mtu         = mlx4_en_change_mtu,
2013         .ndo_tx_timeout         = mlx4_en_tx_timeout,
2014         .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
2015         .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
2016 #ifdef CONFIG_NET_POLL_CONTROLLER
2017         .ndo_poll_controller    = mlx4_en_netpoll,
2018 #endif
2019         .ndo_set_features       = mlx4_en_set_features,
2020         .ndo_setup_tc           = mlx4_en_setup_tc,
2021 #ifdef CONFIG_RFS_ACCEL
2022         .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
2023 #endif
2024         .ndo_fdb_add            = mlx4_en_fdb_add,
2025         .ndo_fdb_del            = mlx4_en_fdb_del,
2026         .ndo_fdb_dump           = mlx4_en_fdb_dump,
2027 };
2028
2029 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2030                         struct mlx4_en_port_profile *prof)
2031 {
2032         struct net_device *dev;
2033         struct mlx4_en_priv *priv;
2034         int i;
2035         int err;
2036
2037         dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
2038                                  MAX_TX_RINGS, MAX_RX_RINGS);
2039         if (dev == NULL)
2040                 return -ENOMEM;
2041
2042         netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
2043         netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2044
2045         SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
2046         dev->dev_id =  port - 1;
2047
2048         /*
2049          * Initialize driver private data
2050          */
2051
2052         priv = netdev_priv(dev);
2053         memset(priv, 0, sizeof(struct mlx4_en_priv));
2054         priv->dev = dev;
2055         priv->mdev = mdev;
2056         priv->ddev = &mdev->pdev->dev;
2057         priv->prof = prof;
2058         priv->port = port;
2059         priv->port_up = false;
2060         priv->flags = prof->flags;
2061         priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
2062                         MLX4_WQE_CTRL_SOLICITED);
2063         priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2064         priv->tx_ring_num = prof->tx_ring_num;
2065
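             /* The TX ring and TX CQ arrays are sized for the worst case
              * (MAX_TX_RINGS) rather than for tx_ring_num, presumably so the
              * ring count can later be changed without reallocating them.
              */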
2066         priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
2067                                 GFP_KERNEL);
2068         if (!priv->tx_ring) {
2069                 err = -ENOMEM;
2070                 goto out;
2071         }
2072         priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
2073                               GFP_KERNEL);
2074         if (!priv->tx_cq) {
2075                 err = -ENOMEM;
2076                 goto out;
2077         }
2078         priv->rx_ring_num = prof->rx_ring_num;
2079         priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
2080         priv->mac_index = -1;
2081         priv->msg_enable = MLX4_EN_MSG_LEVEL;
2082         spin_lock_init(&priv->stats_lock);
2083         INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2084         INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
2085         INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2086         INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2087         INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2088 #ifdef CONFIG_MLX4_EN_DCB
2089         if (!mlx4_is_slave(priv->mdev->dev))
2090                 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2091 #endif
2092
2093         for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
2094                 INIT_HLIST_HEAD(&priv->mac_hash[i]);
2095
2096         /* Query for default mac and max mtu */
2097         priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
2098
2099         /* Set default MAC */
2100         dev->addr_len = ETH_ALEN;
2101         mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
2102         if (!is_valid_ether_addr(dev->dev_addr)) {
2103                 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
2104                        priv->port, dev->dev_addr);
2105                 err = -EINVAL;
2106                 goto out;
2107         }
2108
2109         memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
2110
2111         priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2112                                           DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2113         err = mlx4_en_alloc_resources(priv);
2114         if (err)
2115                 goto out;
2116
2117 #ifdef CONFIG_RFS_ACCEL
2118         INIT_LIST_HEAD(&priv->filters);
2119         spin_lock_init(&priv->filters_lock);
2120 #endif
2121
2122         /* Allocate page for receive rings */
2123         err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2124                                 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
2125         if (err) {
2126                 en_err(priv, "Failed to allocate page for rx qps\n");
2127                 goto out;
2128         }
2129         priv->allocated = 1;
2130
2131         /*
2132          * Initialize netdev entry points
2133          */
2134         dev->netdev_ops = &mlx4_netdev_ops;
2135         dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
2136         netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
2137         netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
2138
2139         SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
2140
2141         /*
2142          * Set driver features
2143          */
2144         dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2145         if (mdev->LSO_support)
2146                 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2147
2148         dev->vlan_features = dev->hw_features;
2149
2150         dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
2151         dev->features = dev->hw_features | NETIF_F_HIGHDMA |
2152                         NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2153                         NETIF_F_HW_VLAN_FILTER;
2154         dev->hw_features |= NETIF_F_LOOPBACK;
2155
2156         if (mdev->dev->caps.steering_mode ==
2157             MLX4_STEERING_MODE_DEVICE_MANAGED)
2158                 dev->hw_features |= NETIF_F_NTUPLE;
2159
2160         if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
2161                 dev->priv_flags |= IFF_UNICAST_FLT;
2162
2163         mdev->pndev[port] = dev;
2164
2165         netif_carrier_off(dev);
2166         err = register_netdev(dev);
2167         if (err) {
2168                 en_err(priv, "Netdev registration failed for port %d\n", port);
2169                 goto out;
2170         }
2171         priv->registered = 1;
2172
2173         en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2174         en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2175
2176         mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
2177
2178         /* Configure port */
2179         mlx4_en_calc_rx_buf(dev);
2180         err = mlx4_SET_PORT_general(mdev->dev, priv->port,
2181                                     priv->rx_skb_size + ETH_FCS_LEN,
2182                                     prof->tx_pause, prof->tx_ppp,
2183                                     prof->rx_pause, prof->rx_ppp);
2184         if (err) {
2185                 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
2186                        priv->port, err);
2187                 goto out;
2188         }
2189
2190         /* Init port */
2191         en_warn(priv, "Initializing port\n");
2192         err = mlx4_INIT_PORT(mdev->dev, priv->port);
2193         if (err) {
2194                 en_err(priv, "Failed Initializing port\n");
2195                 goto out;
2196         }
2197         mlx4_en_set_default_moderation(priv);
2198         queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
2199         return 0;
2200
2201 out:
2202         mlx4_en_destroy_netdev(dev);
2203         return err;
2204 }
2205