/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

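/*
 * Bring the interface up: mark it administratively up, open the IB
 * resources and move them to the up state, and propagate IFF_UP to
 * any child (P_Key) interfaces.  Opening may be deferred until the
 * interface's P_Key is found in the port's P_Key table.
 */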
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev))
		goto err_disable;

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;

err_stop:
	ipoib_ib_dev_stop(dev, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev, 0);
	ipoib_ib_dev_stop(dev, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

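/*
 * Look up a path by destination GID in the per-device red-black tree.
 * Caller must hold priv->lock.
 */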
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

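/*
 * Insert a path into the red-black tree and the flat path list, keyed
 * by destination GID; returns -EEXIST if the GID is already present.
 * Caller must hold priv->lock.
 */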
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			be16_to_cpu(path->pathrec.dlid),
			path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

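/*
 * Completion callback for an SA path record query: on success, build a
 * new address handle from the returned path, hand it to the neighbours
 * waiting on this path, and retransmit the packets that were queued
 * while the lookup was in flight.
 */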
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;
			memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
			       sizeof(union ib_gid));

			if (ipoib_cm_enabled(dev, neigh->neighbour)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					if (neigh->ah)
						ipoib_put_ah(neigh->ah);
					ipoib_neigh_free(dev, neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid          = priv->local_gid;
	path->pathrec.pkey          = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

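/*
 * Kick off an asynchronous SA path record query for this path; the
 * result is delivered to path_rec_completion().
 */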
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID          |
				   IB_SA_PATH_REC_SGID          |
				   IB_SA_PATH_REC_NUMB_PATH     |
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

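/*
 * First transmit to a new neighbour: allocate an ipoib_neigh, attach
 * it to the (possibly newly created) path for its GID, and either send
 * the skb immediately if the path already has an address handle or
 * queue it until the path record query completes.
 */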
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour, skb->dev);
	if (!neigh) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, skb_dst(skb)->neighbour->ha + 4);
	if (!path) {
		path = path_rec_create(dev, skb_dst(skb)->neighbour->ha + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;
		memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
		       sizeof(union ib_gid));

		if (ipoib_cm_enabled(dev, neigh->neighbour)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
			return;
		}
	} else {
		neigh->ah  = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_list;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(dev, neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb_dst(skb)->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb_dst(skb)->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb_dst(skb)->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, skb_dst(skb)->neighbour->ha + 4, skb);
}

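/*
 * Send a unicast packet that arrived without a neighbour structure
 * (normally an ARP or RARP reply), using the destination hardware
 * address saved in the pseudoheader; the skb is queued on the path
 * while a path record lookup is in progress.
 */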
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

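/*
 * Main transmit entry point.  The fast path uses the cached
 * ipoib_neigh: send over the connected-mode QP if one is up, or
 * directly via the neighbour's address handle.  Otherwise fall back
 * to path lookup, per-neighbour queueing, or the pseudoheader path
 * for skbs without a neighbour.
 */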
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (likely(skb_dst(skb) && skb_dst(skb)->neighbour)) {
		if (unlikely(!*to_ipoib_neigh(skb_dst(skb)->neighbour))) {
			ipoib_path_lookup(skb, dev);
			return NETDEV_TX_OK;
		}

		neigh = *to_ipoib_neigh(skb_dst(skb)->neighbour);

		if (unlikely((memcmp(&neigh->dgid.raw,
				     skb_dst(skb)->neighbour->ha + 4,
				     sizeof(union ib_gid))) ||
			     (neigh->dev != dev))) {
			spin_lock_irqsave(&priv->lock, flags);
			/*
			 * It's safe to call ipoib_put_ah() inside
			 * priv->lock here, because we know that
			 * path->ah will always hold one more reference,
			 * so ipoib_put_ah() will never do more than
			 * decrement the ref count.
			 */
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			list_del(&neigh->list);
			ipoib_neigh_free(dev, neigh);
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_path_lookup(skb, dev);
			return NETDEV_TX_OK;
		}

		if (ipoib_cm_get(neigh)) {
			if (ipoib_cm_up(neigh)) {
				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
				return NETDEV_TX_OK;
			}
		} else if (neigh->ah) {
			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
			return NETDEV_TX_OK;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock_irqsave(&priv->lock, flags);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
					   skb_dst(skb) ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   IPOIB_QPN(phdr->hwaddr),
					   phdr->hwaddr + 4);
				dev_kfree_skb_any(skb);
				++dev->stats.tx_dropped;
				return NETDEV_TX_OK;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

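/*
 * Build the 4-byte IPoIB encapsulation header.  If the skb has no
 * neighbour to carry the destination hardware address, prepend a
 * pseudoheader so that ipoib_start_xmit() can still figure out where
 * to send the packet.
 */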
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if ((!skb_dst(skb) || !skb_dst(skb)->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_cleanup(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	neigh = *to_ipoib_neigh(n);
	if (neigh)
		priv = netdev_priv(neigh->dev);
	else
		return;
	ipoib_dbg(priv,
		  "neigh_cleanup for %06x %pI6\n",
		  IPOIB_QPN(n->ha),
		  n->ha + 4);

	spin_lock_irqsave(&priv->lock, flags);

	if (neigh->ah)
		ah = neigh->ah;
	list_del(&neigh->list);
	ipoib_neigh_free(n->dev, neigh);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	neigh->dev = dev;
	memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
	*to_ipoib_neigh(neighbour) = neigh;
	skb_queue_head_init(&neigh->queue);
	ipoib_cm_set(neigh, NULL);

	return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
	struct sk_buff *skb;
	*to_ipoib_neigh(neigh->neighbour) = NULL;
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_cleanup = ipoib_neigh_cleanup;

	return 0;
}

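/*
 * Allocate the RX and TX descriptor rings and set up the underlying
 * IB resources for this port.
 */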
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
	.create = ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_multicast_list	 = ipoib_set_mcast_list,
	.ndo_neigh_setup	 = ipoib_neigh_setup_dev,
};

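/*
 * One-time net_device initialization: hook up the netdev and header
 * ops, NAPI, the InfiniBand link-layer parameters (ARPHRD_INFINIBAND,
 * 20-byte hardware addresses), and the driver's private state and
 * work items.
 */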
static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->netdev_ops		 = &ipoib_netdev_ops;
	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	}

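	/* GRO aggregation is done in software, so it needs no HCA support */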
	priv->dev->features |= NETIF_F_GRO;

	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
		priv->dev->features |= NETIF_F_TSO;

	return 0;
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

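/*
 * ib_client add callback: create one "ib%d" interface for each
 * InfiniBand link-layer port of the new device (switches expose a
 * single port 0) and stash the interfaces in the client data list.
 */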
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		flush_workqueue(ipoib_workqueue);

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);