/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
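
/*
 * Note: these parameters are only read at module load time;
 * ipoib_init_module() below rounds them to powers of two and clamps
 * them to [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE].
 */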
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
static void ipoib_neigh_reclaim(struct rcu_head *rp);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};
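
/*
 * ipoib_client registers this module with the IB core: ipoib_add_one() is
 * called for each existing or hot-plugged IB device and ipoib_remove_one()
 * when a device goes away.  The per-device list of IPoIB interfaces is
 * tracked via ib_set_client_data()/ib_get_client_data().
 */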
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev))
		goto err_disable;

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;

err_stop:
	ipoib_ib_dev_stop(dev, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}
static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev, 1);
	ipoib_ib_dev_stop(dev, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}
static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}
static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}
int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		rtnl_unlock();
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	return -EINVAL;
}
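
/*
 * ipoib_set_mode() is reached from the sysfs "mode" attribute (see
 * ipoib_cm_add_mode_attr()), so the mode is conventionally toggled with,
 * e.g. (illustrative; the interface name depends on the system):
 *
 *	echo connected > /sys/class/net/ib0/mode
 *	echo datagram  > /sys/class/net/ib0/mode
 */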
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}
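
/*
 * The path table is an rb-tree keyed by memcmp() over the 16-byte
 * destination GID.  Both __path_find() and __path_add() assume the
 * caller holds priv->lock.
 */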
static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(netdev_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}
int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}
void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			be16_to_cpu(path->pathrec.dlid),
			path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}
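
/*
 * The SA query issued above is asynchronous: path_rec_completion() runs
 * when the subnet administrator answers (or the query fails), and
 * path->done lets ipoib_flush_paths() wait for a query still in flight.
 */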
static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
			   struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				list_del(&neigh->list);
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return;
		}
	} else {
		neigh->ah  = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return;

err_path:
	list_del(&neigh->list);

err_drop:
	ipoib_neigh_free(neigh);

	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_cb *cb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, cb->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, cb->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			__skb_queue_tail(&path->queue, skb);

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
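
/*
 * Transmit entry point.  Multicast destinations (hwaddr[4] == 0xff) go
 * through ipoib_mcast_send(); unicast IP/IPv6 uses the neighbour cache
 * and path table, while unicast ARP/RARP always performs a path record
 * lookup via unicast_arp_send().
 */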
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
	struct ipoib_header *header;
	unsigned long flags;

	header = (struct ipoib_header *) skb->data;

	if (unlikely(cb->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast*/
		cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		cb->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, cb->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (unlikely(!neigh)) {
			neigh_add_path(skb, cb->hwaddr, dev);
			return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* for unicast ARP and RARP should always perform path find */
		unicast_arp_send(skb, dev, cb);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah) {
		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}
static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;
	struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * we don't rely on dst_entry structure,  always stuff the
	 * destination address into skb->cb so we can figure out where
	 * to send the packet later.
	 */
	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);

	return sizeof *header;
}
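
/*
 * IPoIB has no on-the-wire hardware header for the destination: the
 * 4-byte ipoib_header only carries the ethertype.  The 20-byte
 * destination address is stashed in skb->cb here and consumed by
 * ipoib_start_xmit().
 */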
static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}
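
/*
 * Layout of the 20-byte (INFINIBAND_ALEN) IPoIB hardware address used
 * by the hash below:
 *
 *	byte  0      : flags (e.g. connected-mode support)
 *	bytes 1 -  3 : queue pair number (QPN)
 *	bytes 4 - 19 : port GID (subnet prefix + port GUID)
 *
 * so daddr + 4 is the GID and IPOIB_QPN() extracts the 24-bit QPN.
 */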
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contributes to spreading
	 * The subnet prefix is not used as one can not connect to
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	 /* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *daddr_32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(daddr_32[3], daddr_32[4], 0xFFFFFF & daddr_32[0], 0);
	return hv & htbl->mask;
}
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}
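
/*
 * Lookups above run under rcu_read_lock_bh() only; all writers to the
 * hash (alloc/free/reap/flush) serialize on priv->lock and defer the
 * actual free through call_rcu().  atomic_inc_not_zero() guards against
 * taking a reference on an entry that is concurrently being deleted.
 */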
static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods */
			if (time_after(neigh_obsolete, neigh->alive)) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}
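
/*
 * Neighbour garbage collection reuses the core ARP table tunables: the
 * reap task runs every arp_tbl.gc_interval and reclaims entries that
 * were idle for two full GC periods.
 */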
static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
					    struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread succeeded?
	 * recalc hash, maybe hash resize took place so we do a search
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}
void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(netdev_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}
static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}
void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	ntbl->htbl = htbl;
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}
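
/*
 * The bucket count is sized from arp_tbl.gc_thresh3 rounded up to a
 * power of two, so ipoib_addr_hash() can reduce the jhash value with a
 * simple mask.
 */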
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belong to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}
static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	/* Stop GC if called at init fail need to cancel work */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}
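
/*
 * Teardown ordering: mark the table as flushing, stop the GC task,
 * flush all entries, then wait for neigh_hash_free_rcu() to signal
 * ntbl->deleted once the RCU-deferred free of the bucket array has
 * actually run.
 */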
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (ipoib_neigh_hash_init(priv) < 0)
		goto out;
	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out_neigh_hash_cleanup;
	}

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out_neigh_hash_cleanup:
	ipoib_neigh_hash_uninit(dev);
out:
	return -ENOMEM;
}
void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;

	ipoib_neigh_hash_uninit(dev);
}
static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
};
void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->netdev_ops	 = &ipoib_netdev_ops;
	dev->header_ops	 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_ENCAP_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}
static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}
void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = netdev_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				"by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
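
/*
 * The "umcast" attribute toggles IPOIB_FLAG_UMCAST via
 * ipoib_set_umcast() above, conventionally with, e.g. (illustrative):
 *
 *	echo 1 > /sys/class/net/ib0/umcast
 */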
int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}
static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
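
/*
 * create_child/delete_child manage P_Key subinterfaces via
 * ipoib_vlan_add()/ipoib_vlan_delete(), conventionally with, e.g.
 * (illustrative):
 *
 *	echo 0x8001 > /sys/class/net/ib0/create_child	(creates ib0.8001)
 *	echo 0x8001 > /sys/class/net/ib0/delete_child
 */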
int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}
int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features = NETIF_F_SG |
			NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}

	return 0;
}
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	/* Stop GC if started before flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(ipoib_workqueue);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(ipoib_workqueue);

		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}
static void __exit ipoib_cleanup_module(void)
{
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);