/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>       /* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
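
/*
 * Usage note (illustrative values): the ring sizes can be set at module
 * load time, e.g. "modprobe ib_ipoib send_queue_size=128 recv_queue_size=256".
 * ipoib_init_module() below rounds and clamps whatever is passed in.
 */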

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
        struct net_device *dev;
        struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
        0x00, 0xff, 0xff, 0xff,
        0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
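
/*
 * This is the 20-byte IPoIB hardware address of the IPv4 broadcast group:
 * one reserved byte, the 24-bit multicast QPN (0xffffff), then the 16-byte
 * broadcast MGID.  Bytes 8 and 9 (the P_Key) are still zero here;
 * ipoib_add_port() patches in the port's P_Key before use.
 */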

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
        .name   = "ipoib",
        .add    = ipoib_add_one,
        .remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "bringing up interface\n");

        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

        if (ipoib_ib_dev_open(dev))
                goto err_disable;

        if (ipoib_ib_dev_up(dev))
                goto err_stop;

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring up any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (flags & IFF_UP)
                                continue;

                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        netif_start_queue(dev);

        return 0;

err_stop:
        ipoib_ib_dev_stop(dev, 1);

err_disable:
        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "stopping interface\n");

        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        netif_stop_queue(dev);

        ipoib_ib_dev_down(dev, 0);
        ipoib_ib_dev_stop(dev, 0);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring down any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (!(flags & IFF_UP))
                                continue;

                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        return 0;
}

static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
                features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);

        return features;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* dev->mtu > 2K ==> connected mode */
        if (ipoib_cm_admin_enabled(dev)) {
                if (new_mtu > ipoib_cm_max_mtu(dev))
                        return -EINVAL;

                if (new_mtu > priv->mcast_mtu)
                        ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
                                   priv->mcast_mtu);

                dev->mtu = new_mtu;
                return 0;
        }

        if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
                return -EINVAL;

        priv->admin_mtu = new_mtu;

        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        return 0;
}
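
/*
 * Path records are kept on priv->path_list and also in an rb-tree
 * (priv->path_tree) keyed by the 16-byte destination GID, so the transmit
 * path can locate a path in O(log n).  Callers of __path_find() and
 * __path_add() must hold priv->lock.
 */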

static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
        struct ipoib_path *path;
        int ret;

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                ret = memcmp(gid, path->pathrec.dgid.raw,
                             sizeof (union ib_gid));

                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return path;
        }

        return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->path_tree.rb_node;
        struct rb_node *pn = NULL;
        struct ipoib_path *tpath;
        int ret;

        while (*n) {
                pn = *n;
                tpath = rb_entry(pn, struct ipoib_path, rb_node);

                ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&path->rb_node, pn, n);
        rb_insert_color(&path->rb_node, &priv->path_tree);

        list_add_tail(&path->list, &priv->path_list);

        return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = __skb_dequeue(&path->queue)))
                dev_kfree_skb_irq(skb);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that path->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);

                ipoib_neigh_free(dev, neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (path->ah)
                ipoib_put_ah(path->ah);

        kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
        struct ipoib_path_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->path.pathrec.dgid.raw, 0, 16);

        if (ipoib_path_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_path *path;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->path_tree);

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->path = *path;
                        ret = 0;
                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
                          struct ipoib_path *path)
{
        *path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;

        spin_lock_irq(&priv->lock);

        list_for_each_entry_safe(path, tp, &priv->path_list, list) {
                ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
                        be16_to_cpu(path->pathrec.dlid),
                        path->pathrec.dgid.raw);
                path->valid = 0;
        }

        spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;
        LIST_HEAD(remove_list);
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        list_splice_init(&priv->path_list, &remove_list);

        list_for_each_entry(path, &remove_list, list)
                rb_erase(&path->rb_node, &priv->path_tree);

        list_for_each_entry_safe(path, tp, &remove_list, list) {
                if (path->query)
                        ib_sa_cancel_query(path->query_id, path->query);
                spin_unlock_irqrestore(&priv->lock, flags);
                netif_tx_unlock_bh(dev);
                wait_for_completion(&path->done);
                path_free(dev, path);
                netif_tx_lock_bh(dev);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}
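
/*
 * path_rec_completion() runs in the SA query callback context.  On success
 * it turns the returned path record into an address handle, points every
 * neighbour waiting on this path at the new AH, and collects the skbs that
 * were queued while the lookup was pending so they can be resubmitted via
 * dev_queue_xmit() after priv->lock is dropped.
 */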

static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec,
                                void *path_ptr)
{
        struct ipoib_path *path = path_ptr;
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
        struct ipoib_ah *old_ah = NULL;
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
        unsigned long flags;

        if (!status)
                ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
                          be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
        else
                ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
                          status, path->pathrec.dgid.raw);

        skb_queue_head_init(&skqueue);

        if (!status) {
                struct ib_ah_attr av;

                if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
                        ah = ipoib_create_ah(dev, priv->pd, &av);
        }

        spin_lock_irqsave(&priv->lock, flags);

        if (!IS_ERR_OR_NULL(ah)) {
                path->pathrec = *pathrec;

                old_ah   = path->ah;
                path->ah = ah;

                ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
                          ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

                while ((skb = __skb_dequeue(&path->queue)))
                        __skb_queue_tail(&skqueue, skb);

                list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                        if (neigh->ah) {
                                WARN_ON(neigh->ah != old_ah);
                                /*
                                 * Dropping the ah reference inside
                                 * priv->lock is safe here, because we
                                 * will hold one more reference from
                                 * the original value of path->ah (ie
                                 * old_ah).
                                 */
                                ipoib_put_ah(neigh->ah);
                        }
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;
                        memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                               sizeof(union ib_gid));

                        if (ipoib_cm_enabled(dev, neigh->neighbour)) {
                                if (!ipoib_cm_get(neigh))
                                        ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
                                                                               path,
                                                                               neigh));
                                if (!ipoib_cm_get(neigh)) {
                                        list_del(&neigh->list);
                                        if (neigh->ah)
                                                ipoib_put_ah(neigh->ah);
                                        ipoib_neigh_free(dev, neigh);
                                        continue;
                                }
                        }

                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
                path->valid = 1;
        }

        path->query = NULL;
        complete(&path->done);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (old_ah)
                ipoib_put_ah(old_ah);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        if (!priv->broadcast)
                return NULL;

        path = kzalloc(sizeof *path, GFP_ATOMIC);
        if (!path)
                return NULL;

        path->dev = dev;

        skb_queue_head_init(&path->queue);

        INIT_LIST_HEAD(&path->neigh_list);

        memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
        path->pathrec.sgid          = priv->local_gid;
        path->pathrec.pkey          = cpu_to_be16(priv->pkey);
        path->pathrec.numb_path     = 1;
        path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

        return path;
}

static int path_rec_start(struct net_device *dev,
                          struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "Start path record lookup for %pI6\n",
                  path->pathrec.dgid.raw);

        init_completion(&path->done);

        path->query_id =
                ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
                                   &path->pathrec,
                                   IB_SA_PATH_REC_DGID          |
                                   IB_SA_PATH_REC_SGID          |
                                   IB_SA_PATH_REC_NUMB_PATH     |
                                   IB_SA_PATH_REC_TRAFFIC_CLASS |
                                   IB_SA_PATH_REC_PKEY,
                                   1000, GFP_ATOMIC,
                                   path_rec_completion,
                                   path, &path->query);
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
                path->query = NULL;
                complete(&path->done);
                return path->query_id;
        }

        return 0;
}
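
/*
 * An IPoIB hardware address is 20 bytes: byte 0 is reserved, bytes 1-3
 * hold the remote QPN (extracted by IPOIB_QPN()), and bytes 4-19 hold the
 * destination port GID -- hence the "n->ha + 4" path lookups below.
 */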

/* called with rcu_read_lock */
static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;
        unsigned long flags;

        neigh = ipoib_neigh_alloc(n, skb->dev);
        if (!neigh) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }

        spin_lock_irqsave(&priv->lock, flags);

        path = __path_find(dev, n->ha + 4);
        if (!path) {
                path = path_rec_create(dev, n->ha + 4);
                if (!path)
                        goto err_path;

                __path_add(dev, path);
        }

        list_add_tail(&neigh->list, &path->neigh_list);

        if (path->ah) {
                kref_get(&path->ah->ref);
                neigh->ah = path->ah;
                memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
                       sizeof(union ib_gid));

                if (ipoib_cm_enabled(dev, neigh->neighbour)) {
                        if (!ipoib_cm_get(neigh))
                                ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
                        if (!ipoib_cm_get(neigh)) {
                                list_del(&neigh->list);
                                if (neigh->ah)
                                        ipoib_put_ah(neigh->ah);
                                ipoib_neigh_free(dev, neigh);
                                goto err_drop;
                        }
                        if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
                                __skb_queue_tail(&neigh->queue, skb);
                        else {
                                ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
                                           skb_queue_len(&neigh->queue));
                                goto err_drop;
                        }
                } else {
                        spin_unlock_irqrestore(&priv->lock, flags);
                        ipoib_send(dev, skb, path->ah, IPOIB_QPN(n->ha));
                        return;
                }
        } else {
                neigh->ah = NULL;

                if (!path->query && path_rec_start(dev, path))
                        goto err_list;

                __skb_queue_tail(&neigh->queue, skb);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        return;

err_list:
        list_del(&neigh->list);

err_path:
        ipoib_neigh_free(dev, neigh);
err_drop:
        ++dev->stats.tx_dropped;
        dev_kfree_skb_any(skb);

        spin_unlock_irqrestore(&priv->lock, flags);
}

/* called with rcu_read_lock */
static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

        /* Look up path record for unicasts */
        if (n->ha[4] != 0xff) {
                neigh_add_path(skb, n, dev);
                return;
        }

        /* Add in the P_Key for multicasts */
        n->ha[8] = (priv->pkey >> 8) & 0xff;
        n->ha[9] = priv->pkey & 0xff;
        ipoib_mcast_send(dev, n->ha + 4, skb);
}
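
/*
 * unicast_arp_send() handles unicast skbs that carry a pseudoheader rather
 * than a neighbour entry (in practice ARP/RARP replies): the destination is
 * taken from phdr->hwaddr, and the skb is either sent immediately, queued on
 * the path until the record resolves, or dropped if the queue is full.
 */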

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                             struct ipoib_pseudoheader *phdr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        path = __path_find(dev, phdr->hwaddr + 4);
        if (!path || !path->valid) {
                int new_path = 0;

                if (!path) {
                        path = path_rec_create(dev, phdr->hwaddr + 4);
                        new_path = 1;
                }
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);

                        if (!path->query && path_rec_start(dev, path)) {
                                spin_unlock_irqrestore(&priv->lock, flags);
                                if (new_path)
                                        path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
                } else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                spin_unlock_irqrestore(&priv->lock, flags);
                return;
        }

        if (path->ah) {
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));

                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
                return;
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
}
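
/*
 * Transmit dispatch: a packet with a resolved ipoib_neigh goes out via
 * connected mode (ipoib_cm_send) or datagram mode (ipoib_send); one without
 * an ipoib_neigh triggers a path lookup; and one with no neighbour at all is
 * routed using the pseudoheader that ipoib_hard_header() pushed in front of
 * the payload.
 */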

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
        struct neighbour *n = NULL;
        unsigned long flags;

        rcu_read_lock();
        if (likely(skb_dst(skb))) {
                n = dst_get_neighbour_noref(skb_dst(skb));
                if (!n) {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                        goto unlock;
                }
        }
        if (likely(n)) {
                if (unlikely(!*to_ipoib_neigh(n))) {
                        ipoib_path_lookup(skb, n, dev);
                        goto unlock;
                }

                neigh = *to_ipoib_neigh(n);

                if (unlikely((memcmp(&neigh->dgid.raw,
                                     n->ha + 4,
                                     sizeof(union ib_gid))) ||
                             (neigh->dev != dev))) {
                        spin_lock_irqsave(&priv->lock, flags);
                        /*
                         * It's safe to call ipoib_put_ah() inside
                         * priv->lock here, because we know that
                         * path->ah will always hold one more reference,
                         * so ipoib_put_ah() will never do more than
                         * decrement the ref count.
                         */
                        if (neigh->ah)
                                ipoib_put_ah(neigh->ah);
                        list_del(&neigh->list);
                        ipoib_neigh_free(dev, neigh);
                        spin_unlock_irqrestore(&priv->lock, flags);
                        ipoib_path_lookup(skb, n, dev);
                        goto unlock;
                }

                if (ipoib_cm_get(neigh)) {
                        if (ipoib_cm_up(neigh)) {
                                ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
                                goto unlock;
                        }
                } else if (neigh->ah) {
                        ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
                        goto unlock;
                }

                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        spin_lock_irqsave(&priv->lock, flags);
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb->data;
                skb_pull(skb, sizeof *phdr);

                if (phdr->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicast*/
                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
                        phdr->hwaddr[9] = priv->pkey & 0xff;

                        ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */

                        if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
                            (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
                                           skb_dst(skb) ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
                                           IPOIB_QPN(phdr->hwaddr),
                                           phdr->hwaddr + 4);
                                dev_kfree_skb_any(skb);
                                ++dev->stats.tx_dropped;
                                goto unlock;
                        }

                        unicast_arp_send(skb, dev, phdr);
                }
        }
unlock:
        rcu_read_unlock();
        return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
                   jiffies_to_msecs(jiffies - dev->trans_start));
        ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
                   netif_queue_stopped(dev),
                   priv->tx_head, priv->tx_tail);
        /* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
                             struct net_device *dev,
                             unsigned short type,
                             const void *daddr, const void *saddr, unsigned len)
{
        struct ipoib_header *header;
        struct dst_entry *dst;
        struct neighbour *n;

        header = (struct ipoib_header *) skb_push(skb, sizeof *header);

        header->proto = htons(type);
        header->reserved = 0;

        /*
         * If we don't have a neighbour structure, stuff the
         * destination address onto the front of the skb so we can
         * figure out where to send the packet later.
         */
        dst = skb_dst(skb);
        n = NULL;
        if (dst)
                n = dst_get_neighbour_noref_raw(dst);
        if ((!dst || !n) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
        }

        return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
                ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
                return;
        }

        queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_cleanup(struct neighbour *n)
{
        struct ipoib_neigh *neigh;
        struct ipoib_dev_priv *priv = netdev_priv(n->dev);
        unsigned long flags;
        struct ipoib_ah *ah = NULL;

        neigh = *to_ipoib_neigh(n);
        if (neigh)
                priv = netdev_priv(neigh->dev);
        else
                return;
        ipoib_dbg(priv,
                  "neigh_cleanup for %06x %pI6\n",
                  IPOIB_QPN(n->ha),
                  n->ha + 4);

        spin_lock_irqsave(&priv->lock, flags);

        if (neigh->ah)
                ah = neigh->ah;
        list_del(&neigh->list);
        ipoib_neigh_free(n->dev, neigh);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ah)
                ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
                                      struct net_device *dev)
{
        struct ipoib_neigh *neigh;

        neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
        if (!neigh)
                return NULL;

        neigh->neighbour = neighbour;
        neigh->dev = dev;
        memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
        *to_ipoib_neigh(neighbour) = neigh;
        skb_queue_head_init(&neigh->queue);
        ipoib_cm_set(neigh, NULL);

        return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
        struct sk_buff *skb;
        *to_ipoib_neigh(neigh->neighbour) = NULL;
        while ((skb = __skb_dequeue(&neigh->queue))) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }
        if (ipoib_cm_get(neigh))
                ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
        kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
        parms->neigh_cleanup = ipoib_neigh_cleanup;

        return 0;
}
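
/*
 * Presumably the TX ring uses vzalloc() rather than kzalloc() because
 * ipoib_sendq_size entries can add up to a large allocation that need not
 * be physically contiguous; the smaller RX ring stays with kzalloc().
 */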

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Allocate RX/TX "rings" to hold queued skbs */
        priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, ipoib_recvq_size);
                goto out;
        }

        priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, ipoib_sendq_size);
                goto out_rx_ring_cleanup;
        }

        /* priv->tx_head, tx_tail & tx_outstanding are already 0 */

        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;

        return 0;

out_tx_ring_cleanup:
        vfree(priv->tx_ring);

out_rx_ring_cleanup:
        kfree(priv->rx_ring);

out:
        return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

        ipoib_delete_debug_files(dev);

        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                unregister_netdev(cpriv->dev);
                ipoib_dev_cleanup(cpriv->dev);
                free_netdev(cpriv->dev);
        }

        ipoib_ib_dev_cleanup(dev);

        kfree(priv->rx_ring);
        vfree(priv->tx_ring);

        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
        .create = ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
        .ndo_open        = ipoib_open,
        .ndo_stop        = ipoib_stop,
        .ndo_change_mtu  = ipoib_change_mtu,
        .ndo_fix_features = ipoib_fix_features,
        .ndo_start_xmit  = ipoib_start_xmit,
        .ndo_tx_timeout  = ipoib_timeout,
        .ndo_set_rx_mode = ipoib_set_mcast_list,
        .ndo_neigh_setup = ipoib_neigh_setup_dev,
};

static void ipoib_setup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        dev->netdev_ops  = &ipoib_netdev_ops;
        dev->header_ops  = &ipoib_header_ops;

        ipoib_set_ethtool_ops(dev);

        netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

        dev->watchdog_timeo = HZ;

        dev->flags |= IFF_BROADCAST | IFF_MULTICAST;

        /*
         * We add in INFINIBAND_ALEN to allow for the destination
         * address "pseudoheader" for skbs without neighbour struct.
         */
        dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
        dev->addr_len        = INFINIBAND_ALEN;
        dev->type            = ARPHRD_INFINIBAND;
        dev->tx_queue_len    = ipoib_sendq_size * 2;
        dev->features        = (NETIF_F_VLAN_CHALLENGED |
                                NETIF_F_HIGHDMA);
        dev->priv_flags     &= ~IFF_XMIT_DST_RELEASE;

        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

        netif_carrier_off(dev);

        priv->dev = dev;

        spin_lock_init(&priv->lock);

        mutex_init(&priv->vlan_mutex);

        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);

        INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
        INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
        INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
        INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
        INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
        INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
        INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
                           ipoib_setup);
        if (!dev)
                return NULL;

        return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

        return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

        return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
        unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

        if (umcast_val > 0) {
                set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
                ipoib_warn(priv, "ignoring multicast groups joined directly "
                                "by userspace\n");
        } else
                clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

        return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        pkey |= 0x8000;

        ret = ipoib_vlan_add(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
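
/*
 * Example (hypothetical interface name): a child interface on P_Key 0x8001
 * is created or removed from userspace with
 *   echo 0x8001 > /sys/class/net/ib0/create_child
 *   echo 0x8001 > /sys/class/net/ib0/delete_child
 */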

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
        struct ib_device_attr *device_attr;
        int result = -ENOMEM;

        device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
        if (!device_attr) {
                printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
                       hca->name, sizeof *device_attr);
                return result;
        }

        result = ib_query_device(hca, device_attr);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
                       hca->name, result);
                kfree(device_attr);
                return result;
        }
        priv->hca_caps = device_attr->device_cap_flags;

        kfree(device_attr);

        if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
                priv->dev->hw_features = NETIF_F_SG |
                        NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (priv->hca_caps & IB_DEVICE_UD_TSO)
                        priv->dev->hw_features |= NETIF_F_TSO;

                priv->dev->features |= priv->dev->hw_features;
        }

        return 0;
}

static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        struct ib_port_attr attr;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(format);
        if (!priv)
                goto alloc_mem_failed;

        SET_NETDEV_DEV(priv->dev, hca->dma_device);
        priv->dev->dev_id = port - 1;

        if (!ib_query_port(hca, port, &attr))
                priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
        else {
                printk(KERN_WARNING "%s: ib_query_port %d failed\n",
                       hca->name, port);
                goto device_init_failed;
        }

        /* MTU will be reset when mcast join happens */
        priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
        priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

        priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        if (ipoib_set_dev_features(priv, hca))
                goto device_init_failed;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        priv->pkey |= 0x8000;

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        } else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        result = ipoib_dev_init(priv->dev, hca, port);
        if (result < 0) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        result = ib_register_event_handler(&priv->event_handler);
        if (result < 0) {
                printk(KERN_WARNING "%s: ib_register_event_handler failed for "
                       "port %d (ret = %d)\n",
                       hca->name, port, result);
                goto event_failed;
        }

        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
                       hca->name, port, result);
                goto register_failed;
        }

        ipoib_create_debug_files(priv->dev);

        if (ipoib_cm_add_mode_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_umcast_attr(priv->dev))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        ipoib_delete_debug_files(priv->dev);
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);
        flush_workqueue(ipoib_workqueue);

event_failed:
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        free_netdev(priv->dev);

alloc_mem_failed:
        return ERR_PTR(result);
}
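
/*
 * ipoib_add_one() is the ib_client "add" hook: a switch exposes only port 0,
 * any other node type exposes ports 1..phys_port_cnt; ports whose link layer
 * is not InfiniBand are skipped.
 */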

static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
                        continue;
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                }
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev_list = ib_get_client_data(device, &ipoib_client);

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                ib_unregister_event_handler(&priv->event_handler);

                rtnl_lock();
                dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
                rtnl_unlock();

                flush_workqueue(ipoib_workqueue);

                unregister_netdev(priv->dev);
                ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }

        kfree(dev_list);
}
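
/*
 * Module init: round each ring size up to a power of two and clamp it to
 * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE] (the send queue must also be
 * at least 2 * MAX_SEND_CQE), then register debugfs, the private workqueue,
 * and the SA and IB clients, unwinding in reverse order on failure.
 */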

static int __init ipoib_init_module(void)
{
        int ret;

        ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
        ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

        /*
         * When copying small received packets, we only copy from the
         * linear data part of the SKB, so we rely on this condition.
         */
        BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create our own workqueue mainly because we want to be
         * able to flush it when devices are being removed.  We can't
         * use schedule_work()/flush_scheduled_work() because both
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
         */
        ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ib_sa_register_client(&ipoib_sa_client);

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_sa;

        return 0;

err_sa:
        ib_sa_unregister_client(&ipoib_sa_client);
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();

        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
        ib_unregister_client(&ipoib_client);
        ib_sa_unregister_client(&ipoib_sa_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);