net: Introduce net_rwsem to protect net_namespace_list
drivers/infiniband/core/roce_gid_mgmt.c
/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

struct update_gid_event_work {
        struct work_struct work;
        union ib_gid       gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ         3
struct netdev_event_work_cmd {
        roce_netdev_callback    cb;
        roce_netdev_filter      filter;
        struct net_device       *ndev;
        struct net_device       *filter_ndev;
};

struct netdev_event_work {
        struct work_struct              work;
        struct netdev_event_work_cmd    cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
        bool (*is_supported)(const struct ib_device *device, u8 port_num);
        enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
        {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
        {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE   ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

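/*
 * Return a bitmask of the GID types supported on @port. Ports that do not
 * run RoCE report plain IB; RoCE ports report RoCE v1 and/or RoCE v2
 * (UDP encapsulation) according to the per-port capability callbacks in
 * PORT_CAP_TO_GID_TYPE above.
 */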
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
        int i;
        unsigned int ret_flags = 0;

        if (!rdma_protocol_roce(ib_dev, port))
                return 1UL << IB_GID_TYPE_IB;

        for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
                if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
                        ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

        return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

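/*
 * Add or delete @gid for every GID type the port supports: the same
 * IP-derived GID is pushed to (or removed from) the cache once per bit
 * set in roce_gid_type_mask_support().
 */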
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        int i;
        unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
                if ((1UL << i) & gid_type_mask) {
                        gid_attr->gid_type = i;
                        switch (gid_op) {
                        case GID_ADD:
                                ib_cache_gid_add(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        case GID_DEL:
                                ib_cache_gid_del(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        }
                }
        }
}

enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE      = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE    = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA          = 1UL << 2,
};

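/*
 * Classify @dev relative to @upper: ACTIVE if it is the bond's current
 * active slave, INACTIVE if another slave is active, and NA when @upper is
 * not a bond master or no active slave is configured. Must be called under
 * rcu_read_lock().
 */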
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

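/*
 * Filter used when walking RoCE ports: the event netdev (@cookie) matches
 * this port if its real device is rdma_ndev itself (e.g. a VLAN on top of
 * it), or if it is an upper device of rdma_ndev while rdma_ndev is either
 * not enslaved or is the bond's active slave.
 */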
#define REQUIRED_BOND_STATES            (BONDING_SLAVE_STATE_ACTIVE |   \
                                         BONDING_SLAVE_STATE_NA)
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *real_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(cookie);
        if (!real_dev)
                real_dev = cookie;

        res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
               (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
                                      struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
                BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        return 1;
}

static int upper_device_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        int res;

        if (!rdma_ndev)
                return 0;

        if (rdma_ndev == cookie)
                return 1;

        rcu_read_lock();
        res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
        rcu_read_unlock();

        return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

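/*
 * Install the default GIDs for this port unless rdma_ndev is an inactive
 * bond slave or is unrelated to the netdev that triggered the event.
 */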
static void enum_netdev_default_gids(struct ib_device *ib_dev,
                                     u8 port, struct net_device *event_ndev,
                                     struct net_device *rdma_ndev)
{
        unsigned long gid_type_mask;

        rcu_read_lock();
        if (!rdma_ndev ||
            ((rdma_ndev != event_ndev &&
              !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
             is_eth_active_slave_of_bonding_rcu(rdma_ndev,
                                                netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
             BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *event_ndev,
                                            struct net_device *rdma_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
            is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
            BONDING_SLAVE_STATE_INACTIVE) {
                unsigned long gid_type_mask;

                rcu_read_unlock();

                gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

                ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                             gid_type_mask,
                                             IB_CACHE_GID_DEFAULT_MODE_DELETE);
        } else {
                rcu_read_unlock();
        }
}

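/*
 * Walk the IPv4 addresses configured on @ndev and add a GID for each one.
 * The addresses are copied to a private list under RCU so the GID cache
 * update itself runs outside the RCU read-side section.
 */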
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct in_device *in_dev;
        struct sin_list {
                struct list_head        list;
                struct sockaddr_in      ip;
        };
        struct sin_list *sin_iter;
        struct sin_list *sin_temp;

        LIST_HEAD(sin_list);
        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (!in_dev) {
                rcu_read_unlock();
                return;
        }

        for_ifa(in_dev) {
                struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->ip.sin_family = AF_INET;
                entry->ip.sin_addr.s_addr = ifa->ifa_address;
                list_add_tail(&entry->list, &sin_list);
        }
        endfor_ifa(in_dev);
        rcu_read_unlock();

        list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&sin_iter->ip);
                list_del(&sin_iter->list);
                kfree(sin_iter);
        }
}

static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head        list;
                struct sockaddr_in6     sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid    gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
        _add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet
         */
        rtnl_lock();
        down_read(&net_rwsem);
        for_each_net(net)
                for_each_netdev(net, ndev)
                        if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
                                add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
        up_read(&net_rwsem);
        rtnl_unlock();
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ib_dev:         the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);

static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        return update_gid(parsed->gid_op, device,
                          port, &parsed->gid,
                          &parsed->gid_attr);
}

struct upper_list {
        struct list_head list;
        struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper, void *data)
{
        struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        struct list_head *upper_list = data;

        if (!entry)
                return 0;

        list_add_tail(&entry->list, upper_list);
        dev_hold(upper);
        entry->upper = upper;

        return 0;
}

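/*
 * Apply @handle_netdev to @cookie and to every device stacked above it
 * (bonds, VLANs, etc.). The upper devices are collected under RCU with a
 * reference held, then handled outside the RCU read-side section.
 */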
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = cookie;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
                                                rdma_ndev);
                dev_put(master_ndev);
        }
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
                                   struct net_device *rdma_ndev, void *cookie)
{
        bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

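/*
 * Work handler: run each queued command against every RoCE-capable port of
 * every IB device, then drop the netdev references taken when the command
 * was queued.
 */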
static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work)
                return NOTIFY_DONE;

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(gid_cache_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
        .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

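/*
 * NETDEV_CHANGEUPPER: on unlink, delete the GIDs that came from the old
 * upper device and re-add the event netdev's own IPs; on link, drop default
 * GIDs from ports that are now inactive bond slaves and add the IPs of the
 * new upper device and everything stacked above it.
 */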
static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
                                        struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd upper_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

        if (changeupper_info->linking == false) {
                cmds[0] = upper_ips_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd;
        } else {
                cmds[0] = bonding_default_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd_upper_ips;
                cmds[1].ndev = changeupper_info->upper_dev;
                cmds[1].filter_ndev = changeupper_info->upper_dev;
        }
}

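/*
 * Netdevice notifier: translate netdev events into up to
 * ROCE_NETDEV_CALLBACK_SZ add/del commands and queue them on gid_cache_wq
 * for deferred processing against all IB devices.
 */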
static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
                .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
        static const struct netdev_event_work_cmd default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = default_del_cmd;
                cmds[1] = add_cmd;
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                cmds[1] = bonding_default_del_cmd_join;
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

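/*
 * Common handler for the inetaddr/inet6addr notifiers: map NETDEV_UP/DOWN
 * on an Ethernet netdev to a GID add/del and queue the update, holding a
 * reference on the netdev until the work runs.
 */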
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev   = ndev;

        queue_work(gid_cache_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in      in;
        struct net_device       *ndev;
        struct in_ifaddr        *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6     in6;
        struct net_device       *ndev;
        struct inet6_ifaddr     *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
        if (!gid_cache_wq)
                return -ENOMEM;

        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);
        /* We rely on the netdevice notifier to enumerate all
         * existing devices in the system. Register this notifier
         * last to make sure we will not miss any IP add/del
         * callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);
        /* Ensure all gid deletion tasks complete before we go down,
         * to avoid any reference to free'd memory. By the time
         * ib-core is removed, all physical devices have been removed,
         * so no issue with remaining hardware contexts.
         */
        destroy_workqueue(gid_cache_wq);
}