drivers/infiniband/core/roce_gid_mgmt.c
/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

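/*
 * Deferred GID update: the notifier context only records the GID, its
 * attributes and the requested operation; the actual GID table update
 * runs later from a work item.
 */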
struct update_gid_event_work {
        struct work_struct work;
        union ib_gid       gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ         3
struct netdev_event_work_cmd {
        roce_netdev_callback    cb;
        roce_netdev_filter      filter;
        struct net_device       *ndev;
        struct net_device       *filter_ndev;
};

struct netdev_event_work {
        struct work_struct              work;
        struct netdev_event_work_cmd    cmds[ROCE_NETDEV_CALLBACK_SZ];
};

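/*
 * Map each RoCE transport capability check to the GID type it implies:
 * plain Ethernet encapsulation (RoCE v1) and UDP encapsulation (RoCE v2).
 */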
static const struct {
        bool (*is_supported)(const struct ib_device *device, u8 port_num);
        enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
        {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
        {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE   ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

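/*
 * Return a bitmask of the GID types supported by the given port. Ports
 * that do not run RoCE report only the IB GID type.
 */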
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
        int i;
        unsigned int ret_flags = 0;

        if (!rdma_protocol_roce(ib_dev, port))
                return 1UL << IB_GID_TYPE_IB;

        for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
                if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
                        ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

        return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

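/* Add or delete the given GID once for every GID type the port supports. */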
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        int i;
        unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
                if ((1UL << i) & gid_type_mask) {
                        gid_attr->gid_type = i;
                        switch (gid_op) {
                        case GID_ADD:
                                ib_cache_gid_add(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        case GID_DEL:
                                ib_cache_gid_del(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        }
                }
        }
}

enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE      = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE    = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA          = 1UL << 2,
};

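/*
 * Return whether dev is the active slave of the bond device upper;
 * NA when upper is not a bond master or has no active slave.
 * Must be called under rcu_read_lock.
 */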
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

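/* Return true if upper is stacked (at any level) above dev. RCU context. */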
static bool is_upper_dev_rcu(struct net_device *dev, struct net_device *upper)
{
        struct net_device *_upper = NULL;
        struct list_head *iter;

        netdev_for_each_all_upper_dev_rcu(dev, _upper, iter)
                if (_upper == upper)
                        break;

        return _upper == upper;
}

#define REQUIRED_BOND_STATES            (BONDING_SLAVE_STATE_ACTIVE |   \
                                         BONDING_SLAVE_STATE_NA)
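/*
 * Filter: an event netdev matches the RDMA port if its real device is the
 * port's netdev, or if it is an upper device of the port's netdev and the
 * port's netdev is either the active bonding slave or not a slave at all.
 */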
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;
        struct net_device *real_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(event_ndev);
        if (!real_dev)
                real_dev = event_ndev;

        res = ((is_upper_dev_rcu(rdma_ndev, event_ndev) &&
               (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

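/* Filter: matches when the port's netdev is an inactive bonding slave. */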
static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
                                      struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
                BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        return 1;
}

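/*
 * Filter: matches when the event netdev is the port's netdev itself or
 * sits anywhere above it in the upper device chain.
 */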
static int upper_device_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;
        int res;

        if (!rdma_ndev)
                return 0;

        if (rdma_ndev == event_ndev)
                return 1;

        rcu_read_lock();
        res = is_upper_dev_rcu(rdma_ndev, event_ndev);
        rcu_read_unlock();

        return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

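/*
 * Set the default GIDs on the port, unless its netdev is unrelated to the
 * event netdev or is currently an inactive bonding slave.
 */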
static void enum_netdev_default_gids(struct ib_device *ib_dev,
                                     u8 port, struct net_device *event_ndev,
                                     struct net_device *rdma_ndev)
{
        unsigned long gid_type_mask;

        rcu_read_lock();
        if (!rdma_ndev ||
            ((rdma_ndev != event_ndev &&
              !is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
             is_eth_active_slave_of_bonding_rcu(rdma_ndev,
                                                netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
             BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

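/*
 * Delete the port's default GIDs when its netdev has become an inactive
 * slave of the bond the event netdev (or its VLAN real device) represents.
 */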
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *event_ndev,
                                            struct net_device *rdma_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if (is_upper_dev_rcu(rdma_ndev, event_ndev) &&
            is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
            BONDING_SLAVE_STATE_INACTIVE) {
                unsigned long gid_type_mask;

                rcu_read_unlock();

                gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

                ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                             gid_type_mask,
                                             IB_CACHE_GID_DEFAULT_MODE_DELETE);
        } else {
                rcu_read_unlock();
        }
}

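/*
 * Walk the netdev's IPv4 addresses and add a GID for each. The addresses
 * are first copied to a local list under RCU, so the GID table updates,
 * which may sleep, run outside the RCU read side.
 */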
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct in_device *in_dev;
        struct sin_list {
                struct list_head        list;
                struct sockaddr_in      ip;
        };
        struct sin_list *sin_iter;
        struct sin_list *sin_temp;

        LIST_HEAD(sin_list);
        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (!in_dev) {
                rcu_read_unlock();
                return;
        }

        for_ifa(in_dev) {
                struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry) {
                        pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
                        continue;
                }
                entry->ip.sin_family = AF_INET;
                entry->ip.sin_addr.s_addr = ifa->ifa_address;
                list_add_tail(&entry->list, &sin_list);
        }
        endfor_ifa(in_dev);
        rcu_read_unlock();

        list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&sin_iter->ip);
                list_del(&sin_iter->list);
                kfree(sin_iter);
        }
}

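/*
 * Same as enum_netdev_ipv4_ips, for the netdev's IPv6 addresses: copy the
 * addresses under the in6_dev lock, then add the GIDs outside it.
 */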
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head        list;
                struct sockaddr_in6     sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry) {
                        pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
                        continue;
                }

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid    gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;

        enum_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
        _add_netdev_ips(ib_dev, port, event_ndev);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;

        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet
         */
        rtnl_lock();
        for_each_net(net)
                for_each_netdev(net, ndev)
                        if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
                                add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
        rtnl_unlock();
}

/* This function will rescan all of the network devices in the system
 * and add their GIDs, as needed, to the relevant RoCE devices.
 */
int roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);

        return 0;
}

static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        update_gid(parsed->gid_op, device,
                   port, &parsed->gid,
                   &parsed->gid_attr);
}

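/*
 * Apply handle_netdev to the event netdev and to every device stacked
 * above it, holding a reference on each upper device so the callback can
 * run outside the RCU read side.
 */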
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = (struct net_device *)cookie;
        struct upper_list {
                struct list_head list;
                struct net_device *upper;
        };
        struct net_device *upper;
        struct list_head *iter;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        rcu_read_lock();
        netdev_for_each_all_upper_dev_rcu(ndev, upper, iter) {
                struct upper_list *entry = kmalloc(sizeof(*entry),
                                                   GFP_ATOMIC);

                if (!entry) {
                        pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
                        continue;
                }

                list_add_tail(&entry->list, &upper_list);
                dev_hold(upper);
                entry->upper = upper;
        }
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
                                                rdma_ndev);
                dev_put(master_ndev);
        }
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
                                   struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;

        bond_delete_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs from a work item.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

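/*
 * Copy the command list into a work item, default any unset netdevs to
 * the event netdev, take references on them and queue the work on ib_wq;
 * the references are dropped by the work handler.
 */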
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work) {
                pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
                return NOTIFY_DONE;
        }

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(ib_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
        .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

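/*
 * On CHANGEUPPER: when unlinking, drop the GIDs inherited through the
 * upper device and re-add the netdev's own IPs; when linking, delete the
 * default GIDs of ports that became inactive slaves and add the upper
 * device's IPs.
 */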
static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
                                        struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd upper_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

        if (!changeupper_info->linking) {
                cmds[0] = upper_ips_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd;
        } else {
                cmds[0] = bonding_default_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd_upper_ips;
                cmds[1].ndev = changeupper_info->upper_dev;
                cmds[1].filter_ndev = changeupper_info->upper_dev;
        }
}

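/*
 * Netdevice notifier: translate each event into up to
 * ROCE_NETDEV_CALLBACK_SZ commands and hand them to a work item, since
 * the GID table updates may sleep.
 */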
static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
                .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
        static const struct netdev_event_work_cmd default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = default_del_cmd;
                cmds[1] = add_cmd;
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                cmds[1] = bonding_default_del_cmd_join;
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

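/*
 * Common handler for the inetaddr/inet6addr notifiers: map the address to
 * a GID and queue the add/delete. GFP_ATOMIC is used because the notifier
 * chain may be called in atomic context.
 */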
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work) {
                pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
                return NOTIFY_DONE;
        }

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev   = ndev;

        queue_work(ib_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in      in;
        struct net_device       *ndev;
        struct in_ifaddr        *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6     in6;
        struct net_device       *ndev;
        struct inet6_ifaddr     *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);
        /* We rely on the netdevice notifier to enumerate all
         * existing devices in the system. Register to this notifier
         * last to make sure we will not miss any IP add/del
         * callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);
        /* Ensure all GID deletion tasks complete before we go down,
         * to avoid any reference to freed memory. By the time
         * ib-core is removed, all physical devices have been removed,
         * so no issue with remaining hardware contexts.
         */
}