Merge tag 'arc-5.10-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
[linux-2.6-microblaze.git] / drivers / infiniband / core / cma.c
index 7f0e91e..a77750b 100644 (file)
@@ -68,6 +68,9 @@ static const char * const cma_events[] = {
        [RDMA_CM_EVENT_TIMEWAIT_EXIT]    = "timewait exit",
 };
 
+static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
+                        union ib_gid *mgid);
+
 const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
 {
        size_t index = event;
@@ -301,6 +304,10 @@ int cma_set_default_gid_type(struct cma_device *cma_dev,
        if (!rdma_is_port_valid(cma_dev->device, port))
                return -EINVAL;
 
+       if (default_gid_type == IB_GID_TYPE_IB &&
+           rdma_protocol_roce_eth_encap(cma_dev->device, port))
+               default_gid_type = IB_GID_TYPE_ROCE;
+
        supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
 
        if (!(supported_gids & 1 << default_gid_type))
@@ -345,13 +352,10 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
 
 struct cma_multicast {
        struct rdma_id_private *id_priv;
-       union {
-               struct ib_sa_multicast *ib;
-       } multicast;
+       struct ib_sa_multicast *sa_mc;
        struct list_head        list;
        void                    *context;
        struct sockaddr_storage addr;
-       struct kref             mcref;
        u8                      join_state;
 };
 
@@ -363,18 +367,6 @@ struct cma_work {
        struct rdma_cm_event    event;
 };
 
-struct cma_ndev_work {
-       struct work_struct      work;
-       struct rdma_id_private  *id;
-       struct rdma_cm_event    event;
-};
-
-struct iboe_mcast_work {
-       struct work_struct       work;
-       struct rdma_id_private  *id;
-       struct cma_multicast    *mc;
-};
-
 union cma_ip_addr {
        struct in6_addr ip6;
        struct {
@@ -404,23 +396,21 @@ struct cma_req_info {
        u16 pkey;
 };
 
-static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
-{
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&id_priv->lock, flags);
-       ret = (id_priv->state == comp);
-       spin_unlock_irqrestore(&id_priv->lock, flags);
-       return ret;
-}
-
 static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum rdma_cm_state comp, enum rdma_cm_state exch)
 {
        unsigned long flags;
        int ret;
 
+       /*
+        * The FSM uses a funny double locking where state is protected by both
+        * the handler_mutex and the spinlock. State is not allowed to change
+        * to/from a handler_mutex protected value without also holding
+        * handler_mutex.
+        */
+       if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
+               lockdep_assert_held(&id_priv->handler_mutex);
+
        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
@@ -467,10 +457,8 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
        id_priv->id.route.addr.dev_addr.transport =
                rdma_node_get_transport(cma_dev->device->node_type);
        list_add_tail(&id_priv->list, &cma_dev->id_list);
-       if (id_priv->res.kern_name)
-               rdma_restrack_kadd(&id_priv->res);
-       else
-               rdma_restrack_uadd(&id_priv->res);
+       rdma_restrack_add(&id_priv->res);
+
        trace_cm_id_attach(id_priv, cma_dev->device);
 }
 
@@ -483,14 +471,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                                          rdma_start_port(cma_dev->device)];
 }
 
-static inline void release_mc(struct kref *kref)
-{
-       struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
-
-       kfree(mc->multicast.ib);
-       kfree(mc);
-}
-
 static void cma_release_dev(struct rdma_id_private *id_priv)
 {
        mutex_lock(&lock);
@@ -844,10 +824,10 @@ static void cma_id_put(struct rdma_id_private *id_priv)
                complete(&id_priv->comp);
 }
 
-struct rdma_cm_id *__rdma_create_id(struct net *net,
-                                   rdma_cm_event_handler event_handler,
-                                   void *context, enum rdma_ucm_port_space ps,
-                                   enum ib_qp_type qp_type, const char *caller)
+static struct rdma_id_private *
+__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
+                void *context, enum rdma_ucm_port_space ps,
+                enum ib_qp_type qp_type, const struct rdma_id_private *parent)
 {
        struct rdma_id_private *id_priv;
 
@@ -855,8 +835,6 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
        if (!id_priv)
                return ERR_PTR(-ENOMEM);
 
-       rdma_restrack_set_task(&id_priv->res, caller);
-       id_priv->res.type = RDMA_RESTRACK_CM_ID;
        id_priv->state = RDMA_CM_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
@@ -876,9 +854,45 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
        id_priv->id.route.addr.dev_addr.net = get_net(net);
        id_priv->seq_num &= 0x00ffffff;
 
-       return &id_priv->id;
+       rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
+       if (parent)
+               rdma_restrack_parent_name(&id_priv->res, &parent->res);
+
+       return id_priv;
 }
-EXPORT_SYMBOL(__rdma_create_id);
+
+struct rdma_cm_id *
+__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
+                       void *context, enum rdma_ucm_port_space ps,
+                       enum ib_qp_type qp_type, const char *caller)
+{
+       struct rdma_id_private *ret;
+
+       ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
+       if (IS_ERR(ret))
+               return ERR_CAST(ret);
+
+       rdma_restrack_set_name(&ret->res, caller);
+       return &ret->id;
+}
+EXPORT_SYMBOL(__rdma_create_kernel_id);
+
+struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
+                                      void *context,
+                                      enum rdma_ucm_port_space ps,
+                                      enum ib_qp_type qp_type)
+{
+       struct rdma_id_private *ret;
+
+       ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
+                              ps, qp_type, NULL);
+       if (IS_ERR(ret))
+               return ERR_CAST(ret);
+
+       rdma_restrack_set_name(&ret->res, NULL);
+       return &ret->id;
+}
+EXPORT_SYMBOL(rdma_create_user_id);
 
 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
 {
@@ -1783,19 +1797,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
        mutex_unlock(&lock);
 }
 
-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
-                                   struct cma_multicast *mc)
+static void destroy_mc(struct rdma_id_private *id_priv,
+                      struct cma_multicast *mc)
 {
-       struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
-       struct net_device *ndev = NULL;
+       if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
+               ib_sa_free_multicast(mc->sa_mc);
 
-       if (dev_addr->bound_dev_if)
-               ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
-       if (ndev) {
-               cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
-               dev_put(ndev);
+       if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
+               struct rdma_dev_addr *dev_addr =
+                       &id_priv->id.route.addr.dev_addr;
+               struct net_device *ndev = NULL;
+
+               if (dev_addr->bound_dev_if)
+                       ndev = dev_get_by_index(dev_addr->net,
+                                               dev_addr->bound_dev_if);
+               if (ndev) {
+                       union ib_gid mgid;
+
+                       cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
+                                    &mgid);
+                       cma_igmp_send(ndev, &mgid, false);
+                       dev_put(ndev);
+               }
        }
-       kref_put(&mc->mcref, release_mc);
+       kfree(mc);
 }
 
 static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
@@ -1803,16 +1828,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
        struct cma_multicast *mc;
 
        while (!list_empty(&id_priv->mc_list)) {
-               mc = container_of(id_priv->mc_list.next,
-                                 struct cma_multicast, list);
+               mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
+                                     list);
                list_del(&mc->list);
-               if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
-                                     id_priv->id.port_num)) {
-                       ib_sa_free_multicast(mc->multicast.ib);
-                       kfree(mc);
-               } else {
-                       cma_leave_roce_mc_group(id_priv, mc);
-               }
+               destroy_mc(id_priv, mc);
        }
 }
 
@@ -1821,7 +1840,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
 {
        cma_cancel_operation(id_priv, state);
 
-       rdma_restrack_del(&id_priv->res);
        if (id_priv->cma_dev) {
                if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.ib)
@@ -1847,6 +1865,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
                rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
 
        put_net(id_priv->id.route.addr.dev_addr.net);
+       rdma_restrack_del(&id_priv->res);
        kfree(id_priv);
 }
 
@@ -1949,13 +1968,15 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
 {
        struct rdma_id_private *id_priv = cm_id->context;
        struct rdma_cm_event event = {};
+       enum rdma_cm_state state;
        int ret;
 
        mutex_lock(&id_priv->handler_mutex);
+       state = READ_ONCE(id_priv->state);
        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-            id_priv->state != RDMA_CM_CONNECT) ||
+            state != RDMA_CM_CONNECT) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-            id_priv->state != RDMA_CM_DISCONNECT))
+            state != RDMA_CM_DISCONNECT))
                goto out;
 
        switch (ib_event->event) {
@@ -1965,7 +1986,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
                event.status = -ETIMEDOUT;
                break;
        case IB_CM_REP_RECEIVED:
-               if (cma_comp(id_priv, RDMA_CM_CONNECT) &&
+               if (state == RDMA_CM_CONNECT &&
                    (id_priv->id.qp_type != IB_QPT_UD)) {
                        trace_cm_send_mra(id_priv);
                        ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
@@ -2043,14 +2064,15 @@ cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
        int ret;
 
        listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
-       id = __rdma_create_id(listen_id->route.addr.dev_addr.net,
-                           listen_id->event_handler, listen_id->context,
-                           listen_id->ps, ib_event->param.req_rcvd.qp_type,
-                           listen_id_priv->res.kern_name);
-       if (IS_ERR(id))
+       id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
+                                  listen_id->event_handler, listen_id->context,
+                                  listen_id->ps,
+                                  ib_event->param.req_rcvd.qp_type,
+                                  listen_id_priv);
+       if (IS_ERR(id_priv))
                return NULL;
 
-       id_priv = container_of(id, struct rdma_id_private, id);
+       id = &id_priv->id;
        if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
                              (struct sockaddr *)&id->route.addr.dst_addr,
                              listen_id, ib_event, ss_family, service_id))
@@ -2104,13 +2126,13 @@ cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
        int ret;
 
        listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
-       id = __rdma_create_id(net, listen_id->event_handler, listen_id->context,
-                             listen_id->ps, IB_QPT_UD,
-                             listen_id_priv->res.kern_name);
-       if (IS_ERR(id))
+       id_priv = __rdma_create_id(net, listen_id->event_handler,
+                                  listen_id->context, listen_id->ps, IB_QPT_UD,
+                                  listen_id_priv);
+       if (IS_ERR(id_priv))
                return NULL;
 
-       id_priv = container_of(id, struct rdma_id_private, id);
+       id = &id_priv->id;
        if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
                              (struct sockaddr *)&id->route.addr.dst_addr,
                              listen_id, ib_event, ss_family,
@@ -2184,7 +2206,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
        }
 
        mutex_lock(&listen_id->handler_mutex);
-       if (listen_id->state != RDMA_CM_LISTEN) {
+       if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
                ret = -ECONNABORTED;
                goto err_unlock;
        }
@@ -2226,8 +2248,8 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
                goto net_dev_put;
        }
 
-       if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
-           (conn_id->id.qp_type != IB_QPT_UD)) {
+       if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
+           conn_id->id.qp_type != IB_QPT_UD) {
                trace_cm_send_mra(cm_id->context);
                ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
        }
@@ -2288,7 +2310,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
        mutex_lock(&id_priv->handler_mutex);
-       if (id_priv->state != RDMA_CM_CONNECT)
+       if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
                goto out;
 
        switch (iw_event->event) {
@@ -2346,7 +2368,6 @@ out:
 static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                               struct iw_cm_event *iw_event)
 {
-       struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct rdma_cm_event event = {};
        int ret = -ECONNABORTED;
@@ -2362,20 +2383,18 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        listen_id = cm_id->context;
 
        mutex_lock(&listen_id->handler_mutex);
-       if (listen_id->state != RDMA_CM_LISTEN)
+       if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
                goto out;
 
        /* Create a new RDMA id for the new IW CM ID */
-       new_cm_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
-                                    listen_id->id.event_handler,
-                                    listen_id->id.context,
-                                    RDMA_PS_TCP, IB_QPT_RC,
-                                    listen_id->res.kern_name);
-       if (IS_ERR(new_cm_id)) {
+       conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
+                                  listen_id->id.event_handler,
+                                  listen_id->id.context, RDMA_PS_TCP,
+                                  IB_QPT_RC, listen_id);
+       if (IS_ERR(conn_id)) {
                ret = -ENOMEM;
                goto out;
        }
-       conn_id = container_of(new_cm_id, struct rdma_id_private, id);
        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        conn_id->state = RDMA_CM_CONNECT;
 
@@ -2480,7 +2499,6 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
 {
        struct rdma_id_private *dev_id_priv;
-       struct rdma_cm_id *id;
        struct net *net = id_priv->id.route.addr.dev_addr.net;
        int ret;
 
@@ -2489,13 +2507,12 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
        if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                return;
 
-       id = __rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
-                             id_priv->id.qp_type, id_priv->res.kern_name);
-       if (IS_ERR(id))
+       dev_id_priv =
+               __rdma_create_id(net, cma_listen_handler, id_priv,
+                                id_priv->id.ps, id_priv->id.qp_type, id_priv);
+       if (IS_ERR(dev_id_priv))
                return;
 
-       dev_id_priv = container_of(id, struct rdma_id_private, id);
-
        dev_id_priv->state = RDMA_CM_ADDR_BOUND;
        memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
               rdma_addr_size(cma_src_addr(id_priv)));
@@ -2508,7 +2525,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
        dev_id_priv->tos_set = id_priv->tos_set;
        dev_id_priv->tos = id_priv->tos;
 
-       ret = rdma_listen(id, id_priv->backlog);
+       ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
        if (ret)
                dev_warn(&cma_dev->device->dev,
                         "RDMA CMA: cma_listen_on_dev, error %d\n", ret);
@@ -2647,32 +2664,14 @@ static void cma_work_handler(struct work_struct *_work)
        struct rdma_id_private *id_priv = work->id;
 
        mutex_lock(&id_priv->handler_mutex);
-       if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+       if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+           READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
                goto out_unlock;
-
-       if (cma_cm_event_handler(id_priv, &work->event)) {
-               cma_id_put(id_priv);
-               destroy_id_handler_unlock(id_priv);
-               goto out_free;
+       if (work->old_state != 0 || work->new_state != 0) {
+               if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+                       goto out_unlock;
        }
 
-out_unlock:
-       mutex_unlock(&id_priv->handler_mutex);
-       cma_id_put(id_priv);
-out_free:
-       kfree(work);
-}
-
-static void cma_ndev_work_handler(struct work_struct *_work)
-{
-       struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
-       struct rdma_id_private *id_priv = work->id;
-
-       mutex_lock(&id_priv->handler_mutex);
-       if (id_priv->state == RDMA_CM_DESTROYING ||
-           id_priv->state == RDMA_CM_DEVICE_REMOVAL)
-               goto out_unlock;
-
        if (cma_cm_event_handler(id_priv, &work->event)) {
                cma_id_put(id_priv);
                destroy_id_handler_unlock(id_priv);
@@ -2683,6 +2682,8 @@ out_unlock:
        mutex_unlock(&id_priv->handler_mutex);
        cma_id_put(id_priv);
 out_free:
+       if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
+               rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
        kfree(work);
 }
 
@@ -2865,9 +2866,10 @@ struct iboe_prio_tc_map {
        bool found;
 };
 
-static int get_lower_vlan_dev_tc(struct net_device *dev, void *data)
+static int get_lower_vlan_dev_tc(struct net_device *dev,
+                                struct netdev_nested_priv *priv)
 {
-       struct iboe_prio_tc_map *map = data;
+       struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
 
        if (is_vlan_dev(dev))
                map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
@@ -2886,16 +2888,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
 {
        struct iboe_prio_tc_map prio_tc_map = {};
        int prio = rt_tos2priority(tos);
+       struct netdev_nested_priv priv;
 
        /* If VLAN device, get it directly from the VLAN netdev */
        if (is_vlan_dev(ndev))
                return get_vlan_ndev_tc(ndev, prio);
 
        prio_tc_map.input_prio = prio;
+       priv.data = (void *)&prio_tc_map;
        rcu_read_lock();
        netdev_walk_all_lower_dev_rcu(ndev,
                                      get_lower_vlan_dev_tc,
-                                     &prio_tc_map);
+                                     &priv);
        rcu_read_unlock();
        /* If map is found from lower device, use it; Otherwise
         * continue with the current netdevice to get priority to tc map.
@@ -3237,32 +3241,54 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
        return rdma_bind_addr(id, src_addr);
 }
 
-int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
-                     const struct sockaddr *dst_addr, unsigned long timeout_ms)
+/*
+ * If required, resolve the source address for bind and leave the id_priv in
+ * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
+ * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
+ * ignored.
+ */
+static int resolve_prepare_src(struct rdma_id_private *id_priv,
+                              struct sockaddr *src_addr,
+                              const struct sockaddr *dst_addr)
 {
-       struct rdma_id_private *id_priv;
        int ret;
 
-       id_priv = container_of(id, struct rdma_id_private, id);
        memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
-       if (id_priv->state == RDMA_CM_IDLE) {
-               ret = cma_bind_addr(id, src_addr, dst_addr);
-               if (ret) {
-                       memset(cma_dst_addr(id_priv), 0,
-                              rdma_addr_size(dst_addr));
-                       return ret;
+       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+               /* For a well behaved ULP state will be RDMA_CM_IDLE */
+               ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
+               if (ret)
+                       goto err_dst;
+               if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+                                          RDMA_CM_ADDR_QUERY))) {
+                       ret = -EINVAL;
+                       goto err_dst;
                }
        }
 
        if (cma_family(id_priv) != dst_addr->sa_family) {
-               memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_state;
        }
+       return 0;
 
-       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
-               memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
-               return -EINVAL;
-       }
+err_state:
+       cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+err_dst:
+       memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+       return ret;
+}
+
+int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+                     const struct sockaddr *dst_addr, unsigned long timeout_ms)
+{
+       struct rdma_id_private *id_priv =
+               container_of(id, struct rdma_id_private, id);
+       int ret;
+
+       ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
+       if (ret)
+               return ret;
 
        if (cma_any_addr(dst_addr)) {
                ret = cma_resolve_loopback(id_priv);
@@ -3294,7 +3320,8 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 
        id_priv = container_of(id, struct rdma_id_private, id);
        spin_lock_irqsave(&id_priv->lock, flags);
-       if (reuse || id_priv->state == RDMA_CM_IDLE) {
+       if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
+           id_priv->state == RDMA_CM_IDLE) {
                id_priv->reuseaddr = reuse;
                ret = 0;
        } else {
@@ -3488,8 +3515,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
                if (id_priv == cur_id)
                        continue;
 
-               if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
-                   cur_id->reuseaddr)
+               if (reuseaddr && cur_id->reuseaddr)
                        continue;
 
                cur_addr = cma_src_addr(cur_id);
@@ -3530,18 +3556,6 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
        return ret;
 }
 
-static int cma_bind_listen(struct rdma_id_private *id_priv)
-{
-       struct rdma_bind_list *bind_list = id_priv->bind_list;
-       int ret = 0;
-
-       mutex_lock(&lock);
-       if (bind_list->owners.first->next)
-               ret = cma_check_port(bind_list, id_priv, 0);
-       mutex_unlock(&lock);
-       return ret;
-}
-
 static enum rdma_ucm_port_space
 cma_select_inet_ps(struct rdma_id_private *id_priv)
 {
@@ -3635,22 +3649,31 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
 
 int rdma_listen(struct rdma_cm_id *id, int backlog)
 {
-       struct rdma_id_private *id_priv;
+       struct rdma_id_private *id_priv =
+               container_of(id, struct rdma_id_private, id);
        int ret;
 
-       id_priv = container_of(id, struct rdma_id_private, id);
-       if (id_priv->state == RDMA_CM_IDLE) {
+       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+               /* For a well behaved ULP state will be RDMA_CM_IDLE */
                id->route.addr.src_addr.ss_family = AF_INET;
                ret = rdma_bind_addr(id, cma_src_addr(id_priv));
                if (ret)
                        return ret;
+               if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+                                          RDMA_CM_LISTEN)))
+                       return -EINVAL;
        }
 
-       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
-               return -EINVAL;
-
+       /*
+        * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable
+        * any more, and has to be unique in the bind list.
+        */
        if (id_priv->reuseaddr) {
-               ret = cma_bind_listen(id_priv);
+               mutex_lock(&lock);
+               ret = cma_check_port(id_priv->bind_list, id_priv, 0);
+               if (!ret)
+                       id_priv->reuseaddr = 0;
+               mutex_unlock(&lock);
                if (ret)
                        goto err;
        }
@@ -3675,6 +3698,10 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
        return 0;
 err:
        id_priv->backlog = 0;
+       /*
+        * All the failure paths that lead here will not allow the req_handler's
+        * to have run.
+        */
        cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
        return ret;
 }
@@ -3729,7 +3756,6 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 
        return 0;
 err2:
-       rdma_restrack_del(&id_priv->res);
        if (id_priv->cma_dev)
                cma_release_dev(id_priv);
 err1:
@@ -3778,7 +3804,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
        int ret;
 
        mutex_lock(&id_priv->handler_mutex);
-       if (id_priv->state != RDMA_CM_CONNECT)
+       if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
                goto out;
 
        switch (ib_event->event) {
@@ -4012,12 +4038,21 @@ out:
        return ret;
 }
 
-int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+/**
+ * rdma_connect_locked - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Same as rdma_connect() but can only be called from the
+ * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
+ */
+int rdma_connect_locked(struct rdma_cm_id *id,
+                       struct rdma_conn_param *conn_param)
 {
-       struct rdma_id_private *id_priv;
+       struct rdma_id_private *id_priv =
+               container_of(id, struct rdma_id_private, id);
        int ret;
 
-       id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
                return -EINVAL;
 
@@ -4036,13 +4071,37 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
        else
                ret = -ENOSYS;
        if (ret)
-               goto err;
-
+               goto err_state;
        return 0;
-err:
+err_state:
        cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
        return ret;
 }
+EXPORT_SYMBOL(rdma_connect_locked);
+
+/**
+ * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Users must have resolved a route for the rdma_cm_id to connect with by having
+ * called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP information
+ * for unconnected rdma_cm_id's.  The actual operation is based on the
+ * rdma_cm_id's port space.
+ */
+int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+{
+       struct rdma_id_private *id_priv =
+               container_of(id, struct rdma_id_private, id);
+       int ret;
+
+       mutex_lock(&id_priv->handler_mutex);
+       ret = rdma_connect_locked(id, conn_param);
+       mutex_unlock(&id_priv->handler_mutex);
+       return ret;
+}
 EXPORT_SYMBOL(rdma_connect);
 
 /**
@@ -4152,17 +4211,33 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
        return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
 }
 
-int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
-                 const char *caller)
+/**
+ * rdma_accept - Called to accept a connection request or response.
+ * @id: Connection identifier associated with the request.
+ * @conn_param: Information needed to establish the connection.  This must be
+ *   provided if accepting a connection request.  If accepting a connection
+ *   response, this parameter must be NULL.
+ *
+ * Typically, this routine is only called by the listener to accept a connection
+ * request.  It must also be called on the active side of a connection if the
+ * user is performing their own QP transitions.
+ *
+ * In the case of error, a reject message is sent to the remote side and the
+ * state of the qp associated with the id is modified to error, such that any
+ * previously posted receive buffers would be flushed.
+ *
+ * This function is for use by kernel ULPs and must be called from under the
+ * handler callback.
+ */
+int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 {
-       struct rdma_id_private *id_priv;
+       struct rdma_id_private *id_priv =
+               container_of(id, struct rdma_id_private, id);
        int ret;
 
-       id_priv = container_of(id, struct rdma_id_private, id);
-
-       rdma_restrack_set_task(&id_priv->res, caller);
+       lockdep_assert_held(&id_priv->handler_mutex);
 
-       if (!cma_comp(id_priv, RDMA_CM_CONNECT))
+       if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
                return -EINVAL;
 
        if (!id->qp && conn_param) {
@@ -4200,10 +4275,10 @@ reject:
        rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
        return ret;
 }
-EXPORT_SYMBOL(__rdma_accept);
+EXPORT_SYMBOL(rdma_accept);
 
-int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
-                     const char *caller, struct rdma_ucm_ece *ece)
+int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+                   struct rdma_ucm_ece *ece)
 {
        struct rdma_id_private *id_priv =
                container_of(id, struct rdma_id_private, id);
@@ -4211,9 +4286,27 @@ int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
        id_priv->ece.vendor_id = ece->vendor_id;
        id_priv->ece.attr_mod = ece->attr_mod;
 
-       return __rdma_accept(id, conn_param, caller);
+       return rdma_accept(id, conn_param);
+}
+EXPORT_SYMBOL(rdma_accept_ece);
+
+/**
+ * rdma_lock_handler - Acquire the handler_mutex of an rdma_cm_id
+ * @id: Communication identifier whose handler_mutex is taken.
+ *
+ * Serializes the caller against the CM event handler.  rdma_accept()
+ * asserts this mutex is held (lockdep_assert_held above), so kernel ULPs
+ * accepting from outside the handler callback must bracket the call with
+ * rdma_lock_handler()/rdma_unlock_handler().
+ */
+void rdma_lock_handler(struct rdma_cm_id *id)
+{
+       struct rdma_id_private *id_priv =
+               container_of(id, struct rdma_id_private, id);
+
+       mutex_lock(&id_priv->handler_mutex);
+}
+EXPORT_SYMBOL(rdma_lock_handler);
+
+/**
+ * rdma_unlock_handler - Release the handler_mutex taken by rdma_lock_handler()
+ * @id: Communication identifier whose handler_mutex is released.
+ */
+void rdma_unlock_handler(struct rdma_cm_id *id)
+{
+       struct rdma_id_private *id_priv =
+               container_of(id, struct rdma_id_private, id);
+
+       mutex_unlock(&id_priv->handler_mutex);
+}
-EXPORT_SYMBOL(__rdma_accept_ece);
+EXPORT_SYMBOL(rdma_unlock_handler);
 
 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
 {
@@ -4296,63 +4389,66 @@ out:
 }
 EXPORT_SYMBOL(rdma_disconnect);
 
-static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+/*
+ * Build the rdma_cm_event for a completed multicast join.  On success
+ * fills @event as RDMA_CM_EVENT_MULTICAST_JOIN with an AH initialized
+ * from the mcmember record; on any failure (non-zero @status, qkey set
+ * failure, or AH init failure) it reports RDMA_CM_EVENT_MULTICAST_ERROR.
+ * Any netdev reference taken here is dropped before returning.
+ */
+static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
+                             struct ib_sa_multicast *multicast,
+                             struct rdma_cm_event *event,
+                             struct cma_multicast *mc)
 {
-       struct rdma_id_private *id_priv;
-       struct cma_multicast *mc = multicast->context;
-       struct rdma_cm_event event = {};
-       int ret = 0;
-
-       id_priv = mc->id_priv;
-       mutex_lock(&id_priv->handler_mutex);
-       if (id_priv->state != RDMA_CM_ADDR_BOUND &&
-           id_priv->state != RDMA_CM_ADDR_RESOLVED)
-               goto out;
+       struct rdma_dev_addr *dev_addr;
+       enum ib_gid_type gid_type;
+       struct net_device *ndev;
 
        if (!status)
                status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
        else
                pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
                                     status);
-       mutex_lock(&id_priv->qp_mutex);
-       if (!status && id_priv->id.qp) {
-               status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
-                                        be16_to_cpu(multicast->rec.mlid));
-               if (status)
-                       pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
-                                            status);
+
+       event->status = status;
+       event->param.ud.private_data = mc->context;
+       if (status) {
+               event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
+               return;
        }
-       mutex_unlock(&id_priv->qp_mutex);
 
-       event.status = status;
-       event.param.ud.private_data = mc->context;
-       if (!status) {
-               struct rdma_dev_addr *dev_addr =
-                       &id_priv->id.route.addr.dev_addr;
-               struct net_device *ndev =
-                       dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
-               enum ib_gid_type gid_type =
-                       id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
-                       rdma_start_port(id_priv->cma_dev->device)];
-
-               event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
-               ret = ib_init_ah_from_mcmember(id_priv->id.device,
-                                              id_priv->id.port_num,
-                                              &multicast->rec,
-                                              ndev, gid_type,
-                                              &event.param.ud.ah_attr);
-               if (ret)
-                       event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
+       dev_addr = &id_priv->id.route.addr.dev_addr;
+       ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+       gid_type =
+               id_priv->cma_dev
+                       ->default_gid_type[id_priv->id.port_num -
+                                          rdma_start_port(
+                                                  id_priv->cma_dev->device)];
+
+       event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
+       if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
+                                    &multicast->rec, ndev, gid_type,
+                                    &event->param.ud.ah_attr)) {
+               event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
+               goto out;
+       }
 
-               event.param.ud.qp_num = 0xFFFFFF;
-               event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
-               if (ndev)
-                       dev_put(ndev);
-       } else
-               event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
+       event->param.ud.qp_num = 0xFFFFFF;
+       event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
 
-       ret = cma_cm_event_handler(id_priv, &event);
+out:
+       if (ndev)
+               dev_put(ndev);
+}
+
+static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+{
+       struct cma_multicast *mc = multicast->context;
+       struct rdma_id_private *id_priv = mc->id_priv;
+       struct rdma_cm_event event = {};
+       int ret = 0;
+
+       mutex_lock(&id_priv->handler_mutex);
+       if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
+           READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
+               goto out;
 
+       cma_make_mc_event(status, id_priv, multicast, &event, mc);
+       ret = cma_cm_event_handler(id_priv, &event);
        rdma_destroy_ah_attr(&event.param.ud.ah_attr);
        if (ret) {
                destroy_id_handler_unlock(id_priv);
@@ -4442,23 +4538,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
                             IB_SA_MCMEMBER_REC_MTU |
                             IB_SA_MCMEMBER_REC_HOP_LIMIT;
 
-       mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
-                                               id_priv->id.port_num, &rec,
-                                               comp_mask, GFP_KERNEL,
-                                               cma_ib_mc_handler, mc);
-       return PTR_ERR_OR_ZERO(mc->multicast.ib);
-}
-
-static void iboe_mcast_work_handler(struct work_struct *work)
-{
-       struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
-       struct cma_multicast *mc = mw->mc;
-       struct ib_sa_multicast *m = mc->multicast.ib;
-
-       mc->multicast.ib->context = mc;
-       cma_ib_mc_handler(0, m);
-       kref_put(&mc->mcref, release_mc);
-       kfree(mw);
+       mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
+                                        id_priv->id.port_num, &rec, comp_mask,
+                                        GFP_KERNEL, cma_ib_mc_handler, mc);
+       return PTR_ERR_OR_ZERO(mc->sa_mc);
 }
 
 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
@@ -4493,52 +4576,47 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
                                   struct cma_multicast *mc)
 {
-       struct iboe_mcast_work *work;
+       struct cma_work *work;
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        int err = 0;
        struct sockaddr *addr = (struct sockaddr *)&mc->addr;
        struct net_device *ndev = NULL;
+       struct ib_sa_multicast ib;
        enum ib_gid_type gid_type;
        bool send_only;
 
        send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
 
-       if (cma_zero_addr((struct sockaddr *)&mc->addr))
+       if (cma_zero_addr(addr))
                return -EINVAL;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;
 
-       mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
-       if (!mc->multicast.ib) {
-               err = -ENOMEM;
-               goto out1;
-       }
-
        gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
                   rdma_start_port(id_priv->cma_dev->device)];
-       cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);
+       cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
 
-       mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
+       ib.rec.pkey = cpu_to_be16(0xffff);
        if (id_priv->id.ps == RDMA_PS_UDP)
-               mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+               ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
 
        if (dev_addr->bound_dev_if)
                ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
        if (!ndev) {
                err = -ENODEV;
-               goto out2;
+               goto err_free;
        }
-       mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
-       mc->multicast.ib->rec.hop_limit = 1;
-       mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
+       ib.rec.rate = iboe_get_rate(ndev);
+       ib.rec.hop_limit = 1;
+       ib.rec.mtu = iboe_get_mtu(ndev->mtu);
 
        if (addr->sa_family == AF_INET) {
                if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
-                       mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+                       ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
                        if (!send_only) {
-                               err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
+                               err = cma_igmp_send(ndev, &ib.rec.mgid,
                                                    true);
                        }
                }
@@ -4547,24 +4625,22 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
                        err = -ENOTSUPP;
        }
        dev_put(ndev);
-       if (err || !mc->multicast.ib->rec.mtu) {
+       if (err || !ib.rec.mtu) {
                if (!err)
                        err = -EINVAL;
-               goto out2;
+               goto err_free;
        }
        rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
-                   &mc->multicast.ib->rec.port_gid);
+                   &ib.rec.port_gid);
        work->id = id_priv;
-       work->mc = mc;
-       INIT_WORK(&work->work, iboe_mcast_work_handler);
-       kref_get(&mc->mcref);
+       INIT_WORK(&work->work, cma_work_handler);
+       cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
+       /* Balances with cma_id_put() in cma_work_handler */
+       cma_id_get(id_priv);
        queue_work(cma_wq, &work->work);
-
        return 0;
 
-out2:
-       kfree(mc->multicast.ib);
-out1:
+err_free:
        kfree(work);
        return err;
 }
@@ -4572,19 +4648,21 @@ out1:
 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
                        u8 join_state, void *context)
 {
-       struct rdma_id_private *id_priv;
+       struct rdma_id_private *id_priv =
+               container_of(id, struct rdma_id_private, id);
        struct cma_multicast *mc;
        int ret;
 
-       if (!id->device)
+       /* Not supported for kernel QPs */
+       if (WARN_ON(id->qp))
                return -EINVAL;
 
-       id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
-           !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
+       /* ULP is calling this wrong. */
+       if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
+                           READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
                return -EINVAL;
 
-       mc = kmalloc(sizeof *mc, GFP_KERNEL);
+       mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return -ENOMEM;
 
@@ -4594,7 +4672,6 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
        mc->join_state = join_state;
 
        if (rdma_protocol_roce(id->device, id->port_num)) {
-               kref_init(&mc->mcref);
                ret = cma_iboe_join_multicast(id_priv, mc);
                if (ret)
                        goto out_err;
@@ -4626,25 +4703,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
        id_priv = container_of(id, struct rdma_id_private, id);
        spin_lock_irq(&id_priv->lock);
        list_for_each_entry(mc, &id_priv->mc_list, list) {
-               if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
-                       list_del(&mc->list);
-                       spin_unlock_irq(&id_priv->lock);
-
-                       if (id->qp)
-                               ib_detach_mcast(id->qp,
-                                               &mc->multicast.ib->rec.mgid,
-                                               be16_to_cpu(mc->multicast.ib->rec.mlid));
-
-                       BUG_ON(id_priv->cma_dev->device != id->device);
-
-                       if (rdma_cap_ib_mcast(id->device, id->port_num)) {
-                               ib_sa_free_multicast(mc->multicast.ib);
-                               kfree(mc);
-                       } else if (rdma_protocol_roce(id->device, id->port_num)) {
-                               cma_leave_roce_mc_group(id_priv, mc);
-                       }
-                       return;
-               }
+               if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
+                       continue;
+               list_del(&mc->list);
+               spin_unlock_irq(&id_priv->lock);
+
+               WARN_ON(id_priv->cma_dev->device != id->device);
+               destroy_mc(id_priv, mc);
+               return;
        }
        spin_unlock_irq(&id_priv->lock);
 }
@@ -4653,7 +4719,7 @@ EXPORT_SYMBOL(rdma_leave_multicast);
 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
 {
        struct rdma_dev_addr *dev_addr;
-       struct cma_ndev_work *work;
+       struct cma_work *work;
 
        dev_addr = &id_priv->id.route.addr.dev_addr;
 
@@ -4666,7 +4732,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
                if (!work)
                        return -ENOMEM;
 
-               INIT_WORK(&work->work, cma_ndev_work_handler);
+               INIT_WORK(&work->work, cma_work_handler);
                work->id = id_priv;
                work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
                cma_id_get(id_priv);