diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index bf5a6e4..5282148 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -8,13 +8,15 @@
 #include "srq.h"
 
 static int
-mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
+                     struct mlx5_eswitch_rep *rep,
+                     int vport_index)
 {
        struct mlx5_ib_dev *ibdev;
-       int vport_index;
 
        ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB);
-       vport_index = rep->vport_index;
+       if (!ibdev)
+               return -EINVAL;
 
        ibdev->port[vport_index].rep = rep;
        rep->rep_data[REP_IB].priv = ibdev;
@@ -26,19 +28,39 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
        return 0;
 }
 
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);
+
 static int
 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
        u32 num_ports = mlx5_eswitch_get_total_vports(dev);
        const struct mlx5_ib_profile *profile;
+       struct mlx5_core_dev *peer_dev;
        struct mlx5_ib_dev *ibdev;
+       u32 peer_num_ports;
        int vport_index;
        int ret;
 
+       vport_index = rep->vport_index;
+
+       if (mlx5_lag_is_shared_fdb(dev)) {
+               peer_dev = mlx5_lag_get_peer_mdev(dev);
+               peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
+               if (mlx5_lag_is_master(dev)) {
+                       /* Only 1 ib port is the representor for both uplinks */
+                       num_ports += peer_num_ports - 1;
+               } else {
+                       if (rep->vport == MLX5_VPORT_UPLINK)
+                               return 0;
+                       vport_index += peer_num_ports;
+                       dev = peer_dev;
+               }
+       }
+
        if (rep->vport == MLX5_VPORT_UPLINK)
                profile = &raw_eth_profile;
        else
-               return mlx5_ib_set_vport_rep(dev, rep);
+               return mlx5_ib_set_vport_rep(dev, rep, vport_index);
 
        ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
        if (!ibdev)
@@ -64,6 +86,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
                goto fail_add;
 
        rep->rep_data[REP_IB].priv = ibdev;
+       if (mlx5_lag_is_shared_fdb(dev))
+               mlx5_ib_register_peer_vport_reps(dev);
 
        return 0;
 
@@ -82,18 +106,45 @@ static void *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
 static void
 mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
+       struct mlx5_core_dev *mdev = mlx5_eswitch_get_core_dev(rep->esw);
        struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+       int vport_index = rep->vport_index;
        struct mlx5_ib_port *port;
 
-       port = &dev->port[rep->vport_index];
+       if (WARN_ON(!mdev))
+               return;
+
+       if (mlx5_lag_is_shared_fdb(mdev) &&
+           !mlx5_lag_is_master(mdev)) {
+               struct mlx5_core_dev *peer_mdev;
+
+               if (rep->vport == MLX5_VPORT_UPLINK)
+                       return;
+               peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+               vport_index += mlx5_eswitch_get_total_vports(peer_mdev);
+       }
+
+       if (!dev)
+               return;
+
+       port = &dev->port[vport_index];
        write_lock(&port->roce.netdev_lock);
        port->roce.netdev = NULL;
        write_unlock(&port->roce.netdev_lock);
        rep->rep_data[REP_IB].priv = NULL;
        port->rep = NULL;
 
-       if (rep->vport == MLX5_VPORT_UPLINK)
+       if (rep->vport == MLX5_VPORT_UPLINK) {
+               struct mlx5_core_dev *peer_mdev;
+               struct mlx5_eswitch *esw;
+
+               if (mlx5_lag_is_shared_fdb(mdev)) {
+                       peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+                       esw = peer_mdev->priv.eswitch;
+                       mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+               }
                __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+       }
 }
 
 static const struct mlx5_eswitch_rep_ops rep_ops = {
@@ -102,6 +153,18 @@ static const struct mlx5_eswitch_rep_ops rep_ops = {
        .get_proto_dev = mlx5_ib_rep_to_dev,
 };
 
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev)
+{
+       struct mlx5_core_dev *peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+       struct mlx5_eswitch *esw;
+
+       if (!peer_mdev)
+               return;
+
+       esw = peer_mdev->priv.eswitch;
+       mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+}
+
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
                                          u16 vport_num)
 {
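
Note: the following is a standalone sketch, not kernel code, intended only to model the
port-index arithmetic this patch introduces for shared-FDB LAG. All names (toy_dev,
toy_rep_port_index) are hypothetical simplifications; the real logic lives in
mlx5_ib_vport_rep_load() and depends on mlx5_lag_is_shared_fdb(), mlx5_lag_is_master(),
mlx5_lag_get_peer_mdev() and mlx5_eswitch_get_total_vports().

/*
 * Standalone model (hypothetical, userspace) of the shared-FDB index math:
 * the master's single ib device covers its own vports plus the peer's,
 * minus the one uplink representor they share; a slave rep is folded into
 * the master's port array at an offset of the peer's vport count, and the
 * slave's own uplink rep is not loaded at all.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
	const char *name;
	unsigned int num_vports;	/* models mlx5_eswitch_get_total_vports() */
	bool is_master;			/* models mlx5_lag_is_master() */
};

static int toy_rep_port_index(const struct toy_dev *dev,
			      const struct toy_dev *peer,
			      unsigned int vport_index,
			      bool is_uplink,
			      bool shared_fdb)
{
	if (!shared_fdb || dev->is_master)
		return vport_index;		/* master keeps its own indexing */
	if (is_uplink)
		return -1;			/* slave uplink rep is skipped */
	return vport_index + peer->num_vports;	/* placed after the master's ports */
}

int main(void)
{
	struct toy_dev master = { "mdev0", 4, true };
	struct toy_dev slave  = { "mdev1", 4, false };

	/* master ib device: own vports + peer vports - 1 shared uplink rep */
	unsigned int ib_ports = master.num_vports + slave.num_vports - 1;

	printf("ib device ports: %u\n", ib_ports);
	printf("slave vport_index 2 -> ib port %d\n",
	       toy_rep_port_index(&slave, &master, 2, false, true));
	printf("slave uplink rep    -> %d (not loaded)\n",
	       toy_rep_port_index(&slave, &master, 0, true, true));
	return 0;
}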