Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
[linux-2.6-microblaze.git] / drivers / infiniband / sw / rxe / rxe_mcast.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
5  */
6
7 #include "rxe.h"
8 #include "rxe_loc.h"
9
10 /* caller should hold mc_grp_pool->pool_lock */
11 static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe,
12                                      struct rxe_pool *pool,
13                                      union ib_gid *mgid)
14 {
15         int err;
16         struct rxe_mc_grp *grp;
17
18         grp = rxe_alloc_locked(&rxe->mc_grp_pool);
19         if (!grp)
20                 return ERR_PTR(-ENOMEM);
21
22         INIT_LIST_HEAD(&grp->qp_list);
23         spin_lock_init(&grp->mcg_lock);
24         grp->rxe = rxe;
25         rxe_add_key_locked(grp, mgid);
26
27         err = rxe_mcast_add(rxe, mgid);
28         if (unlikely(err)) {
29                 rxe_drop_key_locked(grp);
30                 rxe_drop_ref(grp);
31                 return ERR_PTR(err);
32         }
33
34         return grp;
35 }
36
37 int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
38                       struct rxe_mc_grp **grp_p)
39 {
40         int err;
41         struct rxe_mc_grp *grp;
42         struct rxe_pool *pool = &rxe->mc_grp_pool;
43         unsigned long flags;
44
45         if (rxe->attr.max_mcast_qp_attach == 0)
46                 return -EINVAL;
47
48         write_lock_irqsave(&pool->pool_lock, flags);
49
50         grp = rxe_pool_get_key_locked(pool, mgid);
51         if (grp)
52                 goto done;
53
54         grp = create_grp(rxe, pool, mgid);
55         if (IS_ERR(grp)) {
56                 write_unlock_irqrestore(&pool->pool_lock, flags);
57                 err = PTR_ERR(grp);
58                 return err;
59         }
60
61 done:
62         write_unlock_irqrestore(&pool->pool_lock, flags);
63         *grp_p = grp;
64         return 0;
65 }
66
/* Attach @qp to multicast group @grp by allocating a membership element
 * linked onto both the qp's group list and the group's qp list.
 * Returns 0 on success (or if the qp is already attached), -ENOMEM if
 * the attach limit is reached or the element cannot be allocated.
 */
int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp)
{
	int err;
	struct rxe_mc_elem *elem;

	/* check to see if the qp is already a member of the group */
	/* lock order: qp->grp_lock first, then grp->mcg_lock — must match
	 * rxe_mcast_drop_grp_elem() to avoid deadlock
	 */
	spin_lock_bh(&qp->grp_lock);
	spin_lock_bh(&grp->mcg_lock);
	list_for_each_entry(elem, &grp->qp_list, qp_list) {
		if (elem->qp == qp) {
			/* already attached — treat as success */
			err = 0;
			goto out;
		}
	}

	/* enforce the device's per-group attach limit */
	if (grp->num_qp >= rxe->attr.max_mcast_qp_attach) {
		err = -ENOMEM;
		goto out;
	}

	elem = rxe_alloc_locked(&rxe->mc_elem_pool);
	if (!elem) {
		err = -ENOMEM;
		goto out;
	}

	/* each qp holds a ref on the grp */
	rxe_add_ref(grp);

	grp->num_qp++;
	elem->qp = qp;
	elem->grp = grp;

	/* link the element into both lists so either side can find it */
	list_add(&elem->qp_list, &grp->qp_list);
	list_add(&elem->grp_list, &qp->grp_list);

	err = 0;
out:
	spin_unlock_bh(&grp->mcg_lock);
	spin_unlock_bh(&qp->grp_lock);
	return err;
}
110
/* Detach @qp from the multicast group identified by @mgid: unlink the
 * membership element from both lists and drop the references it held.
 * Returns 0 on success, -EINVAL if the group does not exist or the qp
 * is not a member of it.
 */
int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid)
{
	struct rxe_mc_grp *grp;
	struct rxe_mc_elem *elem, *tmp;

	/* takes a reference on grp that must be dropped on every path */
	grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
	if (!grp)
		goto err1;

	/* lock order matches rxe_mcast_add_grp_elem() */
	spin_lock_bh(&qp->grp_lock);
	spin_lock_bh(&grp->mcg_lock);

	list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) {
		if (elem->qp == qp) {
			list_del(&elem->qp_list);
			list_del(&elem->grp_list);
			grp->num_qp--;

			/* drop refs only after releasing the locks —
			 * the final put may free grp/elem
			 */
			spin_unlock_bh(&grp->mcg_lock);
			spin_unlock_bh(&qp->grp_lock);
			rxe_drop_ref(elem);
			rxe_drop_ref(grp);	/* ref held by QP */
			rxe_drop_ref(grp);	/* ref from get_key */
			return 0;
		}
	}

	/* qp was not a member of this group */
	spin_unlock_bh(&grp->mcg_lock);
	spin_unlock_bh(&qp->grp_lock);
	rxe_drop_ref(grp);			/* ref from get_key */
err1:
	return -EINVAL;
}
145
/* Detach @qp from every multicast group it belongs to; called when the
 * qp is destroyed. Processes one membership element per iteration,
 * re-taking qp->grp_lock each time so the two locks are never held
 * together here (grp->mcg_lock is only taken after grp_lock is dropped).
 */
void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
{
	struct rxe_mc_grp *grp;
	struct rxe_mc_elem *elem;

	while (1) {
		spin_lock_bh(&qp->grp_lock);
		if (list_empty(&qp->grp_list)) {
			spin_unlock_bh(&qp->grp_lock);
			break;
		}
		/* unlink from the qp side under grp_lock only */
		elem = list_first_entry(&qp->grp_list, struct rxe_mc_elem,
					grp_list);
		list_del(&elem->grp_list);
		spin_unlock_bh(&qp->grp_lock);

		/* then unlink from the group side under mcg_lock */
		grp = elem->grp;
		spin_lock_bh(&grp->mcg_lock);
		list_del(&elem->qp_list);
		grp->num_qp--;
		spin_unlock_bh(&grp->mcg_lock);
		/* drop refs outside the locks; may free grp/elem */
		rxe_drop_ref(grp);
		rxe_drop_ref(elem);
	}
}
171
172 void rxe_mc_cleanup(struct rxe_pool_entry *arg)
173 {
174         struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
175         struct rxe_dev *rxe = grp->rxe;
176
177         rxe_drop_key(grp);
178         rxe_mcast_delete(rxe, &grp->mgid);
179 }