mlxsw: spectrum: Add the multicast routing offloading logic
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>

#include "spectrum_mr.h"
#include "spectrum_router.h"

struct mlxsw_sp_mr {
	const struct mlxsw_sp_mr_ops *mr_ops;
	void *catchall_route_priv;
	struct delayed_work stats_update_dw;
	struct list_head table_list;
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_mr_vif {
	struct net_device *dev;
	const struct mlxsw_sp_rif *rif;
	unsigned long vif_flags;

	/* A list of route_vif_entry structs that point to routes in which
	 * this VIF is used as one of the egress VIFs
	 */
	struct list_head route_evif_list;

	/* A list of route_vif_entry structs that point to routes in which
	 * this VIF is used as the ingress VIF
	 */
	struct list_head route_ivif_list;
};

struct mlxsw_sp_mr_route_vif_entry {
	struct list_head vif_node;
	struct list_head route_node;
	struct mlxsw_sp_mr_vif *mr_vif;
	struct mlxsw_sp_mr_route *mr_route;
};

struct mlxsw_sp_mr_table {
	struct list_head node;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp *mlxsw_sp;
	u32 vr_id;
	struct mlxsw_sp_mr_vif vifs[MAXVIFS];
	struct list_head route_list;
	struct rhashtable route_ht;
	char catchall_route_priv[0];
	/* catchall_route_priv has to be always the last item */
};

struct mlxsw_sp_mr_route {
	struct list_head node;
	struct rhash_head ht_node;
	struct mlxsw_sp_mr_route_key key;
	enum mlxsw_sp_mr_route_action route_action;
	u16 min_mtu;
	struct mfc_cache *mfc4;
	void *route_priv;
	const struct mlxsw_sp_mr_table *mr_table;
	/* A list of route_vif_entry structs that point to the egress VIFs */
	struct list_head evif_list;
	/* A route_vif_entry struct that points to the ingress VIF */
	struct mlxsw_sp_mr_route_vif_entry ivif;
};

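/* Offloaded routes are hashed by their full key - VRID, protocol, group and
 * source together with their masks - so both (S,G) and (*,G) entries can be
 * found with a single lookup.
 */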
static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_mr_route_key),
	.key_offset = offsetof(struct mlxsw_sp_mr_route, key),
	.head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
	.automatic_shrinking = true,
};

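/* A VIF is regular if it is not a tunnel or a pimreg (PIM register) device */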
static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
	return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
}

static bool mlxsw_sp_mr_vif_rif_invalid(const struct mlxsw_sp_mr_vif *vif)
{
	return mlxsw_sp_mr_vif_regular(vif) && vif->dev && !vif->rif;
}

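/* Check whether the ingress VIF also appears in the egress VIF list, i.e.
 * whether the TTL entry of the route's parent VIF is set.
 */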
static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
	vifi_t ivif;

	switch (mr_route->mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ivif = mr_route->mfc4->mfc_parent;
		return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
	return false;
}

static int
mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	int valid_evifs;

	valid_evifs = 0;
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
			valid_evifs++;
	return valid_evifs;
}

static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
{
	switch (mr_route->mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return mr_route->key.source_mask.addr4 == INADDR_ANY;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
	return false;
}

static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* If the ingress VIF is not regular and resolved, trap the route */
	if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* The kernel does not match a (*,G) route whose ingress interface is
	 * not one of the egress interfaces, so trap such routes.
	 */
	if (mlxsw_sp_mr_route_starg(mr_route) &&
	    !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If the route has no valid eVIFs, trap it. */
	if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If any of the eVIFs is not regular (a VIF of type pimreg or
	 * tunnel) or has no matching RIF, trap the packet.
	 */
	list_for_each_entry(rve, &mr_route->evif_list, route_node) {
		if (!mlxsw_sp_mr_vif_regular(rve->mr_vif) ||
		    mlxsw_sp_mr_vif_rif_invalid(rve->mr_vif))
			return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	}
	return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
}

static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
	return mlxsw_sp_mr_route_starg(mr_route) ?
		MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}

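/* Build the hash table key for an IPv4 MFC entry. A (*,G) route is encoded
 * with a zero source mask, while an (S,G) route uses a full /32 source mask.
 */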
static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   const struct mfc_cache *mfc)
{
	bool starg = (mfc->mfc_origin == INADDR_ANY);

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = mr_table->proto;
	key->group.addr4 = mfc->mfc_mcastgrp;
	key->group_mask.addr4 = 0xffffffff;
	key->source.addr4 = mfc->mfc_origin;
	key->source_mask.addr4 = starg ? 0 : 0xffffffff;
}

static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
				       struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	rve = kzalloc(sizeof(*rve), GFP_KERNEL);
	if (!rve)
		return -ENOMEM;
	rve->mr_route = mr_route;
	rve->mr_vif = mr_vif;
	list_add_tail(&rve->route_node, &mr_route->evif_list);
	list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
	return 0;
}

static void
mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
{
	list_del(&rve->route_node);
	list_del(&rve->vif_node);
	kfree(rve);
}

static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
					struct mlxsw_sp_mr_vif *mr_vif)
{
	mr_route->ivif.mr_route = mr_route;
	mr_route->ivif.mr_vif = mr_vif;
	list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
}

static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
{
	list_del(&mr_route->ivif.vif_node);
}

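/* Gather the route parameters - iRIF index, eRIF list, minimum MTU and
 * route action - into a route_info struct that the low-level ops use to
 * program the hardware. Only valid (regular, RIF-backed) eVIFs are listed.
 */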
static int
mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
			      struct mlxsw_sp_mr_route *mr_route,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	u16 *erif_indices;
	u16 irif_index;
	u16 erif = 0;

	erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
				     GFP_KERNEL);
	if (!erif_indices)
		return -ENOMEM;

	list_for_each_entry(rve, &mr_route->evif_list, route_node) {
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
			u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);

			erif_indices[erif++] = rifi;
		}
	}

	if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
	else
		irif_index = 0;

	route_info->irif_index = irif_index;
	route_info->erif_indices = erif_indices;
	route_info->min_mtu = mr_route->min_mtu;
	route_info->route_action = mr_route->route_action;
	route_info->erif_num = erif;
	return 0;
}

static void
mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
{
	kfree(route_info->erif_indices);
}

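/* Write the route to the hardware. On creation, allocate the low-level
 * route_priv and call the route_create op; on replace, update the already
 * programmed entry in place via the route_update op.
 */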
static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route *mr_route,
				   bool replace)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_info route_info;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	int err;

	err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
	if (err)
		return err;

	if (!replace) {
		struct mlxsw_sp_mr_route_params route_params;

		mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
					       GFP_KERNEL);
		if (!mr_route->route_priv) {
			err = -ENOMEM;
			goto out;
		}

		route_params.key = mr_route->key;
		route_params.value = route_info;
		route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
		err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
					       mr_route->route_priv,
					       &route_params);
		if (err)
			kfree(mr_route->route_priv);
	} else {
		err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
					       &route_info);
	}
out:
	mlxsw_sp_mr_route_info_destroy(&route_info);
	return err;
}

static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
	kfree(mr_route->route_priv);
}

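/* While a route is offloaded, hold a reference on the kernel's MFC cache
 * entry so it cannot be freed from under the driver; the reference is
 * dropped when the route is destroyed.
 */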
static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
			  struct mfc_cache *mfc)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
	struct mlxsw_sp_mr_route *mr_route;
	int err;
	int i;

	/* Allocate and init a new route and fill it with parameters */
	mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
	if (!mr_route)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&mr_route->evif_list);
	mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);

	/* Find min_mtu and link iVIF and eVIFs */
	mr_route->min_mtu = ETH_MAX_MTU;
	ipmr_cache_hold(mfc);
	mr_route->mfc4 = mfc;
	mr_route->mr_table = mr_table;
	for (i = 0; i < MAXVIFS; i++) {
		if (mfc->mfc_un.res.ttls[i] != 255) {
			err = mlxsw_sp_mr_route_evif_link(mr_route,
							  &mr_table->vifs[i]);
			if (err)
				goto err;
			if (mr_table->vifs[i].dev &&
			    mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
				mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
		}
	}
	mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);

	mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
	return mr_route;
err:
	ipmr_cache_put(mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
	return ERR_PTR(err);
}

static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
				       struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;

	mlxsw_sp_mr_route_ivif_unlink(mr_route);
	ipmr_cache_put(mr_route->mfc4);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
}

static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
				      struct mlxsw_sp_mr_route *mr_route)
{
	switch (mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
}

static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
					bool offload)
{
	switch (mr_route->mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (offload)
			mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
		else
			mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
}

static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
{
	bool offload;

	offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
}

static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	mlxsw_sp_mr_mfc_offload_set(mr_route, false);
	mlxsw_sp_mr_route_erase(mr_table, mr_route);
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
	list_del(&mr_route->node);
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}

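/* Add an IPv4 multicast route, or replace an existing one if @replace is
 * set. (*,*) proxy routes and duplicate non-replace insertions are rejected,
 * as proxy routes cannot be offloaded.
 */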
int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
			   struct mfc_cache *mfc, bool replace)
{
	struct mlxsw_sp_mr_route *mr_orig_route = NULL;
	struct mlxsw_sp_mr_route *mr_route;
	int err;

	/* If the route is a (*,*) route, abort, as this kind of route is
	 * used for proxy routes.
	 */
	if (mfc->mfc_origin == INADDR_ANY && mfc->mfc_mcastgrp == INADDR_ANY) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return -EINVAL;
	}

	/* Create a new route */
	mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
	if (IS_ERR(mr_route))
		return PTR_ERR(mr_route);

	/* Find any route with a matching key */
	mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
					       &mr_route->key,
					       mlxsw_sp_mr_route_ht_params);
	if (replace) {
		/* In the replace case, make the new route take over the
		 * original route's route_priv.
		 */
		if (WARN_ON(!mr_orig_route)) {
			err = -ENOENT;
			goto err_no_orig_route;
		}
		mr_route->route_priv = mr_orig_route->route_priv;
	} else if (mr_orig_route) {
		/* In the non-replace case, if another route with the same key
		 * was found, abort, as duplicate routes are used for proxy
		 * routes.
		 */
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		err = -EINVAL;
		goto err_duplicate_route;
	}

	/* Put it in the table data structures */
	list_add_tail(&mr_route->node, &mr_table->route_list);
	err = rhashtable_insert_fast(&mr_table->route_ht,
				     &mr_route->ht_node,
				     mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_rhashtable_insert;

	/* Write the route to the hardware */
	err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
	if (err)
		goto err_mr_route_write;

	/* Destroy the original route */
	if (replace) {
		rhashtable_remove_fast(&mr_table->route_ht,
				       &mr_orig_route->ht_node,
				       mlxsw_sp_mr_route_ht_params);
		list_del(&mr_orig_route->node);
		mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
	}

	mlxsw_sp_mr_mfc_offload_update(mr_route);
	return 0;

err_mr_route_write:
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
err_rhashtable_insert:
	list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
	mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
	return err;
}

void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
			    struct mfc_cache *mfc)
{
	struct mlxsw_sp_mr_route *mr_route;
	struct mlxsw_sp_mr_route_key key;

	mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
	mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
					  mlxsw_sp_mr_route_ht_params);
	if (mr_route)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);
}

/* Should be called after the VIF struct is updated */
static int
mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 irif_index;
	int err;

	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return 0;

	/* rve->mr_vif->rif is guaranteed to be valid at this stage */
	irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
	err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
					    irif_index);
	if (err)
		return err;

	err = mr->mr_ops->route_action_update(mlxsw_sp,
					      rve->mr_route->route_priv,
					      route_action);
	if (err)
		/* No need to rollback here because the iRIF change only takes
		 * effect once the action has been updated.
		 */
		return err;

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;
}

static void
mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
					MLXSW_SP_MR_ROUTE_ACTION_TRAP);
	rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

/* Should be called after the RIF struct is updated */
static int
mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 erif_index = 0;
	int err;

	/* Update the route action, as the new eVIF can be a tunnel or a pimreg
	 * device which will require updating the action.
	 */
	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action) {
		err = mr->mr_ops->route_action_update(mlxsw_sp,
						      rve->mr_route->route_priv,
						      route_action);
		if (err)
			return err;
	}

	/* Add the eRIF */
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
		err = mr->mr_ops->route_erif_add(mlxsw_sp,
						 rve->mr_route->route_priv,
						 erif_index);
		if (err)
			goto err_route_erif_add;
	}

	/* Update the minimum MTU */
	if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
		rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
		err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
						       rve->mr_route->route_priv,
						       rve->mr_route->min_mtu);
		if (err)
			goto err_route_min_mtu_update;
	}

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;

err_route_min_mtu_update:
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
					   erif_index);
err_route_erif_add:
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						rve->mr_route->route_action);
	return err;
}

/* Should be called before the RIF struct is updated */
static void
mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 rifi;

	/* If the VIF being unresolved is not valid, there is no eRIF to
	 * delete.
	 */
	if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
		return;

	/* Update the route action: if there is only one valid eVIF in the
	 * route, set the action to trap, as the VIF deletion will lead to zero
	 * valid eVIFs. In any other case, use mlxsw_sp_mr_route_action() to
	 * determine the route action.
	 */
	if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
		route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	else
		route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						route_action);

	/* Delete the eRIF from the route */
	rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
	mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

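/* Bind a VIF to its netdevice and RIF and re-resolve every route that uses
 * it. On failure, already updated routes are rolled back in reverse order
 * and the VIF is left without a RIF.
 */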
static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
				   struct net_device *dev,
				   struct mlxsw_sp_mr_vif *mr_vif,
				   unsigned long vif_flags,
				   const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
	int err;

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = rif;
	mr_vif->vif_flags = vif_flags;

	/* Update all routes where this VIF is used as an unresolved iRIF */
	list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
		err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
		if (err)
			goto err_irif_unresolve;
	}

	/* Update all routes where this VIF is used as an unresolved eRIF */
	list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
		err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
		if (err)
			goto err_erif_unresolve;
	}
	return 0;

err_erif_unresolve:
	list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
					 vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
	list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
					 vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
	mr_vif->rif = NULL;
	return err;
}

static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				      struct net_device *dev,
				      struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* Update all routes where this VIF is used as an eRIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);

	/* Update all routes where this VIF is used as the iRIF */
	list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = NULL;
}

int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
			struct net_device *dev, vifi_t vif_index,
			unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_vif *mr_vif;

	if (WARN_ON(vif_index >= MAXVIFS))
		return -EINVAL;
	mr_vif = &mr_table->vifs[vif_index];
	if (mr_vif->dev)
		return -EEXIST;
	return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
}

void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
{
	struct mlxsw_sp_mr_vif *mr_vif;

	if (WARN_ON(vif_index >= MAXVIFS))
		return;
	mr_vif = &mr_table->vifs[vif_index];
	if (WARN_ON(!mr_vif->dev))
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
}


struct mlxsw_sp_mr_vif *
mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
			   const struct net_device *dev)
{
	vifi_t vif_index;

	for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
		if (mr_table->vifs[vif_index].dev == dev)
			return &mr_table->vifs[vif_index];
	return NULL;
}

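/* RIF notifications: when a RIF that backs a VIF's netdevice is created or
 * destroyed, resolve or unresolve the matching VIF so that the affected
 * routes are reprogrammed accordingly.
 */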
int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
			const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return 0;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return 0;
	return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
				       mr_vif->vif_flags, rif);
}

void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
			 const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
}

void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
				const struct mlxsw_sp_rif *rif, int mtu)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_vif_entry *rve;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	/* Search for a VIF that uses that RIF */
	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;

	/* Update all the routes that use that VIF as an eVIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
		if (mtu < rve->mr_route->min_mtu) {
			rve->mr_route->min_mtu = mtu;
			mr->mr_ops->route_min_mtu_update(mlxsw_sp,
							 rve->mr_route->route_priv,
							 mtu);
		}
	}
}

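/* Create a per-VRF multicast routing table. A lowest-priority catch-all
 * route that traps all multicast packets in the VRF to the CPU is installed
 * first, so packets that miss every offloaded route still reach the kernel.
 */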
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
						   u32 vr_id,
						   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_mr_route_params catchall_route_params = {
		.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
		.key = {
			.vrid = vr_id,
		},
		.value = {
			.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
		}
	};
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_table *mr_table;
	int err;
	int i;

	mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
			   GFP_KERNEL);
	if (!mr_table)
		return ERR_PTR(-ENOMEM);

	mr_table->vr_id = vr_id;
	mr_table->mlxsw_sp = mlxsw_sp;
	mr_table->proto = proto;
	INIT_LIST_HEAD(&mr_table->route_list);

	err = rhashtable_init(&mr_table->route_ht,
			      &mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_route_rhashtable_init;

	for (i = 0; i < MAXVIFS; i++) {
		INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
		INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
	}

	err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
				       mr_table->catchall_route_priv,
				       &catchall_route_params);
	if (err)
		goto err_ops_route_create;
	list_add_tail(&mr_table->node, &mr->table_list);
	return mr_table;

err_ops_route_create:
	rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
	kfree(mr_table);
	return ERR_PTR(err);
}

void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
	list_del(&mr_table->node);
	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
				  &mr_table->catchall_route_priv);
	rhashtable_destroy(&mr_table->route_ht);
	kfree(mr_table);
}

void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp_mr_route *mr_route, *tmp;
	int i;

	list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);

	for (i = 0; i < MAXVIFS; i++) {
		mr_table->vifs[i].dev = NULL;
		mr_table->vifs[i].rif = NULL;
	}
}

bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
{
	int i;

	for (i = 0; i < MAXVIFS; i++)
		if (mr_table->vifs[i].dev)
			return false;
	return list_empty(&mr_table->route_list);
}

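/* Periodically copy the hardware packet and byte counters of offloaded
 * routes back into the kernel's MFC cache entries, refreshing the last-use
 * timestamp whenever the packet count changed.
 */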
static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u64 packets, bytes;

	if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return;

	mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
				&bytes);

	switch (mr_route->mr_table->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (mr_route->mfc4->mfc_un.res.pkt != packets)
			mr_route->mfc4->mfc_un.res.lastuse = jiffies;
		mr_route->mfc4->mfc_un.res.pkt = packets;
		mr_route->mfc4->mfc_un.res.bytes = bytes;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* fall through */
	default:
		WARN_ON_ONCE(1);
	}
}

static void mlxsw_sp_mr_stats_update(struct work_struct *work)
{
	struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
					      stats_update_dw.work);
	struct mlxsw_sp_mr_table *mr_table;
	struct mlxsw_sp_mr_route *mr_route;
	unsigned long interval;

	rtnl_lock();
	list_for_each_entry(mr_table, &mr->table_list, node)
		list_for_each_entry(mr_route, &mr_table->route_list, node)
			mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
						       mr_route);
	rtnl_unlock();

	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
}

int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
		     const struct mlxsw_sp_mr_ops *mr_ops)
{
	struct mlxsw_sp_mr *mr;
	unsigned long interval;
	int err;

	mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
	if (!mr)
		return -ENOMEM;
	mr->mr_ops = mr_ops;
	mlxsw_sp->mr = mr;
	INIT_LIST_HEAD(&mr->table_list);

	err = mr_ops->init(mlxsw_sp, mr->priv);
	if (err)
		goto err;

	/* Create the delayed work for counter updates */
	INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
	return 0;
err:
	kfree(mr);
	return err;
}

void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	cancel_delayed_work_sync(&mr->stats_update_dw);
	mr->mr_ops->fini(mr->priv);
	kfree(mr);
}