1 #ifndef __LINUX_MROUTE_BASE_H
2 #define __LINUX_MROUTE_BASE_H
4 #include <linux/netdevice.h>
5 #include <linux/rhashtable-types.h>
6 #include <linux/spinlock.h>
7 #include <net/net_namespace.h>
9 #include <net/fib_notifier.h>
10 #include <net/ip_fib.h>
13 * struct vif_device - interface representor for multicast routing
14 * @dev: network device being used
15 * @bytes_in: statistic; bytes ingressing
16 * @bytes_out: statistic; bytes egresing
17 * @pkt_in: statistic; packets ingressing
18 * @pkt_out: statistic; packets egressing
19 * @rate_limit: Traffic shaping (NI)
20 * @threshold: TTL threshold
21 * @flags: Control flags
22 * @link: Physical interface index
23 * @dev_parent_id: device parent id
24 * @local: Local address
25 * @remote: Remote address for tunnels
28 struct net_device *dev;
29 unsigned long bytes_in, bytes_out;
30 unsigned long pkt_in, pkt_out;
31 unsigned long rate_limit;
32 unsigned char threshold;
36 /* Currently only used by ipmr */
37 struct netdev_phys_item_id dev_parent_id;
41 struct vif_entry_notifier_info {
42 struct fib_notifier_info info;
43 struct net_device *dev;
44 unsigned short vif_index;
45 unsigned short vif_flags;
49 static inline int mr_call_vif_notifier(struct notifier_block *nb,
50 unsigned short family,
51 enum fib_event_type event_type,
52 struct vif_device *vif,
53 unsigned short vif_index, u32 tb_id)
55 struct vif_entry_notifier_info info = {
60 .vif_index = vif_index,
61 .vif_flags = vif->flags,
65 return call_fib_notifier(nb, event_type, &info.info);
68 static inline int mr_call_vif_notifiers(struct net *net,
69 unsigned short family,
70 enum fib_event_type event_type,
71 struct vif_device *vif,
72 unsigned short vif_index, u32 tb_id,
73 unsigned int *ipmr_seq)
75 struct vif_entry_notifier_info info = {
80 .vif_index = vif_index,
81 .vif_flags = vif->flags,
87 return call_fib_notifiers(net, event_type, &info.info);
#ifndef MAXVIFS
/* This one is nasty; value is defined in uapi using different symbols for
 * mroute and mroute6 but both map into same 32.
 */
#define MAXVIFS	32
#endif

/* True when slot @_idx of the table's VIF array is in use (dev bound). */
#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev))
100 * MFC_STATIC - the entry was added statically (not by a routing daemon)
101 * MFC_OFFLOAD - the entry was offloaded to the hardware
105 MFC_OFFLOAD = BIT(1),
109 * struct mr_mfc - common multicast routing entries
110 * @mnode: rhashtable list
111 * @mfc_parent: source interface (iif)
112 * @mfc_flags: entry flags
113 * @expires: unresolved entry expire time
114 * @unresolved: unresolved cached skbs
115 * @last_assert: time of last assert
116 * @minvif: minimum VIF id
117 * @maxvif: maximum VIF id
118 * @bytes: bytes that have passed for this entry
119 * @pkt: packets that have passed for this entry
120 * @wrong_if: number of wrong source interface hits
121 * @lastuse: time of last use of the group (traffic or update)
122 * @ttls: OIF TTL threshold array
123 * @refcount: reference count for this entry
124 * @list: global entry list
125 * @rcu: used for entry destruction
126 * @free: Operation used for freeing an entry under RCU
129 struct rhlist_head mnode;
130 unsigned short mfc_parent;
135 unsigned long expires;
136 struct sk_buff_head unresolved;
139 unsigned long last_assert;
144 unsigned long wrong_if;
145 unsigned long lastuse;
146 unsigned char ttls[MAXVIFS];
150 struct list_head list;
152 void (*free)(struct rcu_head *head);
155 static inline void mr_cache_put(struct mr_mfc *c)
157 if (refcount_dec_and_test(&c->mfc_un.res.refcount))
158 call_rcu(&c->rcu, c->free);
161 static inline void mr_cache_hold(struct mr_mfc *c)
163 refcount_inc(&c->mfc_un.res.refcount);
166 struct mfc_entry_notifier_info {
167 struct fib_notifier_info info;
172 static inline int mr_call_mfc_notifier(struct notifier_block *nb,
173 unsigned short family,
174 enum fib_event_type event_type,
175 struct mr_mfc *mfc, u32 tb_id)
177 struct mfc_entry_notifier_info info = {
185 return call_fib_notifier(nb, event_type, &info.info);
188 static inline int mr_call_mfc_notifiers(struct net *net,
189 unsigned short family,
190 enum fib_event_type event_type,
191 struct mr_mfc *mfc, u32 tb_id,
192 unsigned int *ipmr_seq)
194 struct mfc_entry_notifier_info info = {
204 return call_fib_notifiers(net, event_type, &info.info);
/**
 * struct mr_table_ops - callbacks and info for protocol-specific ops
 * @rht_params: parameters for accessing the MFC hash
 * @cmparg_any: a hash key to be used for matching on (*,*) routes
 */
struct mr_table_ops {
	const struct rhashtable_params *rht_params;
	void *cmparg_any;
};
220 * struct mr_table - a multicast routing table
221 * @list: entry within a list of multicast routing tables
222 * @net: net where this table belongs
223 * @ops: protocol specific operations
224 * @id: identifier of the table
225 * @mroute_sk: socket associated with the table
226 * @ipmr_expire_timer: timer for handling unresolved routes
227 * @mfc_unres_queue: list of unresolved MFC entries
228 * @vif_table: array containing all possible vifs
229 * @mfc_hash: Hash table of all resolved routes for easy lookup
230 * @mfc_cache_list: list of resovled routes for possible traversal
231 * @maxvif: Identifier of highest value vif currently in use
232 * @cache_resolve_queue_len: current size of unresolved queue
233 * @mroute_do_assert: Whether to inform userspace on wrong ingress
234 * @mroute_do_pim: Whether to receive IGMP PIMv1
235 * @mroute_reg_vif_num: PIM-device vif index
238 struct list_head list;
240 struct mr_table_ops ops;
242 struct sock __rcu *mroute_sk;
243 struct timer_list ipmr_expire_timer;
244 struct list_head mfc_unres_queue;
245 struct vif_device vif_table[MAXVIFS];
246 struct rhltable mfc_hash;
247 struct list_head mfc_cache_list;
249 atomic_t cache_resolve_queue_len;
250 bool mroute_do_assert;
252 bool mroute_do_wrvifwhole;
253 int mroute_reg_vif_num;
256 #ifdef CONFIG_IP_MROUTE_COMMON
257 void vif_device_init(struct vif_device *v,
258 struct net_device *dev,
259 unsigned long rate_limit,
260 unsigned char threshold,
261 unsigned short flags,
262 unsigned short get_iflink_mask);
265 mr_table_alloc(struct net *net, u32 id,
266 struct mr_table_ops *ops,
267 void (*expire_func)(struct timer_list *t),
268 void (*table_set)(struct mr_table *mrt,
271 /* These actually return 'struct mr_mfc *', but to avoid need for explicit
272 * castings they simply return void.
274 void *mr_mfc_find_parent(struct mr_table *mrt,
275 void *hasharg, int parent);
276 void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi);
277 void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);
279 int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
280 struct mr_mfc *c, struct rtmsg *rtm);
281 int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
282 struct netlink_callback *cb,
283 int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
284 u32 portid, u32 seq, struct mr_mfc *c,
286 spinlock_t *lock, struct fib_dump_filter *filter);
287 int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
288 struct mr_table *(*iter)(struct net *net,
289 struct mr_table *mrt),
290 int (*fill)(struct mr_table *mrt,
292 u32 portid, u32 seq, struct mr_mfc *c,
294 spinlock_t *lock, struct fib_dump_filter *filter);
296 int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
297 int (*rules_dump)(struct net *net,
298 struct notifier_block *nb),
299 struct mr_table *(*mr_iter)(struct net *net,
300 struct mr_table *mrt),
303 static inline void vif_device_init(struct vif_device *v,
304 struct net_device *dev,
305 unsigned long rate_limit,
306 unsigned char threshold,
307 unsigned short flags,
308 unsigned short get_iflink_mask)
312 static inline void *mr_mfc_find_parent(struct mr_table *mrt,
313 void *hasharg, int parent)
318 static inline void *mr_mfc_find_any_parent(struct mr_table *mrt,
324 static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt,
325 int vifi, void *hasharg)
330 static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
331 struct mr_mfc *c, struct rtmsg *rtm)
337 mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
338 struct mr_table *(*iter)(struct net *net,
339 struct mr_table *mrt),
340 int (*fill)(struct mr_table *mrt,
342 u32 portid, u32 seq, struct mr_mfc *c,
344 spinlock_t *lock, struct fib_dump_filter *filter)
349 static inline int mr_dump(struct net *net, struct notifier_block *nb,
350 unsigned short family,
351 int (*rules_dump)(struct net *net,
352 struct notifier_block *nb),
353 struct mr_table *(*mr_iter)(struct net *net,
354 struct mr_table *mrt),
/* Look up an MFC entry by hash argument, ignoring the parent vif (-1). */
static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg)
{
	return mr_mfc_find_parent(mrt, hasharg, -1);
}
366 #ifdef CONFIG_PROC_FS
368 struct seq_net_private p;
369 struct mr_table *mrt;
374 struct seq_net_private p;
375 struct mr_table *mrt;
376 struct list_head *cache;
378 /* Lock protecting the mr_table's unresolved queue */
382 #ifdef CONFIG_IP_MROUTE_COMMON
383 void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos);
384 void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos);
386 static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
388 return *pos ? mr_vif_seq_idx(seq_file_net(seq),
389 seq->private, *pos - 1)
393 /* These actually return 'struct mr_mfc *', but to avoid need for explicit
394 * castings they simply return void.
396 void *mr_mfc_seq_idx(struct net *net,
397 struct mr_mfc_iter *it, loff_t pos);
398 void *mr_mfc_seq_next(struct seq_file *seq, void *v,
401 static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
402 struct mr_table *mrt, spinlock_t *lock)
404 struct mr_mfc_iter *it = seq->private;
410 return *pos ? mr_mfc_seq_idx(seq_file_net(seq),
411 seq->private, *pos - 1)
415 static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
417 struct mr_mfc_iter *it = seq->private;
418 struct mr_table *mrt = it->mrt;
420 if (it->cache == &mrt->mfc_unres_queue)
421 spin_unlock_bh(it->lock);
422 else if (it->cache == &mrt->mfc_cache_list)
426 static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter,
432 static inline void *mr_vif_seq_next(struct seq_file *seq,
433 void *v, loff_t *pos)
438 static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
443 static inline void *mr_mfc_seq_idx(struct net *net,
444 struct mr_mfc_iter *it, loff_t pos)
449 static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v,
455 static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
456 struct mr_table *mrt, spinlock_t *lock)
461 static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)