// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"

static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };

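/* Return the bridge port with the given ifindex, or NULL if the bridge has
 * no such port.
 */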
static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *res = NULL;
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list) {
		if (port->dev->ifindex == ifindex) {
			res = port;
			break;
		}
	}

	return res;
}

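/* Return the MRP instance with the given ring_id, or NULL if none exists.
 * The list is walked under RCU or with rtnl held.
 */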
static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->ring_id == ring_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
				       struct net_bridge_port *p)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (rcu_access_pointer(mrp->p_port) == p ||
		    rcu_access_pointer(mrp->s_port) == p) {
			res = mrp;
			break;
		}
	}

	return res;
}

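/* Return the next sequence number to place in the common header of the
 * generated MRP frames.
 */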
static int br_mrp_next_seq(struct br_mrp *mrp)
{
	mrp->seq_id++;
	return mrp->seq_id;
}

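/* Allocate an skb for an MRP frame and fill in the Ethernet header and the
 * MRP version field. The caller appends the protocol specific TLVs.
 */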
static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
					const u8 *src, const u8 *dst)
{
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be16 *version;

	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_MRP);
	skb->priority = MRP_FRAME_PRIO;
	skb_reserve(skb, sizeof(*eth_hdr));

	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, dst);
	ether_addr_copy(eth_hdr->h_source, src);
	eth_hdr->h_proto = htons(ETH_P_MRP);

	version = skb_put(skb, sizeof(*version));
	*version = cpu_to_be16(MRP_VERSION);

	return skb;
}

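/* Append a TLV header of the given type and length to the frame. */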
static void br_mrp_skb_tlv(struct sk_buff *skb,
			   enum br_mrp_tlv_header_type type,
			   u8 length)
{
	struct br_mrp_tlv_hdr *hdr;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->type = type;
	hdr->length = length;
}

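/* Append the MRP common header: the next sequence number and a domain UUID
 * set to all ones.
 */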
static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
	struct br_mrp_common_hdr *hdr;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}

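/* Build an MRP_Test frame to be sent on the given ring port. */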
static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
					     struct net_bridge_port *p,
					     enum br_mrp_port_role_type port_role)
{
	struct br_mrp_ring_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->prio = cpu_to_be16(MRP_DEFAULT_PRIO);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->ring_state);
	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

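/* Periodic work run while the node has the MRM role and MRP_Test generation
 * is done in SW: send an MRP_Test frame on both ring ports and, when too many
 * consecutive frames were missed, notify userspace that the ring is open.
 */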
static void br_mrp_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->test_end, jiffies))
		return;

	if (mrp->test_count_miss < mrp->test_max_miss) {
		mrp->test_count_miss++;
	} else {
		/* Notify that the ring is open only if the ring state is
		 * closed, otherwise it would continue to notify at every
		 * interval.
		 */
		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		skb = br_mrp_alloc_test_skb(mrp, p, BR_MRP_PORT_ROLE_PRIMARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		skb = br_mrp_alloc_test_skb(mrp, p, BR_MRP_PORT_ROLE_SECONDARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(mrp->test_interval));
}

/* Deletes the MRP instance.
 * note: called under rtnl_lock
 */
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
	struct net_bridge_port *p;

	/* Stop sending MRP_Test frames */
	cancel_delayed_work_sync(&mrp->test_work);
	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0);

	br_mrp_switchdev_del(br, mrp);

	/* Reset the ports */
	p = rtnl_dereference(mrp->p_port);
	if (p) {
		spin_lock_bh(&br->lock);
		p->state = BR_STATE_FORWARDING;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, BR_STATE_FORWARDING);
		rcu_assign_pointer(mrp->p_port, NULL);
	}

	p = rtnl_dereference(mrp->s_port);
	if (p) {
		spin_lock_bh(&br->lock);
		p->state = BR_STATE_FORWARDING;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, BR_STATE_FORWARDING);
		rcu_assign_pointer(mrp->s_port, NULL);
	}

	list_del_rcu(&mrp->list);
	kfree_rcu(mrp, rcu);
}

/* Adds a new MRP instance.
 * note: called under rtnl_lock
 */
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct net_bridge_port *p;
	struct br_mrp *mrp;
	int err;

	/* If the ring exists, it is not possible to create another one with
	 * the same ring_id.
	 */
	mrp = br_mrp_find_id(br, instance->ring_id);
	if (mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, instance->p_ifindex) ||
	    !br_mrp_get_port(br, instance->s_ifindex))
		return -EINVAL;

	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
	if (!mrp)
		return -ENOMEM;

	mrp->ring_id = instance->ring_id;

	p = br_mrp_get_port(br, instance->p_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->p_port, p);

	p = br_mrp_get_port(br, instance->s_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->s_port, p);

	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
	list_add_tail_rcu(&mrp->list, &br->mrp_list);

	err = br_mrp_switchdev_add(br, mrp);
	if (err)
		goto delete_mrp;

	return 0;

delete_mrp:
	br_mrp_del_impl(br, mrp);

	return err;
}

/* Deletes the MRP instance that the port is part of.
 * note: called under rtnl_lock
 */
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of an MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}

/* Deletes the existing MRP instance based on ring_id.
 * note: called under rtnl_lock
 */
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);

	if (!mrp)
		return -EINVAL;

	br_mrp_del_impl(br, mrp);

	return 0;
}

/* Set the port state; the port state can be forwarding, blocked or disabled.
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_state(struct net_bridge_port *p,
			  enum br_mrp_port_state_type state)
{
	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	spin_lock_bh(&p->br->lock);

	if (state == BR_MRP_PORT_STATE_FORWARDING)
		p->state = BR_STATE_FORWARDING;
	else
		p->state = BR_STATE_BLOCKING;

	spin_unlock_bh(&p->br->lock);

	br_mrp_port_switchdev_set_state(p, state);

	return 0;
}

/* Set the port role; the port role can be primary or secondary.
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_role(struct net_bridge_port *p,
			 struct br_mrp_port_role *role)
{
	struct br_mrp *mrp;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	mrp = br_mrp_find_id(p->br, role->ring_id);
	if (!mrp)
		return -EINVAL;

	if (role->role == BR_MRP_PORT_ROLE_PRIMARY)
		rcu_assign_pointer(mrp->p_port, p);
	else
		rcu_assign_pointer(mrp->s_port, p);

	br_mrp_port_switchdev_set_role(p, role->role);

	return 0;
}

/* Set the ring state; the ring state can only be Open or Closed.
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_state(struct net_bridge *br,
			  struct br_mrp_ring_state *state)
{
	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED &&
	    state->ring_state != BR_MRP_RING_STATE_CLOSED)
		mrp->ring_transitions++;

	mrp->ring_state = state->ring_state;

	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);

	return 0;
}

/* Set the ring role; the ring role can only be MRM (Media Redundancy Manager)
 * or MRC (Media Redundancy Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_role(struct net_bridge *br,
			 struct br_mrp_ring_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	int err;

	if (!mrp)
		return -EINVAL;

	mrp->ring_role = role->ring_role;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW does not need to do those
	 * operations anymore. For example, if the role is MRM then the HW
	 * will notify the SW when the ring is open, but if the role is not
	 * pushed to the HW the SW will need to detect when the ring is open.
	 */
	mrp->ring_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start generating MRP_Test frames. The frames are generated by the HW and,
 * if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_test(struct net_bridge *br,
		      struct br_mrp_start_test *test)
{
	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);

	if (!mrp)
		return -EINVAL;

	/* Try to push it to the HW. If it fails then continue to generate the
	 * frames in SW; if that also fails then return an error.
	 */
	if (!br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
					     test->max_miss, test->period))
		return 0;

	mrp->test_interval = test->interval;
	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
	mrp->test_max_miss = test->max_miss;
	mrp->test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(test->interval));

	return 0;
}

/* Process only the MRP_Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get the TLV header directly.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	mrp->test_count_miss = 0;

	/* Notify userspace that the ring is closed only when the ring is not
	 * already closed, otherwise it would notify on every received
	 * MRP_Test frame.
	 */
	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
		br_mrp_port_open(port->dev, false);
}

/* Forward the frame to the other MRP ring port (MRC role) or, when the ring
 * role is MRM, process the MRP_Test frame locally.
 * note: already called with rcu_read_lock
 */
static int br_mrp_rcv(struct net_bridge_port *p,
		      struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *s_dev, *p_dev, *d_dev;
	struct net_bridge_port *p_port, *s_port;
	struct net_bridge *br;
	struct sk_buff *nskb;
	struct br_mrp *mrp;

	/* If port is disabled don't accept any frames */
	if (p->state == BR_STATE_DISABLED)
		return 0;

	br = p->br;
	mrp = br_mrp_find_port(br, p);
	if (unlikely(!mrp))
		return 0;

	p_port = rcu_dereference(mrp->p_port);
	if (!p_port)
		return 0;

	s_port = rcu_dereference(mrp->s_port);
	if (!s_port)
		return 0;

	/* If the role is MRM then don't forward the frames */
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
		br_mrp_mrm_process(mrp, p, skb);
		return 1;
	}

	/* Clone the frame and forward it on the other MRP port */
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return 0;

	p_dev = p_port->dev;
	s_dev = s_port->dev;

	/* Pick the ring port on which the frame was not received */
	if (p_dev == dev)
		d_dev = s_dev;
	else
		d_dev = p_dev;

	nskb->dev = d_dev;
	skb_push(nskb, ETH_HLEN);
	dev_queue_xmit(nskb);

	return 1;
}

/* Check if the frame was received on a port that is part of an MRP ring and
 * if the frame has the MRP ethertype. In that case process the frame,
 * otherwise do normal forwarding.
 * note: already called with rcu_read_lock
 */
int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* If there is no MRP instance do normal forwarding */
	if (likely(!(p->flags & BR_MRP_AWARE)))
		goto out;

	if (unlikely(skb->protocol == htons(ETH_P_MRP)))
		return br_mrp_rcv(p, skb, p->dev);

out:
	return 0;
}

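/* Return true when at least one MRP instance exists on this bridge. */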
bool br_mrp_enabled(struct net_bridge *br)
{
	return !list_empty(&br->mrp_list);
}