u32 flow_tag;
u32 mod_hdr_id;
u32 hairpin_tirn;
+ u8 match_level;
struct mlx5_flow_table *hairpin_ft;
};
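For context, the new match_level field draws its values from a match-level enum; a minimal sketch, assuming (as the direct comparison against esw->offloads.inline_mode further down suggests) that each level aliases the corresponding eswitch inline mode:

enum {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

With this aliasing, a required match level and a configured inline mode can be ordered with a plain < comparison.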
table_created = true;
}
- parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ if (attr->match_level != MLX5_MATCH_NONE)
+ parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
&flow_act, dest, dest_ix);
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
+ attr->out_mdev = out_priv->mdev;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
- u8 *min_inline)
+ u8 *match_level)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
u16 addr_type = 0;
u8 ip_proto = 0;
- *min_inline = MLX5_INLINE_MODE_L2;
+ *match_level = MLX5_MATCH_NONE;
if (f->dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
inner_headers);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
-
- struct flow_dissector_key_control *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->mask);
- addr_type = key->addr_type;
-
- /* the HW doesn't support frag first/later */
- if (mask->flags & FLOW_DIS_FIRST_FRAG)
- return -EOPNOTSUPP;
-
- if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
- key->flags & FLOW_DIS_IS_FRAGMENT);
-
- /* the HW doesn't need L3 inline to match on frag=no */
- if (key->flags & FLOW_DIS_IS_FRAGMENT)
- *min_inline = MLX5_INLINE_MODE_IP;
- }
- }
-
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
- ip_proto = key->ip_proto;
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(mask->n_proto));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(key->n_proto));
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
- mask->ip_proto);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
- key->ip_proto);
-
- if (mask->ip_proto)
- *min_inline = MLX5_INLINE_MODE_IP;
- }
-
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_dissector_key_eth_addrs *key =
skb_flow_dissector_target(f->dissector,
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
key->src);
+
+ if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+ *match_level = MLX5_MATCH_L2;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
+
+ *match_level = MLX5_MATCH_L2;
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(key->n_proto));
+
+ if (mask->n_proto)
+ *match_level = MLX5_MATCH_L2;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->key);
+
+ struct flow_dissector_key_control *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->mask);
+ addr_type = key->addr_type;
+
+ /* the HW doesn't support frag first/later */
+ if (mask->flags & FLOW_DIS_FIRST_FRAG)
+ return -EOPNOTSUPP;
+
+ if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+ key->flags & FLOW_DIS_IS_FRAGMENT);
+
+ /* the HW doesn't need L3 inline to match on frag=no */
+ if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
+ *match_level = MLX5_MATCH_L2;
+ /* *** L2 attributes parsing up to here *** */
+ else
+ *match_level = MLX5_MATCH_L3;
}
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ ip_proto = key->ip_proto;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ mask->ip_proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ key->ip_proto);
+
+ if (mask->ip_proto)
+ *match_level = MLX5_MATCH_L3;
+ }
+
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
&key->dst, sizeof(key->dst));
if (mask->src || mask->dst)
- *min_inline = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
- *min_inline = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
return -EOPNOTSUPP;
if (mask->tos || mask->ttl)
- *min_inline = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
+ /* *** L3 attributes parsing up to here *** */
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_dissector_key_ports *key =
skb_flow_dissector_target(f->dissector,
}
if (mask->src || mask->dst)
- *min_inline = MLX5_INLINE_MODE_TCP_UDP;
+ *match_level = MLX5_MATCH_L4;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
ntohs(key->flags));
if (mask->flags)
- *min_inline = MLX5_INLINE_MODE_TCP_UDP;
+ *match_level = MLX5_MATCH_L4;
}
return 0;
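Note the ordering this patch establishes: the parser now assigns *match_level as it walks the keys L2 first, then L3, then L4 (hence the moved BASIC and CONTROL blocks and the "parsing up to here" markers), so the last assignment always reflects the deepest header layer the filter matches on. For example, a flower filter matching on ethertype, ip_proto and ports ends up with:

/* ethertype matched -> MLX5_MATCH_L2
 * ip_proto matched  -> MLX5_MATCH_L3
 * ports matched     -> MLX5_MATCH_L4  (final value)
 */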
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
- u8 min_inline;
+ u8 match_level;
int err;
- err = __parse_cls_flower(priv, spec, f, &min_inline);
+ err = __parse_cls_flower(priv, spec, f, &match_level);
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
if (rep->vport != FDB_UPLINK_VPORT &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
- esw->offloads.inline_mode < min_inline)) {
+ esw->offloads.inline_mode < match_level)) {
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
- min_inline, esw->offloads.inline_mode);
+ match_level, esw->offloads.inline_mode);
return -EOPNOTSUPP;
}
}
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ flow->esw_attr->match_level = match_level;
+ else
+ flow->nic_attr->match_level = match_level;
+
return err;
}
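The legality check above can be read as a standalone predicate; a minimal sketch, using a hypothetical helper name:

/* Hypothetical helper mirroring the check in parse_cls_flower(): a VF
 * flow is offloadable when the eswitch either imposes no inline mode,
 * or its inline mode covers the deepest matched header layer.
 */
static bool match_level_supported(u8 inline_mode, u8 match_level)
{
	return inline_mode == MLX5_INLINE_MODE_NONE ||
	       inline_mode >= match_level;
}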
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
LIST_HEAD(actions);
+ u32 action = 0;
int err;
if (!tcf_exts_has_actions(exts))
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.flow_counter))
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
if (is_tcf_csum(a)) {
- if (csum_offload_supported(priv, attr->action,
+ if (csum_offload_supported(priv, action,
tcf_csum_update_flags(a)))
continue;
same_hw_devs(priv, netdev_priv(peer_dev))) {
parse_attr->mirred_ifindex = peer_dev->ifindex;
flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
peer_dev->name);
}
attr->flow_tag = mark;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
return -EINVAL;
}
+ attr->action = action;
if (!actions_match_supported(priv, exts, parse_attr, flow))
return -EOPNOTSUPP;
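The switch from updating attr->action in place to accumulating flags in a local action variable means an early error return leaves attr->action untouched; a minimal illustration of the pattern, with hypothetical names:

/* Hypothetical illustration: build the action mask locally and commit it
 * once, so callers never observe a half-built mask after a failure.
 */
static int parse_actions(struct hypothetical_attr *attr)
{
	u32 action = 0;

	action |= ACTION_FLAG_A;
	if (unsupported_combination())
		return -EOPNOTSUPP;	/* attr->action never half-written */
	action |= ACTION_FLAG_B;

	attr->action = action;		/* single commit point on success */
	return 0;
}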
const struct tc_action *a;
LIST_HEAD(actions);
bool encap = false;
- int err = 0;
+ u32 action = 0;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
attr->in_rep = rpriv->rep;
+ attr->in_mdev = priv->mdev;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
if (is_tcf_pedit(a)) {
+ int err;
+
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
parse_attr);
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
continue;
}
if (is_tcf_csum(a)) {
- if (csum_offload_supported(priv, attr->action,
+ if (csum_offload_supported(priv, action,
tcf_csum_update_flags(a)))
continue;
if (switchdev_port_same_parent_id(priv->netdev,
out_dev)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
+ attr->out_mdev = out_priv->mdev;
} else if (encap) {
parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
/* attr->out_rep is resolved when we handle encap */
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
if (is_tcf_vlan(a)) {
if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
attr->vlan_vid = tcf_vlan_push_vid(a);
if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
attr->vlan_prio = tcf_vlan_push_prio(a);
}
if (is_tcf_tunnel_release(a)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
continue;
}
return -EINVAL;
}
+ attr->action = action;
if (!actions_match_supported(priv, exts, parse_attr, flow))
return -EOPNOTSUPP;
- return err;
+ return 0;
}
int mlx5e_configure_flower(struct mlx5e_priv *priv,