net/openvswitch/flow_netlink.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2007-2017 Nicira, Inc.
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include "flow.h"
9 #include "datapath.h"
10 #include <linux/uaccess.h>
11 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
13 #include <linux/if_ether.h>
14 #include <linux/if_vlan.h>
15 #include <net/llc_pdu.h>
16 #include <linux/kernel.h>
17 #include <linux/jhash.h>
18 #include <linux/jiffies.h>
19 #include <linux/llc.h>
20 #include <linux/module.h>
21 #include <linux/in.h>
22 #include <linux/rcupdate.h>
23 #include <linux/if_arp.h>
24 #include <linux/ip.h>
25 #include <linux/ipv6.h>
26 #include <linux/sctp.h>
27 #include <linux/tcp.h>
28 #include <linux/udp.h>
29 #include <linux/icmp.h>
30 #include <linux/icmpv6.h>
31 #include <linux/rculist.h>
32 #include <net/geneve.h>
33 #include <net/ip.h>
34 #include <net/ipv6.h>
35 #include <net/ndisc.h>
36 #include <net/mpls.h>
37 #include <net/vxlan.h>
38 #include <net/tun_proto.h>
39 #include <net/erspan.h>
40
41 #include "flow_netlink.h"
42
43 struct ovs_len_tbl {
44         int len;
45         const struct ovs_len_tbl *next;
46 };
47
48 #define OVS_ATTR_NESTED -1
49 #define OVS_ATTR_VARIABLE -2
50
51 static bool actions_may_change_flow(const struct nlattr *actions)
52 {
53         struct nlattr *nla;
54         int rem;
55
56         nla_for_each_nested(nla, actions, rem) {
57                 u16 action = nla_type(nla);
58
59                 switch (action) {
60                 case OVS_ACTION_ATTR_OUTPUT:
61                 case OVS_ACTION_ATTR_RECIRC:
62                 case OVS_ACTION_ATTR_TRUNC:
63                 case OVS_ACTION_ATTR_USERSPACE:
64                         break;
65
66                 case OVS_ACTION_ATTR_CT:
67                 case OVS_ACTION_ATTR_CT_CLEAR:
68                 case OVS_ACTION_ATTR_HASH:
69                 case OVS_ACTION_ATTR_POP_ETH:
70                 case OVS_ACTION_ATTR_POP_MPLS:
71                 case OVS_ACTION_ATTR_POP_NSH:
72                 case OVS_ACTION_ATTR_POP_VLAN:
73                 case OVS_ACTION_ATTR_PUSH_ETH:
74                 case OVS_ACTION_ATTR_PUSH_MPLS:
75                 case OVS_ACTION_ATTR_PUSH_NSH:
76                 case OVS_ACTION_ATTR_PUSH_VLAN:
77                 case OVS_ACTION_ATTR_SAMPLE:
78                 case OVS_ACTION_ATTR_SET:
79                 case OVS_ACTION_ATTR_SET_MASKED:
80                 case OVS_ACTION_ATTR_METER:
81                 case OVS_ACTION_ATTR_CHECK_PKT_LEN:
82                 case OVS_ACTION_ATTR_ADD_MPLS:
83                 default:
84                         return true;
85                 }
86         }
87         return false;
88 }
89
90 static void update_range(struct sw_flow_match *match,
91                          size_t offset, size_t size, bool is_mask)
92 {
93         struct sw_flow_key_range *range;
94         size_t start = rounddown(offset, sizeof(long));
95         size_t end = roundup(offset + size, sizeof(long));
96
97         if (!is_mask)
98                 range = &match->range;
99         else
100                 range = &match->mask->range;
101
102         if (range->start == range->end) {
103                 range->start = start;
104                 range->end = end;
105                 return;
106         }
107
108         if (range->start > start)
109                 range->start = start;
110
111         if (range->end < end)
112                 range->end = end;
113 }
114
115 #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
116         do { \
117                 update_range(match, offsetof(struct sw_flow_key, field),    \
118                              sizeof((match)->key->field), is_mask);         \
119                 if (is_mask)                                                \
120                         (match)->mask->key.field = value;                   \
121                 else                                                        \
122                         (match)->key->field = value;                        \
123         } while (0)
124
125 #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask)     \
126         do {                                                                \
127                 update_range(match, offset, len, is_mask);                  \
128                 if (is_mask)                                                \
129                         memcpy((u8 *)&(match)->mask->key + offset, value_p, \
130                                len);                                       \
131                 else                                                        \
132                         memcpy((u8 *)(match)->key + offset, value_p, len);  \
133         } while (0)
134
135 #define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask)               \
136         SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
137                                   value_p, len, is_mask)
138
139 #define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask)              \
140         do {                                                                \
141                 update_range(match, offsetof(struct sw_flow_key, field),    \
142                              sizeof((match)->key->field), is_mask);         \
143                 if (is_mask)                                                \
144                         memset((u8 *)&(match)->mask->key.field, value,      \
145                                sizeof((match)->mask->key.field));           \
146                 else                                                        \
147                         memset((u8 *)&(match)->key->field, value,           \
148                                sizeof((match)->key->field));                \
149         } while (0)
150
151 static bool match_validate(const struct sw_flow_match *match,
152                            u64 key_attrs, u64 mask_attrs, bool log)
153 {
154         u64 key_expected = 0;
155         u64 mask_allowed = key_attrs;  /* At most allow all key attributes */
156
157         /* The following mask attributes are allowed only if they
158          * pass the validation tests. */
159         mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
160                         | (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)
161                         | (1 << OVS_KEY_ATTR_IPV6)
162                         | (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)
163                         | (1 << OVS_KEY_ATTR_TCP)
164                         | (1 << OVS_KEY_ATTR_TCP_FLAGS)
165                         | (1 << OVS_KEY_ATTR_UDP)
166                         | (1 << OVS_KEY_ATTR_SCTP)
167                         | (1 << OVS_KEY_ATTR_ICMP)
168                         | (1 << OVS_KEY_ATTR_ICMPV6)
169                         | (1 << OVS_KEY_ATTR_ARP)
170                         | (1 << OVS_KEY_ATTR_ND)
171                         | (1 << OVS_KEY_ATTR_MPLS)
172                         | (1 << OVS_KEY_ATTR_NSH));
173
174         /* Always allowed mask fields. */
175         mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
176                        | (1 << OVS_KEY_ATTR_IN_PORT)
177                        | (1 << OVS_KEY_ATTR_ETHERTYPE));
178
179         /* Check key attributes. */
180         if (match->key->eth.type == htons(ETH_P_ARP)
181                         || match->key->eth.type == htons(ETH_P_RARP)) {
182                 key_expected |= 1 << OVS_KEY_ATTR_ARP;
183                 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
184                         mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
185         }
186
187         if (eth_p_mpls(match->key->eth.type)) {
188                 key_expected |= 1 << OVS_KEY_ATTR_MPLS;
189                 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
190                         mask_allowed |= 1 << OVS_KEY_ATTR_MPLS;
191         }
192
193         if (match->key->eth.type == htons(ETH_P_IP)) {
194                 key_expected |= 1 << OVS_KEY_ATTR_IPV4;
195                 if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
196                         mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
197                         mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
198                 }
199
200                 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
201                         if (match->key->ip.proto == IPPROTO_UDP) {
202                                 key_expected |= 1 << OVS_KEY_ATTR_UDP;
203                                 if (match->mask && (match->mask->key.ip.proto == 0xff))
204                                         mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
205                         }
206
207                         if (match->key->ip.proto == IPPROTO_SCTP) {
208                                 key_expected |= 1 << OVS_KEY_ATTR_SCTP;
209                                 if (match->mask && (match->mask->key.ip.proto == 0xff))
210                                         mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
211                         }
212
213                         if (match->key->ip.proto == IPPROTO_TCP) {
214                                 key_expected |= 1 << OVS_KEY_ATTR_TCP;
215                                 key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
216                                 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
217                                         mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
218                                         mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
219                                 }
220                         }
221
222                         if (match->key->ip.proto == IPPROTO_ICMP) {
223                                 key_expected |= 1 << OVS_KEY_ATTR_ICMP;
224                                 if (match->mask && (match->mask->key.ip.proto == 0xff))
225                                         mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
226                         }
227                 }
228         }
229
230         if (match->key->eth.type == htons(ETH_P_IPV6)) {
231                 key_expected |= 1 << OVS_KEY_ATTR_IPV6;
232                 if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
233                         mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
234                         mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
235                 }
236
237                 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
238                         if (match->key->ip.proto == IPPROTO_UDP) {
239                                 key_expected |= 1 << OVS_KEY_ATTR_UDP;
240                                 if (match->mask && (match->mask->key.ip.proto == 0xff))
241                                         mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
242                         }
243
244                         if (match->key->ip.proto == IPPROTO_SCTP) {
245                                 key_expected |= 1 << OVS_KEY_ATTR_SCTP;
246                                 if (match->mask && (match->mask->key.ip.proto == 0xff))
247                                         mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
248                         }
249
250                         if (match->key->ip.proto == IPPROTO_TCP) {
251                                 key_expected |= 1 << OVS_KEY_ATTR_TCP;
252                                 key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
253                                 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
254                                         mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
255                                         mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
256                                 }
257                         }
258
259                         if (match->key->ip.proto == IPPROTO_ICMPV6) {
260                                 key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
261                                 if (match->mask && (match->mask->key.ip.proto == 0xff))
262                                         mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
263
264                                 if (match->key->tp.src ==
265                                                 htons(NDISC_NEIGHBOUR_SOLICITATION) ||
266                                     match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
267                                         key_expected |= 1 << OVS_KEY_ATTR_ND;
268                                         /* Original direction conntrack tuple
269                                          * uses the same space as the ND fields
270                                          * in the key, so both are not allowed
271                                          * at the same time.
272                                          */
273                                         mask_allowed &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
274                                         if (match->mask && (match->mask->key.tp.src == htons(0xff)))
275                                                 mask_allowed |= 1 << OVS_KEY_ATTR_ND;
276                                 }
277                         }
278                 }
279         }
280
281         if (match->key->eth.type == htons(ETH_P_NSH)) {
282                 key_expected |= 1 << OVS_KEY_ATTR_NSH;
283                 if (match->mask &&
284                     match->mask->key.eth.type == htons(0xffff)) {
285                         mask_allowed |= 1 << OVS_KEY_ATTR_NSH;
286                 }
287         }
288
289         if ((key_attrs & key_expected) != key_expected) {
290                 /* Key attributes check failed. */
291                 OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
292                           (unsigned long long)key_attrs,
293                           (unsigned long long)key_expected);
294                 return false;
295         }
296
297         if ((mask_attrs & mask_allowed) != mask_attrs) {
298                 /* Mask attributes check failed. */
299                 OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
300                           (unsigned long long)mask_attrs,
301                           (unsigned long long)mask_allowed);
302                 return false;
303         }
304
305         return true;
306 }
307
308 size_t ovs_tun_key_attr_size(void)
309 {
310         /* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
311          * updating this function.
312          */
313         return    nla_total_size_64bit(8) /* OVS_TUNNEL_KEY_ATTR_ID */
314                 + nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
315                 + nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
316                 + nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
317                 + nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TTL */
318                 + nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
319                 + nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_CSUM */
320                 + nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_OAM */
321                 + nla_total_size(256)  /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
322                 /* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS and
323                  * OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS are mutually exclusive with
324                  * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
325                  */
326                 + nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
327                 + nla_total_size(2);   /* OVS_TUNNEL_KEY_ATTR_TP_DST */
328 }
329
330 static size_t ovs_nsh_key_attr_size(void)
331 {
332         /* Whenever adding new OVS_NSH_KEY_ FIELDS, we should consider
333          * updating this function.
334          */
335         return  nla_total_size(NSH_BASE_HDR_LEN) /* OVS_NSH_KEY_ATTR_BASE */
336                 /* OVS_NSH_KEY_ATTR_MD1 and OVS_NSH_KEY_ATTR_MD2 are
337                  * mutually exclusive, so the bigger one can cover
338                  * the smaller one.
339                  */
340                 + nla_total_size(NSH_CTX_HDRS_MAX_LEN);
341 }
342
343 size_t ovs_key_attr_size(void)
344 {
345         /* Whenever adding new OVS_KEY_ FIELDS, we should consider
346          * updating this function.
347          */
348         BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 29);
349
350         return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
351                 + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
352                   + ovs_tun_key_attr_size()
353                 + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
354                 + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
355                 + nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
356                 + nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
357                 + nla_total_size(4)   /* OVS_KEY_ATTR_CT_STATE */
358                 + nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
359                 + nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
360                 + nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
361                 + nla_total_size(40)  /* OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6 */
362                 + nla_total_size(0)   /* OVS_KEY_ATTR_NSH */
363                   + ovs_nsh_key_attr_size()
364                 + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
365                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
366                 + nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
367                 + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
368                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
369                 + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
370                 + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
371                 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
372 }
373
374 static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
375         [OVS_VXLAN_EXT_GBP]         = { .len = sizeof(u32) },
376 };
377
378 static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
379         [OVS_TUNNEL_KEY_ATTR_ID]            = { .len = sizeof(u64) },
380         [OVS_TUNNEL_KEY_ATTR_IPV4_SRC]      = { .len = sizeof(u32) },
381         [OVS_TUNNEL_KEY_ATTR_IPV4_DST]      = { .len = sizeof(u32) },
382         [OVS_TUNNEL_KEY_ATTR_TOS]           = { .len = 1 },
383         [OVS_TUNNEL_KEY_ATTR_TTL]           = { .len = 1 },
384         [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
385         [OVS_TUNNEL_KEY_ATTR_CSUM]          = { .len = 0 },
386         [OVS_TUNNEL_KEY_ATTR_TP_SRC]        = { .len = sizeof(u16) },
387         [OVS_TUNNEL_KEY_ATTR_TP_DST]        = { .len = sizeof(u16) },
388         [OVS_TUNNEL_KEY_ATTR_OAM]           = { .len = 0 },
389         [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_VARIABLE },
390         [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED,
391                                                 .next = ovs_vxlan_ext_key_lens },
392         [OVS_TUNNEL_KEY_ATTR_IPV6_SRC]      = { .len = sizeof(struct in6_addr) },
393         [OVS_TUNNEL_KEY_ATTR_IPV6_DST]      = { .len = sizeof(struct in6_addr) },
394         [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS]   = { .len = OVS_ATTR_VARIABLE },
395         [OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE]   = { .len = 0 },
396 };
397
398 static const struct ovs_len_tbl
399 ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
400         [OVS_NSH_KEY_ATTR_BASE] = { .len = sizeof(struct ovs_nsh_key_base) },
401         [OVS_NSH_KEY_ATTR_MD1]  = { .len = sizeof(struct ovs_nsh_key_md1) },
402         [OVS_NSH_KEY_ATTR_MD2]  = { .len = OVS_ATTR_VARIABLE },
403 };
404
405 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
406 static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
407         [OVS_KEY_ATTR_ENCAP]     = { .len = OVS_ATTR_NESTED },
408         [OVS_KEY_ATTR_PRIORITY]  = { .len = sizeof(u32) },
409         [OVS_KEY_ATTR_IN_PORT]   = { .len = sizeof(u32) },
410         [OVS_KEY_ATTR_SKB_MARK]  = { .len = sizeof(u32) },
411         [OVS_KEY_ATTR_ETHERNET]  = { .len = sizeof(struct ovs_key_ethernet) },
412         [OVS_KEY_ATTR_VLAN]      = { .len = sizeof(__be16) },
413         [OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
414         [OVS_KEY_ATTR_IPV4]      = { .len = sizeof(struct ovs_key_ipv4) },
415         [OVS_KEY_ATTR_IPV6]      = { .len = sizeof(struct ovs_key_ipv6) },
416         [OVS_KEY_ATTR_TCP]       = { .len = sizeof(struct ovs_key_tcp) },
417         [OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
418         [OVS_KEY_ATTR_UDP]       = { .len = sizeof(struct ovs_key_udp) },
419         [OVS_KEY_ATTR_SCTP]      = { .len = sizeof(struct ovs_key_sctp) },
420         [OVS_KEY_ATTR_ICMP]      = { .len = sizeof(struct ovs_key_icmp) },
421         [OVS_KEY_ATTR_ICMPV6]    = { .len = sizeof(struct ovs_key_icmpv6) },
422         [OVS_KEY_ATTR_ARP]       = { .len = sizeof(struct ovs_key_arp) },
423         [OVS_KEY_ATTR_ND]        = { .len = sizeof(struct ovs_key_nd) },
424         [OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
425         [OVS_KEY_ATTR_DP_HASH]   = { .len = sizeof(u32) },
426         [OVS_KEY_ATTR_TUNNEL]    = { .len = OVS_ATTR_NESTED,
427                                      .next = ovs_tunnel_key_lens, },
428         [OVS_KEY_ATTR_MPLS]      = { .len = OVS_ATTR_VARIABLE },
429         [OVS_KEY_ATTR_CT_STATE]  = { .len = sizeof(u32) },
430         [OVS_KEY_ATTR_CT_ZONE]   = { .len = sizeof(u16) },
431         [OVS_KEY_ATTR_CT_MARK]   = { .len = sizeof(u32) },
432         [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
433         [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = {
434                 .len = sizeof(struct ovs_key_ct_tuple_ipv4) },
435         [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = {
436                 .len = sizeof(struct ovs_key_ct_tuple_ipv6) },
437         [OVS_KEY_ATTR_NSH]       = { .len = OVS_ATTR_NESTED,
438                                      .next = ovs_nsh_key_attr_lens, },
439 };
440
441 static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
442 {
443         return expected_len == attr_len ||
444                expected_len == OVS_ATTR_NESTED ||
445                expected_len == OVS_ATTR_VARIABLE;
446 }
447
448 static bool is_all_zero(const u8 *fp, size_t size)
449 {
450         int i;
451
452         if (!fp)
453                 return false;
454
455         for (i = 0; i < size; i++)
456                 if (fp[i])
457                         return false;
458
459         return true;
460 }
461
462 static int __parse_flow_nlattrs(const struct nlattr *attr,
463                                 const struct nlattr *a[],
464                                 u64 *attrsp, bool log, bool nz)
465 {
466         const struct nlattr *nla;
467         u64 attrs;
468         int rem;
469
470         attrs = *attrsp;
471         nla_for_each_nested(nla, attr, rem) {
472                 u16 type = nla_type(nla);
473                 int expected_len;
474
475                 if (type > OVS_KEY_ATTR_MAX) {
476                         OVS_NLERR(log, "Key type %d is out of range max %d",
477                                   type, OVS_KEY_ATTR_MAX);
478                         return -EINVAL;
479                 }
480
481                 if (attrs & (1 << type)) {
482                         OVS_NLERR(log, "Duplicate key (type %d).", type);
483                         return -EINVAL;
484                 }
485
486                 expected_len = ovs_key_lens[type].len;
487                 if (!check_attr_len(nla_len(nla), expected_len)) {
488                         OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
489                                   type, nla_len(nla), expected_len);
490                         return -EINVAL;
491                 }
492
493                 if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
494                         attrs |= 1 << type;
495                         a[type] = nla;
496                 }
497         }
498         if (rem) {
499                 OVS_NLERR(log, "Message has %d unknown bytes.", rem);
500                 return -EINVAL;
501         }
502
503         *attrsp = attrs;
504         return 0;
505 }
506
507 static int parse_flow_mask_nlattrs(const struct nlattr *attr,
508                                    const struct nlattr *a[], u64 *attrsp,
509                                    bool log)
510 {
511         return __parse_flow_nlattrs(attr, a, attrsp, log, true);
512 }
513
514 int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[],
515                        u64 *attrsp, bool log)
516 {
517         return __parse_flow_nlattrs(attr, a, attrsp, log, false);
518 }
519
520 static int genev_tun_opt_from_nlattr(const struct nlattr *a,
521                                      struct sw_flow_match *match, bool is_mask,
522                                      bool log)
523 {
524         unsigned long opt_key_offset;
525
526         if (nla_len(a) > sizeof(match->key->tun_opts)) {
527                 OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
528                           nla_len(a), sizeof(match->key->tun_opts));
529                 return -EINVAL;
530         }
531
532         if (nla_len(a) % 4 != 0) {
533                 OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
534                           nla_len(a));
535                 return -EINVAL;
536         }
537
538         /* We need to record the length of the options passed
539          * down, otherwise packets with the same format but
540          * additional options will be silently matched.
541          */
542         if (!is_mask) {
543                 SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
544                                 false);
545         } else {
546                 /* This is somewhat unusual because it looks at
547                  * both the key and mask while parsing the
548                  * attributes (and by extension assumes the key
549                  * is parsed first). Normally, we would verify
550                  * that each is the correct length and that the
551                  * attributes line up in the validate function.
552                  * However, that is difficult because this is
553                  * variable length and we won't have the
554                  * information later.
555                  */
556                 if (match->key->tun_opts_len != nla_len(a)) {
557                         OVS_NLERR(log, "Geneve option len %d != mask len %d",
558                                   match->key->tun_opts_len, nla_len(a));
559                         return -EINVAL;
560                 }
561
562                 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
563         }
564
565         opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
566         SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
567                                   nla_len(a), is_mask);
568         return 0;
569 }
570
571 static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
572                                      struct sw_flow_match *match, bool is_mask,
573                                      bool log)
574 {
575         struct nlattr *a;
576         int rem;
577         unsigned long opt_key_offset;
578         struct vxlan_metadata opts;
579
580         BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
581
582         memset(&opts, 0, sizeof(opts));
583         nla_for_each_nested(a, attr, rem) {
584                 int type = nla_type(a);
585
586                 if (type > OVS_VXLAN_EXT_MAX) {
587                         OVS_NLERR(log, "VXLAN extension %d out of range max %d",
588                                   type, OVS_VXLAN_EXT_MAX);
589                         return -EINVAL;
590                 }
591
592                 if (!check_attr_len(nla_len(a),
593                                     ovs_vxlan_ext_key_lens[type].len)) {
594                         OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
595                                   type, nla_len(a),
596                                   ovs_vxlan_ext_key_lens[type].len);
597                         return -EINVAL;
598                 }
599
600                 switch (type) {
601                 case OVS_VXLAN_EXT_GBP:
602                         opts.gbp = nla_get_u32(a);
603                         break;
604                 default:
605                         OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
606                                   type);
607                         return -EINVAL;
608                 }
609         }
610         if (rem) {
611                 OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
612                           rem);
613                 return -EINVAL;
614         }
615
616         if (!is_mask)
617                 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
618         else
619                 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
620
621         opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
622         SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
623                                   is_mask);
624         return 0;
625 }
626
627 static int erspan_tun_opt_from_nlattr(const struct nlattr *a,
628                                       struct sw_flow_match *match, bool is_mask,
629                                       bool log)
630 {
631         unsigned long opt_key_offset;
632
633         BUILD_BUG_ON(sizeof(struct erspan_metadata) >
634                      sizeof(match->key->tun_opts));
635
636         if (nla_len(a) > sizeof(match->key->tun_opts)) {
637                 OVS_NLERR(log, "ERSPAN option length err (len %d, max %zu).",
638                           nla_len(a), sizeof(match->key->tun_opts));
639                 return -EINVAL;
640         }
641
642         if (!is_mask)
643                 SW_FLOW_KEY_PUT(match, tun_opts_len,
644                                 sizeof(struct erspan_metadata), false);
645         else
646                 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
647
648         opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
649         SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
650                                   nla_len(a), is_mask);
651         return 0;
652 }
653
654 static int ip_tun_from_nlattr(const struct nlattr *attr,
655                               struct sw_flow_match *match, bool is_mask,
656                               bool log)
657 {
658         bool ttl = false, ipv4 = false, ipv6 = false;
659         bool info_bridge_mode = false;
660         __be16 tun_flags = 0;
661         int opts_type = 0;
662         struct nlattr *a;
663         int rem;
664
665         nla_for_each_nested(a, attr, rem) {
666                 int type = nla_type(a);
667                 int err;
668
669                 if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
670                         OVS_NLERR(log, "Tunnel attr %d out of range max %d",
671                                   type, OVS_TUNNEL_KEY_ATTR_MAX);
672                         return -EINVAL;
673                 }
674
675                 if (!check_attr_len(nla_len(a),
676                                     ovs_tunnel_key_lens[type].len)) {
677                         OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
678                                   type, nla_len(a), ovs_tunnel_key_lens[type].len);
679                         return -EINVAL;
680                 }
681
682                 switch (type) {
683                 case OVS_TUNNEL_KEY_ATTR_ID:
684                         SW_FLOW_KEY_PUT(match, tun_key.tun_id,
685                                         nla_get_be64(a), is_mask);
686                         tun_flags |= TUNNEL_KEY;
687                         break;
688                 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
689                         SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
690                                         nla_get_in_addr(a), is_mask);
691                         ipv4 = true;
692                         break;
693                 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
694                         SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
695                                         nla_get_in_addr(a), is_mask);
696                         ipv4 = true;
697                         break;
698                 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
699                         SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
700                                         nla_get_in6_addr(a), is_mask);
701                         ipv6 = true;
702                         break;
703                 case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
704                         SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
705                                         nla_get_in6_addr(a), is_mask);
706                         ipv6 = true;
707                         break;
708                 case OVS_TUNNEL_KEY_ATTR_TOS:
709                         SW_FLOW_KEY_PUT(match, tun_key.tos,
710                                         nla_get_u8(a), is_mask);
711                         break;
712                 case OVS_TUNNEL_KEY_ATTR_TTL:
713                         SW_FLOW_KEY_PUT(match, tun_key.ttl,
714                                         nla_get_u8(a), is_mask);
715                         ttl = true;
716                         break;
717                 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
718                         tun_flags |= TUNNEL_DONT_FRAGMENT;
719                         break;
720                 case OVS_TUNNEL_KEY_ATTR_CSUM:
721                         tun_flags |= TUNNEL_CSUM;
722                         break;
723                 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
724                         SW_FLOW_KEY_PUT(match, tun_key.tp_src,
725                                         nla_get_be16(a), is_mask);
726                         break;
727                 case OVS_TUNNEL_KEY_ATTR_TP_DST:
728                         SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
729                                         nla_get_be16(a), is_mask);
730                         break;
731                 case OVS_TUNNEL_KEY_ATTR_OAM:
732                         tun_flags |= TUNNEL_OAM;
733                         break;
734                 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
735                         if (opts_type) {
736                                 OVS_NLERR(log, "Multiple metadata blocks provided");
737                                 return -EINVAL;
738                         }
739
740                         err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
741                         if (err)
742                                 return err;
743
744                         tun_flags |= TUNNEL_GENEVE_OPT;
745                         opts_type = type;
746                         break;
747                 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
748                         if (opts_type) {
749                                 OVS_NLERR(log, "Multiple metadata blocks provided");
750                                 return -EINVAL;
751                         }
752
753                         err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
754                         if (err)
755                                 return err;
756
757                         tun_flags |= TUNNEL_VXLAN_OPT;
758                         opts_type = type;
759                         break;
760                 case OVS_TUNNEL_KEY_ATTR_PAD:
761                         break;
762                 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
763                         if (opts_type) {
764                                 OVS_NLERR(log, "Multiple metadata blocks provided");
765                                 return -EINVAL;
766                         }
767
768                         err = erspan_tun_opt_from_nlattr(a, match, is_mask,
769                                                          log);
770                         if (err)
771                                 return err;
772
773                         tun_flags |= TUNNEL_ERSPAN_OPT;
774                         opts_type = type;
775                         break;
776                 case OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE:
777                         info_bridge_mode = true;
778                         ipv4 = true;
779                         break;
780                 default:
781                         OVS_NLERR(log, "Unknown IP tunnel attribute %d",
782                                   type);
783                         return -EINVAL;
784                 }
785         }
786
787         SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
788         if (is_mask)
789                 SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
790         else
791                 SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
792                                 false);
793
794         if (rem > 0) {
795                 OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
796                           rem);
797                 return -EINVAL;
798         }
799
800         if (ipv4 && ipv6) {
801                 OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
802                 return -EINVAL;
803         }
804
805         if (!is_mask) {
806                 if (!ipv4 && !ipv6) {
807                         OVS_NLERR(log, "IP tunnel dst address not specified");
808                         return -EINVAL;
809                 }
810                 if (ipv4) {
811                         if (info_bridge_mode) {
812                                 if (match->key->tun_key.u.ipv4.src ||
813                                     match->key->tun_key.u.ipv4.dst ||
814                                     match->key->tun_key.tp_src ||
815                                     match->key->tun_key.tp_dst ||
816                                     match->key->tun_key.ttl ||
817                                     match->key->tun_key.tos ||
818                                     tun_flags & ~TUNNEL_KEY) {
819                                         OVS_NLERR(log, "IPv4 tun info is not correct");
820                                         return -EINVAL;
821                                 }
822                         } else if (!match->key->tun_key.u.ipv4.dst) {
823                                 OVS_NLERR(log, "IPv4 tunnel dst address is zero");
824                                 return -EINVAL;
825                         }
826                 }
827                 if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
828                         OVS_NLERR(log, "IPv6 tunnel dst address is zero");
829                         return -EINVAL;
830                 }
831
832                 if (!ttl && !info_bridge_mode) {
833                         OVS_NLERR(log, "IP tunnel TTL not specified.");
834                         return -EINVAL;
835                 }
836         }
837
838         return opts_type;
839 }
840
841 static int vxlan_opt_to_nlattr(struct sk_buff *skb,
842                                const void *tun_opts, int swkey_tun_opts_len)
843 {
844         const struct vxlan_metadata *opts = tun_opts;
845         struct nlattr *nla;
846
847         nla = nla_nest_start_noflag(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
848         if (!nla)
849                 return -EMSGSIZE;
850
851         if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
852                 return -EMSGSIZE;
853
854         nla_nest_end(skb, nla);
855         return 0;
856 }
857
858 static int __ip_tun_to_nlattr(struct sk_buff *skb,
859                               const struct ip_tunnel_key *output,
860                               const void *tun_opts, int swkey_tun_opts_len,
861                               unsigned short tun_proto, u8 mode)
862 {
863         if (output->tun_flags & TUNNEL_KEY &&
864             nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
865                          OVS_TUNNEL_KEY_ATTR_PAD))
866                 return -EMSGSIZE;
867
868         if (mode & IP_TUNNEL_INFO_BRIDGE)
869                 return nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE)
870                        ? -EMSGSIZE : 0;
871
872         switch (tun_proto) {
873         case AF_INET:
874                 if (output->u.ipv4.src &&
875                     nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
876                                     output->u.ipv4.src))
877                         return -EMSGSIZE;
878                 if (output->u.ipv4.dst &&
879                     nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
880                                     output->u.ipv4.dst))
881                         return -EMSGSIZE;
882                 break;
883         case AF_INET6:
884                 if (!ipv6_addr_any(&output->u.ipv6.src) &&
885                     nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
886                                      &output->u.ipv6.src))
887                         return -EMSGSIZE;
888                 if (!ipv6_addr_any(&output->u.ipv6.dst) &&
889                     nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
890                                      &output->u.ipv6.dst))
891                         return -EMSGSIZE;
892                 break;
893         }
894         if (output->tos &&
895             nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
896                 return -EMSGSIZE;
897         if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
898                 return -EMSGSIZE;
899         if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
900             nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
901                 return -EMSGSIZE;
902         if ((output->tun_flags & TUNNEL_CSUM) &&
903             nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
904                 return -EMSGSIZE;
905         if (output->tp_src &&
906             nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
907                 return -EMSGSIZE;
908         if (output->tp_dst &&
909             nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
910                 return -EMSGSIZE;
911         if ((output->tun_flags & TUNNEL_OAM) &&
912             nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
913                 return -EMSGSIZE;
914         if (swkey_tun_opts_len) {
915                 if (output->tun_flags & TUNNEL_GENEVE_OPT &&
916                     nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
917                             swkey_tun_opts_len, tun_opts))
918                         return -EMSGSIZE;
919                 else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
920                          vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
921                         return -EMSGSIZE;
922                 else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
923                          nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
924                                  swkey_tun_opts_len, tun_opts))
925                         return -EMSGSIZE;
926         }
927
928         return 0;
929 }
930
931 static int ip_tun_to_nlattr(struct sk_buff *skb,
932                             const struct ip_tunnel_key *output,
933                             const void *tun_opts, int swkey_tun_opts_len,
934                             unsigned short tun_proto, u8 mode)
935 {
936         struct nlattr *nla;
937         int err;
938
939         nla = nla_nest_start_noflag(skb, OVS_KEY_ATTR_TUNNEL);
940         if (!nla)
941                 return -EMSGSIZE;
942
943         err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
944                                  tun_proto, mode);
945         if (err)
946                 return err;
947
948         nla_nest_end(skb, nla);
949         return 0;
950 }
951
952 int ovs_nla_put_tunnel_info(struct sk_buff *skb,
953                             struct ip_tunnel_info *tun_info)
954 {
955         return __ip_tun_to_nlattr(skb, &tun_info->key,
956                                   ip_tunnel_info_opts(tun_info),
957                                   tun_info->options_len,
958                                   ip_tunnel_info_af(tun_info), tun_info->mode);
959 }
960
961 static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
962                                     const struct nlattr *a[],
963                                     bool is_mask, bool inner)
964 {
965         __be16 tci = 0;
966         __be16 tpid = 0;
967
968         if (a[OVS_KEY_ATTR_VLAN])
969                 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
970
971         if (a[OVS_KEY_ATTR_ETHERTYPE])
972                 tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
973
974         if (likely(!inner)) {
975                 SW_FLOW_KEY_PUT(match, eth.vlan.tpid, tpid, is_mask);
976                 SW_FLOW_KEY_PUT(match, eth.vlan.tci, tci, is_mask);
977         } else {
978                 SW_FLOW_KEY_PUT(match, eth.cvlan.tpid, tpid, is_mask);
979                 SW_FLOW_KEY_PUT(match, eth.cvlan.tci, tci, is_mask);
980         }
981         return 0;
982 }
983
984 static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
985                                       u64 key_attrs, bool inner,
986                                       const struct nlattr **a, bool log)
987 {
988         __be16 tci = 0;
989
990         if (!((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
991               (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
992                eth_type_vlan(nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE])))) {
993                 /* Not a VLAN. */
994                 return 0;
995         }
996
997         if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
998               (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
999                 OVS_NLERR(log, "Invalid %s frame", (inner) ? "C-VLAN" : "VLAN");
1000                 return -EINVAL;
1001         }
1002
1003         if (a[OVS_KEY_ATTR_VLAN])
1004                 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
1005
1006         if (!(tci & htons(VLAN_CFI_MASK))) {
1007                 if (tci) {
1008                         OVS_NLERR(log, "%s TCI does not have VLAN_CFI_MASK bit set.",
1009                                   (inner) ? "C-VLAN" : "VLAN");
1010                         return -EINVAL;
1011                 } else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
1012                         /* Corner case for truncated VLAN header. */
1013                         OVS_NLERR(log, "Truncated %s header has non-zero encap attribute.",
1014                                   (inner) ? "C-VLAN" : "VLAN");
1015                         return -EINVAL;
1016                 }
1017         }
1018
1019         return 1;
1020 }
1021
1022 static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
1023                                            u64 key_attrs, bool inner,
1024                                            const struct nlattr **a, bool log)
1025 {
1026         __be16 tci = 0;
1027         __be16 tpid = 0;
1028         bool encap_valid = !!(match->key->eth.vlan.tci &
1029                               htons(VLAN_CFI_MASK));
1030         bool i_encap_valid = !!(match->key->eth.cvlan.tci &
1031                                 htons(VLAN_CFI_MASK));
1032
1033         if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
1034                 /* Not a VLAN. */
1035                 return 0;
1036         }
1037
1038         if ((!inner && !encap_valid) || (inner && !i_encap_valid)) {
1039                 OVS_NLERR(log, "Encap mask attribute is set for non-%s frame.",
1040                           (inner) ? "C-VLAN" : "VLAN");
1041                 return -EINVAL;
1042         }
1043
1044         if (a[OVS_KEY_ATTR_VLAN])
1045                 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
1046
1047         if (a[OVS_KEY_ATTR_ETHERTYPE])
1048                 tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
1049
1050         if (tpid != htons(0xffff)) {
1051                 OVS_NLERR(log, "Must have an exact match on %s TPID (mask=%x).",
1052                           (inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
1053                 return -EINVAL;
1054         }
1055         if (!(tci & htons(VLAN_CFI_MASK))) {
1056                 OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_CFI_MASK bit.",
1057                           (inner) ? "C-VLAN" : "VLAN");
1058                 return -EINVAL;
1059         }
1060
1061         return 1;
1062 }
1063
1064 static int __parse_vlan_from_nlattrs(struct sw_flow_match *match,
1065                                      u64 *key_attrs, bool inner,
1066                                      const struct nlattr **a, bool is_mask,
1067                                      bool log)
1068 {
1069         int err;
1070         const struct nlattr *encap;
1071
1072         if (!is_mask)
1073                 err = validate_vlan_from_nlattrs(match, *key_attrs, inner,
1074                                                  a, log);
1075         else
1076                 err = validate_vlan_mask_from_nlattrs(match, *key_attrs, inner,
1077                                                       a, log);
1078         if (err <= 0)
1079                 return err;
1080
1081         err = encode_vlan_from_nlattrs(match, a, is_mask, inner);
1082         if (err)
1083                 return err;
1084
1085         *key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
1086         *key_attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
1087         *key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
1088
1089         encap = a[OVS_KEY_ATTR_ENCAP];
1090
1091         if (!is_mask)
1092                 err = parse_flow_nlattrs(encap, a, key_attrs, log);
1093         else
1094                 err = parse_flow_mask_nlattrs(encap, a, key_attrs, log);
1095
1096         return err;
1097 }
1098
1099 static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
1100                                    u64 *key_attrs, const struct nlattr **a,
1101                                    bool is_mask, bool log)
1102 {
1103         int err;
1104         bool encap_valid = false;
1105
1106         err = __parse_vlan_from_nlattrs(match, key_attrs, false, a,
1107                                         is_mask, log);
1108         if (err)
1109                 return err;
1110
1111         encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_CFI_MASK));
1112         if (encap_valid) {
1113                 err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
1114                                                 is_mask, log);
1115                 if (err)
1116                         return err;
1117         }
1118
1119         return 0;
1120 }
1121
1122 static int parse_eth_type_from_nlattrs(struct sw_flow_match *match,
1123                                        u64 *attrs, const struct nlattr **a,
1124                                        bool is_mask, bool log)
1125 {
1126         __be16 eth_type;
1127
1128         eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
1129         if (is_mask) {
1130                 /* Always exact match EtherType. */
1131                 eth_type = htons(0xffff);
1132         } else if (!eth_proto_is_802_3(eth_type)) {
1133                 OVS_NLERR(log, "EtherType %x is less than min %x",
1134                                 ntohs(eth_type), ETH_P_802_3_MIN);
1135                 return -EINVAL;
1136         }
1137
1138         SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
1139         *attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
1140         return 0;
1141 }
1142
1143 static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
1144                                  u64 *attrs, const struct nlattr **a,
1145                                  bool is_mask, bool log)
1146 {
1147         u8 mac_proto = MAC_PROTO_ETHERNET;
1148
1149         if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
1150                 u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
1151
1152                 SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
1153                 *attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH);
1154         }
1155
1156         if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) {
1157                 u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);
1158
1159                 SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
1160                 *attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID);
1161         }
1162
1163         if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
1164                 SW_FLOW_KEY_PUT(match, phy.priority,
1165                           nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
1166                 *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
1167         }
1168
1169         if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
1170                 u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
1171
1172                 if (is_mask) {
1173                         in_port = 0xffffffff; /* Always exact match in_port. */
1174                 } else if (in_port >= DP_MAX_PORTS) {
1175                         OVS_NLERR(log, "Port %d exceeds max allowable %d",
1176                                   in_port, DP_MAX_PORTS);
1177                         return -EINVAL;
1178                 }
1179
1180                 SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
1181                 *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
1182         } else if (!is_mask) {
1183                 SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
1184         }
1185
1186         if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
1187                 uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
1188
1189                 SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
1190                 *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
1191         }
1192         if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
1193                 if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
1194                                        is_mask, log) < 0)
1195                         return -EINVAL;
1196                 *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
1197         }
1198
1199         if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
1200             ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
1201                 u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
1202
1203                 if (ct_state & ~CT_SUPPORTED_MASK) {
1204                         OVS_NLERR(log, "ct_state flags %08x unsupported",
1205                                   ct_state);
1206                         return -EINVAL;
1207                 }
1208
1209                 SW_FLOW_KEY_PUT(match, ct_state, ct_state, is_mask);
1210                 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
1211         }
1212         if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
1213             ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
1214                 u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);
1215
1216                 SW_FLOW_KEY_PUT(match, ct_zone, ct_zone, is_mask);
1217                 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
1218         }
1219         if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
1220             ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
1221                 u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);
1222
1223                 SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
1224                 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
1225         }
1226         if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
1227             ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
1228                 const struct ovs_key_ct_labels *cl;
1229
1230                 cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
1231                 SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
1232                                    sizeof(*cl), is_mask);
1233                 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
1234         }
1235         if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
1236                 const struct ovs_key_ct_tuple_ipv4 *ct;
1237
1238                 ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);
1239
1240                 SW_FLOW_KEY_PUT(match, ipv4.ct_orig.src, ct->ipv4_src, is_mask);
1241                 SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask);
1242                 SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
1243                 SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
1244                 SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv4_proto, is_mask);
1245                 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
1246         }
1247         if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
1248                 const struct ovs_key_ct_tuple_ipv6 *ct;
1249
1250                 ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);
1251
1252                 SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.src, &ct->ipv6_src,
1253                                    sizeof(match->key->ipv6.ct_orig.src),
1254                                    is_mask);
1255                 SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.dst, &ct->ipv6_dst,
1256                                    sizeof(match->key->ipv6.ct_orig.dst),
1257                                    is_mask);
1258                 SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
1259                 SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
1260                 SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv6_proto, is_mask);
1261                 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
1262         }
1263
1264         /* For layer 3 packets, the Ethernet type is provided and treated
1265          * as metadata, but no MAC addresses are provided.
1266          */
1267         if (!(*attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) &&
1268             (*attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)))
1269                 mac_proto = MAC_PROTO_NONE;
1270
1271         /* Always exact match mac_proto */
1272         SW_FLOW_KEY_PUT(match, mac_proto, is_mask ? 0xff : mac_proto, is_mask);
1273
1274         if (mac_proto == MAC_PROTO_NONE)
1275                 return parse_eth_type_from_nlattrs(match, attrs, a, is_mask,
1276                                                    log);
1277
1278         return 0;
1279 }
1280
1281 int nsh_hdr_from_nlattr(const struct nlattr *attr,
1282                         struct nshhdr *nh, size_t size)
1283 {
1284         struct nlattr *a;
1285         int rem;
1286         u8 flags = 0;
1287         u8 ttl = 0;
1288         int mdlen = 0;
1289
1290         /* validate_nsh has already checked this; don't duplicate the check.
1291          */
1292         if (size < NSH_BASE_HDR_LEN)
1293                 return -ENOBUFS;
1294
1295         nla_for_each_nested(a, attr, rem) {
1296                 int type = nla_type(a);
1297
1298                 switch (type) {
1299                 case OVS_NSH_KEY_ATTR_BASE: {
1300                         const struct ovs_nsh_key_base *base = nla_data(a);
1301
1302                         flags = base->flags;
1303                         ttl = base->ttl;
1304                         nh->np = base->np;
1305                         nh->mdtype = base->mdtype;
1306                         nh->path_hdr = base->path_hdr;
1307                         break;
1308                 }
1309                 case OVS_NSH_KEY_ATTR_MD1:
1310                         mdlen = nla_len(a);
1311                         if (mdlen > size - NSH_BASE_HDR_LEN)
1312                                 return -ENOBUFS;
1313                         memcpy(&nh->md1, nla_data(a), mdlen);
1314                         break;
1315
1316                 case OVS_NSH_KEY_ATTR_MD2:
1317                         mdlen = nla_len(a);
1318                         if (mdlen > size - NSH_BASE_HDR_LEN)
1319                                 return -ENOBUFS;
1320                         memcpy(&nh->md2, nla_data(a), mdlen);
1321                         break;
1322
1323                 default:
1324                         return -EINVAL;
1325                 }
1326         }
1327
1328         /* NSH header length = NSH_BASE_HDR_LEN + mdlen */
1329         nh->ver_flags_ttl_len = 0;
1330         nsh_set_flags_ttl_len(nh, flags, ttl, NSH_BASE_HDR_LEN + mdlen);
1331
1332         return 0;
1333 }
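
/*
 * Illustrative sketch, not part of the original file: for the common MD
 * type 1 case the length written by nsh_set_flags_ttl_len() above works
 * out as the base header plus the metadata.  The concrete numbers below
 * are assumptions (the usual 8-byte NSH base header and a 16-byte MD1
 * context block), not a normative statement:
 *
 *         mdlen = nla_len(OVS_NSH_KEY_ATTR_MD1 attribute);   // e.g. 16
 *         len   = NSH_BASE_HDR_LEN + mdlen;                  // e.g. 8 + 16 = 24
 *         nsh_set_flags_ttl_len(nh, flags, ttl, len);
 */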
1334
1335 int nsh_key_from_nlattr(const struct nlattr *attr,
1336                         struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask)
1337 {
1338         struct nlattr *a;
1339         int rem;
1340
1341         /* validate_nsh has already checked this; don't duplicate the check.
1342          */
1343         nla_for_each_nested(a, attr, rem) {
1344                 int type = nla_type(a);
1345
1346                 switch (type) {
1347                 case OVS_NSH_KEY_ATTR_BASE: {
1348                         const struct ovs_nsh_key_base *base = nla_data(a);
1349                         const struct ovs_nsh_key_base *base_mask = base + 1;
1350
1351                         nsh->base = *base;
1352                         nsh_mask->base = *base_mask;
1353                         break;
1354                 }
1355                 case OVS_NSH_KEY_ATTR_MD1: {
1356                         const struct ovs_nsh_key_md1 *md1 = nla_data(a);
1357                         const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
1358
1359                         memcpy(nsh->context, md1->context, sizeof(*md1));
1360                         memcpy(nsh_mask->context, md1_mask->context,
1361                                sizeof(*md1_mask));
1362                         break;
1363                 }
1364                 case OVS_NSH_KEY_ATTR_MD2:
1365                         /* Not supported yet */
1366                         return -ENOTSUPP;
1367                 default:
1368                         return -EINVAL;
1369                 }
1370         }
1371
1372         return 0;
1373 }
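
/*
 * Illustrative sketch, not part of the original file: the "base + 1" and
 * "md1 + 1" reads above rely on masked set-field actions carrying the key
 * immediately followed by its mask inside one nested attribute, roughly:
 *
 *         payload of OVS_NSH_KEY_ATTR_BASE:
 *                 struct ovs_nsh_key_base key;    (at offset 0)
 *                 struct ovs_nsh_key_base mask;   (at offset sizeof(key))
 *
 * and likewise for OVS_NSH_KEY_ATTR_MD1.
 */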
1374
1375 static int nsh_key_put_from_nlattr(const struct nlattr *attr,
1376                                    struct sw_flow_match *match, bool is_mask,
1377                                    bool is_push_nsh, bool log)
1378 {
1379         struct nlattr *a;
1380         int rem;
1381         bool has_base = false;
1382         bool has_md1 = false;
1383         bool has_md2 = false;
1384         u8 mdtype = 0;
1385         int mdlen = 0;
1386
1387         if (WARN_ON(is_push_nsh && is_mask))
1388                 return -EINVAL;
1389
1390         nla_for_each_nested(a, attr, rem) {
1391                 int type = nla_type(a);
1392                 int i;
1393
1394                 if (type > OVS_NSH_KEY_ATTR_MAX) {
1395                         OVS_NLERR(log, "nsh attr %d is out of range max %d",
1396                                   type, OVS_NSH_KEY_ATTR_MAX);
1397                         return -EINVAL;
1398                 }
1399
1400                 if (!check_attr_len(nla_len(a),
1401                                     ovs_nsh_key_attr_lens[type].len)) {
1402                         OVS_NLERR(
1403                             log,
1404                             "nsh attr %d has unexpected len %d expected %d",
1405                             type,
1406                             nla_len(a),
1407                             ovs_nsh_key_attr_lens[type].len
1408                         );
1409                         return -EINVAL;
1410                 }
1411
1412                 switch (type) {
1413                 case OVS_NSH_KEY_ATTR_BASE: {
1414                         const struct ovs_nsh_key_base *base = nla_data(a);
1415
1416                         has_base = true;
1417                         mdtype = base->mdtype;
1418                         SW_FLOW_KEY_PUT(match, nsh.base.flags,
1419                                         base->flags, is_mask);
1420                         SW_FLOW_KEY_PUT(match, nsh.base.ttl,
1421                                         base->ttl, is_mask);
1422                         SW_FLOW_KEY_PUT(match, nsh.base.mdtype,
1423                                         base->mdtype, is_mask);
1424                         SW_FLOW_KEY_PUT(match, nsh.base.np,
1425                                         base->np, is_mask);
1426                         SW_FLOW_KEY_PUT(match, nsh.base.path_hdr,
1427                                         base->path_hdr, is_mask);
1428                         break;
1429                 }
1430                 case OVS_NSH_KEY_ATTR_MD1: {
1431                         const struct ovs_nsh_key_md1 *md1 = nla_data(a);
1432
1433                         has_md1 = true;
1434                         for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++)
1435                                 SW_FLOW_KEY_PUT(match, nsh.context[i],
1436                                                 md1->context[i], is_mask);
1437                         break;
1438                 }
1439                 case OVS_NSH_KEY_ATTR_MD2:
1440                         if (!is_push_nsh) /* MD type 2 is not supported yet */
1441                                 return -ENOTSUPP;
1442
1443                         has_md2 = true;
1444                         mdlen = nla_len(a);
1445                         if (mdlen > NSH_CTX_HDRS_MAX_LEN || mdlen <= 0) {
1446                                 OVS_NLERR(
1447                                     log,
1448                                     "Invalid MD length %d for MD type %d",
1449                                     mdlen,
1450                                     mdtype
1451                                 );
1452                                 return -EINVAL;
1453                         }
1454                         break;
1455                 default:
1456                         OVS_NLERR(log, "Unknown nsh attribute %d",
1457                                   type);
1458                         return -EINVAL;
1459                 }
1460         }
1461
1462         if (rem > 0) {
1463                 OVS_NLERR(log, "nsh attribute has %d unknown bytes.", rem);
1464                 return -EINVAL;
1465         }
1466
1467         if (has_md1 && has_md2) {
1468                 OVS_NLERR(
1469                     1,
1470                     "invalid nsh attribute: md1 and md2 are mutually exclusive."
1471                 );
1472                 return -EINVAL;
1473         }
1474
1475         if (!is_mask) {
1476                 if ((has_md1 && mdtype != NSH_M_TYPE1) ||
1477                     (has_md2 && mdtype != NSH_M_TYPE2)) {
1478                         OVS_NLERR(1, "nsh attribute has unmatched MD type %d.",
1479                                   mdtype);
1480                         return -EINVAL;
1481                 }
1482
1483                 if (is_push_nsh &&
1484                     (!has_base || (!has_md1 && !has_md2))) {
1485                         OVS_NLERR(
1486                             1,
1487                             "push_nsh: missing base or metadata attributes"
1488                         );
1489                         return -EINVAL;
1490                 }
1491         }
1492
1493         return 0;
1494 }
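
/*
 * Illustrative sketch, not part of the original file: given the checks
 * above, a minimal valid OVS_ACTION_ATTR_PUSH_NSH nests
 *
 *         OVS_NSH_KEY_ATTR_BASE   (flags, ttl, mdtype, np, path_hdr)
 *         OVS_NSH_KEY_ATTR_MD1    (four 32-bit context words)
 *
 * with base->mdtype == NSH_M_TYPE1, or an OVS_NSH_KEY_ATTR_MD2 blob with
 * mdtype == NSH_M_TYPE2 in place of the MD1 attribute; supplying both MD
 * attributes, or neither, is rejected.
 */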
1495
1496 static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
1497                                 u64 attrs, const struct nlattr **a,
1498                                 bool is_mask, bool log)
1499 {
1500         int err;
1501
1502         err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
1503         if (err)
1504                 return err;
1505
1506         if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
1507                 const struct ovs_key_ethernet *eth_key;
1508
1509                 eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
1510                 SW_FLOW_KEY_MEMCPY(match, eth.src,
1511                                 eth_key->eth_src, ETH_ALEN, is_mask);
1512                 SW_FLOW_KEY_MEMCPY(match, eth.dst,
1513                                 eth_key->eth_dst, ETH_ALEN, is_mask);
1514                 attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
1515
1516                 if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
1517                         /* VLAN attribute is always parsed before getting here since it
1518                          * may occur multiple times.
1519                          */
1520                         OVS_NLERR(log, "VLAN attribute unexpected.");
1521                         return -EINVAL;
1522                 }
1523
1524                 if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
1525                         err = parse_eth_type_from_nlattrs(match, &attrs, a, is_mask,
1526                                                           log);
1527                         if (err)
1528                                 return err;
1529                 } else if (!is_mask) {
1530                         SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
1531                 }
1532         } else if (!match->key->eth.type) {
1533                 OVS_NLERR(log, "Either Ethernet header or EtherType is required.");
1534                 return -EINVAL;
1535         }
1536
1537         if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
1538                 const struct ovs_key_ipv4 *ipv4_key;
1539
1540                 ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
1541                 if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
1542                         OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
1543                                   ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
1544                         return -EINVAL;
1545                 }
1546                 SW_FLOW_KEY_PUT(match, ip.proto,
1547                                 ipv4_key->ipv4_proto, is_mask);
1548                 SW_FLOW_KEY_PUT(match, ip.tos,
1549                                 ipv4_key->ipv4_tos, is_mask);
1550                 SW_FLOW_KEY_PUT(match, ip.ttl,
1551                                 ipv4_key->ipv4_ttl, is_mask);
1552                 SW_FLOW_KEY_PUT(match, ip.frag,
1553                                 ipv4_key->ipv4_frag, is_mask);
1554                 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
1555                                 ipv4_key->ipv4_src, is_mask);
1556                 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
1557                                 ipv4_key->ipv4_dst, is_mask);
1558                 attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
1559         }
1560
1561         if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
1562                 const struct ovs_key_ipv6 *ipv6_key;
1563
1564                 ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
1565                 if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
1566                         OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
1567                                   ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
1568                         return -EINVAL;
1569                 }
1570
1571                 if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
1572                         OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x)",
1573                                   ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
1574                         return -EINVAL;
1575                 }
1576
1577                 SW_FLOW_KEY_PUT(match, ipv6.label,
1578                                 ipv6_key->ipv6_label, is_mask);
1579                 SW_FLOW_KEY_PUT(match, ip.proto,
1580                                 ipv6_key->ipv6_proto, is_mask);
1581                 SW_FLOW_KEY_PUT(match, ip.tos,
1582                                 ipv6_key->ipv6_tclass, is_mask);
1583                 SW_FLOW_KEY_PUT(match, ip.ttl,
1584                                 ipv6_key->ipv6_hlimit, is_mask);
1585                 SW_FLOW_KEY_PUT(match, ip.frag,
1586                                 ipv6_key->ipv6_frag, is_mask);
1587                 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
1588                                 ipv6_key->ipv6_src,
1589                                 sizeof(match->key->ipv6.addr.src),
1590                                 is_mask);
1591                 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
1592                                 ipv6_key->ipv6_dst,
1593                                 sizeof(match->key->ipv6.addr.dst),
1594                                 is_mask);
1595
1596                 attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
1597         }
1598
1599         if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
1600                 const struct ovs_key_arp *arp_key;
1601
1602                 arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
1603                 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
1604                         OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
1605                                   arp_key->arp_op);
1606                         return -EINVAL;
1607                 }
1608
1609                 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
1610                                 arp_key->arp_sip, is_mask);
1611                 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
1612                         arp_key->arp_tip, is_mask);
1613                 SW_FLOW_KEY_PUT(match, ip.proto,
1614                                 ntohs(arp_key->arp_op), is_mask);
1615                 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
1616                                 arp_key->arp_sha, ETH_ALEN, is_mask);
1617                 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
1618                                 arp_key->arp_tha, ETH_ALEN, is_mask);
1619
1620                 attrs &= ~(1 << OVS_KEY_ATTR_ARP);
1621         }
1622
1623         if (attrs & (1 << OVS_KEY_ATTR_NSH)) {
1624                 if (nsh_key_put_from_nlattr(a[OVS_KEY_ATTR_NSH], match,
1625                                             is_mask, false, log) < 0)
1626                         return -EINVAL;
1627                 attrs &= ~(1 << OVS_KEY_ATTR_NSH);
1628         }
1629
1630         if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
1631                 const struct ovs_key_mpls *mpls_key;
1632                 u32 hdr_len;
1633                 u32 label_count, label_count_mask, i;
1634
1635                 mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
1636                 hdr_len = nla_len(a[OVS_KEY_ATTR_MPLS]);
1637                 label_count = hdr_len / sizeof(struct ovs_key_mpls);
1638
1639                 if (label_count == 0 || label_count > MPLS_LABEL_DEPTH ||
1640                     hdr_len % sizeof(struct ovs_key_mpls))
1641                         return -EINVAL;
1642
1643                 label_count_mask = GENMASK(label_count - 1, 0);
1644
1645                 for (i = 0; i < label_count; i++)
1646                         SW_FLOW_KEY_PUT(match, mpls.lse[i],
1647                                         mpls_key[i].mpls_lse, is_mask);
1648
1649                 SW_FLOW_KEY_PUT(match, mpls.num_labels_mask,
1650                                 label_count_mask, is_mask);
1651
1652                 attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
1653         }
1654
1655         if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
1656                 const struct ovs_key_tcp *tcp_key;
1657
1658                 tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
1659                 SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
1660                 SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
1661                 attrs &= ~(1 << OVS_KEY_ATTR_TCP);
1662         }
1663
1664         if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
1665                 SW_FLOW_KEY_PUT(match, tp.flags,
1666                                 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
1667                                 is_mask);
1668                 attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
1669         }
1670
1671         if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
1672                 const struct ovs_key_udp *udp_key;
1673
1674                 udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
1675                 SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
1676                 SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
1677                 attrs &= ~(1 << OVS_KEY_ATTR_UDP);
1678         }
1679
1680         if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
1681                 const struct ovs_key_sctp *sctp_key;
1682
1683                 sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
1684                 SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
1685                 SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
1686                 attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
1687         }
1688
1689         if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
1690                 const struct ovs_key_icmp *icmp_key;
1691
1692                 icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
1693                 SW_FLOW_KEY_PUT(match, tp.src,
1694                                 htons(icmp_key->icmp_type), is_mask);
1695                 SW_FLOW_KEY_PUT(match, tp.dst,
1696                                 htons(icmp_key->icmp_code), is_mask);
1697                 attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
1698         }
1699
1700         if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
1701                 const struct ovs_key_icmpv6 *icmpv6_key;
1702
1703                 icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
1704                 SW_FLOW_KEY_PUT(match, tp.src,
1705                                 htons(icmpv6_key->icmpv6_type), is_mask);
1706                 SW_FLOW_KEY_PUT(match, tp.dst,
1707                                 htons(icmpv6_key->icmpv6_code), is_mask);
1708                 attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
1709         }
1710
1711         if (attrs & (1 << OVS_KEY_ATTR_ND)) {
1712                 const struct ovs_key_nd *nd_key;
1713
1714                 nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
1715                 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
1716                         nd_key->nd_target,
1717                         sizeof(match->key->ipv6.nd.target),
1718                         is_mask);
1719                 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
1720                         nd_key->nd_sll, ETH_ALEN, is_mask);
1721                 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
1722                                 nd_key->nd_tll, ETH_ALEN, is_mask);
1723                 attrs &= ~(1 << OVS_KEY_ATTR_ND);
1724         }
1725
1726         if (attrs != 0) {
1727                 OVS_NLERR(log, "Unknown key attributes %llx",
1728                           (unsigned long long)attrs);
1729                 return -EINVAL;
1730         }
1731
1732         return 0;
1733 }
1734
1735 static void nlattr_set(struct nlattr *attr, u8 val,
1736                        const struct ovs_len_tbl *tbl)
1737 {
1738         struct nlattr *nla;
1739         int rem;
1740
1741         /* The nlattr stream should already have been validated */
1742         nla_for_each_nested(nla, attr, rem) {
1743                 if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
1744                         nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
1745                 else
1746                         memset(nla_data(nla), val, nla_len(nla));
1747
1748                 if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
1749                         *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
1750         }
1751 }
1752
1753 static void mask_set_nlattr(struct nlattr *attr, u8 val)
1754 {
1755         nlattr_set(attr, val, ovs_key_lens);
1756 }
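
/*
 * Illustrative sketch, not part of the original file: conceptually this
 * pair of helpers turns a duplicated key attribute stream into an
 * exact-match mask by overwriting every leaf payload with 0xff, the way
 * ovs_nla_get_match() below uses it when no mask attribute was supplied:
 *
 *         struct nlattr *copy;
 *
 *         copy = kmemdup(nla_key, nla_total_size(nla_len(nla_key)), GFP_KERNEL);
 *         if (copy)
 *                 mask_set_nlattr(copy, 0xff);
 *
 * Nested attributes are recursed into via ovs_key_lens, and the conntrack
 * state word is additionally clamped to CT_SUPPORTED_MASK.
 */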
1757
1758 /**
1759  * ovs_nla_get_match - parses Netlink attributes into a flow key and
1760  * mask. If the mask is omitted, the flow is treated as an exact-match
1761  * flow. Otherwise it is treated as a wildcarded flow, unless the mask
1762  * contains no don't-care bits, in which case the match is still exact.
1763  * @net: Used to determine per-namespace field support.
1764  * @match: receives the extracted flow match information.
1765  * @nla_key: Netlink attribute holding a nested %OVS_KEY_ATTR_* attribute
1766  * sequence. The fields should be those of the packet that triggered the
1767  * creation of this flow.
1768  * @nla_mask: Optional. Netlink attribute holding a nested %OVS_KEY_ATTR_*
1769  * attribute sequence that specifies the mask of the wildcarded flow.
1770  * @log: Boolean to allow kernel error logging.  Normally true, but when
1771  * probing for feature compatibility this should be passed in as false to
1772  * suppress unnecessary error logging.
1773  */
1774 int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
1775                       const struct nlattr *nla_key,
1776                       const struct nlattr *nla_mask,
1777                       bool log)
1778 {
1779         const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
1780         struct nlattr *newmask = NULL;
1781         u64 key_attrs = 0;
1782         u64 mask_attrs = 0;
1783         int err;
1784
1785         err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
1786         if (err)
1787                 return err;
1788
1789         err = parse_vlan_from_nlattrs(match, &key_attrs, a, false, log);
1790         if (err)
1791                 return err;
1792
1793         err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
1794         if (err)
1795                 return err;
1796
1797         if (match->mask) {
1798                 if (!nla_mask) {
1799                         /* Create an exact match mask. We need to set to 0xff
1800                          * all the 'match->mask' fields that have been touched
1801                          * in 'match->key'. We cannot simply memset
1802                          * 'match->mask', because padding bytes and fields not
1803                          * specified in 'match->key' should be left to 0.
1804                          * Instead, we use a stream of netlink attributes,
1805                          * copied from 'key' and set to 0xff.
1806                          * ovs_key_from_nlattrs() will take care of filling
1807                          * 'match->mask' appropriately.
1808                          */
1809                         newmask = kmemdup(nla_key,
1810                                           nla_total_size(nla_len(nla_key)),
1811                                           GFP_KERNEL);
1812                         if (!newmask)
1813                                 return -ENOMEM;
1814
1815                         mask_set_nlattr(newmask, 0xff);
1816
1817                         /* Userspace does not send tunnel attributes that are
1818                          * 0, but we still should not wildcard them.
1819                          */
1820                         if (match->key->tun_proto)
1821                                 SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
1822                                                          0xff, true);
1823
1824                         nla_mask = newmask;
1825                 }
1826
1827                 err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
1828                 if (err)
1829                         goto free_newmask;
1830
1831                 /* Always match on tci. */
1832                 SW_FLOW_KEY_PUT(match, eth.vlan.tci, htons(0xffff), true);
1833                 SW_FLOW_KEY_PUT(match, eth.cvlan.tci, htons(0xffff), true);
1834
1835                 err = parse_vlan_from_nlattrs(match, &mask_attrs, a, true, log);
1836                 if (err)
1837                         goto free_newmask;
1838
1839                 err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
1840                                            log);
1841                 if (err)
1842                         goto free_newmask;
1843         }
1844
1845         if (!match_validate(match, key_attrs, mask_attrs, log))
1846                 err = -EINVAL;
1847
1848 free_newmask:
1849         kfree(newmask);
1850         return err;
1851 }
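
/*
 * Illustrative sketch, not part of the original file: callers (e.g. the
 * flow command handlers in datapath.c) are expected to pair this with
 * ovs_match_init(), roughly as follows; 'a', 'net' and 'log' come from
 * the Netlink request context, and error handling is trimmed:
 *
 *         struct sw_flow_match match;
 *         struct sw_flow_key key;
 *         struct sw_flow_mask mask;
 *
 *         ovs_match_init(&match, &key, false, &mask);
 *         err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
 *                                 a[OVS_FLOW_ATTR_MASK], log);
 */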
1852
1853 static size_t get_ufid_len(const struct nlattr *attr, bool log)
1854 {
1855         size_t len;
1856
1857         if (!attr)
1858                 return 0;
1859
1860         len = nla_len(attr);
1861         if (len < 1 || len > MAX_UFID_LENGTH) {
1862                 OVS_NLERR(log, "ufid size %u bytes is outside the range (1, %d)",
1863                           nla_len(attr), MAX_UFID_LENGTH);
1864                 return 0;
1865         }
1866
1867         return len;
1868 }
1869
1870 /* Initializes 'sfid', returning true if 'attr' contains a valid UFID,
1871  * or false otherwise.
1872  */
1873 bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
1874                       bool log)
1875 {
1876         sfid->ufid_len = get_ufid_len(attr, log);
1877         if (sfid->ufid_len)
1878                 memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);
1879
1880         return sfid->ufid_len;
1881 }
1882
1883 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
1884                            const struct sw_flow_key *key, bool log)
1885 {
1886         struct sw_flow_key *new_key;
1887
1888         if (ovs_nla_get_ufid(sfid, ufid, log))
1889                 return 0;
1890
1891         /* If UFID was not provided, use unmasked key. */
1892         new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
1893         if (!new_key)
1894                 return -ENOMEM;
1895         memcpy(new_key, key, sizeof(*key));
1896         sfid->unmasked_key = new_key;
1897
1898         return 0;
1899 }
1900
1901 u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
1902 {
1903         return attr ? nla_get_u32(attr) : 0;
1904 }
1905
1906 /**
1907  * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
1908  * @net: Network namespace.
1909  * @key: Receives extracted in_port, priority, tun_key, skb_mark and conntrack
1910  * metadata.
1911  * @a: Array of netlink attributes holding parsed %OVS_KEY_ATTR_* Netlink
1912  * attributes.
1913  * @attrs: Bit mask for the netlink attributes included in @a.
1914  * @log: Boolean to allow kernel error logging.  Normally true, but when
1915  * probing for feature compatibility this should be passed in as false to
1916  * suppress unnecessary error logging.
1917  *
1918  * This parses a series of Netlink attributes that form a flow key, which must
1919  * take the same form accepted by flow_from_nlattrs(), but only enough of it to
1920  * get the metadata, that is, the parts of the flow key that cannot be
1921  * extracted from the packet itself.
1922  *
1923  * This must be called before the packet-derived fields in 'key' are filled in.
1924  */
1925
1926 int ovs_nla_get_flow_metadata(struct net *net,
1927                               const struct nlattr *a[OVS_KEY_ATTR_MAX + 1],
1928                               u64 attrs, struct sw_flow_key *key, bool log)
1929 {
1930         struct sw_flow_match match;
1931
1932         memset(&match, 0, sizeof(match));
1933         match.key = key;
1934
1935         key->ct_state = 0;
1936         key->ct_zone = 0;
1937         key->ct_orig_proto = 0;
1938         memset(&key->ct, 0, sizeof(key->ct));
1939         memset(&key->ipv4.ct_orig, 0, sizeof(key->ipv4.ct_orig));
1940         memset(&key->ipv6.ct_orig, 0, sizeof(key->ipv6.ct_orig));
1941
1942         key->phy.in_port = DP_MAX_PORTS;
1943
1944         return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
1945 }
1946
1947 static int ovs_nla_put_vlan(struct sk_buff *skb, const struct vlan_head *vh,
1948                             bool is_mask)
1949 {
1950         __be16 eth_type = !is_mask ? vh->tpid : htons(0xffff);
1951
1952         if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
1953             nla_put_be16(skb, OVS_KEY_ATTR_VLAN, vh->tci))
1954                 return -EMSGSIZE;
1955         return 0;
1956 }
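
/*
 * Illustrative sketch, not part of the original file: for a single-tagged
 * packet, __ovs_nla_put_key() below serializes the VLAN information
 * roughly as
 *
 *         OVS_KEY_ATTR_ETHERTYPE = outer TPID (e.g. 0x8100)
 *         OVS_KEY_ATTR_VLAN      = TCI of the outer tag
 *         OVS_KEY_ATTR_ENCAP {
 *                 OVS_KEY_ATTR_ETHERTYPE = inner EtherType
 *                 ... L3/L4 attributes ...
 *         }
 *
 * with a second VLAN/ENCAP level nested inside for double-tagged (QinQ)
 * packets.
 */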
1957
1958 static int nsh_key_to_nlattr(const struct ovs_key_nsh *nsh, bool is_mask,
1959                              struct sk_buff *skb)
1960 {
1961         struct nlattr *start;
1962
1963         start = nla_nest_start_noflag(skb, OVS_KEY_ATTR_NSH);
1964         if (!start)
1965                 return -EMSGSIZE;
1966
1967         if (nla_put(skb, OVS_NSH_KEY_ATTR_BASE, sizeof(nsh->base), &nsh->base))
1968                 goto nla_put_failure;
1969
1970         if (is_mask || nsh->base.mdtype == NSH_M_TYPE1) {
1971                 if (nla_put(skb, OVS_NSH_KEY_ATTR_MD1,
1972                             sizeof(nsh->context), nsh->context))
1973                         goto nla_put_failure;
1974         }
1975
1976         /* Don't support MD type 2 yet */
1977
1978         nla_nest_end(skb, start);
1979
1980         return 0;
1981
1982 nla_put_failure:
1983         return -EMSGSIZE;
1984 }
1985
1986 static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
1987                              const struct sw_flow_key *output, bool is_mask,
1988                              struct sk_buff *skb)
1989 {
1990         struct ovs_key_ethernet *eth_key;
1991         struct nlattr *nla;
1992         struct nlattr *encap = NULL;
1993         struct nlattr *in_encap = NULL;
1994
1995         if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
1996                 goto nla_put_failure;
1997
1998         if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
1999                 goto nla_put_failure;
2000
2001         if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
2002                 goto nla_put_failure;
2003
2004         if (swkey->tun_proto || is_mask) {
2005                 const void *opts = NULL;
2006
2007                 if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
2008                         opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
2009
2010                 if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
2011                                      swkey->tun_opts_len, swkey->tun_proto, 0))
2012                         goto nla_put_failure;
2013         }
2014
2015         if (swkey->phy.in_port == DP_MAX_PORTS) {
2016                 if (is_mask && (output->phy.in_port == 0xffff))
2017                         if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
2018                                 goto nla_put_failure;
2019         } else {
2020                 u16 upper_u16;
2021                 upper_u16 = !is_mask ? 0 : 0xffff;
2022
2023                 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
2024                                 (upper_u16 << 16) | output->phy.in_port))
2025                         goto nla_put_failure;
2026         }
2027
2028         if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
2029                 goto nla_put_failure;
2030
2031         if (ovs_ct_put_key(swkey, output, skb))
2032                 goto nla_put_failure;
2033
2034         if (ovs_key_mac_proto(swkey) == MAC_PROTO_ETHERNET) {
2035                 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
2036                 if (!nla)
2037                         goto nla_put_failure;
2038
2039                 eth_key = nla_data(nla);
2040                 ether_addr_copy(eth_key->eth_src, output->eth.src);
2041                 ether_addr_copy(eth_key->eth_dst, output->eth.dst);
2042
2043                 if (swkey->eth.vlan.tci || eth_type_vlan(swkey->eth.type)) {
2044                         if (ovs_nla_put_vlan(skb, &output->eth.vlan, is_mask))
2045                                 goto nla_put_failure;
2046                         encap = nla_nest_start_noflag(skb, OVS_KEY_ATTR_ENCAP);
2047                         if (!swkey->eth.vlan.tci)
2048                                 goto unencap;
2049
2050                         if (swkey->eth.cvlan.tci || eth_type_vlan(swkey->eth.type)) {
2051                                 if (ovs_nla_put_vlan(skb, &output->eth.cvlan, is_mask))
2052                                         goto nla_put_failure;
2053                                 in_encap = nla_nest_start_noflag(skb,
2054                                                                  OVS_KEY_ATTR_ENCAP);
2055                                 if (!swkey->eth.cvlan.tci)
2056                                         goto unencap;
2057                         }
2058                 }
2059
2060                 if (swkey->eth.type == htons(ETH_P_802_2)) {
2061                         /*
2062                          * Ethertype 802.2 is represented in netlink by an
2063                          * omitted OVS_KEY_ATTR_ETHERTYPE in the flow key
2064                          * attribute, and 0xffff in the mask attribute.  The
2065                          * Ethertype can also be wildcarded.
2066                          */
2067                         if (is_mask && output->eth.type)
2068                                 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
2069                                                         output->eth.type))
2070                                         goto nla_put_failure;
2071                         goto unencap;
2072                 }
2073         }
2074
2075         if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
2076                 goto nla_put_failure;
2077
2078         if (eth_type_vlan(swkey->eth.type)) {
2079                 /* There are 3 VLAN tags; we don't know anything about the rest
2080                  * of the packet, so truncate here.
2081                  */
2082                 WARN_ON_ONCE(!(encap && in_encap));
2083                 goto unencap;
2084         }
2085
2086         if (swkey->eth.type == htons(ETH_P_IP)) {
2087                 struct ovs_key_ipv4 *ipv4_key;
2088
2089                 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
2090                 if (!nla)
2091                         goto nla_put_failure;
2092                 ipv4_key = nla_data(nla);
2093                 ipv4_key->ipv4_src = output->ipv4.addr.src;
2094                 ipv4_key->ipv4_dst = output->ipv4.addr.dst;
2095                 ipv4_key->ipv4_proto = output->ip.proto;
2096                 ipv4_key->ipv4_tos = output->ip.tos;
2097                 ipv4_key->ipv4_ttl = output->ip.ttl;
2098                 ipv4_key->ipv4_frag = output->ip.frag;
2099         } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
2100                 struct ovs_key_ipv6 *ipv6_key;
2101
2102                 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
2103                 if (!nla)
2104                         goto nla_put_failure;
2105                 ipv6_key = nla_data(nla);
2106                 memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
2107                                 sizeof(ipv6_key->ipv6_src));
2108                 memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
2109                                 sizeof(ipv6_key->ipv6_dst));
2110                 ipv6_key->ipv6_label = output->ipv6.label;
2111                 ipv6_key->ipv6_proto = output->ip.proto;
2112                 ipv6_key->ipv6_tclass = output->ip.tos;
2113                 ipv6_key->ipv6_hlimit = output->ip.ttl;
2114                 ipv6_key->ipv6_frag = output->ip.frag;
2115         } else if (swkey->eth.type == htons(ETH_P_NSH)) {
2116                 if (nsh_key_to_nlattr(&output->nsh, is_mask, skb))
2117                         goto nla_put_failure;
2118         } else if (swkey->eth.type == htons(ETH_P_ARP) ||
2119                    swkey->eth.type == htons(ETH_P_RARP)) {
2120                 struct ovs_key_arp *arp_key;
2121
2122                 nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
2123                 if (!nla)
2124                         goto nla_put_failure;
2125                 arp_key = nla_data(nla);
2126                 memset(arp_key, 0, sizeof(struct ovs_key_arp));
2127                 arp_key->arp_sip = output->ipv4.addr.src;
2128                 arp_key->arp_tip = output->ipv4.addr.dst;
2129                 arp_key->arp_op = htons(output->ip.proto);
2130                 ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
2131                 ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
2132         } else if (eth_p_mpls(swkey->eth.type)) {
2133                 u8 i, num_labels;
2134                 struct ovs_key_mpls *mpls_key;
2135
2136                 num_labels = hweight_long(output->mpls.num_labels_mask);
2137                 nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS,
2138                                   num_labels * sizeof(*mpls_key));
2139                 if (!nla)
2140                         goto nla_put_failure;
2141
2142                 mpls_key = nla_data(nla);
2143                 for (i = 0; i < num_labels; i++)
2144                         mpls_key[i].mpls_lse = output->mpls.lse[i];
2145         }
2146
2147         if ((swkey->eth.type == htons(ETH_P_IP) ||
2148              swkey->eth.type == htons(ETH_P_IPV6)) &&
2149              swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
2150
2151                 if (swkey->ip.proto == IPPROTO_TCP) {
2152                         struct ovs_key_tcp *tcp_key;
2153
2154                         nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
2155                         if (!nla)
2156                                 goto nla_put_failure;
2157                         tcp_key = nla_data(nla);
2158                         tcp_key->tcp_src = output->tp.src;
2159                         tcp_key->tcp_dst = output->tp.dst;
2160                         if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
2161                                          output->tp.flags))
2162                                 goto nla_put_failure;
2163                 } else if (swkey->ip.proto == IPPROTO_UDP) {
2164                         struct ovs_key_udp *udp_key;
2165
2166                         nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
2167                         if (!nla)
2168                                 goto nla_put_failure;
2169                         udp_key = nla_data(nla);
2170                         udp_key->udp_src = output->tp.src;
2171                         udp_key->udp_dst = output->tp.dst;
2172                 } else if (swkey->ip.proto == IPPROTO_SCTP) {
2173                         struct ovs_key_sctp *sctp_key;
2174
2175                         nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
2176                         if (!nla)
2177                                 goto nla_put_failure;
2178                         sctp_key = nla_data(nla);
2179                         sctp_key->sctp_src = output->tp.src;
2180                         sctp_key->sctp_dst = output->tp.dst;
2181                 } else if (swkey->eth.type == htons(ETH_P_IP) &&
2182                            swkey->ip.proto == IPPROTO_ICMP) {
2183                         struct ovs_key_icmp *icmp_key;
2184
2185                         nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
2186                         if (!nla)
2187                                 goto nla_put_failure;
2188                         icmp_key = nla_data(nla);
2189                         icmp_key->icmp_type = ntohs(output->tp.src);
2190                         icmp_key->icmp_code = ntohs(output->tp.dst);
2191                 } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
2192                            swkey->ip.proto == IPPROTO_ICMPV6) {
2193                         struct ovs_key_icmpv6 *icmpv6_key;
2194
2195                         nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
2196                                                 sizeof(*icmpv6_key));
2197                         if (!nla)
2198                                 goto nla_put_failure;
2199                         icmpv6_key = nla_data(nla);
2200                         icmpv6_key->icmpv6_type = ntohs(output->tp.src);
2201                         icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
2202
2203                         if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
2204                             icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
2205                                 struct ovs_key_nd *nd_key;
2206
2207                                 nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
2208                                 if (!nla)
2209                                         goto nla_put_failure;
2210                                 nd_key = nla_data(nla);
2211                                 memcpy(nd_key->nd_target, &output->ipv6.nd.target,
2212                                                         sizeof(nd_key->nd_target));
2213                                 ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
2214                                 ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
2215                         }
2216                 }
2217         }
2218
2219 unencap:
2220         if (in_encap)
2221                 nla_nest_end(skb, in_encap);
2222         if (encap)
2223                 nla_nest_end(skb, encap);
2224
2225         return 0;
2226
2227 nla_put_failure:
2228         return -EMSGSIZE;
2229 }
2230
2231 int ovs_nla_put_key(const struct sw_flow_key *swkey,
2232                     const struct sw_flow_key *output, int attr, bool is_mask,
2233                     struct sk_buff *skb)
2234 {
2235         int err;
2236         struct nlattr *nla;
2237
2238         nla = nla_nest_start_noflag(skb, attr);
2239         if (!nla)
2240                 return -EMSGSIZE;
2241         err = __ovs_nla_put_key(swkey, output, is_mask, skb);
2242         if (err)
2243                 return err;
2244         nla_nest_end(skb, nla);
2245
2246         return 0;
2247 }
2248
2249 /* Called with ovs_mutex or RCU read lock. */
2250 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
2251 {
2252         if (ovs_identifier_is_ufid(&flow->id))
2253                 return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
2254                                flow->id.ufid);
2255
2256         return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
2257                                OVS_FLOW_ATTR_KEY, false, skb);
2258 }
2259
2260 /* Called with ovs_mutex or RCU read lock. */
2261 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
2262 {
2263         return ovs_nla_put_key(&flow->key, &flow->key,
2264                                 OVS_FLOW_ATTR_KEY, false, skb);
2265 }
2266
2267 /* Called with ovs_mutex or RCU read lock. */
2268 int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
2269 {
2270         return ovs_nla_put_key(&flow->key, &flow->mask->key,
2271                                 OVS_FLOW_ATTR_MASK, true, skb);
2272 }
2273
2274 #define MAX_ACTIONS_BUFSIZE     (32 * 1024)
2275
2276 static struct sw_flow_actions *nla_alloc_flow_actions(int size)
2277 {
2278         struct sw_flow_actions *sfa;
2279
2280         WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
2281
2282         sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
2283         if (!sfa)
2284                 return ERR_PTR(-ENOMEM);
2285
2286         sfa->actions_len = 0;
2287         return sfa;
2288 }
2289
2290 static void ovs_nla_free_set_action(const struct nlattr *a)
2291 {
2292         const struct nlattr *ovs_key = nla_data(a);
2293         struct ovs_tunnel_info *ovs_tun;
2294
2295         switch (nla_type(ovs_key)) {
2296         case OVS_KEY_ATTR_TUNNEL_INFO:
2297                 ovs_tun = nla_data(ovs_key);
2298                 dst_release((struct dst_entry *)ovs_tun->tun_dst);
2299                 break;
2300         }
2301 }
2302
2303 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
2304 {
2305         const struct nlattr *a;
2306         int rem;
2307
2308         if (!sf_acts)
2309                 return;
2310
2311         nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
2312                 switch (nla_type(a)) {
2313                 case OVS_ACTION_ATTR_SET:
2314                         ovs_nla_free_set_action(a);
2315                         break;
2316                 case OVS_ACTION_ATTR_CT:
2317                         ovs_ct_free_action(a);
2318                         break;
2319                 }
2320         }
2321
2322         kfree(sf_acts);
2323 }
2324
2325 static void __ovs_nla_free_flow_actions(struct rcu_head *head)
2326 {
2327         ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
2328 }
2329
2330 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
2331  * The caller must hold rcu_read_lock for this to be sensible. */
2332 void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
2333 {
2334         call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
2335 }
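
/*
 * Illustrative sketch, not part of the original file: a typical update
 * path publishes the new actions first and only then schedules the old
 * copy for freeing, e.g.:
 *
 *         old_acts = ovsl_dereference(flow->sf_acts);
 *         rcu_assign_pointer(flow->sf_acts, new_acts);
 *         ovs_nla_free_flow_actions_rcu(old_acts);
 *
 * so readers that looked the flow up under rcu_read_lock() can keep using
 * 'old_acts' until the grace period ends.
 */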
2336
2337 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
2338                                        int attr_len, bool log)
2339 {
2340
2341         struct sw_flow_actions *acts;
2342         int new_acts_size;
2343         size_t req_size = NLA_ALIGN(attr_len);
2344         int next_offset = offsetof(struct sw_flow_actions, actions) +
2345                                         (*sfa)->actions_len;
2346
2347         if (req_size <= (ksize(*sfa) - next_offset))
2348                 goto out;
2349
2350         new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
2351
2352         if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
2353                 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
2354                         OVS_NLERR(log, "Flow action size exceeds max %u",
2355                                   MAX_ACTIONS_BUFSIZE);
2356                         return ERR_PTR(-EMSGSIZE);
2357                 }
2358                 new_acts_size = MAX_ACTIONS_BUFSIZE;
2359         }
2360
2361         acts = nla_alloc_flow_actions(new_acts_size);
2362         if (IS_ERR(acts))
2363                 return (void *)acts;
2364
2365         memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
2366         acts->actions_len = (*sfa)->actions_len;
2367         acts->orig_len = (*sfa)->orig_len;
2368         kfree(*sfa);
2369         *sfa = acts;
2370
2371 out:
2372         (*sfa)->actions_len += req_size;
2373         return (struct nlattr *)((unsigned char *)(*sfa) + next_offset);
2374 }
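
/*
 * Illustrative sketch, not part of the original file: the growth policy
 * above doubles the allocation but never exceeds MAX_ACTIONS_BUFSIZE.
 * For example, appending a 101-byte attribute to a buffer whose ksize()
 * is 2048 gives
 *
 *         req_size      = NLA_ALIGN(101)                   = 104
 *         new_acts_size = max(next_offset + 104, 2048 * 2) = 4096
 *
 * and once new_acts_size would pass 32 KiB the request is either capped
 * to MAX_ACTIONS_BUFSIZE or fails with -EMSGSIZE if it no longer fits.
 */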
2375
2376 static struct nlattr *__add_action(struct sw_flow_actions **sfa,
2377                                    int attrtype, void *data, int len, bool log)
2378 {
2379         struct nlattr *a;
2380
2381         a = reserve_sfa_size(sfa, nla_attr_size(len), log);
2382         if (IS_ERR(a))
2383                 return a;
2384
2385         a->nla_type = attrtype;
2386         a->nla_len = nla_attr_size(len);
2387
2388         if (data)
2389                 memcpy(nla_data(a), data, len);
2390         memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
2391
2392         return a;
2393 }
2394
2395 int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
2396                        int len, bool log)
2397 {
2398         struct nlattr *a;
2399
2400         a = __add_action(sfa, attrtype, data, len, log);
2401
2402         return PTR_ERR_OR_ZERO(a);
2403 }
2404
2405 static inline int add_nested_action_start(struct sw_flow_actions **sfa,
2406                                           int attrtype, bool log)
2407 {
2408         int used = (*sfa)->actions_len;
2409         int err;
2410
2411         err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
2412         if (err)
2413                 return err;
2414
2415         return used;
2416 }
2417
2418 static inline void add_nested_action_end(struct sw_flow_actions *sfa,
2419                                          int st_offset)
2420 {
2421         struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
2422                                                                st_offset);
2423
2424         a->nla_len = sfa->actions_len - st_offset;
2425 }
2426
2427 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2428                                   const struct sw_flow_key *key,
2429                                   struct sw_flow_actions **sfa,
2430                                   __be16 eth_type, __be16 vlan_tci,
2431                                   u32 mpls_label_count, bool log);
2432
2433 static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
2434                                     const struct sw_flow_key *key,
2435                                     struct sw_flow_actions **sfa,
2436                                     __be16 eth_type, __be16 vlan_tci,
2437                                     u32 mpls_label_count, bool log, bool last)
2438 {
2439         const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
2440         const struct nlattr *probability, *actions;
2441         const struct nlattr *a;
2442         int rem, start, err;
2443         struct sample_arg arg;
2444
2445         memset(attrs, 0, sizeof(attrs));
2446         nla_for_each_nested(a, attr, rem) {
2447                 int type = nla_type(a);
2448                 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
2449                         return -EINVAL;
2450                 attrs[type] = a;
2451         }
2452         if (rem)
2453                 return -EINVAL;
2454
2455         probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
2456         if (!probability || nla_len(probability) != sizeof(u32))
2457                 return -EINVAL;
2458
2459         actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
2460         if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
2461                 return -EINVAL;
2462
2463         /* validation done, copy sample action. */
2464         start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
2465         if (start < 0)
2466                 return start;
2467
2468         /* When both skb and flow may be changed, put the sample
2469          * into a deferred fifo. On the other hand, if only skb
2470          * may be modified, the actions can be executed in place.
2471          *
2472          * Do this analysis at flow installation time.
2473          * Set 'clone_action->exec' to true if the actions can be
2474          * executed without being deferred.
2475          *
2476          * If the sample is the last action, it can always be executed
2477          * rather than deferred.
2478          */
2479         arg.exec = last || !actions_may_change_flow(actions);
2480         arg.probability = nla_get_u32(probability);
2481
2482         err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_ARG, &arg, sizeof(arg),
2483                                  log);
2484         if (err)
2485                 return err;
2486
2487         err = __ovs_nla_copy_actions(net, actions, key, sfa,
2488                                      eth_type, vlan_tci, mpls_label_count, log);
2489
2490         if (err)
2491                 return err;
2492
2493         add_nested_action_end(*sfa, start);
2494
2495         return 0;
2496 }
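
/*
 * Illustrative sketch, not part of the original file: after the copy, the
 * kernel-internal encoding of the sample action looks roughly like
 *
 *         OVS_ACTION_ATTR_SAMPLE {
 *                 OVS_SAMPLE_ATTR_ARG  (struct sample_arg: exec, probability)
 *                 ... copied nested actions ...
 *         }
 *
 * i.e. userspace's OVS_SAMPLE_ATTR_PROBABILITY/OVS_SAMPLE_ATTR_ACTIONS
 * pair is re-encoded with the precomputed sample_arg in front so the
 * datapath does not have to re-derive 'exec' per packet.
 */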
2497
2498 static int validate_and_copy_clone(struct net *net,
2499                                    const struct nlattr *attr,
2500                                    const struct sw_flow_key *key,
2501                                    struct sw_flow_actions **sfa,
2502                                    __be16 eth_type, __be16 vlan_tci,
2503                                    u32 mpls_label_count, bool log, bool last)
2504 {
2505         int start, err;
2506         u32 exec;
2507
2508         if (nla_len(attr) && nla_len(attr) < NLA_HDRLEN)
2509                 return -EINVAL;
2510
2511         start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CLONE, log);
2512         if (start < 0)
2513                 return start;
2514
2515         exec = last || !actions_may_change_flow(attr);
2516
2517         err = ovs_nla_add_action(sfa, OVS_CLONE_ATTR_EXEC, &exec,
2518                                  sizeof(exec), log);
2519         if (err)
2520                 return err;
2521
2522         err = __ovs_nla_copy_actions(net, attr, key, sfa,
2523                                      eth_type, vlan_tci, mpls_label_count, log);
2524         if (err)
2525                 return err;
2526
2527         add_nested_action_end(*sfa, start);
2528
2529         return 0;
2530 }
2531
2532 void ovs_match_init(struct sw_flow_match *match,
2533                     struct sw_flow_key *key,
2534                     bool reset_key,
2535                     struct sw_flow_mask *mask)
2536 {
2537         memset(match, 0, sizeof(*match));
2538         match->key = key;
2539         match->mask = mask;
2540
2541         if (reset_key)
2542                 memset(key, 0, sizeof(*key));
2543
2544         if (mask) {
2545                 memset(&mask->key, 0, sizeof(mask->key));
2546                 mask->range.start = mask->range.end = 0;
2547         }
2548 }
2549
2550 static int validate_geneve_opts(struct sw_flow_key *key)
2551 {
2552         struct geneve_opt *option;
2553         int opts_len = key->tun_opts_len;
2554         bool crit_opt = false;
2555
2556         option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
2557         while (opts_len > 0) {
2558                 int len;
2559
2560                 if (opts_len < sizeof(*option))
2561                         return -EINVAL;
2562
2563                 len = sizeof(*option) + option->length * 4;
2564                 if (len > opts_len)
2565                         return -EINVAL;
2566
2567                 crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
2568
2569                 option = (struct geneve_opt *)((u8 *)option + len);
2570                 opts_len -= len;
2571         }
2572
2573         key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
2574
2575         return 0;
2576 }
2577
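/* Validate a set(tunnel(...)) action and convert it into an internal
 * OVS_KEY_ATTR_TUNNEL_INFO action carrying a preallocated metadata_dst,
 * so the transmit path can attach tunnel info to packets without
 * re-parsing netlink attributes.
 */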
2578 static int validate_and_copy_set_tun(const struct nlattr *attr,
2579                                      struct sw_flow_actions **sfa, bool log)
2580 {
2581         struct sw_flow_match match;
2582         struct sw_flow_key key;
2583         struct metadata_dst *tun_dst;
2584         struct ip_tunnel_info *tun_info;
2585         struct ovs_tunnel_info *ovs_tun;
2586         struct nlattr *a;
2587         int err = 0, start, opts_type;
2588         __be16 dst_opt_type;
2589
2590         dst_opt_type = 0;
2591         ovs_match_init(&match, &key, true, NULL);
2592         opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
2593         if (opts_type < 0)
2594                 return opts_type;
2595
2596         if (key.tun_opts_len) {
2597                 switch (opts_type) {
2598                 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
2599                         err = validate_geneve_opts(&key);
2600                         if (err < 0)
2601                                 return err;
2602                         dst_opt_type = TUNNEL_GENEVE_OPT;
2603                         break;
2604                 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
2605                         dst_opt_type = TUNNEL_VXLAN_OPT;
2606                         break;
2607                 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
2608                         dst_opt_type = TUNNEL_ERSPAN_OPT;
2609                         break;
2610                 }
2611         }
2612
2613         start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
2614         if (start < 0)
2615                 return start;
2616
2617         tun_dst = metadata_dst_alloc(key.tun_opts_len, METADATA_IP_TUNNEL,
2618                                      GFP_KERNEL);
2619
2620         if (!tun_dst)
2621                 return -ENOMEM;
2622
2623         err = dst_cache_init(&tun_dst->u.tun_info.dst_cache, GFP_KERNEL);
2624         if (err) {
2625                 dst_release((struct dst_entry *)tun_dst);
2626                 return err;
2627         }
2628
2629         a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
2630                          sizeof(*ovs_tun), log);
2631         if (IS_ERR(a)) {
2632                 dst_release((struct dst_entry *)tun_dst);
2633                 return PTR_ERR(a);
2634         }
2635
2636         ovs_tun = nla_data(a);
2637         ovs_tun->tun_dst = tun_dst;
2638
2639         tun_info = &tun_dst->u.tun_info;
2640         tun_info->mode = IP_TUNNEL_INFO_TX;
2641         if (key.tun_proto == AF_INET6)
2642                 tun_info->mode |= IP_TUNNEL_INFO_IPV6;
2643         else if (key.tun_proto == AF_INET && key.tun_key.u.ipv4.dst == 0)
2644                 tun_info->mode |= IP_TUNNEL_INFO_BRIDGE;
2645         tun_info->key = key.tun_key;
2646
2647         /* We need to store the options in the action itself since
2648          * everything else will go away after flow setup. We can append
2649          * it to tun_info and then point there.
2650          */
2651         ip_tunnel_info_opts_set(tun_info,
2652                                 TUN_METADATA_OPTS(&key, key.tun_opts_len),
2653                                 key.tun_opts_len, dst_opt_type);
2654         add_nested_action_end(*sfa, start);
2655
2656         return err;
2657 }
2658
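/* Returns true if 'attr' parses as a valid NSH key (or NSH mask when
 * 'is_mask' is set).
 */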
2659 static bool validate_nsh(const struct nlattr *attr, bool is_mask,
2660                          bool is_push_nsh, bool log)
2661 {
2662         struct sw_flow_match match;
2663         struct sw_flow_key key;
2664         int ret = 0;
2665
2666         ovs_match_init(&match, &key, true, NULL);
2667         ret = nsh_key_put_from_nlattr(attr, &match, is_mask,
2668                                       is_push_nsh, log);
2669         return !ret;
2670 }
2671
2672 /* Return false if there are any non-masked bits set.
2673  * Mask follows data immediately, before any netlink padding.
2674  */
2675 static bool validate_masked(u8 *data, int len)
2676 {
2677         u8 *mask = data + len;
2678
2679         while (len--)
2680                 if (*data++ & ~*mask++)
2681                         return false;
2682
2683         return true;
2684 }
2685
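/* Validate a single set() or set_masked() action against the flow key.
 * Tunnel sets are handled by validate_and_copy_set_tun(); all other
 * non-masked sets are rewritten as OVS_ACTION_ATTR_SET_TO_MASKED with
 * an all-ones mask so the datapath only has to execute one form.
 */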
2686 static int validate_set(const struct nlattr *a,
2687                         const struct sw_flow_key *flow_key,
2688                         struct sw_flow_actions **sfa, bool *skip_copy,
2689                         u8 mac_proto, __be16 eth_type, bool masked, bool log)
2690 {
2691         const struct nlattr *ovs_key = nla_data(a);
2692         int key_type = nla_type(ovs_key);
2693         size_t key_len;
2694
2695         /* There can be only one key in an action. */
2696         if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
2697                 return -EINVAL;
2698
2699         key_len = nla_len(ovs_key);
2700         if (masked)
2701                 key_len /= 2;
2702
2703         if (key_type > OVS_KEY_ATTR_MAX ||
2704             !check_attr_len(key_len, ovs_key_lens[key_type].len))
2705                 return -EINVAL;
2706
2707         if (masked && !validate_masked(nla_data(ovs_key), key_len))
2708                 return -EINVAL;
2709
2710         switch (key_type) {
2711         const struct ovs_key_ipv4 *ipv4_key;
2712         const struct ovs_key_ipv6 *ipv6_key;
2713         int err;
2714
2715         case OVS_KEY_ATTR_PRIORITY:
2716         case OVS_KEY_ATTR_SKB_MARK:
2717         case OVS_KEY_ATTR_CT_MARK:
2718         case OVS_KEY_ATTR_CT_LABELS:
2719                 break;
2720
2721         case OVS_KEY_ATTR_ETHERNET:
2722                 if (mac_proto != MAC_PROTO_ETHERNET)
2723                         return -EINVAL;
2724                 break;
2725
2726         case OVS_KEY_ATTR_TUNNEL:
2727                 if (masked)
2728                         return -EINVAL; /* Masked tunnel set not supported. */
2729
2730                 *skip_copy = true;
2731                 err = validate_and_copy_set_tun(a, sfa, log);
2732                 if (err)
2733                         return err;
2734                 break;
2735
2736         case OVS_KEY_ATTR_IPV4:
2737                 if (eth_type != htons(ETH_P_IP))
2738                         return -EINVAL;
2739
2740                 ipv4_key = nla_data(ovs_key);
2741
2742                 if (masked) {
2743                         const struct ovs_key_ipv4 *mask = ipv4_key + 1;
2744
2745                         /* Non-writeable fields. */
2746                         if (mask->ipv4_proto || mask->ipv4_frag)
2747                                 return -EINVAL;
2748                 } else {
2749                         if (ipv4_key->ipv4_proto != flow_key->ip.proto)
2750                                 return -EINVAL;
2751
2752                         if (ipv4_key->ipv4_frag != flow_key->ip.frag)
2753                                 return -EINVAL;
2754                 }
2755                 break;
2756
2757         case OVS_KEY_ATTR_IPV6:
2758                 if (eth_type != htons(ETH_P_IPV6))
2759                         return -EINVAL;
2760
2761                 ipv6_key = nla_data(ovs_key);
2762
2763                 if (masked) {
2764                         const struct ovs_key_ipv6 *mask = ipv6_key + 1;
2765
2766                         /* Non-writeable fields. */
2767                         if (mask->ipv6_proto || mask->ipv6_frag)
2768                                 return -EINVAL;
2769
2770                         /* Invalid bits in the flow label mask? */
2771                         if (ntohl(mask->ipv6_label) & 0xFFF00000)
2772                                 return -EINVAL;
2773                 } else {
2774                         if (ipv6_key->ipv6_proto != flow_key->ip.proto)
2775                                 return -EINVAL;
2776
2777                         if (ipv6_key->ipv6_frag != flow_key->ip.frag)
2778                                 return -EINVAL;
2779                 }
2780                 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
2781                         return -EINVAL;
2782
2783                 break;
2784
2785         case OVS_KEY_ATTR_TCP:
2786                 if ((eth_type != htons(ETH_P_IP) &&
2787                      eth_type != htons(ETH_P_IPV6)) ||
2788                     flow_key->ip.proto != IPPROTO_TCP)
2789                         return -EINVAL;
2790
2791                 break;
2792
2793         case OVS_KEY_ATTR_UDP:
2794                 if ((eth_type != htons(ETH_P_IP) &&
2795                      eth_type != htons(ETH_P_IPV6)) ||
2796                     flow_key->ip.proto != IPPROTO_UDP)
2797                         return -EINVAL;
2798
2799                 break;
2800
2801         case OVS_KEY_ATTR_MPLS:
2802                 if (!eth_p_mpls(eth_type))
2803                         return -EINVAL;
2804                 break;
2805
2806         case OVS_KEY_ATTR_SCTP:
2807                 if ((eth_type != htons(ETH_P_IP) &&
2808                      eth_type != htons(ETH_P_IPV6)) ||
2809                     flow_key->ip.proto != IPPROTO_SCTP)
2810                         return -EINVAL;
2811
2812                 break;
2813
2814         case OVS_KEY_ATTR_NSH:
2815                 if (eth_type != htons(ETH_P_NSH))
2816                         return -EINVAL;
2817                 if (!validate_nsh(nla_data(a), masked, false, log))
2818                         return -EINVAL;
2819                 break;
2820
2821         default:
2822                 return -EINVAL;
2823         }
2824
2825         /* Convert non-masked non-tunnel set actions to masked set actions. */
2826         if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
2827                 int start, len = key_len * 2;
2828                 struct nlattr *at;
2829
2830                 *skip_copy = true;
2831
2832                 start = add_nested_action_start(sfa,
2833                                                 OVS_ACTION_ATTR_SET_TO_MASKED,
2834                                                 log);
2835                 if (start < 0)
2836                         return start;
2837
2838                 at = __add_action(sfa, key_type, NULL, len, log);
2839                 if (IS_ERR(at))
2840                         return PTR_ERR(at);
2841
2842                 memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
2843                 memset(nla_data(at) + key_len, 0xff, key_len);    /* Mask. */
2844                 /* Clear non-writeable bits from otherwise writeable fields. */
2845                 if (key_type == OVS_KEY_ATTR_IPV6) {
2846                         struct ovs_key_ipv6 *mask = nla_data(at) + key_len;
2847
2848                         mask->ipv6_label &= htonl(0x000FFFFF);
2849                 }
2850                 add_nested_action_end(*sfa, start);
2851         }
2852
2853         return 0;
2854 }
2855
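/* An OVS_ACTION_ATTR_USERSPACE action must carry a non-zero netlink PID
 * identifying the socket that receives the upcall.
 */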
2856 static int validate_userspace(const struct nlattr *attr)
2857 {
2858         static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
2859                 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
2860                 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
2861                 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
2862         };
2863         struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
2864         int error;
2865
2866         error = nla_parse_nested_deprecated(a, OVS_USERSPACE_ATTR_MAX, attr,
2867                                             userspace_policy, NULL);
2868         if (error)
2869                 return error;
2870
2871         if (!a[OVS_USERSPACE_ATTR_PID] ||
2872             !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
2873                 return -EINVAL;
2874
2875         return 0;
2876 }
2877
2878 static const struct nla_policy cpl_policy[OVS_CHECK_PKT_LEN_ATTR_MAX + 1] = {
2879         [OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = {.type = NLA_U16 },
2880         [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = {.type = NLA_NESTED },
2881         [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL] = {.type = NLA_NESTED },
2882 };
2883
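/* Validate a check_pkt_len() action and copy it into 'sfa' as a
 * check_pkt_len_arg struct followed by the two nested action lists
 * ("less than or equal" first, then "greater"), the layout that
 * check_pkt_len_action_to_attr() expects when dumping the flow.
 */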
2884 static int validate_and_copy_check_pkt_len(struct net *net,
2885                                            const struct nlattr *attr,
2886                                            const struct sw_flow_key *key,
2887                                            struct sw_flow_actions **sfa,
2888                                            __be16 eth_type, __be16 vlan_tci,
2889                                            u32 mpls_label_count,
2890                                            bool log, bool last)
2891 {
2892         const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
2893         struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1];
2894         struct check_pkt_len_arg arg;
2895         int nested_acts_start;
2896         int start, err;
2897
2898         err = nla_parse_deprecated_strict(a, OVS_CHECK_PKT_LEN_ATTR_MAX,
2899                                           nla_data(attr), nla_len(attr),
2900                                           cpl_policy, NULL);
2901         if (err)
2902                 return err;
2903
2904         if (!a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] ||
2905             !nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]))
2906                 return -EINVAL;
2907
2908         acts_if_lesser_eq = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
2909         acts_if_greater = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
2910
2911         /* Both nested action lists must be present. */
2912         if (!acts_if_greater || !acts_if_lesser_eq)
2913                 return -EINVAL;
2914
2915         /* validation done, copy the nested actions. */
2916         start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CHECK_PKT_LEN,
2917                                         log);
2918         if (start < 0)
2919                 return start;
2920
2921         arg.pkt_len = nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);
2922         arg.exec_for_lesser_equal =
2923                 last || !actions_may_change_flow(acts_if_lesser_eq);
2924         arg.exec_for_greater =
2925                 last || !actions_may_change_flow(acts_if_greater);
2926
2927         err = ovs_nla_add_action(sfa, OVS_CHECK_PKT_LEN_ATTR_ARG, &arg,
2928                                  sizeof(arg), log);
2929         if (err)
2930                 return err;
2931
2932         nested_acts_start = add_nested_action_start(sfa,
2933                 OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL, log);
2934         if (nested_acts_start < 0)
2935                 return nested_acts_start;
2936
2937         err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
2938                                      eth_type, vlan_tci, mpls_label_count, log);
2939
2940         if (err)
2941                 return err;
2942
2943         add_nested_action_end(*sfa, nested_acts_start);
2944
2945         nested_acts_start = add_nested_action_start(sfa,
2946                 OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER, log);
2947         if (nested_acts_start < 0)
2948                 return nested_acts_start;
2949
2950         err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
2951                                      eth_type, vlan_tci, mpls_label_count, log);
2952
2953         if (err)
2954                 return err;
2955
2956         add_nested_action_end(*sfa, nested_acts_start);
2957         add_nested_action_end(*sfa, start);
2958         return 0;
2959 }
2960
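/* Append a verbatim copy of the action attribute 'from', including its
 * netlink alignment padding, to 'sfa'.
 */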
2961 static int copy_action(const struct nlattr *from,
2962                        struct sw_flow_actions **sfa, bool log)
2963 {
2964         int totlen = NLA_ALIGN(from->nla_len);
2965         struct nlattr *to;
2966
2967         to = reserve_sfa_size(sfa, from->nla_len, log);
2968         if (IS_ERR(to))
2969                 return PTR_ERR(to);
2970
2971         memcpy(to, from, totlen);
2972         return 0;
2973 }
2974
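/* Validate every action in the nested attribute list against the flow
 * key, tracking how eth_type, vlan_tci, the MPLS label count and the L2
 * state evolve as actions are applied, and copy the accepted actions
 * into 'sfa'.  Actions that are rewritten into an internal
 * representation set 'skip_copy' so the raw attribute is not copied a
 * second time.
 */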
2975 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2976                                   const struct sw_flow_key *key,
2977                                   struct sw_flow_actions **sfa,
2978                                   __be16 eth_type, __be16 vlan_tci,
2979                                   u32 mpls_label_count, bool log)
2980 {
2981         u8 mac_proto = ovs_key_mac_proto(key);
2982         const struct nlattr *a;
2983         int rem, err;
2984
2985         nla_for_each_nested(a, attr, rem) {
2986                 /* Expected argument lengths, (u32)-1 for variable length. */
2987                 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
2988                         [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
2989                         [OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
2990                         [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
2991                         [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
2992                         [OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
2993                         [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
2994                         [OVS_ACTION_ATTR_POP_VLAN] = 0,
2995                         [OVS_ACTION_ATTR_SET] = (u32)-1,
2996                         [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
2997                         [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
2998                         [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
2999                         [OVS_ACTION_ATTR_CT] = (u32)-1,
3000                         [OVS_ACTION_ATTR_CT_CLEAR] = 0,
3001                         [OVS_ACTION_ATTR_TRUNC] = sizeof(struct ovs_action_trunc),
3002                         [OVS_ACTION_ATTR_PUSH_ETH] = sizeof(struct ovs_action_push_eth),
3003                         [OVS_ACTION_ATTR_POP_ETH] = 0,
3004                         [OVS_ACTION_ATTR_PUSH_NSH] = (u32)-1,
3005                         [OVS_ACTION_ATTR_POP_NSH] = 0,
3006                         [OVS_ACTION_ATTR_METER] = sizeof(u32),
3007                         [OVS_ACTION_ATTR_CLONE] = (u32)-1,
3008                         [OVS_ACTION_ATTR_CHECK_PKT_LEN] = (u32)-1,
3009                         [OVS_ACTION_ATTR_ADD_MPLS] = sizeof(struct ovs_action_add_mpls),
3010                 };
3011                 const struct ovs_action_push_vlan *vlan;
3012                 int type = nla_type(a);
3013                 bool skip_copy;
3014
3015                 if (type > OVS_ACTION_ATTR_MAX ||
3016                     (action_lens[type] != nla_len(a) &&
3017                      action_lens[type] != (u32)-1))
3018                         return -EINVAL;
3019
3020                 skip_copy = false;
3021                 switch (type) {
3022                 case OVS_ACTION_ATTR_UNSPEC:
3023                         return -EINVAL;
3024
3025                 case OVS_ACTION_ATTR_USERSPACE:
3026                         err = validate_userspace(a);
3027                         if (err)
3028                                 return err;
3029                         break;
3030
3031                 case OVS_ACTION_ATTR_OUTPUT:
3032                         if (nla_get_u32(a) >= DP_MAX_PORTS)
3033                                 return -EINVAL;
3034                         break;
3035
3036                 case OVS_ACTION_ATTR_TRUNC: {
3037                         const struct ovs_action_trunc *trunc = nla_data(a);
3038
3039                         if (trunc->max_len < ETH_HLEN)
3040                                 return -EINVAL;
3041                         break;
3042                 }
3043
3044                 case OVS_ACTION_ATTR_HASH: {
3045                         const struct ovs_action_hash *act_hash = nla_data(a);
3046
3047                         switch (act_hash->hash_alg) {
3048                         case OVS_HASH_ALG_L4:
3049                                 break;
3050                         default:
3051                                 return -EINVAL;
3052                         }
3053
3054                         break;
3055                 }
3056
3057                 case OVS_ACTION_ATTR_POP_VLAN:
3058                         if (mac_proto != MAC_PROTO_ETHERNET)
3059                                 return -EINVAL;
3060                         vlan_tci = htons(0);
3061                         break;
3062
3063                 case OVS_ACTION_ATTR_PUSH_VLAN:
3064                         if (mac_proto != MAC_PROTO_ETHERNET)
3065                                 return -EINVAL;
3066                         vlan = nla_data(a);
3067                         if (!eth_type_vlan(vlan->vlan_tpid))
3068                                 return -EINVAL;
3069                         if (!(vlan->vlan_tci & htons(VLAN_CFI_MASK)))
3070                                 return -EINVAL;
3071                         vlan_tci = vlan->vlan_tci;
3072                         break;
3073
3074                 case OVS_ACTION_ATTR_RECIRC:
3075                         break;
3076
3077                 case OVS_ACTION_ATTR_ADD_MPLS: {
3078                         const struct ovs_action_add_mpls *mpls = nla_data(a);
3079
3080                         if (!eth_p_mpls(mpls->mpls_ethertype))
3081                                 return -EINVAL;
3082
3083                         if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK) {
3084                                 if (vlan_tci & htons(VLAN_CFI_MASK) ||
3085                                     (eth_type != htons(ETH_P_IP) &&
3086                                      eth_type != htons(ETH_P_IPV6) &&
3087                                      eth_type != htons(ETH_P_ARP) &&
3088                                      eth_type != htons(ETH_P_RARP) &&
3089                                      !eth_p_mpls(eth_type)))
3090                                         return -EINVAL;
3091                                 mpls_label_count++;
3092                         } else {
3093                                 if (mac_proto == MAC_PROTO_ETHERNET) {
3094                                         mpls_label_count = 1;
3095                                         mac_proto = MAC_PROTO_NONE;
3096                                 } else {
3097                                         mpls_label_count++;
3098                                 }
3099                         }
3100                         eth_type = mpls->mpls_ethertype;
3101                         break;
3102                 }
3103
3104                 case OVS_ACTION_ATTR_PUSH_MPLS: {
3105                         const struct ovs_action_push_mpls *mpls = nla_data(a);
3106
3107                         if (!eth_p_mpls(mpls->mpls_ethertype))
3108                                 return -EINVAL;
3109                         /* Only allow pushing MPLS over a white-listed set of
3110                          * EtherTypes, i.e. packets with a known tag order.
3111                          */
3112                         if (vlan_tci & htons(VLAN_CFI_MASK) ||
3113                             (eth_type != htons(ETH_P_IP) &&
3114                              eth_type != htons(ETH_P_IPV6) &&
3115                              eth_type != htons(ETH_P_ARP) &&
3116                              eth_type != htons(ETH_P_RARP) &&
3117                              !eth_p_mpls(eth_type)))
3118                                 return -EINVAL;
3119                         eth_type = mpls->mpls_ethertype;
3120                         mpls_label_count++;
3121                         break;
3122                 }
3123
3124                 case OVS_ACTION_ATTR_POP_MPLS: {
3125                         __be16 proto;
3126                         if (vlan_tci & htons(VLAN_CFI_MASK) ||
3127                             !eth_p_mpls(eth_type))
3128                                 return -EINVAL;
3129
3130                         /* Disallow subsequent L2.5+ set actions and mpls_pop
3131                          * actions once the last MPLS label in the packet is
3132                          * popped, as there is no check here to ensure that
3133                          * the new eth type is valid and thus set actions could
3134                          * write off the end of the packet or otherwise corrupt
3135                          * it.
3136                          *
3137                          * Support for these actions is planned using packet
3138                          * recirculation.
3139                          */
3140                         proto = nla_get_be16(a);
3141
3142                         if (proto == htons(ETH_P_TEB) &&
3143                             mac_proto != MAC_PROTO_NONE)
3144                                 return -EINVAL;
3145
3146                         mpls_label_count--;
3147
3148                         if (!eth_p_mpls(proto) || !mpls_label_count)
3149                                 eth_type = htons(0);
3150                         else
3151                                 eth_type = proto;
3152
3153                         break;
3154                 }
3155
3156                 case OVS_ACTION_ATTR_SET:
3157                         err = validate_set(a, key, sfa,
3158                                            &skip_copy, mac_proto, eth_type,
3159                                            false, log);
3160                         if (err)
3161                                 return err;
3162                         break;
3163
3164                 case OVS_ACTION_ATTR_SET_MASKED:
3165                         err = validate_set(a, key, sfa,
3166                                            &skip_copy, mac_proto, eth_type,
3167                                            true, log);
3168                         if (err)
3169                                 return err;
3170                         break;
3171
3172                 case OVS_ACTION_ATTR_SAMPLE: {
3173                         bool last = nla_is_last(a, rem);
3174
3175                         err = validate_and_copy_sample(net, a, key, sfa,
3176                                                        eth_type, vlan_tci,
3177                                                        mpls_label_count,
3178                                                        log, last);
3179                         if (err)
3180                                 return err;
3181                         skip_copy = true;
3182                         break;
3183                 }
3184
3185                 case OVS_ACTION_ATTR_CT:
3186                         err = ovs_ct_copy_action(net, a, key, sfa, log);
3187                         if (err)
3188                                 return err;
3189                         skip_copy = true;
3190                         break;
3191
3192                 case OVS_ACTION_ATTR_CT_CLEAR:
3193                         break;
3194
3195                 case OVS_ACTION_ATTR_PUSH_ETH:
3196                         /* Disallow pushing an Ethernet header if one
3197                          * is already present */
3198                         if (mac_proto != MAC_PROTO_NONE)
3199                                 return -EINVAL;
3200                         mac_proto = MAC_PROTO_ETHERNET;
3201                         break;
3202
3203                 case OVS_ACTION_ATTR_POP_ETH:
3204                         if (mac_proto != MAC_PROTO_ETHERNET)
3205                                 return -EINVAL;
3206                         if (vlan_tci & htons(VLAN_CFI_MASK))
3207                                 return -EINVAL;
3208                         mac_proto = MAC_PROTO_NONE;
3209                         break;
3210
3211                 case OVS_ACTION_ATTR_PUSH_NSH:
3212                         if (mac_proto != MAC_PROTO_ETHERNET) {
3213                                 u8 next_proto;
3214
3215                                 next_proto = tun_p_from_eth_p(eth_type);
3216                                 if (!next_proto)
3217                                         return -EINVAL;
3218                         }
3219                         mac_proto = MAC_PROTO_NONE;
3220                         if (!validate_nsh(nla_data(a), false, true, true))
3221                                 return -EINVAL;
3222                         break;
3223
3224                 case OVS_ACTION_ATTR_POP_NSH: {
3225                         __be16 inner_proto;
3226
3227                         if (eth_type != htons(ETH_P_NSH))
3228                                 return -EINVAL;
3229                         inner_proto = tun_p_to_eth_p(key->nsh.base.np);
3230                         if (!inner_proto)
3231                                 return -EINVAL;
3232                         if (key->nsh.base.np == TUN_P_ETHERNET)
3233                                 mac_proto = MAC_PROTO_ETHERNET;
3234                         else
3235                                 mac_proto = MAC_PROTO_NONE;
3236                         break;
3237                 }
3238
3239                 case OVS_ACTION_ATTR_METER:
3240                         /* Non-existent meters are simply ignored.  */
3241                         break;
3242
3243                 case OVS_ACTION_ATTR_CLONE: {
3244                         bool last = nla_is_last(a, rem);
3245
3246                         err = validate_and_copy_clone(net, a, key, sfa,
3247                                                       eth_type, vlan_tci,
3248                                                       mpls_label_count,
3249                                                       log, last);
3250                         if (err)
3251                                 return err;
3252                         skip_copy = true;
3253                         break;
3254                 }
3255
3256                 case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
3257                         bool last = nla_is_last(a, rem);
3258
3259                         err = validate_and_copy_check_pkt_len(net, a, key, sfa,
3260                                                               eth_type,
3261                                                               vlan_tci,
3262                                                               mpls_label_count,
3263                                                               log, last);
3264                         if (err)
3265                                 return err;
3266                         skip_copy = true;
3267                         break;
3268                 }
3269
3270                 default:
3271                         OVS_NLERR(log, "Unknown Action type %d", type);
3272                         return -EINVAL;
3273                 }
3274                 if (!skip_copy) {
3275                         err = copy_action(a, sfa, log);
3276                         if (err)
3277                                 return err;
3278                 }
3279         }
3280
3281         if (rem > 0)
3282                 return -EINVAL;
3283
3284         return 0;
3285 }
3286
3287 /* 'key' must be the masked key. */
3288 int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
3289                          const struct sw_flow_key *key,
3290                          struct sw_flow_actions **sfa, bool log)
3291 {
3292         int err;
3293         u32 mpls_label_count = 0;
3294
3295         *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
3296         if (IS_ERR(*sfa))
3297                 return PTR_ERR(*sfa);
3298
3299         if (eth_p_mpls(key->eth.type))
3300                 mpls_label_count = hweight_long(key->mpls.num_labels_mask);
3301
3302         (*sfa)->orig_len = nla_len(attr);
3303         err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
3304                                      key->eth.vlan.tci, mpls_label_count, log);
3305         if (err)
3306                 ovs_nla_free_flow_actions(*sfa);
3307
3308         return err;
3309 }
3310
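/* Translate the internal sample action layout (struct sample_arg
 * followed by the nested actions) back into the userspace-visible
 * OVS_SAMPLE_ATTR_PROBABILITY and OVS_SAMPLE_ATTR_ACTIONS attributes.
 */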
3311 static int sample_action_to_attr(const struct nlattr *attr,
3312                                  struct sk_buff *skb)
3313 {
3314         struct nlattr *start, *ac_start = NULL, *sample_arg;
3315         int err = 0, rem = nla_len(attr);
3316         const struct sample_arg *arg;
3317         struct nlattr *actions;
3318
3319         start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SAMPLE);
3320         if (!start)
3321                 return -EMSGSIZE;
3322
3323         sample_arg = nla_data(attr);
3324         arg = nla_data(sample_arg);
3325         actions = nla_next(sample_arg, &rem);
3326
3327         if (nla_put_u32(skb, OVS_SAMPLE_ATTR_PROBABILITY, arg->probability)) {
3328                 err = -EMSGSIZE;
3329                 goto out;
3330         }
3331
3332         ac_start = nla_nest_start_noflag(skb, OVS_SAMPLE_ATTR_ACTIONS);
3333         if (!ac_start) {
3334                 err = -EMSGSIZE;
3335                 goto out;
3336         }
3337
3338         err = ovs_nla_put_actions(actions, rem, skb);
3339
3340 out:
3341         if (err) {
3342                 nla_nest_cancel(skb, ac_start);
3343                 nla_nest_cancel(skb, start);
3344         } else {
3345                 nla_nest_end(skb, ac_start);
3346                 nla_nest_end(skb, start);
3347         }
3348
3349         return err;
3350 }
3351
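/* Re-emit a copied clone() action as a nested OVS_ACTION_ATTR_CLONE
 * attribute.
 */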
3352 static int clone_action_to_attr(const struct nlattr *attr,
3353                                 struct sk_buff *skb)
3354 {
3355         struct nlattr *start;
3356         int err = 0, rem = nla_len(attr);
3357
3358         start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CLONE);
3359         if (!start)
3360                 return -EMSGSIZE;
3361
3362         err = ovs_nla_put_actions(nla_data(attr), rem, skb);
3363
3364         if (err)
3365                 nla_nest_cancel(skb, start);
3366         else
3367                 nla_nest_end(skb, start);
3368
3369         return err;
3370 }
3371
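/* Translate the internal check_pkt_len() layout (argument struct
 * followed by the two nested action lists) back into the
 * userspace-visible OVS_CHECK_PKT_LEN_ATTR_* attributes.
 */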
3372 static int check_pkt_len_action_to_attr(const struct nlattr *attr,
3373                                         struct sk_buff *skb)
3374 {
3375         struct nlattr *start, *ac_start = NULL;
3376         const struct check_pkt_len_arg *arg;
3377         const struct nlattr *a, *cpl_arg;
3378         int err = 0, rem = nla_len(attr);
3379
3380         start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CHECK_PKT_LEN);
3381         if (!start)
3382                 return -EMSGSIZE;
3383
3384         /* The first nested attribute in 'attr' is always
3385          * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
3386          */
3387         cpl_arg = nla_data(attr);
3388         arg = nla_data(cpl_arg);
3389
3390         if (nla_put_u16(skb, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, arg->pkt_len)) {
3391                 err = -EMSGSIZE;
3392                 goto out;
3393         }
3394
3395         /* Second nested attribute in 'attr' is always
3396          * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
3397          */
3398         a = nla_next(cpl_arg, &rem);
3399         ac_start =  nla_nest_start_noflag(skb,
3400                                           OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
3401         if (!ac_start) {
3402                 err = -EMSGSIZE;
3403                 goto out;
3404         }
3405
3406         err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
3407         if (err) {
3408                 nla_nest_cancel(skb, ac_start);
3409                 goto out;
3410         } else {
3411                 nla_nest_end(skb, ac_start);
3412         }
3413
3414         /* Third nested attribute in 'attr' is always
3415          * OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER.
3416          */
3417         a = nla_next(a, &rem);
3418         ac_start =  nla_nest_start_noflag(skb,
3419                                           OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
3420         if (!ac_start) {
3421                 err = -EMSGSIZE;
3422                 goto out;
3423         }
3424
3425         err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
3426         if (err) {
3427                 nla_nest_cancel(skb, ac_start);
3428                 goto out;
3429         } else {
3430                 nla_nest_end(skb, ac_start);
3431         }
3432
3433         nla_nest_end(skb, start);
3434         return 0;
3435
3436 out:
3437         nla_nest_cancel(skb, start);
3438         return err;
3439 }
3440
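/* Dump a set() action.  Tunnel sets are stored internally as
 * OVS_KEY_ATTR_TUNNEL_INFO and are converted back into netlink tunnel
 * attributes; all other keys are emitted unchanged.
 */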
3441 static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
3442 {
3443         const struct nlattr *ovs_key = nla_data(a);
3444         int key_type = nla_type(ovs_key);
3445         struct nlattr *start;
3446         int err;
3447
3448         switch (key_type) {
3449         case OVS_KEY_ATTR_TUNNEL_INFO: {
3450                 struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
3451                 struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
3452
3453                 start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET);
3454                 if (!start)
3455                         return -EMSGSIZE;
3456
3457                 err =  ip_tun_to_nlattr(skb, &tun_info->key,
3458                                         ip_tunnel_info_opts(tun_info),
3459                                         tun_info->options_len,
3460                                         ip_tunnel_info_af(tun_info), tun_info->mode);
3461                 if (err)
3462                         return err;
3463                 nla_nest_end(skb, start);
3464                 break;
3465         }
3466         default:
3467                 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
3468                         return -EMSGSIZE;
3469                 break;
3470         }
3471
3472         return 0;
3473 }
3474
3475 static int masked_set_action_to_set_action_attr(const struct nlattr *a,
3476                                                 struct sk_buff *skb)
3477 {
3478         const struct nlattr *ovs_key = nla_data(a);
3479         struct nlattr *nla;
3480         size_t key_len = nla_len(ovs_key) / 2;
3481
3482         /* Revert the conversion we did from a non-masked set action to
3483          * masked set action.
3484          */
3485         nla = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET);
3486         if (!nla)
3487                 return -EMSGSIZE;
3488
3489         if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
3490                 return -EMSGSIZE;
3491
3492         nla_nest_end(skb, nla);
3493         return 0;
3494 }
3495
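/* Dump a list of copied actions to 'skb', converting internal-only
 * action representations back to their netlink form as needed.
 */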
3496 int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
3497 {
3498         const struct nlattr *a;
3499         int rem, err;
3500
3501         nla_for_each_attr(a, attr, len, rem) {
3502                 int type = nla_type(a);
3503
3504                 switch (type) {
3505                 case OVS_ACTION_ATTR_SET:
3506                         err = set_action_to_attr(a, skb);
3507                         if (err)
3508                                 return err;
3509                         break;
3510
3511                 case OVS_ACTION_ATTR_SET_TO_MASKED:
3512                         err = masked_set_action_to_set_action_attr(a, skb);
3513                         if (err)
3514                                 return err;
3515                         break;
3516
3517                 case OVS_ACTION_ATTR_SAMPLE:
3518                         err = sample_action_to_attr(a, skb);
3519                         if (err)
3520                                 return err;
3521                         break;
3522
3523                 case OVS_ACTION_ATTR_CT:
3524                         err = ovs_ct_action_to_attr(nla_data(a), skb);
3525                         if (err)
3526                                 return err;
3527                         break;
3528
3529                 case OVS_ACTION_ATTR_CLONE:
3530                         err = clone_action_to_attr(a, skb);
3531                         if (err)
3532                                 return err;
3533                         break;
3534
3535                 case OVS_ACTION_ATTR_CHECK_PKT_LEN:
3536                         err = check_pkt_len_action_to_attr(a, skb);
3537                         if (err)
3538                                 return err;
3539                         break;
3540
3541                 default:
3542                         if (nla_put(skb, type, nla_len(a), nla_data(a)))
3543                                 return -EMSGSIZE;
3544                         break;
3545                 }
3546         }
3547
3548         return 0;
3549 }