# SPDX-License-Identifier: GPL-2.0
#
# veth1 <---> veth2   veth3 <---> veth4 (the top route)
# veth5 <---> veth6   veth7 <---> veth8 (the bottom route)
#
# each vethN gets IPv[4|6]_N address
#
# all tests test pings from IPv*_SRC to IPv*_DST
#
# by default, routes are configured to allow packets to go
# IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
#
# a GRE device is installed in NS3 with IPv*_GRE, and
# NS1/NS2 are configured to route packets to IPv*_GRE via IP*_8
# (the bottom route)
#
# 1. routes NS2->IPv*_DST are brought down, so the only way a ping
#    from IP*_SRC to IP*_DST can work is via IPv*_GRE
#
# 2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
#     that encaps the packets with an IP/GRE header to route to IPv*_GRE
#
#     ping: SRC->[encap at veth1:egress]->GRE:decap->DST
#     ping replies go DST->SRC directly
#
# 2b. in an ingress test, a bpf LWT_IN program is installed on veth2
#     that encaps the packets with an IP/GRE header to route to IPv*_GRE
#
#     ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
#     ping replies go DST->SRC directly
# network namespace and device manipulation below requires root
if [[ $EUID -ne 0 ]]; then
	echo "This script must be run as root"
	exit 1
fi
# unique per-run namespace names so concurrent runs do not collide
readonly NS1="ns1-$(mktemp -u XXXXXX)"
readonly NS2="ns2-$(mktemp -u XXXXXX)"
readonly NS3="ns3-$(mktemp -u XXXXXX)"

# IPv4 addresses: vethN gets 172.16.N.100/24
readonly IPv4_1="172.16.1.100"
readonly IPv4_2="172.16.2.100"
readonly IPv4_3="172.16.3.100"
readonly IPv4_4="172.16.4.100"
readonly IPv4_5="172.16.5.100"
readonly IPv4_6="172.16.6.100"
readonly IPv4_7="172.16.7.100"
readonly IPv4_8="172.16.8.100"
readonly IPv4_GRE="172.16.16.100"

readonly IPv4_SRC=$IPv4_1
readonly IPv4_DST=$IPv4_4

# IPv6 addresses: vethN gets fb0N::1/128
readonly IPv6_1="fb01::1"
readonly IPv6_2="fb02::1"
readonly IPv6_3="fb03::1"
readonly IPv6_4="fb04::1"
readonly IPv6_5="fb05::1"
readonly IPv6_6="fb06::1"
readonly IPv6_7="fb07::1"
readonly IPv6_8="fb08::1"
readonly IPv6_GRE="fb10::1"

readonly IPv6_SRC=$IPv6_1
readonly IPv6_DST=$IPv6_4

# global result counters, updated by process_test_results() and
# reported by print_test_summary_and_exit()
TEST_STATUS=0
TESTS_SUCCEEDED=0
TESTS_FAILED=0
# Record the outcome of the last test: bumps TESTS_SUCCEEDED when
# TEST_STATUS is 0, TESTS_FAILED otherwise, and prints PASS/FAIL.
process_test_results()
{
	if [[ "${TEST_STATUS}" -eq 0 ]] ; then
		echo "PASS"
		TESTS_SUCCEEDED=$((TESTS_SUCCEEDED+1))
	else
		echo "FAIL"
		TESTS_FAILED=$((TESTS_FAILED+1))
	fi
}
# Print pass/fail totals and exit 0 iff no test failed.
print_test_summary_and_exit()
{
	echo "passed tests: ${TESTS_SUCCEEDED}"
	echo "failed tests: ${TESTS_FAILED}"
	if [ "${TESTS_FAILED}" -eq "0" ] ; then
		exit 0
	else
		exit 1
	fi
}
set -e  # exit on error

# create devices and namespaces
ip netns add "${NS1}"
ip netns add "${NS2}"
ip netns add "${NS3}"

# rp_filter gets confused by what these tests are doing, so disable it
ip netns exec "${NS1}" sysctl -wq net.ipv4.conf.all.rp_filter=0
ip netns exec "${NS2}" sysctl -wq net.ipv4.conf.all.rp_filter=0
ip netns exec "${NS3}" sysctl -wq net.ipv4.conf.all.rp_filter=0
ip netns exec "${NS1}" sysctl -wq net.ipv4.conf.default.rp_filter=0
ip netns exec "${NS2}" sysctl -wq net.ipv4.conf.default.rp_filter=0
ip netns exec "${NS3}" sysctl -wq net.ipv4.conf.default.rp_filter=0

# two veth pairs per route: 1-2/3-4 (top), 5-6/7-8 (bottom)
ip link add veth1 type veth peer name veth2
ip link add veth3 type veth peer name veth4
ip link add veth5 type veth peer name veth6
ip link add veth7 type veth peer name veth8

# NS2 is the router between NS1 and NS3
ip netns exec "${NS2}" sysctl -wq net.ipv4.ip_forward=1
ip netns exec "${NS2}" sysctl -wq net.ipv6.conf.all.forwarding=1

ip link set veth1 netns "${NS1}"
ip link set veth2 netns "${NS2}"
ip link set veth3 netns "${NS2}"
ip link set veth4 netns "${NS3}"
ip link set veth5 netns "${NS1}"
ip link set veth6 netns "${NS2}"
ip link set veth7 netns "${NS2}"
ip link set veth8 netns "${NS3}"
# optional: enslave the test devices in NS1/NS2 to a "red" VRF;
# table 1001 gets unreachable defaults so only explicit routes match
if [ -n "${VRF}" ] ; then
	ip -netns ${NS1} link add red type vrf table 1001
	ip -netns ${NS1} link set red up
	ip -netns ${NS1} route add table 1001 unreachable default metric 8192
	ip -netns ${NS1} -6 route add table 1001 unreachable default metric 8192
	ip -netns ${NS1} link set veth1 vrf red
	ip -netns ${NS1} link set veth5 vrf red

	ip -netns ${NS2} link add red type vrf table 1001
	ip -netns ${NS2} link set red up
	ip -netns ${NS2} route add table 1001 unreachable default metric 8192
	ip -netns ${NS2} -6 route add table 1001 unreachable default metric 8192
	ip -netns ${NS2} link set veth2 vrf red
	ip -netns ${NS2} link set veth3 vrf red
	ip -netns ${NS2} link set veth6 vrf red
	ip -netns ${NS2} link set veth7 vrf red
fi
# configure addresses: the top route (1-2-3-4)
ip -netns ${NS1}    addr add ${IPv4_1}/24  dev veth1
ip -netns ${NS2}    addr add ${IPv4_2}/24  dev veth2
ip -netns ${NS2}    addr add ${IPv4_3}/24  dev veth3
ip -netns ${NS3}    addr add ${IPv4_4}/24  dev veth4
ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1
ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2
ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3
ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4

# configure addresses: the bottom route (5-6-7-8)
ip -netns ${NS1}    addr add ${IPv4_5}/24  dev veth5
ip -netns ${NS2}    addr add ${IPv4_6}/24  dev veth6
ip -netns ${NS2}    addr add ${IPv4_7}/24  dev veth7
ip -netns ${NS3}    addr add ${IPv4_8}/24  dev veth8
ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5
ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6
ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8

ip -netns ${NS1} link set dev veth1 up
ip -netns ${NS2} link set dev veth2 up
ip -netns ${NS2} link set dev veth3 up
ip -netns ${NS3} link set dev veth4 up
ip -netns ${NS1} link set dev veth5 up
ip -netns ${NS2} link set dev veth6 up
ip -netns ${NS2} link set dev veth7 up
ip -netns ${NS3} link set dev veth8 up
# configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
# the bottom route to specific bottom addresses

# NS1
# top route
ip -netns ${NS1}    route add ${IPv4_2}/32  dev veth1 ${VRF}
ip -netns ${NS1}    route add default dev veth1 via ${IPv4_2} ${VRF}  # go top by default
ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1 ${VRF}
ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2} ${VRF}  # go top by default
# bottom route
ip -netns ${NS1}    route add ${IPv4_6}/32  dev veth5 ${VRF}
ip -netns ${NS1}    route add ${IPv4_7}/32  dev veth5 via ${IPv4_6} ${VRF}
ip -netns ${NS1}    route add ${IPv4_8}/32  dev veth5 via ${IPv4_6} ${VRF}
ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5 ${VRF}
ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6} ${VRF}
ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6} ${VRF}

# NS2
# top route
ip -netns ${NS2}    route add ${IPv4_1}/32  dev veth2 ${VRF}
ip -netns ${NS2}    route add ${IPv4_4}/32  dev veth3 ${VRF}
ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2 ${VRF}
ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3 ${VRF}
# bottom route
ip -netns ${NS2}    route add ${IPv4_5}/32  dev veth6 ${VRF}
ip -netns ${NS2}    route add ${IPv4_8}/32  dev veth7 ${VRF}
ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6 ${VRF}
ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7 ${VRF}

# NS3
# top route
ip -netns ${NS3}    route add ${IPv4_3}/32  dev veth4
ip -netns ${NS3}    route add ${IPv4_1}/32  dev veth4 via ${IPv4_3}
ip -netns ${NS3}    route add ${IPv4_2}/32  dev veth4 via ${IPv4_3}
ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
# bottom route
ip -netns ${NS3}    route add ${IPv4_7}/32  dev veth8
ip -netns ${NS3}    route add ${IPv4_5}/32  dev veth8 via ${IPv4_7}
ip -netns ${NS3}    route add ${IPv4_6}/32  dev veth8 via ${IPv4_7}
ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}
# configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
ip -netns ${NS3} link set gre_dev up
ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6} ${VRF}
ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8} ${VRF}

# configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
ip -netns ${NS3} link set gre6_dev up
ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev
ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}

# capture file used by the TCP/GSO tests below
TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)

sleep 1  # reduce flakiness
# remove the capture file and tear down the test namespaces;
# deleting a namespace also destroys the devices moved into it
if [ -f ${TMPFILE} ] ; then
	rm ${TMPFILE}
fi

ip netns del ${NS1} 2> /dev/null
ip netns del ${NS2} 2> /dev/null
ip netns del ${NS3} 2> /dev/null
# Negative-test helper: delete the NS1/NS2 routes towards the GRE
# endpoints so encapsulated packets have nowhere to go.
remove_routes_to_gredev()
{
	ip -netns ${NS1} route del ${IPv4_GRE} dev veth5 ${VRF}
	ip -netns ${NS2} route del ${IPv4_GRE} dev veth7 ${VRF}
	ip -netns ${NS1} -6 route del ${IPv6_GRE}/128 dev veth5 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_GRE}/128 dev veth7 ${VRF}
}
# Negative-test helper: explicitly blackhole the GRE endpoints in
# NS1/NS2 with unreachable routes.
add_unreachable_routes_to_gredev()
{
	ip -netns ${NS1} route add unreachable ${IPv4_GRE}/32 ${VRF}
	ip -netns ${NS2} route add unreachable ${IPv4_GRE}/32 ${VRF}
	ip -netns ${NS1} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
	ip -netns ${NS2} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
}
# Ping IPv*_DST from NS1 and compare the outcome with what the caller
# expects: $1 = "IPv4"|"IPv6", $2 = expected result (0 = ping works,
# 1 = ping fails).  Sets TEST_STATUS=1 on mismatch or unknown proto.
test_ping()
{
	local -r PROTO=$1
	local -r EXPECTED=$2
	local RET=0

	if [ "${PROTO}" == "IPv4" ] ; then
		ip netns exec ${NS1} ping  -c 1 -W 1 -I veth1 ${IPv4_DST} 2>&1 > /dev/null
		RET=$?
	elif [ "${PROTO}" == "IPv6" ] ; then
		ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} 2>&1 > /dev/null
		RET=$?
	else
		echo "    test_ping: unknown PROTO: ${PROTO}"
		TEST_STATUS=1
	fi

	# normalize any ping failure code to 1 for the comparison below
	if [ "0" != "${RET}" ]; then
		RET=1
	fi

	if [ "${EXPECTED}" != "${RET}" ] ; then
		echo "    test_ping failed: expected: ${EXPECTED}; got ${RET}"
		TEST_STATUS=1
	fi
}
# Send a larger-than-MTU TCP payload from NS1 to NS3 and verify all
# bytes arrive, exercising GSO through the encap path.
# $1 = "IPv4"|"IPv6".  Sets TEST_STATUS=1 on failure.
test_gso()
{
	local -r PROTO=$1
	local -r PKT_SZ=5000
	local IP_DST=""
	: > ${TMPFILE}  # trim the capture file

	# check that nc is present
	command -v nc >/dev/null 2>&1 || \
		{ echo >&2 "nc is not available: skipping TSO tests"; return; }

	# listen on port 9000, capture TCP into $TMPFILE
	if [ "${PROTO}" == "IPv4" ] ; then
		IP_DST=${IPv4_DST}
		ip netns exec ${NS3} bash -c \
			"nc -4 -l -p 9000 > ${TMPFILE} &"
	elif [ "${PROTO}" == "IPv6" ] ; then
		IP_DST=${IPv6_DST}
		ip netns exec ${NS3} bash -c \
			"nc -6 -l -p 9000 > ${TMPFILE} &"
	else
		echo "    test_gso: unknown PROTO: ${PROTO}"
		TEST_STATUS=1
	fi
	sleep 1  # let nc start listening

	# send a packet larger than MTU
	ip netns exec ${NS1} bash -c \
		"dd if=/dev/zero bs=$PKT_SZ count=1 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
	sleep 2  # let the packet get delivered

	# verify we received all expected bytes
	SZ=$(stat -c %s ${TMPFILE})
	if [ "$SZ" != "$PKT_SZ" ] ; then
		echo "    test_gso failed: ${PROTO}"
		TEST_STATUS=1
	fi
}
# Egress test: attach a BPF LWT_XMIT encap program on veth1 in NS1 and
# verify pings to IPv*_DST succeed only via the GRE detour.
# $1 = encap outer header family, "IPv4"|"IPv6".
# NOTE(review): upstream rebuilds the topology (setup/cleanup) around each
# test invocation; confirm ordering when restoring the driver section.
test_egress()
{
	local -r ENCAP=$1
	echo "starting egress ${ENCAP} encap test ${VRF}"

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, ping fails
	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
	else
		echo "    unknown encap ${ENCAP}"
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# skip GSO tests with VRF: VRF routing needs properly assigned
	# source IP/device, which is easy to do with ping and hard with dd/nc.
	if [ -z "${VRF}" ] ; then
		test_gso IPv4
		test_gso IPv6
	fi

	# a negative test: remove routes to GRE devices: ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	process_test_results
}
# Ingress test: attach a BPF LWT_IN encap program on veth2 in NS2 and
# verify pings to IPv*_DST succeed only via the GRE detour.
# $1 = encap outer header family, "IPv4"|"IPv6".
# NOTE(review): upstream rebuilds the topology (setup/cleanup) around each
# test invocation; confirm ordering when restoring the driver section.
test_ingress()
{
	local -r ENCAP=$1
	echo "starting ingress ${ENCAP} encap test ${VRF}"

	# need to wait a bit for IPv6 to autoconf, otherwise
	# ping6 sometimes fails with "unable to bind to address"

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, pings fail
	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
	else
		echo "FAIL: unknown encap ${ENCAP}"
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# a negative test: remove routes to GRE devices: ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	process_test_results
}
467 print_test_summary_and_exit