selftests/bpf: Use __BYTE_ORDER__
author Ilya Leoshkevich <iii@linux.ibm.com>
Tue, 26 Oct 2021 01:08:28 +0000 (03:08 +0200)
committer Andrii Nakryiko <andrii@kernel.org>
Tue, 26 Oct 2021 03:39:42 +0000 (20:39 -0700)
Use the compiler-defined __BYTE_ORDER__ instead of the libc-defined
__BYTE_ORDER for consistency.
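
For illustration only (not part of this patch), a minimal standalone C sketch
contrasting the two families of macros: the libc macros come from <endian.h>,
while __BYTE_ORDER__ and the __ORDER_*_ENDIAN__ values are predefined by GCC
and Clang and work even where libc headers are unavailable:

    #include <endian.h>  /* libc-defined: __BYTE_ORDER, __LITTLE_ENDIAN, __BIG_ENDIAN */
    #include <stdio.h>

    int main(void)
    {
    /* Compiler-defined macros: no header required. */
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            puts("compiler: little endian");
    #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            puts("compiler: big endian");
    #else
    #error "Unrecognized __BYTE_ORDER__"
    #endif

    /* libc-defined macros: require <endian.h>. */
    #if __BYTE_ORDER == __LITTLE_ENDIAN
            puts("libc: little endian");
    #else
            puts("libc: big endian");
    #endif
            return 0;
    }

The compiler-provided values can also be inspected with
"gcc -dM -E - < /dev/null | grep BYTE_ORDER".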

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211026010831.748682-4-iii@linux.ibm.com
tools/testing/selftests/bpf/prog_tests/btf_endian.c
tools/testing/selftests/bpf/test_sysctl.c
tools/testing/selftests/bpf/verifier/ctx_skb.c
tools/testing/selftests/bpf/verifier/lwt.c
tools/testing/selftests/bpf/verifier/perf_event_sample_period.c

diff --git a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c
index 2653cc4..8afbf3d 100644
@@ -7,12 +7,12 @@
 #include <bpf/btf.h>
 
 void test_btf_endian() {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        enum btf_endianness endian = BTF_LITTLE_ENDIAN;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        enum btf_endianness endian = BTF_BIG_ENDIAN;
 #else
-#error "Unrecognized __BYTE_ORDER"
+#error "Unrecognized __BYTE_ORDER__"
 #endif
        enum btf_endianness swap_endian = 1 - endian;
        struct btf *btf = NULL, *swap_btf = NULL;
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
index a20a919..a3bb6d3 100644
@@ -124,7 +124,7 @@ static struct sysctl_test tests[] = {
                .descr = "ctx:write sysctl:write read ok narrow",
                .insns = {
                        /* u64 w = (u16)write & 1; */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
                        BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
                                    offsetof(struct bpf_sysctl, write)),
 #else
@@ -184,7 +184,7 @@ static struct sysctl_test tests[] = {
                .descr = "ctx:file_pos sysctl:read read ok narrow",
                .insns = {
                        /* If (file_pos == X) */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
                        BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
                                    offsetof(struct bpf_sysctl, file_pos)),
 #else
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index 9e1a30b..83cecfb 100644
        "check skb->hash byte load permitted",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, hash)),
 #else
        "check skb->hash byte load permitted 3",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, hash) + 3),
 #else
        "check skb->hash half load permitted",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, hash)),
 #else
        "check skb->hash half load permitted 2",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, hash) + 2),
 #else
        "check skb->hash half load not permitted, unaligned 1",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, hash) + 1),
 #else
        "check skb->hash half load not permitted, unaligned 3",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, hash) + 3),
 #else
        "check skb->data half load not permitted",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, data)),
 #else
diff --git a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c
index 2cab6a3..5c8944d 100644
        "check skb->tc_classid half load not permitted for lwt prog",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, tc_classid)),
 #else
diff --git a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
index 471c1a5..d8a9b1a 100644
@@ -2,7 +2,7 @@
        "check bpf_perf_event_data->sample_period byte load permitted",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -18,7 +18,7 @@
        "check bpf_perf_event_data->sample_period half load permitted",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -34,7 +34,7 @@
        "check bpf_perf_event_data->sample_period word load permitted",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else