arch/sparc/net/bpf_jit_asm_32.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/ptrace.h>

#include "bpf_jit_32.h"

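/* Out-of-line packet-load helpers for the 32-bit SPARC classic BPF JIT.
 * JIT-generated code calls these with the packet offset in r_OFF; the
 * result is returned in r_A (or r_X for the "msh" helper).  The register
 * aliases (r_OFF, r_A, r_X, r_TMP, ...) come from bpf_jit_32.h.  As usual
 * for SPARC assembly, the instruction following a branch or call executes
 * in the delay slot and is indented by one extra space.
 */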
#define SAVE_SZ         96      /* Minimal stack frame for calling C helpers */
#define SCRATCH_OFF     72      /* Offset of the on-stack scratch buffer */
#define BE_PTR(label)   be label        /* 32-bit pointers: a plain "be" suffices */
#define SIGN_EXTEND(reg)                /* 32-bit offsets need no sign extension */

#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */

        .text
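/* Load a 32-bit word from the packet at byte offset r_OFF into r_A.
 * Positive offsets inside the linear header are read directly from
 * r_SKB_DATA (falling back to byte loads when unaligned); everything
 * else goes through the slow paths below.
 */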
        .globl  bpf_jit_load_word
bpf_jit_load_word:
        cmp     r_OFF, 0
        bl      bpf_slow_path_word_neg
         nop
        .globl  bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
        sub     r_HEADLEN, r_OFF, r_TMP
        cmp     r_TMP, 3
        ble     bpf_slow_path_word
         add    r_SKB_DATA, r_OFF, r_TMP
        andcc   r_TMP, 3, %g0
        bne     load_word_unaligned
         nop
        retl
         ld     [r_TMP], r_A
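/* Unaligned case: assemble the big-endian word one byte at a time. */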
load_word_unaligned:
        ldub    [r_TMP + 0x0], r_OFF
        ldub    [r_TMP + 0x1], r_TMP2
        sll     r_OFF, 8, r_OFF
        or      r_OFF, r_TMP2, r_OFF
        ldub    [r_TMP + 0x2], r_TMP2
        sll     r_OFF, 8, r_OFF
        or      r_OFF, r_TMP2, r_OFF
        ldub    [r_TMP + 0x3], r_TMP2
        sll     r_OFF, 8, r_OFF
        retl
         or     r_OFF, r_TMP2, r_A

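/* Load a 16-bit halfword from the packet at r_OFF into r_A. */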
        .globl  bpf_jit_load_half
bpf_jit_load_half:
        cmp     r_OFF, 0
        bl      bpf_slow_path_half_neg
         nop
        .globl  bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
        sub     r_HEADLEN, r_OFF, r_TMP
        cmp     r_TMP, 1
        ble     bpf_slow_path_half
         add    r_SKB_DATA, r_OFF, r_TMP
        andcc   r_TMP, 1, %g0
        bne     load_half_unaligned
         nop
        retl
         lduh   [r_TMP], r_A
load_half_unaligned:
        ldub    [r_TMP + 0x0], r_OFF
        ldub    [r_TMP + 0x1], r_TMP2
        sll     r_OFF, 8, r_OFF
        retl
         or     r_OFF, r_TMP2, r_A

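/* Load a single byte from the packet at r_OFF into r_A. */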
        .globl  bpf_jit_load_byte
bpf_jit_load_byte:
        cmp     r_OFF, 0
        bl      bpf_slow_path_byte_neg
         nop
        .globl  bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
        cmp     r_OFF, r_HEADLEN
        bge     bpf_slow_path_byte
         nop
        retl
         ldub   [r_SKB_DATA + r_OFF], r_A

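/* BPF_LDX | BPF_B | BPF_MSH: X = (packet[r_OFF] & 0xf) << 2, the
 * classic BPF idiom for extracting an IPv4 header length.
 */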
        .globl  bpf_jit_load_byte_msh
bpf_jit_load_byte_msh:
        cmp     r_OFF, 0
        bl      bpf_slow_path_byte_msh_neg
         nop
        .globl  bpf_jit_load_byte_msh_positive_offset
bpf_jit_load_byte_msh_positive_offset:
        cmp     r_OFF, r_HEADLEN
        bge     bpf_slow_path_byte_msh
         nop
        ldub    [r_SKB_DATA + r_OFF], r_OFF
        and     r_OFF, 0xf, r_OFF
        retl
         sll    r_OFF, 2, r_X

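/* Slow path: the requested bytes are not all within the linear header,
 * so open a register window and let skb_copy_bits() copy LEN bytes
 * starting at r_OFF into the scratch buffer on the stack.  The
 * condition codes are set from its return value before the window is
 * restored, so the caller can branch to bpf_error on failure.
 */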
#define bpf_slow_path_common(LEN)       \
        save    %sp, -SAVE_SZ, %sp;     \
        mov     %i0, %o0;               \
        mov     r_OFF, %o1;             \
        add     %fp, SCRATCH_OFF, %o2;  \
        call    skb_copy_bits;          \
         mov    (LEN), %o3;             \
        cmp     %o0, 0;                 \
        restore;

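/* In each slow path below, the load from the scratch buffer sits in the
 * delay slot of the error branch, so it executes whether or not
 * skb_copy_bits() succeeded; on failure the result is simply discarded
 * and control goes to bpf_error.
 */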
bpf_slow_path_word:
        bpf_slow_path_common(4)
        bl      bpf_error
         ld     [%sp + SCRATCH_OFF], r_A
        retl
         nop
bpf_slow_path_half:
        bpf_slow_path_common(2)
        bl      bpf_error
         lduh   [%sp + SCRATCH_OFF], r_A
        retl
         nop
bpf_slow_path_byte:
        bpf_slow_path_common(1)
        bl      bpf_error
         ldub   [%sp + SCRATCH_OFF], r_A
        retl
         nop
bpf_slow_path_byte_msh:
        bpf_slow_path_common(1)
        bl      bpf_error
         ldub   [%sp + SCRATCH_OFF], r_OFF
        and     r_OFF, 0xf, r_OFF
        retl
         sll    r_OFF, 2, r_X

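/* Negative offsets refer to the SKF_LL_OFF/SKF_NET_OFF ranges and are
 * resolved by bpf_internal_load_pointer_neg_helper(), which returns a
 * pointer to LEN readable bytes or NULL.  The pointer is copied to
 * r_TMP before the register window is restored; NULL means bpf_error.
 */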
#define bpf_negative_common(LEN)                        \
        save    %sp, -SAVE_SZ, %sp;                     \
        mov     %i0, %o0;                               \
        mov     r_OFF, %o1;                             \
        SIGN_EXTEND(%o1);                               \
        call    bpf_internal_load_pointer_neg_helper;   \
         mov    (LEN), %o2;                             \
        mov     %o0, r_TMP;                             \
        cmp     %o0, 0;                                 \
        BE_PTR(bpf_error);                              \
         restore;

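/* Offsets more negative than SKF_MAX_NEG_OFF are invalid; reject them
 * before consulting the helper.
 */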
bpf_slow_path_word_neg:
        sethi   %hi(SKF_MAX_NEG_OFF), r_TMP
        cmp     r_OFF, r_TMP
        bl      bpf_error
         nop
        .globl  bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
        bpf_negative_common(4)
        andcc   r_TMP, 3, %g0
        bne     load_word_unaligned
         nop
        retl
         ld     [r_TMP], r_A

bpf_slow_path_half_neg:
        sethi   %hi(SKF_MAX_NEG_OFF), r_TMP
        cmp     r_OFF, r_TMP
        bl      bpf_error
         nop
        .globl  bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
        bpf_negative_common(2)
        andcc   r_TMP, 1, %g0
        bne     load_half_unaligned
         nop
        retl
         lduh   [r_TMP], r_A

bpf_slow_path_byte_neg:
        sethi   %hi(SKF_MAX_NEG_OFF), r_TMP
        cmp     r_OFF, r_TMP
        bl      bpf_error
         nop
        .globl  bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
        bpf_negative_common(1)
        retl
         ldub   [r_TMP], r_A

bpf_slow_path_byte_msh_neg:
        sethi   %hi(SKF_MAX_NEG_OFF), r_TMP
        cmp     r_OFF, r_TMP
        bl      bpf_error
         nop
        .globl  bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
        bpf_negative_common(1)
        ldub    [r_TMP], r_OFF
        and     r_OFF, 0xf, r_OFF
        retl
         sll    r_OFF, 2, r_X

bpf_error:
        /* Make the JIT program return zero.  The JIT prologue
         * stores away the original %o7 into r_saved_O7.  The
         * normal leaf function return is to use "retl", which
         * would evaluate to "jmpl %o7 + 8, %g0", but we want to
         * return through the saved value, hence the sequence you
         * see here.
         */
        jmpl    r_saved_O7 + 8, %g0
         clr    %o0