/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions
 *
 * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

        .text
        .arch           armv8-a+crypto

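        /*
         * Register aliases: dga/dgb hold the eight 32-bit state words
         * (abcd in v20, efgh in v21), dg0/dg1 are the working copies fed
         * to sha256h/sha256h2, dg2 preserves the abcd half that sha256h
         * overwrites, and t0/t1 hold schedule words with the round
         * constants already added.
         */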
        dga             .req    q20
        dgav            .req    v20
        dgb             .req    q21
        dgbv            .req    v21

        t0              .req    v22
        t1              .req    v23

        dg0q            .req    q24
        dg0v            .req    v24
        dg1q            .req    q25
        dg1v            .req    v25
        dg2q            .req    q26
        dg2v            .req    v26

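        /*
         * add_only: perform four rounds of SHA-256 on the working digest,
         * consuming the schedule/constant sum waiting in t0 or t1 (picked
         * by \ev), and, unless \s0 is blank, precompute the sum for the
         * next four rounds into the other temporary so that the add
         * overlaps with the sha256h/sha256h2 instructions.
         */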
        .macro          add_only, ev, rc, s0
        mov             dg2v.16b, dg0v.16b
        .ifeq           \ev
        add             t1.4s, v\s0\().4s, \rc\().4s
        sha256h         dg0q, dg1q, t0.4s
        sha256h2        dg1q, dg2q, t0.4s
        .else
        .ifnb           \s0
        add             t0.4s, v\s0\().4s, \rc\().4s
        .endif
        sha256h         dg0q, dg1q, t1.4s
        sha256h2        dg1q, dg2q, t1.4s
        .endif
        .endm

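        /*
         * add_update: same as add_only, but also expand the message
         * schedule, producing the next four schedule words in v\s0 from
         * v\s1, v\s2 and v\s3 using sha256su0/sha256su1.
         */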
        .macro          add_update, ev, rc, s0, s1, s2, s3
        sha256su0       v\s0\().4s, v\s1\().4s
        add_only        \ev, \rc, \s1
        sha256su1       v\s0\().4s, v\s2\().4s, v\s3\().4s
        .endm

        /*
         * The SHA-256 round constants
         */
        .section        ".rodata", "a"
        .align          4
.Lsha2_rcon:
        .word           0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
        .word           0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
        .word           0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
        .word           0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
        .word           0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
        .word           0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
        .word           0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
        .word           0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
        .word           0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
        .word           0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
        .word           0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
        .word           0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
        .word           0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
        .word           0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
        .word           0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
        .word           0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2

        /*
         * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
         *                        int blocks)
         */
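        /*
         * x0: pointer to the sha256_ce_state (digest, count, finalize flag)
         * x1: pointer to the input data
         * x2: number of 64-byte blocks to process
         */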
        .text
SYM_FUNC_START(sha2_ce_transform)
        frame_push      3

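        /* stash the arguments in callee-saved registers so they survive
         * a voluntary NEON yield below */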
        mov             x19, x0
        mov             x20, x1
        mov             x21, x2

        /* load round constants */
0:      adr_l           x8, .Lsha2_rcon
        ld1             { v0.4s- v3.4s}, [x8], #64
        ld1             { v4.4s- v7.4s}, [x8], #64
        ld1             { v8.4s-v11.4s}, [x8], #64
        ld1             {v12.4s-v15.4s}, [x8]

        /* load state */
        ld1             {dgav.4s, dgbv.4s}, [x19]

        /* load sha256_ce_state::finalize */
        ldr_l           w4, sha256_ce_offsetof_finalize, x4
        ldr             w4, [x19, x4]
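        /*
         * finalize is nonzero when the input size is a round multiple of
         * the block size and this function should append the padding
         * block itself (see label 3 below)
         */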

        /* load input */
1:      ld1             {v16.4s-v19.4s}, [x20], #64
        sub             w21, w21, #1

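        /*
         * SHA-256 treats the input as big-endian 32-bit words, so
         * byte-swap each word when running little-endian
         */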
CPU_LE( rev32           v16.16b, v16.16b        )
CPU_LE( rev32           v17.16b, v17.16b        )
CPU_LE( rev32           v18.16b, v18.16b        )
CPU_LE( rev32           v19.16b, v19.16b        )

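        /*
         * Run the 64 rounds on a working copy of the state: 16
         * invocations of add_update/add_only at four rounds each, then
         * add the result back into the state.
         */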
2:      add             t0.4s, v16.4s, v0.4s
        mov             dg0v.16b, dgav.16b
        mov             dg1v.16b, dgbv.16b

        add_update      0,  v1, 16, 17, 18, 19
        add_update      1,  v2, 17, 18, 19, 16
        add_update      0,  v3, 18, 19, 16, 17
        add_update      1,  v4, 19, 16, 17, 18

        add_update      0,  v5, 16, 17, 18, 19
        add_update      1,  v6, 17, 18, 19, 16
        add_update      0,  v7, 18, 19, 16, 17
        add_update      1,  v8, 19, 16, 17, 18

        add_update      0,  v9, 16, 17, 18, 19
        add_update      1, v10, 17, 18, 19, 16
        add_update      0, v11, 18, 19, 16, 17
        add_update      1, v12, 19, 16, 17, 18

        add_only        0, v13, 17
        add_only        1, v14, 18
        add_only        0, v15, 19
        add_only        1

        /* update state */
        add             dgav.4s, dgav.4s, dg0v.4s
        add             dgbv.4s, dgbv.4s, dg1v.4s

        /* handled all input blocks? */
        cbz             w21, 3f

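        /*
         * If the scheduler needs the CPU, store the partial digest, yield
         * the NEON unit and restart from 0b, which reloads the round
         * constants and the state after the yield.
         */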
        if_will_cond_yield_neon
        st1             {dgav.4s, dgbv.4s}, [x19]
        do_cond_yield_neon
        b               0b
        endif_yield_neon

        b               1b

        /*
         * Final block: add padding and total bit count.
         * Skip if the input size was not a round multiple of the block
         * size; the padding is handled by the C code in that case.
         */
3:      cbz             x4, 4f
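        /*
         * Build the padding block directly in v16-v19: a 0x80 terminator
         * word in v16, zeroes in between, and the 64-bit bit count (high
         * word first) in the last two words of v19.  Branching back to 2b
         * skips the byte swap, and clearing x4 makes the loop fall
         * through to 4f once this block has been processed.
         */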
        ldr_l           w4, sha256_ce_offsetof_count, x4
        ldr             x4, [x19, x4]
        movi            v17.2d, #0
        mov             x8, #0x80000000
        movi            v18.2d, #0
        ror             x7, x4, #29             // ror(lsl(x4, 3), 32)
        fmov            d16, x8
        mov             x4, #0
        mov             v19.d[0], xzr
        mov             v19.d[1], x7
        b               2b

        /* store new state */
4:      st1             {dgav.4s, dgbv.4s}, [x19]
        frame_pop
        ret
SYM_FUNC_END(sha2_ce_transform)