1 #!/usr/bin/env perl
2 # SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 #
4 # Copyright (C) 2017-2018 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved.
5 # Copyright (C) 2017-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
6 # Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved.
7 #
8 # This code is taken from the OpenSSL project but the author, Andy Polyakov,
9 # has relicensed it under the licenses specified in the SPDX header above.
10 # The original headers, including the original license headers, are
11 # included below for completeness.
12 #
13 # ====================================================================
14 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
15 # project. The module is, however, dual licensed under OpenSSL and
16 # CRYPTOGAMS licenses depending on where you obtain it. For further
17 # details see http://www.openssl.org/~appro/cryptogams/.
18 # ====================================================================
19 #
20 # This module implements Poly1305 hash for x86_64.
21 #
22 # March 2015
23 #
24 # Initial release.
25 #
26 # December 2016
27 #
28 # Add AVX512F+VL+BW code path.
29 #
30 # November 2017
31 #
32 # Convert the AVX512F+VL+BW code path to pure AVX512F, so that it can
33 # be executed even on Knights Landing. The trigger for the change was
34 # the observation that AVX512 code paths can negatively affect overall
35 # Skylake-X system performance. Since we are likely to suppress the
36 # AVX512F capability flag [at least on Skylake-X], the conversion serves
37 # as a kind of "investment protection". Note that the next *lake
38 # processor, Cannon Lake, has an AVX512IFMA code path to execute...
39 #
40 # Numbers are cycles per processed byte with poly1305_blocks alone,
41 # measured with rdtsc at fixed clock frequency.
42 #
43 #               IALU/gcc-4.8(*) AVX(**)         AVX2    AVX-512
44 # P4            4.46/+120%      -
45 # Core 2        2.41/+90%       -
46 # Westmere      1.88/+120%      -
47 # Sandy Bridge  1.39/+140%      1.10
48 # Haswell       1.14/+175%      1.11            0.65
49 # Skylake[-X]   1.13/+120%      0.96            0.51    [0.35]
50 # Silvermont    2.83/+95%       -
51 # Knights L     3.60/?          1.65            1.10    0.41(***)
52 # Goldmont      1.70/+180%      -
53 # VIA Nano      1.82/+150%      -
54 # Sledgehammer  1.38/+160%      -
55 # Bulldozer     2.30/+130%      0.97
56 # Ryzen         1.15/+200%      1.08            1.18
57 #
58 # (*)   improvement coefficients relative to clang are more modest, at
59 #       ~50% on most processors; in both cases the comparison is against
60 #       __int128 code;
61 # (**)  an SSE2 implementation was attempted, but among non-AVX processors
62 #       it beat the integer-only code only on older Intel P4 and Core, by
63 #       30-50% (the newer the processor, the smaller the gain), while being
64 #       slower on contemporary ones, e.g. almost 2x slower on Atom; as the
65 #       former are naturally disappearing, SSE2 is deemed unnecessary;
66 # (***) strangely enough, performance seems to vary from core to core;
67 #       the listed result is the best case;
68
69 $flavour = shift;
70 $output  = shift;
71 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
72
73 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
74 $kernel=0; $kernel=1 if (!$flavour && !$output);
75
76 if (!$kernel) {
77         $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
78         ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
79         ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
80         die "can't locate x86_64-xlate.pl";
81
82         open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
83         *STDOUT=*OUT;
84
85         if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
86             =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
87                 $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
88         }
89
90         if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
91             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
92                 $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
93                 $avx += 1 if ($1==2.11 && $2>=8);
94         }
95
96         if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
97             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
98                 $avx = ($1>=10) + ($1>=11);
99         }
100
101         if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
102                 $avx = ($2>=3.0) + ($2>3.0);
103         }
104 } else {
105         $avx = 4; # The kernel uses ifdefs for this.
106 }
107
108 sub declare_function() {
109         my ($name, $align, $nargs) = @_;
110         if($kernel) {
111                 $code .= ".align $align\n";
112                 $code .= "SYM_FUNC_START($name)\n";
113                 $code .= ".L$name:\n";
114         } else {
115                 $code .= ".globl        $name\n";
116                 $code .= ".type $name,\@function,$nargs\n";
117                 $code .= ".align        $align\n";
118                 $code .= "$name:\n";
119         }
120 }
121
122 sub end_function() {
123         my ($name) = @_;
124         if($kernel) {
125                 $code .= "SYM_FUNC_END($name)\n";
126         } else {
127                 $code .= ".size   $name,.-$name\n";
128         }
129 }
130
131 $code.=<<___ if $kernel;
132 #include <linux/linkage.h>
133 ___
134
135 if ($avx) {
136 $code.=<<___ if $kernel;
137 .section .rodata
138 ___
139 $code.=<<___;
140 .align  64
141 .Lconst:
142 .Lmask24:
143 .long   0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
144 .L129:
145 .long   `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
146 .Lmask26:
147 .long   0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
148 .Lpermd_avx2:
149 .long   2,2,2,3,2,0,2,1
150 .Lpermd_avx512:
151 .long   0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7
152
153 .L2_44_inp_permd:
154 .long   0,1,1,2,2,3,7,7
155 .L2_44_inp_shift:
156 .quad   0,12,24,64
157 .L2_44_mask:
158 .quad   0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff
159 .L2_44_shift_rgt:
160 .quad   44,44,42,64
161 .L2_44_shift_lft:
162 .quad   8,8,10,64
163
164 .align  64
165 .Lx_mask44:
166 .quad   0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
167 .quad   0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
168 .Lx_mask42:
169 .quad   0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
170 .quad   0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
171 ___
172 }
173 $code.=<<___ if (!$kernel);
174 .asciz  "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
175 .align  16
176 ___
177
178 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
179 my ($mac,$nonce)=($inp,$len);   # *_emit arguments
180 my ($d1,$d2,$d3, $r0,$r1,$s1)=("%r8","%r9","%rdi","%r11","%r12","%r13");
181 my ($h0,$h1,$h2)=("%r14","%rbx","%r10");
182
183 sub poly1305_iteration {
184 # input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
185 # output:       $h0-$h2 *= $r0-$r1
186 $code.=<<___;
187         mulq    $h0                     # h0*r1
188         mov     %rax,$d2
189          mov    $r0,%rax
190         mov     %rdx,$d3
191
192         mulq    $h0                     # h0*r0
193         mov     %rax,$h0                # future $h0
194          mov    $r0,%rax
195         mov     %rdx,$d1
196
197         mulq    $h1                     # h1*r0
198         add     %rax,$d2
199          mov    $s1,%rax
200         adc     %rdx,$d3
201
202         mulq    $h1                     # h1*s1
203          mov    $h2,$h1                 # borrow $h1
204         add     %rax,$h0
205         adc     %rdx,$d1
206
207         imulq   $s1,$h1                 # h2*s1
208         add     $h1,$d2
209          mov    $d1,$h1
210         adc     \$0,$d3
211
212         imulq   $r0,$h2                 # h2*r0
213         add     $d2,$h1
214         mov     \$-4,%rax               # mask value
215         adc     $h2,$d3
216
217         and     $d3,%rax                # last reduction step
218         mov     $d3,$h2
219         shr     \$2,$d3
220         and     \$3,$h2
221         add     $d3,%rax
222         add     %rax,$h0
223         adc     \$0,$h1
224         adc     \$0,$h2
225 ___
226 }
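
# A bignum reference model of the iteration above (an illustrative sketch,
# never called by the generator): the 130-bit accumulator is multiplied by r
# and reduced modulo 2^130-5.  The assembly leaves h only partially reduced
# (it may still slightly exceed 2^130-5) and folds the overflow back via
# 2^130 == 5 (mod p); because r1 is clamped to a multiple of 4,
# s1 = r1 + (r1>>2) equals 5*r1/4 and lets that fold-back be precomputed.
sub poly1305_iteration_ref {
	use Math::BigInt;
	my ($h, $r) = @_;			# Math::BigInt inputs
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
	return ($h * $r) % $p;			# exact, canonical result
}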
227
228 ########################################################################
229 # The layout of the opaque area is as follows.
230 #
231 #       unsigned __int64 h[3];          # current hash value base 2^64
232 #       unsigned __int64 r[2];          # key value base 2^64
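#
# (Informal note on the offsets used by the code below: h[0..2] live at
# 0/8/16($ctx) and r[0..1] at 24/32($ctx); byte offset 20, which stays zero
# while the hash is kept in base 2^64, doubles as the is_base2_26 flag used
# by the AVX code paths.)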
233
234 $code.=<<___;
235 .text
236 ___
237 $code.=<<___ if (!$kernel);
238 .extern OPENSSL_ia32cap_P
239
240 .globl  poly1305_init_x86_64
241 .hidden poly1305_init_x86_64
242 .globl  poly1305_blocks_x86_64
243 .hidden poly1305_blocks_x86_64
244 .globl  poly1305_emit_x86_64
245 .hidden poly1305_emit_x86_64
246 ___
247 &declare_function("poly1305_init_x86_64", 32, 3);
248 $code.=<<___;
249         xor     %rax,%rax
250         mov     %rax,0($ctx)            # initialize hash value
251         mov     %rax,8($ctx)
252         mov     %rax,16($ctx)
253
254         cmp     \$0,$inp
255         je      .Lno_key
256 ___
257 $code.=<<___ if (!$kernel);
258         lea     poly1305_blocks_x86_64(%rip),%r10
259         lea     poly1305_emit_x86_64(%rip),%r11
260 ___
261 $code.=<<___    if (!$kernel && $avx);
262         mov     OPENSSL_ia32cap_P+4(%rip),%r9
263         lea     poly1305_blocks_avx(%rip),%rax
264         lea     poly1305_emit_avx(%rip),%rcx
265         bt      \$`60-32`,%r9           # AVX?
266         cmovc   %rax,%r10
267         cmovc   %rcx,%r11
268 ___
269 $code.=<<___    if (!$kernel && $avx>1);
270         lea     poly1305_blocks_avx2(%rip),%rax
271         bt      \$`5+32`,%r9            # AVX2?
272         cmovc   %rax,%r10
273 ___
274 $code.=<<___    if (!$kernel && $avx>3);
275         mov     \$`(1<<31|1<<21|1<<16)`,%rax
276         shr     \$32,%r9
277         and     %rax,%r9
278         cmp     %rax,%r9
279         je      .Linit_base2_44
280 ___
281 $code.=<<___;
282         mov     \$0x0ffffffc0fffffff,%rax
283         mov     \$0x0ffffffc0ffffffc,%rcx
284         and     0($inp),%rax
285         and     8($inp),%rcx
286         mov     %rax,24($ctx)
287         mov     %rcx,32($ctx)
288 ___
289 $code.=<<___    if (!$kernel && $flavour !~ /elf32/);
290         mov     %r10,0(%rdx)
291         mov     %r11,8(%rdx)
292 ___
293 $code.=<<___    if (!$kernel && $flavour =~ /elf32/);
294         mov     %r10d,0(%rdx)
295         mov     %r11d,4(%rdx)
296 ___
297 $code.=<<___;
298         mov     \$1,%eax
299 .Lno_key:
300         ret
301 ___
302 &end_function("poly1305_init_x86_64");
303
304 &declare_function("poly1305_blocks_x86_64", 32, 4);
305 $code.=<<___;
306 .cfi_startproc
307 .Lblocks:
308         shr     \$4,$len
309         jz      .Lno_data               # too short
310
311         push    %rbx
312 .cfi_push       %rbx
313         push    %r12
314 .cfi_push       %r12
315         push    %r13
316 .cfi_push       %r13
317         push    %r14
318 .cfi_push       %r14
319         push    %r15
320 .cfi_push       %r15
321         push    $ctx
322 .cfi_push       $ctx
323 .Lblocks_body:
324
325         mov     $len,%r15               # reassign $len
326
327         mov     24($ctx),$r0            # load r
328         mov     32($ctx),$s1
329
330         mov     0($ctx),$h0             # load hash value
331         mov     8($ctx),$h1
332         mov     16($ctx),$h2
333
334         mov     $s1,$r1
335         shr     \$2,$s1
336         mov     $r1,%rax
337         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
338         jmp     .Loop
339
340 .align  32
341 .Loop:
342         add     0($inp),$h0             # accumulate input
343         adc     8($inp),$h1
344         lea     16($inp),$inp
345         adc     $padbit,$h2
346 ___
347
348         &poly1305_iteration();
349
350 $code.=<<___;
351         mov     $r1,%rax
352         dec     %r15                    # len-=16
353         jnz     .Loop
354
355         mov     0(%rsp),$ctx
356 .cfi_restore    $ctx
357
358         mov     $h0,0($ctx)             # store hash value
359         mov     $h1,8($ctx)
360         mov     $h2,16($ctx)
361
362         mov     8(%rsp),%r15
363 .cfi_restore    %r15
364         mov     16(%rsp),%r14
365 .cfi_restore    %r14
366         mov     24(%rsp),%r13
367 .cfi_restore    %r13
368         mov     32(%rsp),%r12
369 .cfi_restore    %r12
370         mov     40(%rsp),%rbx
371 .cfi_restore    %rbx
372         lea     48(%rsp),%rsp
373 .cfi_adjust_cfa_offset  -48
374 .Lno_data:
375 .Lblocks_epilogue:
376         ret
377 .cfi_endproc
378 ___
379 &end_function("poly1305_blocks_x86_64");
380
381 &declare_function("poly1305_emit_x86_64", 32, 3);
382 $code.=<<___;
383 .Lemit:
384         mov     0($ctx),%r8     # load hash value
385         mov     8($ctx),%r9
386         mov     16($ctx),%r10
387
388         mov     %r8,%rax
389         add     \$5,%r8         # compare to modulus
390         mov     %r9,%rcx
391         adc     \$0,%r9
392         adc     \$0,%r10
393         shr     \$2,%r10        # did 130-bit value overflow?
394         cmovnz  %r8,%rax
395         cmovnz  %r9,%rcx
396
397         add     0($nonce),%rax  # accumulate nonce
398         adc     8($nonce),%rcx
399         mov     %rax,0($mac)    # write result
400         mov     %rcx,8($mac)
401
402         ret
403 ___
404 &end_function("poly1305_emit_x86_64");
405 if ($avx) {
406
407 ########################################################################
408 # The layout of the opaque area is as follows.
409 #
410 #       unsigned __int32 h[5];          # current hash value base 2^26
411 #       unsigned __int32 is_base2_26;
412 #       unsigned __int64 r[2];          # key value base 2^64
413 #       unsigned __int64 pad;
414 #       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
415 #
416 # where r^n are the base 2^26 digits of the powers of the multiplier key.
417 # There are 5 digits, but the last four are interleaved with their multiples
418 # of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
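#
# (Concretely, as an informal reading: slot n, n = 0..8, is the 16 bytes at
# offset 16*n-64 from the size-optimized pointer set up in __poly1305_init_avx,
# and its four 32-bit lanes hold that element for r^2, r^1, r^4 and r^3, in
# this order.)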
419
420 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
421     map("%xmm$_",(0..15));
422
423 $code.=<<___;
424 .type   __poly1305_block,\@abi-omnipotent
425 .align  32
426 __poly1305_block:
427         push $ctx
428 ___
429         &poly1305_iteration();
430 $code.=<<___;
431         pop $ctx
432         ret
433 .size   __poly1305_block,.-__poly1305_block
434
435 .type   __poly1305_init_avx,\@abi-omnipotent
436 .align  32
437 __poly1305_init_avx:
438         push %rbp
439         mov %rsp,%rbp
440         mov     $r0,$h0
441         mov     $r1,$h1
442         xor     $h2,$h2
443
444         lea     48+64($ctx),$ctx        # size optimization
445
446         mov     $r1,%rax
447         call    __poly1305_block        # r^2
448
449         mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
450         mov     \$0x3ffffff,%edx
451         mov     $h0,$d1
452         and     $h0#d,%eax
453         mov     $r0,$d2
454         and     $r0#d,%edx
455         mov     %eax,`16*0+0-64`($ctx)
456         shr     \$26,$d1
457         mov     %edx,`16*0+4-64`($ctx)
458         shr     \$26,$d2
459
460         mov     \$0x3ffffff,%eax
461         mov     \$0x3ffffff,%edx
462         and     $d1#d,%eax
463         and     $d2#d,%edx
464         mov     %eax,`16*1+0-64`($ctx)
465         lea     (%rax,%rax,4),%eax      # *5
466         mov     %edx,`16*1+4-64`($ctx)
467         lea     (%rdx,%rdx,4),%edx      # *5
468         mov     %eax,`16*2+0-64`($ctx)
469         shr     \$26,$d1
470         mov     %edx,`16*2+4-64`($ctx)
471         shr     \$26,$d2
472
473         mov     $h1,%rax
474         mov     $r1,%rdx
475         shl     \$12,%rax
476         shl     \$12,%rdx
477         or      $d1,%rax
478         or      $d2,%rdx
479         and     \$0x3ffffff,%eax
480         and     \$0x3ffffff,%edx
481         mov     %eax,`16*3+0-64`($ctx)
482         lea     (%rax,%rax,4),%eax      # *5
483         mov     %edx,`16*3+4-64`($ctx)
484         lea     (%rdx,%rdx,4),%edx      # *5
485         mov     %eax,`16*4+0-64`($ctx)
486         mov     $h1,$d1
487         mov     %edx,`16*4+4-64`($ctx)
488         mov     $r1,$d2
489
490         mov     \$0x3ffffff,%eax
491         mov     \$0x3ffffff,%edx
492         shr     \$14,$d1
493         shr     \$14,$d2
494         and     $d1#d,%eax
495         and     $d2#d,%edx
496         mov     %eax,`16*5+0-64`($ctx)
497         lea     (%rax,%rax,4),%eax      # *5
498         mov     %edx,`16*5+4-64`($ctx)
499         lea     (%rdx,%rdx,4),%edx      # *5
500         mov     %eax,`16*6+0-64`($ctx)
501         shr     \$26,$d1
502         mov     %edx,`16*6+4-64`($ctx)
503         shr     \$26,$d2
504
505         mov     $h2,%rax
506         shl     \$24,%rax
507         or      %rax,$d1
508         mov     $d1#d,`16*7+0-64`($ctx)
509         lea     ($d1,$d1,4),$d1         # *5
510         mov     $d2#d,`16*7+4-64`($ctx)
511         lea     ($d2,$d2,4),$d2         # *5
512         mov     $d1#d,`16*8+0-64`($ctx)
513         mov     $d2#d,`16*8+4-64`($ctx)
514
515         mov     $r1,%rax
516         call    __poly1305_block        # r^3
517
518         mov     \$0x3ffffff,%eax        # save r^3 base 2^26
519         mov     $h0,$d1
520         and     $h0#d,%eax
521         shr     \$26,$d1
522         mov     %eax,`16*0+12-64`($ctx)
523
524         mov     \$0x3ffffff,%edx
525         and     $d1#d,%edx
526         mov     %edx,`16*1+12-64`($ctx)
527         lea     (%rdx,%rdx,4),%edx      # *5
528         shr     \$26,$d1
529         mov     %edx,`16*2+12-64`($ctx)
530
531         mov     $h1,%rax
532         shl     \$12,%rax
533         or      $d1,%rax
534         and     \$0x3ffffff,%eax
535         mov     %eax,`16*3+12-64`($ctx)
536         lea     (%rax,%rax,4),%eax      # *5
537         mov     $h1,$d1
538         mov     %eax,`16*4+12-64`($ctx)
539
540         mov     \$0x3ffffff,%edx
541         shr     \$14,$d1
542         and     $d1#d,%edx
543         mov     %edx,`16*5+12-64`($ctx)
544         lea     (%rdx,%rdx,4),%edx      # *5
545         shr     \$26,$d1
546         mov     %edx,`16*6+12-64`($ctx)
547
548         mov     $h2,%rax
549         shl     \$24,%rax
550         or      %rax,$d1
551         mov     $d1#d,`16*7+12-64`($ctx)
552         lea     ($d1,$d1,4),$d1         # *5
553         mov     $d1#d,`16*8+12-64`($ctx)
554
555         mov     $r1,%rax
556         call    __poly1305_block        # r^4
557
558         mov     \$0x3ffffff,%eax        # save r^4 base 2^26
559         mov     $h0,$d1
560         and     $h0#d,%eax
561         shr     \$26,$d1
562         mov     %eax,`16*0+8-64`($ctx)
563
564         mov     \$0x3ffffff,%edx
565         and     $d1#d,%edx
566         mov     %edx,`16*1+8-64`($ctx)
567         lea     (%rdx,%rdx,4),%edx      # *5
568         shr     \$26,$d1
569         mov     %edx,`16*2+8-64`($ctx)
570
571         mov     $h1,%rax
572         shl     \$12,%rax
573         or      $d1,%rax
574         and     \$0x3ffffff,%eax
575         mov     %eax,`16*3+8-64`($ctx)
576         lea     (%rax,%rax,4),%eax      # *5
577         mov     $h1,$d1
578         mov     %eax,`16*4+8-64`($ctx)
579
580         mov     \$0x3ffffff,%edx
581         shr     \$14,$d1
582         and     $d1#d,%edx
583         mov     %edx,`16*5+8-64`($ctx)
584         lea     (%rdx,%rdx,4),%edx      # *5
585         shr     \$26,$d1
586         mov     %edx,`16*6+8-64`($ctx)
587
588         mov     $h2,%rax
589         shl     \$24,%rax
590         or      %rax,$d1
591         mov     $d1#d,`16*7+8-64`($ctx)
592         lea     ($d1,$d1,4),$d1         # *5
593         mov     $d1#d,`16*8+8-64`($ctx)
594
595         lea     -48-64($ctx),$ctx       # size [de-]optimization
596         pop %rbp
597         ret
598 .size   __poly1305_init_avx,.-__poly1305_init_avx
599 ___
600
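# Unused helper, given purely for reference: the digit split performed by
# __poly1305_init_avx above, expressed with bignums.  Each power of r is cut
# into five 26-bit limbs; limbs 1..4 are additionally stored premultiplied
# by 5 so the vector loops can consume them directly.
sub base2_26_digits_ref {
	use Math::BigInt;
	my ($v) = @_;				# Math::BigInt, < 2^130
	my @d;
	for (0..4) {
		push @d, ($v & 0x3ffffff)->numify();
		$v = $v >> 26;
	}
	return @d;				# (d0, d1, d2, d3, d4)
}
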
601 &declare_function("poly1305_blocks_avx", 32, 4);
602 $code.=<<___;
603 .cfi_startproc
604         mov     20($ctx),%r8d           # is_base2_26
605         cmp     \$128,$len
606         jae     .Lblocks_avx
607         test    %r8d,%r8d
608         jz      .Lblocks
609
610 .Lblocks_avx:
611         and     \$-16,$len
612         jz      .Lno_data_avx
613
614         vzeroupper
615
616         test    %r8d,%r8d
617         jz      .Lbase2_64_avx
618
619         test    \$31,$len
620         jz      .Leven_avx
621
622         push    %rbp
623 .cfi_push       %rbp
624         mov     %rsp,%rbp
625         push    %rbx
626 .cfi_push       %rbx
627         push    %r12
628 .cfi_push       %r12
629         push    %r13
630 .cfi_push       %r13
631         push    %r14
632 .cfi_push       %r14
633         push    %r15
634 .cfi_push       %r15
635 .Lblocks_avx_body:
636
637         mov     $len,%r15               # reassign $len
638
639         mov     0($ctx),$d1             # load hash value
640         mov     8($ctx),$d2
641         mov     16($ctx),$h2#d
642
643         mov     24($ctx),$r0            # load r
644         mov     32($ctx),$s1
645
646         ################################# base 2^26 -> base 2^64
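	# (informal note: the repacked value is h0 + h1*2^26 + h2*2^52 +
	#  h3*2^78 + h4*2^104; because the 2^26 limbs are kept lazily
	#  reduced it can slightly exceed 130 bits, hence the extra
	#  reduction a few instructions below)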
647         mov     $d1#d,$h0#d
648         and     \$`-1*(1<<31)`,$d1
649         mov     $d2,$r1                 # borrow $r1
650         mov     $d2#d,$h1#d
651         and     \$`-1*(1<<31)`,$d2
652
653         shr     \$6,$d1
654         shl     \$52,$r1
655         add     $d1,$h0
656         shr     \$12,$h1
657         shr     \$18,$d2
658         add     $r1,$h0
659         adc     $d2,$h1
660
661         mov     $h2,$d1
662         shl     \$40,$d1
663         shr     \$24,$h2
664         add     $d1,$h1
665         adc     \$0,$h2                 # can be partially reduced...
666
667         mov     \$-4,$d2                # ... so reduce
668         mov     $h2,$d1
669         and     $h2,$d2
670         shr     \$2,$d1
671         and     \$3,$h2
672         add     $d2,$d1                 # =*5
673         add     $d1,$h0
674         adc     \$0,$h1
675         adc     \$0,$h2
676
677         mov     $s1,$r1
678         mov     $s1,%rax
679         shr     \$2,$s1
680         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
681
682         add     0($inp),$h0             # accumulate input
683         adc     8($inp),$h1
684         lea     16($inp),$inp
685         adc     $padbit,$h2
686
687         call    __poly1305_block
688
689         test    $padbit,$padbit         # if $padbit is zero,
690         jz      .Lstore_base2_64_avx    # store hash in base 2^64 format
691
692         ################################# base 2^64 -> base 2^26
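	# (informal note: the inverse of the repacking above, i.e. limb i of
	#  the base 2^26 form receives bits 26*i .. 26*i+25 of the accumulator)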
693         mov     $h0,%rax
694         mov     $h0,%rdx
695         shr     \$52,$h0
696         mov     $h1,$r0
697         mov     $h1,$r1
698         shr     \$26,%rdx
699         and     \$0x3ffffff,%rax        # h[0]
700         shl     \$12,$r0
701         and     \$0x3ffffff,%rdx        # h[1]
702         shr     \$14,$h1
703         or      $r0,$h0
704         shl     \$24,$h2
705         and     \$0x3ffffff,$h0         # h[2]
706         shr     \$40,$r1
707         and     \$0x3ffffff,$h1         # h[3]
708         or      $r1,$h2                 # h[4]
709
710         sub     \$16,%r15
711         jz      .Lstore_base2_26_avx
712
713         vmovd   %rax#d,$H0
714         vmovd   %rdx#d,$H1
715         vmovd   $h0#d,$H2
716         vmovd   $h1#d,$H3
717         vmovd   $h2#d,$H4
718         jmp     .Lproceed_avx
719
720 .align  32
721 .Lstore_base2_64_avx:
722         mov     $h0,0($ctx)
723         mov     $h1,8($ctx)
724         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
725         jmp     .Ldone_avx
726
727 .align  16
728 .Lstore_base2_26_avx:
729         mov     %rax#d,0($ctx)          # store hash value base 2^26
730         mov     %rdx#d,4($ctx)
731         mov     $h0#d,8($ctx)
732         mov     $h1#d,12($ctx)
733         mov     $h2#d,16($ctx)
734 .align  16
735 .Ldone_avx:
736         pop             %r15
737 .cfi_restore    %r15
738         pop             %r14
739 .cfi_restore    %r14
740         pop             %r13
741 .cfi_restore    %r13
742         pop             %r12
743 .cfi_restore    %r12
744         pop             %rbx
745 .cfi_restore    %rbx
746         pop             %rbp
747 .cfi_restore    %rbp
748 .Lno_data_avx:
749 .Lblocks_avx_epilogue:
750         ret
751 .cfi_endproc
752
753 .align  32
754 .Lbase2_64_avx:
755 .cfi_startproc
756         push    %rbp
757 .cfi_push       %rbp
758         mov     %rsp,%rbp
759         push    %rbx
760 .cfi_push       %rbx
761         push    %r12
762 .cfi_push       %r12
763         push    %r13
764 .cfi_push       %r13
765         push    %r14
766 .cfi_push       %r14
767         push    %r15
768 .cfi_push       %r15
769 .Lbase2_64_avx_body:
770
771         mov     $len,%r15               # reassign $len
772
773         mov     24($ctx),$r0            # load r
774         mov     32($ctx),$s1
775
776         mov     0($ctx),$h0             # load hash value
777         mov     8($ctx),$h1
778         mov     16($ctx),$h2#d
779
780         mov     $s1,$r1
781         mov     $s1,%rax
782         shr     \$2,$s1
783         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
784
785         test    \$31,$len
786         jz      .Linit_avx
787
788         add     0($inp),$h0             # accumulate input
789         adc     8($inp),$h1
790         lea     16($inp),$inp
791         adc     $padbit,$h2
792         sub     \$16,%r15
793
794         call    __poly1305_block
795
796 .Linit_avx:
797         ################################# base 2^64 -> base 2^26
798         mov     $h0,%rax
799         mov     $h0,%rdx
800         shr     \$52,$h0
801         mov     $h1,$d1
802         mov     $h1,$d2
803         shr     \$26,%rdx
804         and     \$0x3ffffff,%rax        # h[0]
805         shl     \$12,$d1
806         and     \$0x3ffffff,%rdx        # h[1]
807         shr     \$14,$h1
808         or      $d1,$h0
809         shl     \$24,$h2
810         and     \$0x3ffffff,$h0         # h[2]
811         shr     \$40,$d2
812         and     \$0x3ffffff,$h1         # h[3]
813         or      $d2,$h2                 # h[4]
814
815         vmovd   %rax#d,$H0
816         vmovd   %rdx#d,$H1
817         vmovd   $h0#d,$H2
818         vmovd   $h1#d,$H3
819         vmovd   $h2#d,$H4
820         movl    \$1,20($ctx)            # set is_base2_26
821
822         call    __poly1305_init_avx
823
824 .Lproceed_avx:
825         mov     %r15,$len
826         pop             %r15
827 .cfi_restore    %r15
828         pop             %r14
829 .cfi_restore    %r14
830         pop             %r13
831 .cfi_restore    %r13
832         pop             %r12
833 .cfi_restore    %r12
834         pop             %rbx
835 .cfi_restore    %rbx
836         pop             %rbp
837 .cfi_restore    %rbp
838 .Lbase2_64_avx_epilogue:
839         jmp     .Ldo_avx
840 .cfi_endproc
841
842 .align  32
843 .Leven_avx:
844 .cfi_startproc
845         vmovd           4*0($ctx),$H0           # load hash value
846         vmovd           4*1($ctx),$H1
847         vmovd           4*2($ctx),$H2
848         vmovd           4*3($ctx),$H3
849         vmovd           4*4($ctx),$H4
850
851 .Ldo_avx:
852 ___
853 $code.=<<___    if (!$win64);
854         lea             8(%rsp),%r10
855 .cfi_def_cfa_register   %r10
856         and             \$-32,%rsp
857         sub             \$-8,%rsp
858         lea             -0x58(%rsp),%r11
859         sub             \$0x178,%rsp
860 ___
861 $code.=<<___    if ($win64);
862         lea             -0xf8(%rsp),%r11
863         sub             \$0x218,%rsp
864         vmovdqa         %xmm6,0x50(%r11)
865         vmovdqa         %xmm7,0x60(%r11)
866         vmovdqa         %xmm8,0x70(%r11)
867         vmovdqa         %xmm9,0x80(%r11)
868         vmovdqa         %xmm10,0x90(%r11)
869         vmovdqa         %xmm11,0xa0(%r11)
870         vmovdqa         %xmm12,0xb0(%r11)
871         vmovdqa         %xmm13,0xc0(%r11)
872         vmovdqa         %xmm14,0xd0(%r11)
873         vmovdqa         %xmm15,0xe0(%r11)
874 .Ldo_avx_body:
875 ___
876 $code.=<<___;
877         sub             \$64,$len
878         lea             -32($inp),%rax
879         cmovc           %rax,$inp
880
881         vmovdqu         `16*3`($ctx),$D4        # preload r0^2
882         lea             `16*3+64`($ctx),$ctx    # size optimization
883         lea             .Lconst(%rip),%rcx
884
885         ################################################################
886         # load input
887         vmovdqu         16*2($inp),$T0
888         vmovdqu         16*3($inp),$T1
889         vmovdqa         64(%rcx),$MASK          # .Lmask26
890
891         vpsrldq         \$6,$T0,$T2             # splat input
892         vpsrldq         \$6,$T1,$T3
893         vpunpckhqdq     $T1,$T0,$T4             # 4
894         vpunpcklqdq     $T1,$T0,$T0             # 0:1
895         vpunpcklqdq     $T3,$T2,$T3             # 2:3
896
897         vpsrlq          \$40,$T4,$T4            # 4
898         vpsrlq          \$26,$T0,$T1
899         vpand           $MASK,$T0,$T0           # 0
900         vpsrlq          \$4,$T3,$T2
901         vpand           $MASK,$T1,$T1           # 1
902         vpsrlq          \$30,$T3,$T3
903         vpand           $MASK,$T2,$T2           # 2
904         vpand           $MASK,$T3,$T3           # 3
905         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
906
907         jbe             .Lskip_loop_avx
908
909         # expand and copy pre-calculated table to stack
910         vmovdqu         `16*1-64`($ctx),$D1
911         vmovdqu         `16*2-64`($ctx),$D2
912         vpshufd         \$0xEE,$D4,$D3          # 34xx -> 3434
913         vpshufd         \$0x44,$D4,$D0          # xx12 -> 1212
914         vmovdqa         $D3,-0x90(%r11)
915         vmovdqa         $D0,0x00(%rsp)
916         vpshufd         \$0xEE,$D1,$D4
917         vmovdqu         `16*3-64`($ctx),$D0
918         vpshufd         \$0x44,$D1,$D1
919         vmovdqa         $D4,-0x80(%r11)
920         vmovdqa         $D1,0x10(%rsp)
921         vpshufd         \$0xEE,$D2,$D3
922         vmovdqu         `16*4-64`($ctx),$D1
923         vpshufd         \$0x44,$D2,$D2
924         vmovdqa         $D3,-0x70(%r11)
925         vmovdqa         $D2,0x20(%rsp)
926         vpshufd         \$0xEE,$D0,$D4
927         vmovdqu         `16*5-64`($ctx),$D2
928         vpshufd         \$0x44,$D0,$D0
929         vmovdqa         $D4,-0x60(%r11)
930         vmovdqa         $D0,0x30(%rsp)
931         vpshufd         \$0xEE,$D1,$D3
932         vmovdqu         `16*6-64`($ctx),$D0
933         vpshufd         \$0x44,$D1,$D1
934         vmovdqa         $D3,-0x50(%r11)
935         vmovdqa         $D1,0x40(%rsp)
936         vpshufd         \$0xEE,$D2,$D4
937         vmovdqu         `16*7-64`($ctx),$D1
938         vpshufd         \$0x44,$D2,$D2
939         vmovdqa         $D4,-0x40(%r11)
940         vmovdqa         $D2,0x50(%rsp)
941         vpshufd         \$0xEE,$D0,$D3
942         vmovdqu         `16*8-64`($ctx),$D2
943         vpshufd         \$0x44,$D0,$D0
944         vmovdqa         $D3,-0x30(%r11)
945         vmovdqa         $D0,0x60(%rsp)
946         vpshufd         \$0xEE,$D1,$D4
947         vpshufd         \$0x44,$D1,$D1
948         vmovdqa         $D4,-0x20(%r11)
949         vmovdqa         $D1,0x70(%rsp)
950         vpshufd         \$0xEE,$D2,$D3
951          vmovdqa        0x00(%rsp),$D4          # preload r0^2
952         vpshufd         \$0x44,$D2,$D2
953         vmovdqa         $D3,-0x10(%r11)
954         vmovdqa         $D2,0x80(%rsp)
955
956         jmp             .Loop_avx
957
958 .align  32
959 .Loop_avx:
960         ################################################################
961         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
962         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
963         #   \___________________/
964         # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
965         # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
966         #   \___________________/ \____________________/
967         #
968	# Note that we start with inp[2:3]*r^2. This is because it
969	# doesn't depend on the reduction in the previous iteration.
970         ################################################################
971         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
972         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
973         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
974         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
975         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
976         #
977         # though note that $Tx and $Hx are "reversed" in this section,
978         # and $D4 is preloaded with r0^2...
979
980         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
981         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
982           vmovdqa       $H2,0x20(%r11)                          # offload hash
983         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
984          vmovdqa        0x10(%rsp),$H2          # r1^2
985         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
986         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
987
988           vmovdqa       $H0,0x00(%r11)                          #
989         vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
990           vmovdqa       $H1,0x10(%r11)                          #
991         vpmuludq        $T3,$H2,$H1             # h3*r1
992         vpaddq          $H0,$D0,$D0             # d0 += h4*s1
993         vpaddq          $H1,$D4,$D4             # d4 += h3*r1
994           vmovdqa       $H3,0x30(%r11)                          #
995         vpmuludq        $T2,$H2,$H0             # h2*r1
996         vpmuludq        $T1,$H2,$H1             # h1*r1
997         vpaddq          $H0,$D3,$D3             # d3 += h2*r1
998          vmovdqa        0x30(%rsp),$H3          # r2^2
999         vpaddq          $H1,$D2,$D2             # d2 += h1*r1
1000           vmovdqa       $H4,0x40(%r11)                          #
1001         vpmuludq        $T0,$H2,$H2             # h0*r1
1002          vpmuludq       $T2,$H3,$H0             # h2*r2
1003         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
1004
1005          vmovdqa        0x40(%rsp),$H4          # s2^2
1006         vpaddq          $H0,$D4,$D4             # d4 += h2*r2
1007         vpmuludq        $T1,$H3,$H1             # h1*r2
1008         vpmuludq        $T0,$H3,$H3             # h0*r2
1009         vpaddq          $H1,$D3,$D3             # d3 += h1*r2
1010          vmovdqa        0x50(%rsp),$H2          # r3^2
1011         vpaddq          $H3,$D2,$D2             # d2 += h0*r2
1012         vpmuludq        $T4,$H4,$H0             # h4*s2
1013         vpmuludq        $T3,$H4,$H4             # h3*s2
1014         vpaddq          $H0,$D1,$D1             # d1 += h4*s2
1015          vmovdqa        0x60(%rsp),$H3          # s3^2
1016         vpaddq          $H4,$D0,$D0             # d0 += h3*s2
1017
1018          vmovdqa        0x80(%rsp),$H4          # s4^2
1019         vpmuludq        $T1,$H2,$H1             # h1*r3
1020         vpmuludq        $T0,$H2,$H2             # h0*r3
1021         vpaddq          $H1,$D4,$D4             # d4 += h1*r3
1022         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1023         vpmuludq        $T4,$H3,$H0             # h4*s3
1024         vpmuludq        $T3,$H3,$H1             # h3*s3
1025         vpaddq          $H0,$D2,$D2             # d2 += h4*s3
1026          vmovdqu        16*0($inp),$H0                          # load input
1027         vpaddq          $H1,$D1,$D1             # d1 += h3*s3
1028         vpmuludq        $T2,$H3,$H3             # h2*s3
1029          vpmuludq       $T2,$H4,$T2             # h2*s4
1030         vpaddq          $H3,$D0,$D0             # d0 += h2*s3
1031
1032          vmovdqu        16*1($inp),$H1                          #
1033         vpaddq          $T2,$D1,$D1             # d1 += h2*s4
1034         vpmuludq        $T3,$H4,$T3             # h3*s4
1035         vpmuludq        $T4,$H4,$T4             # h4*s4
1036          vpsrldq        \$6,$H0,$H2                             # splat input
1037         vpaddq          $T3,$D2,$D2             # d2 += h3*s4
1038         vpaddq          $T4,$D3,$D3             # d3 += h4*s4
1039          vpsrldq        \$6,$H1,$H3                             #
1040         vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
1041         vpmuludq        $T1,$H4,$T0             # h1*s4
1042          vpunpckhqdq    $H1,$H0,$H4             # 4
1043         vpaddq          $T4,$D4,$D4             # d4 += h0*r4
1044          vmovdqa        -0x90(%r11),$T4         # r0^4
1045         vpaddq          $T0,$D0,$D0             # d0 += h1*s4
1046
1047         vpunpcklqdq     $H1,$H0,$H0             # 0:1
1048         vpunpcklqdq     $H3,$H2,$H3             # 2:3
1049
1050         #vpsrlq         \$40,$H4,$H4            # 4
1051         vpsrldq         \$`40/8`,$H4,$H4        # 4
1052         vpsrlq          \$26,$H0,$H1
1053         vpand           $MASK,$H0,$H0           # 0
1054         vpsrlq          \$4,$H3,$H2
1055         vpand           $MASK,$H1,$H1           # 1
1056         vpand           0(%rcx),$H4,$H4         # .Lmask24
1057         vpsrlq          \$30,$H3,$H3
1058         vpand           $MASK,$H2,$H2           # 2
1059         vpand           $MASK,$H3,$H3           # 3
1060         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
1061
1062         vpaddq          0x00(%r11),$H0,$H0      # add hash value
1063         vpaddq          0x10(%r11),$H1,$H1
1064         vpaddq          0x20(%r11),$H2,$H2
1065         vpaddq          0x30(%r11),$H3,$H3
1066         vpaddq          0x40(%r11),$H4,$H4
1067
1068         lea             16*2($inp),%rax
1069         lea             16*4($inp),$inp
1070         sub             \$64,$len
1071         cmovc           %rax,$inp
1072
1073         ################################################################
1074         # Now we accumulate (inp[0:1]+hash)*r^4
1075         ################################################################
1076         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1077         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1078         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1079         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1080         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1081
1082         vpmuludq        $H0,$T4,$T0             # h0*r0
1083         vpmuludq        $H1,$T4,$T1             # h1*r0
1084         vpaddq          $T0,$D0,$D0
1085         vpaddq          $T1,$D1,$D1
1086          vmovdqa        -0x80(%r11),$T2         # r1^4
1087         vpmuludq        $H2,$T4,$T0             # h2*r0
1088         vpmuludq        $H3,$T4,$T1             # h3*r0
1089         vpaddq          $T0,$D2,$D2
1090         vpaddq          $T1,$D3,$D3
1091         vpmuludq        $H4,$T4,$T4             # h4*r0
1092          vpmuludq       -0x70(%r11),$H4,$T0     # h4*s1
1093         vpaddq          $T4,$D4,$D4
1094
1095         vpaddq          $T0,$D0,$D0             # d0 += h4*s1
1096         vpmuludq        $H2,$T2,$T1             # h2*r1
1097         vpmuludq        $H3,$T2,$T0             # h3*r1
1098         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1099          vmovdqa        -0x60(%r11),$T3         # r2^4
1100         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1101         vpmuludq        $H1,$T2,$T1             # h1*r1
1102         vpmuludq        $H0,$T2,$T2             # h0*r1
1103         vpaddq          $T1,$D2,$D2             # d2 += h1*r1
1104         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1105
1106          vmovdqa        -0x50(%r11),$T4         # s2^4
1107         vpmuludq        $H2,$T3,$T0             # h2*r2
1108         vpmuludq        $H1,$T3,$T1             # h1*r2
1109         vpaddq          $T0,$D4,$D4             # d4 += h2*r2
1110         vpaddq          $T1,$D3,$D3             # d3 += h1*r2
1111          vmovdqa        -0x40(%r11),$T2         # r3^4
1112         vpmuludq        $H0,$T3,$T3             # h0*r2
1113         vpmuludq        $H4,$T4,$T0             # h4*s2
1114         vpaddq          $T3,$D2,$D2             # d2 += h0*r2
1115         vpaddq          $T0,$D1,$D1             # d1 += h4*s2
1116          vmovdqa        -0x30(%r11),$T3         # s3^4
1117         vpmuludq        $H3,$T4,$T4             # h3*s2
1118          vpmuludq       $H1,$T2,$T1             # h1*r3
1119         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1120
1121          vmovdqa        -0x10(%r11),$T4         # s4^4
1122         vpaddq          $T1,$D4,$D4             # d4 += h1*r3
1123         vpmuludq        $H0,$T2,$T2             # h0*r3
1124         vpmuludq        $H4,$T3,$T0             # h4*s3
1125         vpaddq          $T2,$D3,$D3             # d3 += h0*r3
1126         vpaddq          $T0,$D2,$D2             # d2 += h4*s3
1127          vmovdqu        16*2($inp),$T0                          # load input
1128         vpmuludq        $H3,$T3,$T2             # h3*s3
1129         vpmuludq        $H2,$T3,$T3             # h2*s3
1130         vpaddq          $T2,$D1,$D1             # d1 += h3*s3
1131          vmovdqu        16*3($inp),$T1                          #
1132         vpaddq          $T3,$D0,$D0             # d0 += h2*s3
1133
1134         vpmuludq        $H2,$T4,$H2             # h2*s4
1135         vpmuludq        $H3,$T4,$H3             # h3*s4
1136          vpsrldq        \$6,$T0,$T2                             # splat input
1137         vpaddq          $H2,$D1,$D1             # d1 += h2*s4
1138         vpmuludq        $H4,$T4,$H4             # h4*s4
1139          vpsrldq        \$6,$T1,$T3                             #
1140         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
1141         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
1142         vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
1143         vpmuludq        $H1,$T4,$H0
1144          vpunpckhqdq    $T1,$T0,$T4             # 4
1145         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1146         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1147
1148         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1149         vpunpcklqdq     $T3,$T2,$T3             # 2:3
1150
1151         #vpsrlq         \$40,$T4,$T4            # 4
1152         vpsrldq         \$`40/8`,$T4,$T4        # 4
1153         vpsrlq          \$26,$T0,$T1
1154          vmovdqa        0x00(%rsp),$D4          # preload r0^2
1155         vpand           $MASK,$T0,$T0           # 0
1156         vpsrlq          \$4,$T3,$T2
1157         vpand           $MASK,$T1,$T1           # 1
1158         vpand           0(%rcx),$T4,$T4         # .Lmask24
1159         vpsrlq          \$30,$T3,$T3
1160         vpand           $MASK,$T2,$T2           # 2
1161         vpand           $MASK,$T3,$T3           # 3
1162         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
1163
1164         ################################################################
1165         # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
1166         # and P. Schwabe
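	# (informal note: each limb's carry is propagated just once into the
	#  next limb, leaving limbs marginally above 26 bits rather than
	#  fully normalized; that is the "lazy" part)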
1167
1168         vpsrlq          \$26,$H3,$D3
1169         vpand           $MASK,$H3,$H3
1170         vpaddq          $D3,$H4,$H4             # h3 -> h4
1171
1172         vpsrlq          \$26,$H0,$D0
1173         vpand           $MASK,$H0,$H0
1174         vpaddq          $D0,$D1,$H1             # h0 -> h1
1175
1176         vpsrlq          \$26,$H4,$D0
1177         vpand           $MASK,$H4,$H4
1178
1179         vpsrlq          \$26,$H1,$D1
1180         vpand           $MASK,$H1,$H1
1181         vpaddq          $D1,$H2,$H2             # h1 -> h2
1182
1183         vpaddq          $D0,$H0,$H0
1184         vpsllq          \$2,$D0,$D0
1185         vpaddq          $D0,$H0,$H0             # h4 -> h0
1186
1187         vpsrlq          \$26,$H2,$D2
1188         vpand           $MASK,$H2,$H2
1189         vpaddq          $D2,$H3,$H3             # h2 -> h3
1190
1191         vpsrlq          \$26,$H0,$D0
1192         vpand           $MASK,$H0,$H0
1193         vpaddq          $D0,$H1,$H1             # h0 -> h1
1194
1195         vpsrlq          \$26,$H3,$D3
1196         vpand           $MASK,$H3,$H3
1197         vpaddq          $D3,$H4,$H4             # h3 -> h4
1198
1199         ja              .Loop_avx
1200
1201 .Lskip_loop_avx:
1202         ################################################################
1203         # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1204
1205         vpshufd         \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
1206         add             \$32,$len
1207         jnz             .Long_tail_avx
1208
1209         vpaddq          $H2,$T2,$T2
1210         vpaddq          $H0,$T0,$T0
1211         vpaddq          $H1,$T1,$T1
1212         vpaddq          $H3,$T3,$T3
1213         vpaddq          $H4,$T4,$T4
1214
1215 .Long_tail_avx:
1216         vmovdqa         $H2,0x20(%r11)
1217         vmovdqa         $H0,0x00(%r11)
1218         vmovdqa         $H1,0x10(%r11)
1219         vmovdqa         $H3,0x30(%r11)
1220         vmovdqa         $H4,0x40(%r11)
1221
1222         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1223         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1224         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1225         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1226         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1227
1228         vpmuludq        $T2,$D4,$D2             # d2 = h2*r0
1229         vpmuludq        $T0,$D4,$D0             # d0 = h0*r0
1230          vpshufd        \$0x10,`16*1-64`($ctx),$H2              # r1^n
1231         vpmuludq        $T1,$D4,$D1             # d1 = h1*r0
1232         vpmuludq        $T3,$D4,$D3             # d3 = h3*r0
1233         vpmuludq        $T4,$D4,$D4             # d4 = h4*r0
1234
1235         vpmuludq        $T3,$H2,$H0             # h3*r1
1236         vpaddq          $H0,$D4,$D4             # d4 += h3*r1
1237          vpshufd        \$0x10,`16*2-64`($ctx),$H3              # s1^n
1238         vpmuludq        $T2,$H2,$H1             # h2*r1
1239         vpaddq          $H1,$D3,$D3             # d3 += h2*r1
1240          vpshufd        \$0x10,`16*3-64`($ctx),$H4              # r2^n
1241         vpmuludq        $T1,$H2,$H0             # h1*r1
1242         vpaddq          $H0,$D2,$D2             # d2 += h1*r1
1243         vpmuludq        $T0,$H2,$H2             # h0*r1
1244         vpaddq          $H2,$D1,$D1             # d1 += h0*r1
1245         vpmuludq        $T4,$H3,$H3             # h4*s1
1246         vpaddq          $H3,$D0,$D0             # d0 += h4*s1
1247
1248          vpshufd        \$0x10,`16*4-64`($ctx),$H2              # s2^n
1249         vpmuludq        $T2,$H4,$H1             # h2*r2
1250         vpaddq          $H1,$D4,$D4             # d4 += h2*r2
1251         vpmuludq        $T1,$H4,$H0             # h1*r2
1252         vpaddq          $H0,$D3,$D3             # d3 += h1*r2
1253          vpshufd        \$0x10,`16*5-64`($ctx),$H3              # r3^n
1254         vpmuludq        $T0,$H4,$H4             # h0*r2
1255         vpaddq          $H4,$D2,$D2             # d2 += h0*r2
1256         vpmuludq        $T4,$H2,$H1             # h4*s2
1257         vpaddq          $H1,$D1,$D1             # d1 += h4*s2
1258          vpshufd        \$0x10,`16*6-64`($ctx),$H4              # s3^n
1259         vpmuludq        $T3,$H2,$H2             # h3*s2
1260         vpaddq          $H2,$D0,$D0             # d0 += h3*s2
1261
1262         vpmuludq        $T1,$H3,$H0             # h1*r3
1263         vpaddq          $H0,$D4,$D4             # d4 += h1*r3
1264         vpmuludq        $T0,$H3,$H3             # h0*r3
1265         vpaddq          $H3,$D3,$D3             # d3 += h0*r3
1266          vpshufd        \$0x10,`16*7-64`($ctx),$H2              # r4^n
1267         vpmuludq        $T4,$H4,$H1             # h4*s3
1268         vpaddq          $H1,$D2,$D2             # d2 += h4*s3
1269          vpshufd        \$0x10,`16*8-64`($ctx),$H3              # s4^n
1270         vpmuludq        $T3,$H4,$H0             # h3*s3
1271         vpaddq          $H0,$D1,$D1             # d1 += h3*s3
1272         vpmuludq        $T2,$H4,$H4             # h2*s3
1273         vpaddq          $H4,$D0,$D0             # d0 += h2*s3
1274
1275         vpmuludq        $T0,$H2,$H2             # h0*r4
1276         vpaddq          $H2,$D4,$D4             # h4 = d4 + h0*r4
1277         vpmuludq        $T4,$H3,$H1             # h4*s4
1278         vpaddq          $H1,$D3,$D3             # h3 = d3 + h4*s4
1279         vpmuludq        $T3,$H3,$H0             # h3*s4
1280         vpaddq          $H0,$D2,$D2             # h2 = d2 + h3*s4
1281         vpmuludq        $T2,$H3,$H1             # h2*s4
1282         vpaddq          $H1,$D1,$D1             # h1 = d1 + h2*s4
1283         vpmuludq        $T1,$H3,$H3             # h1*s4
1284         vpaddq          $H3,$D0,$D0             # h0 = d0 + h1*s4
1285
1286         jz              .Lshort_tail_avx
1287
1288         vmovdqu         16*0($inp),$H0          # load input
1289         vmovdqu         16*1($inp),$H1
1290
1291         vpsrldq         \$6,$H0,$H2             # splat input
1292         vpsrldq         \$6,$H1,$H3
1293         vpunpckhqdq     $H1,$H0,$H4             # 4
1294         vpunpcklqdq     $H1,$H0,$H0             # 0:1
1295         vpunpcklqdq     $H3,$H2,$H3             # 2:3
1296
1297         vpsrlq          \$40,$H4,$H4            # 4
1298         vpsrlq          \$26,$H0,$H1
1299         vpand           $MASK,$H0,$H0           # 0
1300         vpsrlq          \$4,$H3,$H2
1301         vpand           $MASK,$H1,$H1           # 1
1302         vpsrlq          \$30,$H3,$H3
1303         vpand           $MASK,$H2,$H2           # 2
1304         vpand           $MASK,$H3,$H3           # 3
1305         vpor            32(%rcx),$H4,$H4        # padbit, yes, always
1306
1307         vpshufd         \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
1308         vpaddq          0x00(%r11),$H0,$H0
1309         vpaddq          0x10(%r11),$H1,$H1
1310         vpaddq          0x20(%r11),$H2,$H2
1311         vpaddq          0x30(%r11),$H3,$H3
1312         vpaddq          0x40(%r11),$H4,$H4
1313
1314         ################################################################
1315         # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1316
1317         vpmuludq        $H0,$T4,$T0             # h0*r0
1318         vpaddq          $T0,$D0,$D0             # d0 += h0*r0
1319         vpmuludq        $H1,$T4,$T1             # h1*r0
1320         vpaddq          $T1,$D1,$D1             # d1 += h1*r0
1321         vpmuludq        $H2,$T4,$T0             # h2*r0
1322         vpaddq          $T0,$D2,$D2             # d2 += h2*r0
1323          vpshufd        \$0x32,`16*1-64`($ctx),$T2              # r1^n
1324         vpmuludq        $H3,$T4,$T1             # h3*r0
1325         vpaddq          $T1,$D3,$D3             # d3 += h3*r0
1326         vpmuludq        $H4,$T4,$T4             # h4*r0
1327         vpaddq          $T4,$D4,$D4             # d4 += h4*r0
1328
1329         vpmuludq        $H3,$T2,$T0             # h3*r1
1330         vpaddq          $T0,$D4,$D4             # d4 += h3*r1
1331          vpshufd        \$0x32,`16*2-64`($ctx),$T3              # s1
1332         vpmuludq        $H2,$T2,$T1             # h2*r1
1333         vpaddq          $T1,$D3,$D3             # d3 += h2*r1
1334          vpshufd        \$0x32,`16*3-64`($ctx),$T4              # r2
1335         vpmuludq        $H1,$T2,$T0             # h1*r1
1336         vpaddq          $T0,$D2,$D2             # d2 += h1*r1
1337         vpmuludq        $H0,$T2,$T2             # h0*r1
1338         vpaddq          $T2,$D1,$D1             # d1 += h0*r1
1339         vpmuludq        $H4,$T3,$T3             # h4*s1
1340         vpaddq          $T3,$D0,$D0             # d0 += h4*s1
1341
1342          vpshufd        \$0x32,`16*4-64`($ctx),$T2              # s2
1343         vpmuludq        $H2,$T4,$T1             # h2*r2
1344         vpaddq          $T1,$D4,$D4             # d4 += h2*r2
1345         vpmuludq        $H1,$T4,$T0             # h1*r2
1346         vpaddq          $T0,$D3,$D3             # d3 += h1*r2
1347          vpshufd        \$0x32,`16*5-64`($ctx),$T3              # r3
1348         vpmuludq        $H0,$T4,$T4             # h0*r2
1349         vpaddq          $T4,$D2,$D2             # d2 += h0*r2
1350         vpmuludq        $H4,$T2,$T1             # h4*s2
1351         vpaddq          $T1,$D1,$D1             # d1 += h4*s2
1352          vpshufd        \$0x32,`16*6-64`($ctx),$T4              # s3
1353         vpmuludq        $H3,$T2,$T2             # h3*s2
1354         vpaddq          $T2,$D0,$D0             # d0 += h3*s2
1355
1356         vpmuludq        $H1,$T3,$T0             # h1*r3
1357         vpaddq          $T0,$D4,$D4             # d4 += h1*r3
1358         vpmuludq        $H0,$T3,$T3             # h0*r3
1359         vpaddq          $T3,$D3,$D3             # d3 += h0*r3
1360          vpshufd        \$0x32,`16*7-64`($ctx),$T2              # r4
1361         vpmuludq        $H4,$T4,$T1             # h4*s3
1362         vpaddq          $T1,$D2,$D2             # d2 += h4*s3
1363          vpshufd        \$0x32,`16*8-64`($ctx),$T3              # s4
1364         vpmuludq        $H3,$T4,$T0             # h3*s3
1365         vpaddq          $T0,$D1,$D1             # d1 += h3*s3
1366         vpmuludq        $H2,$T4,$T4             # h2*s3
1367         vpaddq          $T4,$D0,$D0             # d0 += h2*s3
1368
1369         vpmuludq        $H0,$T2,$T2             # h0*r4
1370         vpaddq          $T2,$D4,$D4             # d4 += h0*r4
1371         vpmuludq        $H4,$T3,$T1             # h4*s4
1372         vpaddq          $T1,$D3,$D3             # d3 += h4*s4
1373         vpmuludq        $H3,$T3,$T0             # h3*s4
1374         vpaddq          $T0,$D2,$D2             # d2 += h3*s4
1375         vpmuludq        $H2,$T3,$T1             # h2*s4
1376         vpaddq          $T1,$D1,$D1             # d1 += h2*s4
1377         vpmuludq        $H1,$T3,$T3             # h1*s4
1378         vpaddq          $T3,$D0,$D0             # d0 += h1*s4
1379
1380 .Lshort_tail_avx:
1381         ################################################################
1382         # horizontal addition
1383
1384         vpsrldq         \$8,$D4,$T4
1385         vpsrldq         \$8,$D3,$T3
1386         vpsrldq         \$8,$D1,$T1
1387         vpsrldq         \$8,$D0,$T0
1388         vpsrldq         \$8,$D2,$T2
1389         vpaddq          $T3,$D3,$D3
1390         vpaddq          $T4,$D4,$D4
1391         vpaddq          $T0,$D0,$D0
1392         vpaddq          $T1,$D1,$D1
1393         vpaddq          $T2,$D2,$D2
1394
1395         ################################################################
1396         # lazy reduction
1397
1398         vpsrlq          \$26,$D3,$H3
1399         vpand           $MASK,$D3,$D3
1400         vpaddq          $H3,$D4,$D4             # h3 -> h4
1401
1402         vpsrlq          \$26,$D0,$H0
1403         vpand           $MASK,$D0,$D0
1404         vpaddq          $H0,$D1,$D1             # h0 -> h1
1405
1406         vpsrlq          \$26,$D4,$H4
1407         vpand           $MASK,$D4,$D4
1408
1409         vpsrlq          \$26,$D1,$H1
1410         vpand           $MASK,$D1,$D1
1411         vpaddq          $H1,$D2,$D2             # h1 -> h2
1412
1413         vpaddq          $H4,$D0,$D0
1414         vpsllq          \$2,$H4,$H4
1415         vpaddq          $H4,$D0,$D0             # h4 -> h0
1416
1417         vpsrlq          \$26,$D2,$H2
1418         vpand           $MASK,$D2,$D2
1419         vpaddq          $H2,$D3,$D3             # h2 -> h3
1420
1421         vpsrlq          \$26,$D0,$H0
1422         vpand           $MASK,$D0,$D0
1423         vpaddq          $H0,$D1,$D1             # h0 -> h1
1424
1425         vpsrlq          \$26,$D3,$H3
1426         vpand           $MASK,$D3,$D3
1427         vpaddq          $H3,$D4,$D4             # h3 -> h4
1428
1429         vmovd           $D0,`4*0-48-64`($ctx)   # save partially reduced
1430         vmovd           $D1,`4*1-48-64`($ctx)
1431         vmovd           $D2,`4*2-48-64`($ctx)
1432         vmovd           $D3,`4*3-48-64`($ctx)
1433         vmovd           $D4,`4*4-48-64`($ctx)
1434 ___
1435 $code.=<<___    if ($win64);
1436         vmovdqa         0x50(%r11),%xmm6
1437         vmovdqa         0x60(%r11),%xmm7
1438         vmovdqa         0x70(%r11),%xmm8
1439         vmovdqa         0x80(%r11),%xmm9
1440         vmovdqa         0x90(%r11),%xmm10
1441         vmovdqa         0xa0(%r11),%xmm11
1442         vmovdqa         0xb0(%r11),%xmm12
1443         vmovdqa         0xc0(%r11),%xmm13
1444         vmovdqa         0xd0(%r11),%xmm14
1445         vmovdqa         0xe0(%r11),%xmm15
1446         lea             0xf8(%r11),%rsp
1447 .Ldo_avx_epilogue:
1448 ___
1449 $code.=<<___    if (!$win64);
1450         lea             -8(%r10),%rsp
1451 .cfi_def_cfa_register   %rsp
1452 ___
1453 $code.=<<___;
1454         vzeroupper
1455         ret
1456 .cfi_endproc
1457 ___
1458 &end_function("poly1305_blocks_avx");
1459
1460 &declare_function("poly1305_emit_avx", 32, 3);
1461 $code.=<<___;
1462         cmpl    \$0,20($ctx)    # is_base2_26?
1463         je      .Lemit
1464
1465         mov     0($ctx),%eax    # load hash value base 2^26
1466         mov     4($ctx),%ecx
1467         mov     8($ctx),%r8d
1468         mov     12($ctx),%r11d
1469         mov     16($ctx),%r10d
1470
1471         shl     \$26,%rcx       # base 2^26 -> base 2^64
1472         mov     %r8,%r9
1473         shl     \$52,%r8
1474         add     %rcx,%rax
1475         shr     \$12,%r9
1476         add     %rax,%r8        # h0
1477         adc     \$0,%r9
1478
1479         shl     \$14,%r11
1480         mov     %r10,%rax
1481         shr     \$24,%r10
1482         add     %r11,%r9
1483         shl     \$40,%rax
1484         add     %rax,%r9        # h1
1485         adc     \$0,%r10        # h2
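        # Note (illustration only): the value being rebuilt above is
        #       h = h0 + h1*2^26 + h2*2^52 + h3*2^78 + h4*2^104
        # and the three words produced are
        #       w0 = h0 | h1<<26 | h2<<52               (%r8)
        #       w1 = h2>>12 | h3<<14 | h4<<40           (%r9)
        #       w2 = h4>>24 (plus carries)              (%r10)
        # so that h = w0 + w1*2^64 + w2*2^128.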
1486
1487         mov     %r10,%rax       # could be partially reduced, so reduce
1488         mov     %r10,%rcx
1489         and     \$3,%r10
1490         shr     \$2,%rax
1491         and     \$-4,%rcx
1492         add     %rcx,%rax
1493         add     %rax,%r8
1494         adc     \$0,%r9
1495         adc     \$0,%r10
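        # Note (illustration only): the fold above uses 2^130 == 5
        # (mod 2^130-5); with t = h2>>2 being the bits at and above 2^130,
        # 5*t is formed as (h2>>2) + (h2 & ~3) and added back in, leaving
        # only the low two bits in h2.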
1496
1497         mov     %r8,%rax
1498         add     \$5,%r8         # compare to modulus
1499         mov     %r9,%rcx
1500         adc     \$0,%r9
1501         adc     \$0,%r10
1502         shr     \$2,%r10        # did 130-bit value overflow?
1503         cmovnz  %r8,%rax
1504         cmovnz  %r9,%rcx
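        # Note (illustration only): if h + 5 produces a carry into bit 130,
        # then h >= 2^130 - 5 and the value reduced mod 2^130-5 is
        # h + 5 - 2^130; the cmovs select h + 5 in that case, and the extra
        # bit is dropped implicitly because only the low 128 bits, plus the
        # nonce, form the tag.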
1505
1506         add     0($nonce),%rax  # accumulate nonce
1507         adc     8($nonce),%rcx
1508         mov     %rax,0($mac)    # write result
1509         mov     %rcx,8($mac)
1510
1511         ret
1512 ___
1513 &end_function("poly1305_emit_avx");
1514
1515 if ($avx>1) {
1516
1517 if ($kernel) {
1518         $code .= "#ifdef CONFIG_AS_AVX2\n";
1519 }
1520
1521 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1522     map("%ymm$_",(0..15));
1523 my $S4=$MASK;
1524
1525 sub poly1305_blocks_avxN {
1526         my ($avx512) = @_;
1527         my $suffix = $avx512 ? "_avx512" : "";
1528 $code.=<<___;
1529 .cfi_startproc
1530         mov     20($ctx),%r8d           # is_base2_26
1531         cmp     \$128,$len
1532         jae     .Lblocks_avx2$suffix
1533         test    %r8d,%r8d
1534         jz      .Lblocks
1535
1536 .Lblocks_avx2$suffix:
1537         and     \$-16,$len
1538         jz      .Lno_data_avx2$suffix
1539
1540         vzeroupper
1541
1542         test    %r8d,%r8d
1543         jz      .Lbase2_64_avx2$suffix
1544
1545         test    \$63,$len
1546         jz      .Leven_avx2$suffix
1547
1548         push    %rbp
1549 .cfi_push       %rbp
1550         mov     %rsp,%rbp
1551         push    %rbx
1552 .cfi_push       %rbx
1553         push    %r12
1554 .cfi_push       %r12
1555         push    %r13
1556 .cfi_push       %r13
1557         push    %r14
1558 .cfi_push       %r14
1559         push    %r15
1560 .cfi_push       %r15
1561 .Lblocks_avx2_body$suffix:
1562
1563         mov     $len,%r15               # reassign $len
1564
1565         mov     0($ctx),$d1             # load hash value
1566         mov     8($ctx),$d2
1567         mov     16($ctx),$h2#d
1568
1569         mov     24($ctx),$r0            # load r
1570         mov     32($ctx),$s1
1571
1572         ################################# base 2^26 -> base 2^64
1573         mov     $d1#d,$h0#d
1574         and     \$`-1*(1<<31)`,$d1
1575         mov     $d2,$r1                 # borrow $r1
1576         mov     $d2#d,$h1#d
1577         and     \$`-1*(1<<31)`,$d2
1578
1579         shr     \$6,$d1
1580         shl     \$52,$r1
1581         add     $d1,$h0
1582         shr     \$12,$h1
1583         shr     \$18,$d2
1584         add     $r1,$h0
1585         adc     $d2,$h1
1586
1587         mov     $h2,$d1
1588         shl     \$40,$d1
1589         shr     \$24,$h2
1590         add     $d1,$h1
1591         adc     \$0,$h2                 # can be partially reduced...
1592
1593         mov     \$-4,$d2                # ... so reduce
1594         mov     $h2,$d1
1595         and     $h2,$d2
1596         shr     \$2,$d1
1597         and     \$3,$h2
1598         add     $d2,$d1                 # =*5
1599         add     $d1,$h0
1600         adc     \$0,$h1
1601         adc     \$0,$h2
1602
1603         mov     $s1,$r1
1604         mov     $s1,%rax
1605         shr     \$2,$s1
1606         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
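        # Note (illustration only): clamping clears the low two bits of r1,
        # so r1*2^128 == (r1>>2)*2^130 == 5*(r1>>2) (mod 2^130-5), which lets
        # __poly1305_block fold the terms landing at 2^128 by multiplying
        # with s1 = r1 + (r1>>2) instead of r1.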
1607
1608 .Lbase2_26_pre_avx2$suffix:
1609         add     0($inp),$h0             # accumulate input
1610         adc     8($inp),$h1
1611         lea     16($inp),$inp
1612         adc     $padbit,$h2
1613         sub     \$16,%r15
1614
1615         call    __poly1305_block
1616         mov     $r1,%rax
1617
1618         test    \$63,%r15
1619         jnz     .Lbase2_26_pre_avx2$suffix
1620
1621         test    $padbit,$padbit         # if $padbit is zero,
1622         jz      .Lstore_base2_64_avx2$suffix    # store hash in base 2^64 format
1623
1624         ################################# base 2^64 -> base 2^26
1625         mov     $h0,%rax
1626         mov     $h0,%rdx
1627         shr     \$52,$h0
1628         mov     $h1,$r0
1629         mov     $h1,$r1
1630         shr     \$26,%rdx
1631         and     \$0x3ffffff,%rax        # h[0]
1632         shl     \$12,$r0
1633         and     \$0x3ffffff,%rdx        # h[1]
1634         shr     \$14,$h1
1635         or      $r0,$h0
1636         shl     \$24,$h2
1637         and     \$0x3ffffff,$h0         # h[2]
1638         shr     \$40,$r1
1639         and     \$0x3ffffff,$h1         # h[3]
1640         or      $r1,$h2                 # h[4]
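        # Note (illustration only): given the three 64-bit hash words
        # w0, w1, w2 on entry, the limbs produced above are
        #       h[0] = w0 & 0x3ffffff           h[1] = (w0>>26) & 0x3ffffff
        #       h[2] = (w0>>52 | w1<<12) & 0x3ffffff
        #       h[3] = (w1>>14) & 0x3ffffff     h[4] = w1>>40 | w2<<24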
1641
1642         test    %r15,%r15
1643         jz      .Lstore_base2_26_avx2$suffix
1644
1645         vmovd   %rax#d,%x#$H0
1646         vmovd   %rdx#d,%x#$H1
1647         vmovd   $h0#d,%x#$H2
1648         vmovd   $h1#d,%x#$H3
1649         vmovd   $h2#d,%x#$H4
1650         jmp     .Lproceed_avx2$suffix
1651
1652 .align  32
1653 .Lstore_base2_64_avx2$suffix:
1654         mov     $h0,0($ctx)
1655         mov     $h1,8($ctx)
1656         mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
1657         jmp     .Ldone_avx2$suffix
1658
1659 .align  16
1660 .Lstore_base2_26_avx2$suffix:
1661         mov     %rax#d,0($ctx)          # store hash value base 2^26
1662         mov     %rdx#d,4($ctx)
1663         mov     $h0#d,8($ctx)
1664         mov     $h1#d,12($ctx)
1665         mov     $h2#d,16($ctx)
1666 .align  16
1667 .Ldone_avx2$suffix:
1668         pop             %r15
1669 .cfi_restore    %r15
1670         pop             %r14
1671 .cfi_restore    %r14
1672         pop             %r13
1673 .cfi_restore    %r13
1674         pop             %r12
1675 .cfi_restore    %r12
1676         pop             %rbx
1677 .cfi_restore    %rbx
1678         pop             %rbp
1679 .cfi_restore    %rbp
1680 .Lno_data_avx2$suffix:
1681 .Lblocks_avx2_epilogue$suffix:
1682         ret
1683 .cfi_endproc
1684
1685 .align  32
1686 .Lbase2_64_avx2$suffix:
1687 .cfi_startproc
1688         push    %rbp
1689 .cfi_push       %rbp
1690         mov     %rsp,%rbp
1691         push    %rbx
1692 .cfi_push       %rbx
1693         push    %r12
1694 .cfi_push       %r12
1695         push    %r13
1696 .cfi_push       %r13
1697         push    %r14
1698 .cfi_push       %r14
1699         push    %r15
1700 .cfi_push       %r15
1701 .Lbase2_64_avx2_body$suffix:
1702
1703         mov     $len,%r15               # reassign $len
1704
1705         mov     24($ctx),$r0            # load r
1706         mov     32($ctx),$s1
1707
1708         mov     0($ctx),$h0             # load hash value
1709         mov     8($ctx),$h1
1710         mov     16($ctx),$h2#d
1711
1712         mov     $s1,$r1
1713         mov     $s1,%rax
1714         shr     \$2,$s1
1715         add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
1716
1717         test    \$63,$len
1718         jz      .Linit_avx2$suffix
1719
1720 .Lbase2_64_pre_avx2$suffix:
1721         add     0($inp),$h0             # accumulate input
1722         adc     8($inp),$h1
1723         lea     16($inp),$inp
1724         adc     $padbit,$h2
1725         sub     \$16,%r15
1726
1727         call    __poly1305_block
1728         mov     $r1,%rax
1729
1730         test    \$63,%r15
1731         jnz     .Lbase2_64_pre_avx2$suffix
1732
1733 .Linit_avx2$suffix:
1734         ################################# base 2^64 -> base 2^26
1735         mov     $h0,%rax
1736         mov     $h0,%rdx
1737         shr     \$52,$h0
1738         mov     $h1,$d1
1739         mov     $h1,$d2
1740         shr     \$26,%rdx
1741         and     \$0x3ffffff,%rax        # h[0]
1742         shl     \$12,$d1
1743         and     \$0x3ffffff,%rdx        # h[1]
1744         shr     \$14,$h1
1745         or      $d1,$h0
1746         shl     \$24,$h2
1747         and     \$0x3ffffff,$h0         # h[2]
1748         shr     \$40,$d2
1749         and     \$0x3ffffff,$h1         # h[3]
1750         or      $d2,$h2                 # h[4]
1751
1752         vmovd   %rax#d,%x#$H0
1753         vmovd   %rdx#d,%x#$H1
1754         vmovd   $h0#d,%x#$H2
1755         vmovd   $h1#d,%x#$H3
1756         vmovd   $h2#d,%x#$H4
1757         movl    \$1,20($ctx)            # set is_base2_26
1758
1759         call    __poly1305_init_avx
1760
1761 .Lproceed_avx2$suffix:
1762         mov     %r15,$len                       # restore $len
1763 ___
1764 $code.=<<___ if (!$kernel);
1765         mov     OPENSSL_ia32cap_P+8(%rip),%r9d
1766         mov     \$`(1<<31|1<<30|1<<16)`,%r11d
1767 ___
1768 $code.=<<___;
1769         pop             %r15
1770 .cfi_restore    %r15
1771         pop             %r14
1772 .cfi_restore    %r14
1773         pop             %r13
1774 .cfi_restore    %r13
1775         pop             %r12
1776 .cfi_restore    %r12
1777         pop             %rbx
1778 .cfi_restore    %rbx
1779         pop             %rbp
1780 .cfi_restore    %rbp
1781 .Lbase2_64_avx2_epilogue$suffix:
1782         jmp     .Ldo_avx2$suffix
1783 .cfi_endproc
1784
1785 .align  32
1786 .Leven_avx2$suffix:
1787 .cfi_startproc
1788 ___
1789 $code.=<<___ if (!$kernel);
1790         mov             OPENSSL_ia32cap_P+8(%rip),%r9d
1791 ___
1792 $code.=<<___;
1793         vmovd           4*0($ctx),%x#$H0        # load hash value base 2^26
1794         vmovd           4*1($ctx),%x#$H1
1795         vmovd           4*2($ctx),%x#$H2
1796         vmovd           4*3($ctx),%x#$H3
1797         vmovd           4*4($ctx),%x#$H4
1798
1799 .Ldo_avx2$suffix:
1800 ___
1801 $code.=<<___            if (!$kernel && $avx>2);
1802         cmp             \$512,$len
1803         jb              .Lskip_avx512
1804         and             %r11d,%r9d
1805         test            \$`1<<16`,%r9d          # check for AVX512F
1806         jnz             .Lblocks_avx512
1807 .Lskip_avx512$suffix:
1808 ___
1809 $code.=<<___ if ($avx > 2 && $avx512 && $kernel);
1810         cmp             \$512,$len
1811         jae             .Lblocks_avx512
1812 ___
1813 $code.=<<___    if (!$win64);
1814         lea             8(%rsp),%r10
1815 .cfi_def_cfa_register   %r10
1816         sub             \$0x128,%rsp
1817 ___
1818 $code.=<<___    if ($win64);
1819         lea             8(%rsp),%r10
1820         sub             \$0x1c8,%rsp
1821         vmovdqa         %xmm6,-0xb0(%r10)
1822         vmovdqa         %xmm7,-0xa0(%r10)
1823         vmovdqa         %xmm8,-0x90(%r10)
1824         vmovdqa         %xmm9,-0x80(%r10)
1825         vmovdqa         %xmm10,-0x70(%r10)
1826         vmovdqa         %xmm11,-0x60(%r10)
1827         vmovdqa         %xmm12,-0x50(%r10)
1828         vmovdqa         %xmm13,-0x40(%r10)
1829         vmovdqa         %xmm14,-0x30(%r10)
1830         vmovdqa         %xmm15,-0x20(%r10)
1831 .Ldo_avx2_body$suffix:
1832 ___
1833 $code.=<<___;
1834         lea             .Lconst(%rip),%rcx
1835         lea             48+64($ctx),$ctx        # size optimization
1836         vmovdqa         96(%rcx),$T0            # .Lpermd_avx2
1837
1838         # expand and copy pre-calculated table to stack
1839         vmovdqu         `16*0-64`($ctx),%x#$T2
1840         and             \$-512,%rsp
1841         vmovdqu         `16*1-64`($ctx),%x#$T3
1842         vmovdqu         `16*2-64`($ctx),%x#$T4
1843         vmovdqu         `16*3-64`($ctx),%x#$D0
1844         vmovdqu         `16*4-64`($ctx),%x#$D1
1845         vmovdqu         `16*5-64`($ctx),%x#$D2
1846         lea             0x90(%rsp),%rax         # size optimization
1847         vmovdqu         `16*6-64`($ctx),%x#$D3
1848         vpermd          $T2,$T0,$T2             # 00003412 -> 14243444
1849         vmovdqu         `16*7-64`($ctx),%x#$D4
1850         vpermd          $T3,$T0,$T3
1851         vmovdqu         `16*8-64`($ctx),%x#$MASK
1852         vpermd          $T4,$T0,$T4
1853         vmovdqa         $T2,0x00(%rsp)
1854         vpermd          $D0,$T0,$D0
1855         vmovdqa         $T3,0x20-0x90(%rax)
1856         vpermd          $D1,$T0,$D1
1857         vmovdqa         $T4,0x40-0x90(%rax)
1858         vpermd          $D2,$T0,$D2
1859         vmovdqa         $D0,0x60-0x90(%rax)
1860         vpermd          $D3,$T0,$D3
1861         vmovdqa         $D1,0x80-0x90(%rax)
1862         vpermd          $D4,$T0,$D4
1863         vmovdqa         $D2,0xa0-0x90(%rax)
1864         vpermd          $MASK,$T0,$MASK
1865         vmovdqa         $D3,0xc0-0x90(%rax)
1866         vmovdqa         $D4,0xe0-0x90(%rax)
1867         vmovdqa         $MASK,0x100-0x90(%rax)
1868         vmovdqa         64(%rcx),$MASK          # .Lmask26
1869
1870         ################################################################
1871         # load input
1872         vmovdqu         16*0($inp),%x#$T0
1873         vmovdqu         16*1($inp),%x#$T1
1874         vinserti128     \$1,16*2($inp),$T0,$T0
1875         vinserti128     \$1,16*3($inp),$T1,$T1
1876         lea             16*4($inp),$inp
1877
1878         vpsrldq         \$6,$T0,$T2             # splat input
1879         vpsrldq         \$6,$T1,$T3
1880         vpunpckhqdq     $T1,$T0,$T4             # 4
1881         vpunpcklqdq     $T3,$T2,$T2             # 2:3
1882         vpunpcklqdq     $T1,$T0,$T0             # 0:1
1883
1884         vpsrlq          \$30,$T2,$T3
1885         vpsrlq          \$4,$T2,$T2
1886         vpsrlq          \$26,$T0,$T1
1887         vpsrlq          \$40,$T4,$T4            # 4
1888         vpand           $MASK,$T2,$T2           # 2
1889         vpand           $MASK,$T0,$T0           # 0
1890         vpand           $MASK,$T1,$T1           # 1
1891         vpand           $MASK,$T3,$T3           # 3
1892         vpor            32(%rcx),$T4,$T4        # padbit, yes, always
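        # Note (illustration only): per 16-byte block, with lo = bytes 0..7,
        # mid = bytes 6..13 and hi = bytes 8..15, the limbs extracted above are
        #       t0 = lo & M             t1 = (lo>>26) & M
        #       t2 = (mid>>4) & M       t3 = (mid>>30) & M
        #       t4 = hi>>40 | 1<<24
        # where M = 0x3ffffff and 1<<24 is the 2^128 pad bit in limb 4.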
1893
1894         vpaddq          $H2,$T2,$H2             # accumulate input
1895         sub             \$64,$len
1896         jz              .Ltail_avx2$suffix
1897         jmp             .Loop_avx2$suffix
1898
1899 .align  32
1900 .Loop_avx2$suffix:
1901         ################################################################
1902         # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1903         # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1904         # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1905         # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1906         #   \________/\__________/
1907         ################################################################
1908         #vpaddq         $H2,$T2,$H2             # accumulate input
1909         vpaddq          $H0,$T0,$H0
1910         vmovdqa         `32*0`(%rsp),$T0        # r0^4
1911         vpaddq          $H1,$T1,$H1
1912         vmovdqa         `32*1`(%rsp),$T1        # r1^4
1913         vpaddq          $H3,$T3,$H3
1914         vmovdqa         `32*3`(%rsp),$T2        # r2^4
1915         vpaddq          $H4,$T4,$H4
1916         vmovdqa         `32*6-0x90`(%rax),$T3   # s3^4
1917         vmovdqa         `32*8-0x90`(%rax),$S4   # s4^4
1918
1919         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
1920         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
1921         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1922         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
1923         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1924         #
1925         # however, as h2 is "chronologically" the first one available, pull the
1926         # corresponding operations up, so it's
1927         #
1928         # d4 = h2*r2   + h4*r0 + h3*r1             + h1*r3   + h0*r4
1929         # d3 = h2*r1   + h3*r0           + h1*r2   + h0*r3   + h4*5*r4
1930         # d2 = h2*r0           + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
1931         # d1 = h2*5*r4 + h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3
1932         # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2           + h1*5*r4
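        # Note (illustration only): sN in these comments denotes the
        # pre-computed 5*rN, so that products landing at or above 2^130
        # (e.g. h4*r1 at 2^(104+26)) are folded straight back in via
        # 2^130 == 5 (mod 2^130-5), e.g. h4*5*r1 contributing to d0.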
1933
1934         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
1935         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
1936         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
1937         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
1938         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
1939
1940         vpmuludq        $H0,$T1,$T4             # h0*r1
1941         vpmuludq        $H1,$T1,$H2             # h1*r1, borrow $H2 as temp
1942         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
1943         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
1944         vpmuludq        $H3,$T1,$T4             # h3*r1
1945         vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
1946         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
1947         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
1948          vmovdqa        `32*4-0x90`(%rax),$T1   # s2
1949
1950         vpmuludq        $H0,$T0,$T4             # h0*r0
1951         vpmuludq        $H1,$T0,$H2             # h1*r0
1952         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
1953         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
1954         vpmuludq        $H3,$T0,$T4             # h3*r0
1955         vpmuludq        $H4,$T0,$H2             # h4*r0
1956          vmovdqu        16*0($inp),%x#$T0       # load input
1957         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
1958         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
1959          vinserti128    \$1,16*2($inp),$T0,$T0
1960
1961         vpmuludq        $H3,$T1,$T4             # h3*s2
1962         vpmuludq        $H4,$T1,$H2             # h4*s2
1963          vmovdqu        16*1($inp),%x#$T1
1964         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
1965         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
1966          vmovdqa        `32*5-0x90`(%rax),$H2   # r3
1967         vpmuludq        $H1,$T2,$T4             # h1*r2
1968         vpmuludq        $H0,$T2,$T2             # h0*r2
1969         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
1970         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
1971          vinserti128    \$1,16*3($inp),$T1,$T1
1972          lea            16*4($inp),$inp
1973
1974         vpmuludq        $H1,$H2,$T4             # h1*r3
1975         vpmuludq        $H0,$H2,$H2             # h0*r3
1976          vpsrldq        \$6,$T0,$T2             # splat input
1977         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
1978         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
1979         vpmuludq        $H3,$T3,$T4             # h3*s3
1980         vpmuludq        $H4,$T3,$H2             # h4*s3
1981          vpsrldq        \$6,$T1,$T3
1982         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
1983         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
1984          vpunpckhqdq    $T1,$T0,$T4             # 4
1985
1986         vpmuludq        $H3,$S4,$H3             # h3*s4
1987         vpmuludq        $H4,$S4,$H4             # h4*s4
1988          vpunpcklqdq    $T1,$T0,$T0             # 0:1
1989         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
1990         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
1991          vpunpcklqdq    $T3,$T2,$T3             # 2:3
1992         vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
1993         vpmuludq        $H1,$S4,$H0             # h1*s4
1994         vmovdqa         64(%rcx),$MASK          # .Lmask26
1995         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
1996         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
1997
1998         ################################################################
1999         # lazy reduction (interleaved with tail of input splat)
2000
2001         vpsrlq          \$26,$H3,$D3
2002         vpand           $MASK,$H3,$H3
2003         vpaddq          $D3,$H4,$H4             # h3 -> h4
2004
2005         vpsrlq          \$26,$H0,$D0
2006         vpand           $MASK,$H0,$H0
2007         vpaddq          $D0,$D1,$H1             # h0 -> h1
2008
2009         vpsrlq          \$26,$H4,$D4
2010         vpand           $MASK,$H4,$H4
2011
2012          vpsrlq         \$4,$T3,$T2
2013
2014         vpsrlq          \$26,$H1,$D1
2015         vpand           $MASK,$H1,$H1
2016         vpaddq          $D1,$H2,$H2             # h1 -> h2
2017
2018         vpaddq          $D4,$H0,$H0
2019         vpsllq          \$2,$D4,$D4
2020         vpaddq          $D4,$H0,$H0             # h4 -> h0
2021
2022          vpand          $MASK,$T2,$T2           # 2
2023          vpsrlq         \$26,$T0,$T1
2024
2025         vpsrlq          \$26,$H2,$D2
2026         vpand           $MASK,$H2,$H2
2027         vpaddq          $D2,$H3,$H3             # h2 -> h3
2028
2029          vpaddq         $T2,$H2,$H2             # modulo-scheduled
2030          vpsrlq         \$30,$T3,$T3
2031
2032         vpsrlq          \$26,$H0,$D0
2033         vpand           $MASK,$H0,$H0
2034         vpaddq          $D0,$H1,$H1             # h0 -> h1
2035
2036          vpsrlq         \$40,$T4,$T4            # 4
2037
2038         vpsrlq          \$26,$H3,$D3
2039         vpand           $MASK,$H3,$H3
2040         vpaddq          $D3,$H4,$H4             # h3 -> h4
2041
2042          vpand          $MASK,$T0,$T0           # 0
2043          vpand          $MASK,$T1,$T1           # 1
2044          vpand          $MASK,$T3,$T3           # 3
2045          vpor           32(%rcx),$T4,$T4        # padbit, yes, always
2046
2047         sub             \$64,$len
2048         jnz             .Loop_avx2$suffix
2049
2050         .byte           0x66,0x90
2051 .Ltail_avx2$suffix:
2052         ################################################################
2053         # while the above multiplications were by r^4 in all lanes, in the last
2054         # iteration we multiply the least significant lane by r^4 and the most
2055         # significant one by r, so this is a copy of the above except that
2056         # references to the precomputed table are displaced by 4...
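        # Note (illustration only): after this tail multiplication lane k
        # (k = 0..3) has been multiplied by r^(4-k), so adding the four lanes
        # together (horizontal addition below) yields the complete Horner
        # evaluation sum(inp[i]*r^(n-i)) for the whole message.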
2057
2058         #vpaddq         $H2,$T2,$H2             # accumulate input
2059         vpaddq          $H0,$T0,$H0
2060         vmovdqu         `32*0+4`(%rsp),$T0      # r0^4
2061         vpaddq          $H1,$T1,$H1
2062         vmovdqu         `32*1+4`(%rsp),$T1      # r1^4
2063         vpaddq          $H3,$T3,$H3
2064         vmovdqu         `32*3+4`(%rsp),$T2      # r2^4
2065         vpaddq          $H4,$T4,$H4
2066         vmovdqu         `32*6+4-0x90`(%rax),$T3 # s3^4
2067         vmovdqu         `32*8+4-0x90`(%rax),$S4 # s4^4
2068
2069         vpmuludq        $H2,$T0,$D2             # d2 = h2*r0
2070         vpmuludq        $H2,$T1,$D3             # d3 = h2*r1
2071         vpmuludq        $H2,$T2,$D4             # d4 = h2*r2
2072         vpmuludq        $H2,$T3,$D0             # d0 = h2*s3
2073         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2074
2075         vpmuludq        $H0,$T1,$T4             # h0*r1
2076         vpmuludq        $H1,$T1,$H2             # h1*r1
2077         vpaddq          $T4,$D1,$D1             # d1 += h0*r1
2078         vpaddq          $H2,$D2,$D2             # d2 += h1*r1
2079         vpmuludq        $H3,$T1,$T4             # h3*r1
2080         vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
2081         vpaddq          $T4,$D4,$D4             # d4 += h3*r1
2082         vpaddq          $H2,$D0,$D0             # d0 += h4*s1
2083
2084         vpmuludq        $H0,$T0,$T4             # h0*r0
2085         vpmuludq        $H1,$T0,$H2             # h1*r0
2086         vpaddq          $T4,$D0,$D0             # d0 += h0*r0
2087          vmovdqu        `32*4+4-0x90`(%rax),$T1 # s2
2088         vpaddq          $H2,$D1,$D1             # d1 += h1*r0
2089         vpmuludq        $H3,$T0,$T4             # h3*r0
2090         vpmuludq        $H4,$T0,$H2             # h4*r0
2091         vpaddq          $T4,$D3,$D3             # d3 += h3*r0
2092         vpaddq          $H2,$D4,$D4             # d4 += h4*r0
2093
2094         vpmuludq        $H3,$T1,$T4             # h3*s2
2095         vpmuludq        $H4,$T1,$H2             # h4*s2
2096         vpaddq          $T4,$D0,$D0             # d0 += h3*s2
2097         vpaddq          $H2,$D1,$D1             # d1 += h4*s2
2098          vmovdqu        `32*5+4-0x90`(%rax),$H2 # r3
2099         vpmuludq        $H1,$T2,$T4             # h1*r2
2100         vpmuludq        $H0,$T2,$T2             # h0*r2
2101         vpaddq          $T4,$D3,$D3             # d3 += h1*r2
2102         vpaddq          $T2,$D2,$D2             # d2 += h0*r2
2103
2104         vpmuludq        $H1,$H2,$T4             # h1*r3
2105         vpmuludq        $H0,$H2,$H2             # h0*r3
2106         vpaddq          $T4,$D4,$D4             # d4 += h1*r3
2107         vpaddq          $H2,$D3,$D3             # d3 += h0*r3
2108         vpmuludq        $H3,$T3,$T4             # h3*s3
2109         vpmuludq        $H4,$T3,$H2             # h4*s3
2110         vpaddq          $T4,$D1,$D1             # d1 += h3*s3
2111         vpaddq          $H2,$D2,$D2             # d2 += h4*s3
2112
2113         vpmuludq        $H3,$S4,$H3             # h3*s4
2114         vpmuludq        $H4,$S4,$H4             # h4*s4
2115         vpaddq          $H3,$D2,$H2             # h2 = d2 + h3*s4
2116         vpaddq          $H4,$D3,$H3             # h3 = d3 + h4*s4
2117         vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4             # h0*r4
2118         vpmuludq        $H1,$S4,$H0             # h1*s4
2119         vmovdqa         64(%rcx),$MASK          # .Lmask26
2120         vpaddq          $H4,$D4,$H4             # h4 = d4 + h0*r4
2121         vpaddq          $H0,$D0,$H0             # h0 = d0 + h1*s4
2122
2123         ################################################################
2124         # horizontal addition
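        # Note (illustration only): each accumulator holds four 64-bit partial
        # sums, one per lane; the vpsrldq/vpaddq pairs add the high qword of
        # each 128-bit half into the low one, and vpermq (immediate 0x2) plus
        # vpaddq then adds the two halves, leaving the total in the low 64
        # bits of each register.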
2125
2126         vpsrldq         \$8,$D1,$T1
2127         vpsrldq         \$8,$H2,$T2
2128         vpsrldq         \$8,$H3,$T3
2129         vpsrldq         \$8,$H4,$T4
2130         vpsrldq         \$8,$H0,$T0
2131         vpaddq          $T1,$D1,$D1
2132         vpaddq          $T2,$H2,$H2
2133         vpaddq          $T3,$H3,$H3
2134         vpaddq          $T4,$H4,$H4
2135         vpaddq          $T0,$H0,$H0
2136
2137         vpermq          \$0x2,$H3,$T3
2138         vpermq          \$0x2,$H4,$T4
2139         vpermq          \$0x2,$H0,$T0
2140         vpermq          \$0x2,$D1,$T1
2141         vpermq          \$0x2,$H2,$T2
2142         vpaddq          $T3,$H3,$H3
2143         vpaddq          $T4,$H4,$H4
2144         vpaddq          $T0,$H0,$H0
2145         vpaddq          $T1,$D1,$D1
2146         vpaddq          $T2,$H2,$H2
2147
2148         ################################################################
2149         # lazy reduction
2150
2151         vpsrlq          \$26,$H3,$D3
2152         vpand           $MASK,$H3,$H3
2153         vpaddq          $D3,$H4,$H4             # h3 -> h4
2154
2155         vpsrlq          \$26,$H0,$D0
2156         vpand           $MASK,$H0,$H0
2157         vpaddq          $D0,$D1,$H1             # h0 -> h1
2158
2159         vpsrlq          \$26,$H4,$D4
2160         vpand           $MASK,$H4,$H4
2161
2162         vpsrlq          \$26,$H1,$D1
2163         vpand           $MASK,$H1,$H1
2164         vpaddq          $D1,$H2,$H2             # h1 -> h2
2165
2166         vpaddq          $D4,$H0,$H0
2167         vpsllq          \$2,$D4,$D4
2168         vpaddq          $D4,$H0,$H0             # h4 -> h0
2169
2170         vpsrlq          \$26,$H2,$D2
2171         vpand           $MASK,$H2,$H2
2172         vpaddq          $D2,$H3,$H3             # h2 -> h3
2173
2174         vpsrlq          \$26,$H0,$D0
2175         vpand           $MASK,$H0,$H0
2176         vpaddq          $D0,$H1,$H1             # h0 -> h1
2177
2178         vpsrlq          \$26,$H3,$D3
2179         vpand           $MASK,$H3,$H3
2180         vpaddq          $D3,$H4,$H4             # h3 -> h4
2181
2182         vmovd           %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2183         vmovd           %x#$H1,`4*1-48-64`($ctx)
2184         vmovd           %x#$H2,`4*2-48-64`($ctx)
2185         vmovd           %x#$H3,`4*3-48-64`($ctx)
2186         vmovd           %x#$H4,`4*4-48-64`($ctx)
2187 ___
2188 $code.=<<___    if ($win64);
2189         vmovdqa         -0xb0(%r10),%xmm6
2190         vmovdqa         -0xa0(%r10),%xmm7
2191         vmovdqa         -0x90(%r10),%xmm8
2192         vmovdqa         -0x80(%r10),%xmm9
2193         vmovdqa         -0x70(%r10),%xmm10
2194         vmovdqa         -0x60(%r10),%xmm11
2195         vmovdqa         -0x50(%r10),%xmm12
2196         vmovdqa         -0x40(%r10),%xmm13
2197         vmovdqa         -0x30(%r10),%xmm14
2198         vmovdqa         -0x20(%r10),%xmm15
2199         lea             -8(%r10),%rsp
2200 .Ldo_avx2_epilogue$suffix:
2201 ___
2202 $code.=<<___    if (!$win64);
2203         lea             -8(%r10),%rsp
2204 .cfi_def_cfa_register   %rsp
2205 ___
2206 $code.=<<___;
2207         vzeroupper
2208         ret
2209 .cfi_endproc
2210 ___
2211 if($avx > 2 && $avx512) {
2212 my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24));
2213 my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29));
2214 my $PADBIT="%zmm30";
2215
2216 map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));            # switch to %zmm domain
2217 map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
2218 map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
2219 map(s/%y/%z/,($MASK));
2220
2221 $code.=<<___;
2222 .cfi_startproc
2223 .Lblocks_avx512:
2224         mov             \$15,%eax
2225         kmovw           %eax,%k2
2226 ___
2227 $code.=<<___    if (!$win64);
2228         lea             8(%rsp),%r10
2229 .cfi_def_cfa_register   %r10
2230         sub             \$0x128,%rsp
2231 ___
2232 $code.=<<___    if ($win64);
2233         lea             8(%rsp),%r10
2234         sub             \$0x1c8,%rsp
2235         vmovdqa         %xmm6,-0xb0(%r10)
2236         vmovdqa         %xmm7,-0xa0(%r10)
2237         vmovdqa         %xmm8,-0x90(%r10)
2238         vmovdqa         %xmm9,-0x80(%r10)
2239         vmovdqa         %xmm10,-0x70(%r10)
2240         vmovdqa         %xmm11,-0x60(%r10)
2241         vmovdqa         %xmm12,-0x50(%r10)
2242         vmovdqa         %xmm13,-0x40(%r10)
2243         vmovdqa         %xmm14,-0x30(%r10)
2244         vmovdqa         %xmm15,-0x20(%r10)
2245 .Ldo_avx512_body:
2246 ___
2247 $code.=<<___;
2248         lea             .Lconst(%rip),%rcx
2249         lea             48+64($ctx),$ctx        # size optimization
2250         vmovdqa         96(%rcx),%y#$T2         # .Lpermd_avx2
2251
2252         # expand pre-calculated table
2253         vmovdqu         `16*0-64`($ctx),%x#$D0  # will become expanded ${R0}
2254         and             \$-512,%rsp
2255         vmovdqu         `16*1-64`($ctx),%x#$D1  # will become ... ${R1}
2256         mov             \$0x20,%rax
2257         vmovdqu         `16*2-64`($ctx),%x#$T0  # ... ${S1}
2258         vmovdqu         `16*3-64`($ctx),%x#$D2  # ... ${R2}
2259         vmovdqu         `16*4-64`($ctx),%x#$T1  # ... ${S2}
2260         vmovdqu         `16*5-64`($ctx),%x#$D3  # ... ${R3}
2261         vmovdqu         `16*6-64`($ctx),%x#$T3  # ... ${S3}
2262         vmovdqu         `16*7-64`($ctx),%x#$D4  # ... ${R4}
2263         vmovdqu         `16*8-64`($ctx),%x#$T4  # ... ${S4}
2264         vpermd          $D0,$T2,$R0             # 00003412 -> 14243444
2265         vpbroadcastq    64(%rcx),$MASK          # .Lmask26
2266         vpermd          $D1,$T2,$R1
2267         vpermd          $T0,$T2,$S1
2268         vpermd          $D2,$T2,$R2
2269         vmovdqa64       $R0,0x00(%rsp){%k2}     # save in case $len%128 != 0
2270          vpsrlq         \$32,$R0,$T0            # 14243444 -> 01020304
2271         vpermd          $T1,$T2,$S2
2272         vmovdqu64       $R1,0x00(%rsp,%rax){%k2}
2273          vpsrlq         \$32,$R1,$T1
2274         vpermd          $D3,$T2,$R3
2275         vmovdqa64       $S1,0x40(%rsp){%k2}
2276         vpermd          $T3,$T2,$S3
2277         vpermd          $D4,$T2,$R4
2278         vmovdqu64       $R2,0x40(%rsp,%rax){%k2}
2279         vpermd          $T4,$T2,$S4
2280         vmovdqa64       $S2,0x80(%rsp){%k2}
2281         vmovdqu64       $R3,0x80(%rsp,%rax){%k2}
2282         vmovdqa64       $S3,0xc0(%rsp){%k2}
2283         vmovdqu64       $R4,0xc0(%rsp,%rax){%k2}
2284         vmovdqa64       $S4,0x100(%rsp){%k2}
2285
2286         ################################################################
2287         # calculate 5th through 8th powers of the key
2288         #
2289         # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
2290         # d1 = r0'*r1 + r1'*r0   + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
2291         # d2 = r0'*r2 + r1'*r1   + r2'*r0   + r3'*5*r4 + r4'*5*r3
2292         # d3 = r0'*r3 + r1'*r2   + r2'*r1   + r3'*r0   + r4'*5*r4
2293         # d4 = r0'*r4 + r1'*r3   + r2'*r2   + r3'*r1   + r4'*r0
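        # Note (illustration only): the primed values r0'..r4' are the limbs
        # of r^4, taken from the odd 32-bit elements above, so each lane of
        # the products below is r^4 times the corresponding power r^1..r^4,
        # which yields the limbs of r^5..r^8 from the already available table.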
2294
2295         vpmuludq        $T0,$R0,$D0             # d0 = r0'*r0
2296         vpmuludq        $T0,$R1,$D1             # d1 = r0'*r1
2297         vpmuludq        $T0,$R2,$D2             # d2 = r0'*r2
2298         vpmuludq        $T0,$R3,$D3             # d3 = r0'*r3
2299         vpmuludq        $T0,$R4,$D4             # d4 = r0'*r4
2300          vpsrlq         \$32,$R2,$T2
2301
2302         vpmuludq        $T1,$S4,$M0
2303         vpmuludq        $T1,$R0,$M1
2304         vpmuludq        $T1,$R1,$M2
2305         vpmuludq        $T1,$R2,$M3
2306         vpmuludq        $T1,$R3,$M4
2307          vpsrlq         \$32,$R3,$T3
2308         vpaddq          $M0,$D0,$D0             # d0 += r1'*5*r4
2309         vpaddq          $M1,$D1,$D1             # d1 += r1'*r0
2310         vpaddq          $M2,$D2,$D2             # d2 += r1'*r1
2311         vpaddq          $M3,$D3,$D3             # d3 += r1'*r2
2312         vpaddq          $M4,$D4,$D4             # d4 += r1'*r3
2313
2314         vpmuludq        $T2,$S3,$M0
2315         vpmuludq        $T2,$S4,$M1
2316         vpmuludq        $T2,$R1,$M3
2317         vpmuludq        $T2,$R2,$M4
2318         vpmuludq        $T2,$R0,$M2
2319          vpsrlq         \$32,$R4,$T4
2320         vpaddq          $M0,$D0,$D0             # d0 += r2'*5*r3
2321         vpaddq          $M1,$D1,$D1             # d1 += r2'*5*r4
2322         vpaddq          $M3,$D3,$D3             # d3 += r2'*r1
2323         vpaddq          $M4,$D4,$D4             # d4 += r2'*r2
2324         vpaddq          $M2,$D2,$D2             # d2 += r2'*r0
2325
2326         vpmuludq        $T3,$S2,$M0
2327         vpmuludq        $T3,$R0,$M3
2328         vpmuludq        $T3,$R1,$M4
2329         vpmuludq        $T3,$S3,$M1
2330         vpmuludq        $T3,$S4,$M2
2331         vpaddq          $M0,$D0,$D0             # d0 += r3'*5*r2
2332         vpaddq          $M3,$D3,$D3             # d3 += r3'*r0
2333         vpaddq          $M4,$D4,$D4             # d4 += r3'*r1
2334         vpaddq          $M1,$D1,$D1             # d1 += r3'*5*r3
2335         vpaddq          $M2,$D2,$D2             # d2 += r3'*5*r4
2336
2337         vpmuludq        $T4,$S4,$M3
2338         vpmuludq        $T4,$R0,$M4
2339         vpmuludq        $T4,$S1,$M0
2340         vpmuludq        $T4,$S2,$M1
2341         vpmuludq        $T4,$S3,$M2
2342         vpaddq          $M3,$D3,$D3             # d3 += r4'*5*r4
2343         vpaddq          $M4,$D4,$D4             # d4 += r4'*r0
2344         vpaddq          $M0,$D0,$D0             # d0 += r4'*5*r1
2345         vpaddq          $M1,$D1,$D1             # d1 += r4'*5*r2
2346         vpaddq          $M2,$D2,$D2             # d2 += r4'*5*r3
2347
2348         ################################################################
2349         # load input
2350         vmovdqu64       16*0($inp),%z#$T3
2351         vmovdqu64       16*4($inp),%z#$T4
2352         lea             16*8($inp),$inp
2353
2354         ################################################################
2355         # lazy reduction
2356
2357         vpsrlq          \$26,$D3,$M3
2358         vpandq          $MASK,$D3,$D3
2359         vpaddq          $M3,$D4,$D4             # d3 -> d4
2360
2361         vpsrlq          \$26,$D0,$M0
2362         vpandq          $MASK,$D0,$D0
2363         vpaddq          $M0,$D1,$D1             # d0 -> d1
2364
2365         vpsrlq          \$26,$D4,$M4
2366         vpandq          $MASK,$D4,$D4
2367
2368         vpsrlq          \$26,$D1,$M1
2369         vpandq          $MASK,$D1,$D1
2370         vpaddq          $M1,$D2,$D2             # d1 -> d2
2371
2372         vpaddq          $M4,$D0,$D0
2373         vpsllq          \$2,$M4,$M4
2374         vpaddq          $M4,$D0,$D0             # d4 -> d0
2375
2376         vpsrlq          \$26,$D2,$M2
2377         vpandq          $MASK,$D2,$D2
2378         vpaddq          $M2,$D3,$D3             # d2 -> d3
2379
2380         vpsrlq          \$26,$D0,$M0
2381         vpandq          $MASK,$D0,$D0
2382         vpaddq          $M0,$D1,$D1             # d0 -> d1
2383
2384         vpsrlq          \$26,$D3,$M3
2385         vpandq          $MASK,$D3,$D3
2386         vpaddq          $M3,$D4,$D4             # d3 -> d4
2387
2388         ################################################################
2389         # at this point we have 14243444 in $R0-$S4 and 05060708 in
2390         # $D0-$D4, ...
2391
2392         vpunpcklqdq     $T4,$T3,$T0     # transpose input
2393         vpunpckhqdq     $T4,$T3,$T4
2394
2395         # ... since input 64-bit lanes are ordered as 73625140, we could
2396         # "vperm" it to 76543210 (here and in each loop iteration), *or*
2397         # we could just flow along, hence the goal for $R0-$S4 is
2398         # 1858286838784888 ...
2399
2400         vmovdqa32       128(%rcx),$M0           # .Lpermd_avx512:
2401         mov             \$0x7777,%eax
2402         kmovw           %eax,%k1
2403
2404         vpermd          $R0,$M0,$R0             # 14243444 -> 1---2---3---4---
2405         vpermd          $R1,$M0,$R1
2406         vpermd          $R2,$M0,$R2
2407         vpermd          $R3,$M0,$R3
2408         vpermd          $R4,$M0,$R4
2409
2410         vpermd          $D0,$M0,${R0}{%k1}      # 05060708 -> 1858286838784888
2411         vpermd          $D1,$M0,${R1}{%k1}
2412         vpermd          $D2,$M0,${R2}{%k1}
2413         vpermd          $D3,$M0,${R3}{%k1}
2414         vpermd          $D4,$M0,${R4}{%k1}
2415
2416         vpslld          \$2,$R1,$S1             # *5
2417         vpslld          \$2,$R2,$S2
2418         vpslld          \$2,$R3,$S3
2419         vpslld          \$2,$R4,$S4
2420         vpaddd          $R1,$S1,$S1
2421         vpaddd          $R2,$S2,$S2
2422         vpaddd          $R3,$S3,$S3
2423         vpaddd          $R4,$S4,$S4
2424
2425         vpbroadcastq    32(%rcx),$PADBIT        # .L129
2426
2427         vpsrlq          \$52,$T0,$T2            # splat input
2428         vpsllq          \$12,$T4,$T3
2429         vporq           $T3,$T2,$T2
2430         vpsrlq          \$26,$T0,$T1
2431         vpsrlq          \$14,$T4,$T3
2432         vpsrlq          \$40,$T4,$T4            # 4
2433         vpandq          $MASK,$T2,$T2           # 2
2434         vpandq          $MASK,$T0,$T0           # 0
2435         #vpandq         $MASK,$T1,$T1           # 1
2436         #vpandq         $MASK,$T3,$T3           # 3
2437         #vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2438
2439         vpaddq          $H2,$T2,$H2             # accumulate input
2440         sub             \$192,$len
2441         jbe             .Ltail_avx512
2442         jmp             .Loop_avx512
2443
2444 .align  32
2445 .Loop_avx512:
2446         ################################################################
2447         # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
2448         # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
2449         # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
2450         # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
2451         # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
2452         # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
2453         # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
2454         # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
2455         #   \________/\___________/
2456         ################################################################
2457         #vpaddq         $H2,$T2,$H2             # accumulate input
2458
2459         # d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
2460         # d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
2461         # d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
2462         # d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
2463         # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
2464         #
2465         # however, as h2 is "chronologically" the first one available, pull the
2466         # corresponding operations up, so it's
2467         #
2468         # d3 = h2*r1   + h0*r3 + h1*r2   + h3*r0 + h4*5*r4
2469         # d4 = h2*r2   + h0*r4 + h1*r3   + h3*r1 + h4*r0
2470         # d0 = h2*5*r3 + h0*r0 + h1*5*r4         + h3*5*r2 + h4*5*r1
2471         # d1 = h2*5*r4 + h0*r1           + h1*r0 + h3*5*r3 + h4*5*r2
2472         # d2 = h2*r0           + h0*r2   + h1*r1 + h3*5*r4 + h4*5*r3
2473
2474         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2475          vpaddq         $H0,$T0,$H0
2476         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2477          vpandq         $MASK,$T1,$T1           # 1
2478         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2479          vpandq         $MASK,$T3,$T3           # 3
2480         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2481          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2482         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2483          vpaddq         $H1,$T1,$H1             # accumulate input
2484          vpaddq         $H3,$T3,$H3
2485          vpaddq         $H4,$T4,$H4
2486
2487           vmovdqu64     16*0($inp),$T3          # load input
2488           vmovdqu64     16*4($inp),$T4
2489           lea           16*8($inp),$inp
2490         vpmuludq        $H0,$R3,$M3
2491         vpmuludq        $H0,$R4,$M4
2492         vpmuludq        $H0,$R0,$M0
2493         vpmuludq        $H0,$R1,$M1
2494         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2495         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2496         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2497         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2498
2499         vpmuludq        $H1,$R2,$M3
2500         vpmuludq        $H1,$R3,$M4
2501         vpmuludq        $H1,$S4,$M0
2502         vpmuludq        $H0,$R2,$M2
2503         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2504         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2505         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2506         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2507
2508           vpunpcklqdq   $T4,$T3,$T0             # transpose input
2509           vpunpckhqdq   $T4,$T3,$T4
2510
2511         vpmuludq        $H3,$R0,$M3
2512         vpmuludq        $H3,$R1,$M4
2513         vpmuludq        $H1,$R0,$M1
2514         vpmuludq        $H1,$R1,$M2
2515         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2516         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2517         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2518         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2519
2520         vpmuludq        $H4,$S4,$M3
2521         vpmuludq        $H4,$R0,$M4
2522         vpmuludq        $H3,$S2,$M0
2523         vpmuludq        $H3,$S3,$M1
2524         vpaddq          $M3,$D3,$D3             # d3 += h4*s4
2525         vpmuludq        $H3,$S4,$M2
2526         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2527         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2528         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2529         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2530
2531         vpmuludq        $H4,$S1,$M0
2532         vpmuludq        $H4,$S2,$M1
2533         vpmuludq        $H4,$S3,$M2
2534         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2535         vpaddq          $M1,$D1,$H1             # h1 = d1 + h4*s2
2536         vpaddq          $M2,$D2,$H2             # h2 = d2 + h4*s3
2537
2538         ################################################################
2539         # lazy reduction (interleaved with input splat)
2540
2541          vpsrlq         \$52,$T0,$T2            # splat input
2542          vpsllq         \$12,$T4,$T3
2543
2544         vpsrlq          \$26,$D3,$H3
2545         vpandq          $MASK,$D3,$D3
2546         vpaddq          $H3,$D4,$H4             # h3 -> h4
2547
2548          vporq          $T3,$T2,$T2
2549
2550         vpsrlq          \$26,$H0,$D0
2551         vpandq          $MASK,$H0,$H0
2552         vpaddq          $D0,$H1,$H1             # h0 -> h1
2553
2554          vpandq         $MASK,$T2,$T2           # 2
2555
2556         vpsrlq          \$26,$H4,$D4
2557         vpandq          $MASK,$H4,$H4
2558
2559         vpsrlq          \$26,$H1,$D1
2560         vpandq          $MASK,$H1,$H1
2561         vpaddq          $D1,$H2,$H2             # h1 -> h2
2562
2563         vpaddq          $D4,$H0,$H0
2564         vpsllq          \$2,$D4,$D4
2565         vpaddq          $D4,$H0,$H0             # h4 -> h0
2566
2567          vpaddq         $T2,$H2,$H2             # modulo-scheduled
2568          vpsrlq         \$26,$T0,$T1
2569
2570         vpsrlq          \$26,$H2,$D2
2571         vpandq          $MASK,$H2,$H2
2572         vpaddq          $D2,$D3,$H3             # h2 -> h3
2573
2574          vpsrlq         \$14,$T4,$T3
2575
2576         vpsrlq          \$26,$H0,$D0
2577         vpandq          $MASK,$H0,$H0
2578         vpaddq          $D0,$H1,$H1             # h0 -> h1
2579
2580          vpsrlq         \$40,$T4,$T4            # 4
2581
2582         vpsrlq          \$26,$H3,$D3
2583         vpandq          $MASK,$H3,$H3
2584         vpaddq          $D3,$H4,$H4             # h3 -> h4
2585
2586          vpandq         $MASK,$T0,$T0           # 0
2587          #vpandq        $MASK,$T1,$T1           # 1
2588          #vpandq        $MASK,$T3,$T3           # 3
2589          #vporq         $PADBIT,$T4,$T4         # padbit, yes, always
2590
2591         sub             \$128,$len
2592         ja              .Loop_avx512
2593
2594 .Ltail_avx512:
2595         ################################################################
2596         # while the above multiplications were by r^8 in all lanes, in the last
2597         # iteration we multiply the least significant lane by r^8 and the most
2598         # significant one by r, which is why the table gets shifted...
2599
2600         vpsrlq          \$32,$R0,$R0            # 0105020603070408
2601         vpsrlq          \$32,$R1,$R1
2602         vpsrlq          \$32,$R2,$R2
2603         vpsrlq          \$32,$S3,$S3
2604         vpsrlq          \$32,$S4,$S4
2605         vpsrlq          \$32,$R3,$R3
2606         vpsrlq          \$32,$R4,$R4
2607         vpsrlq          \$32,$S1,$S1
2608         vpsrlq          \$32,$S2,$S2
2609
2610         ################################################################
2611         # load either the next or the last 64 bytes of input
2612         lea             ($inp,$len),$inp
2613
2614         #vpaddq         $H2,$T2,$H2             # accumulate input
2615         vpaddq          $H0,$T0,$H0
2616
2617         vpmuludq        $H2,$R1,$D3             # d3 = h2*r1
2618         vpmuludq        $H2,$R2,$D4             # d4 = h2*r2
2619         vpmuludq        $H2,$S3,$D0             # d0 = h2*s3
2620          vpandq         $MASK,$T1,$T1           # 1
2621         vpmuludq        $H2,$S4,$D1             # d1 = h2*s4
2622          vpandq         $MASK,$T3,$T3           # 3
2623         vpmuludq        $H2,$R0,$D2             # d2 = h2*r0
2624          vporq          $PADBIT,$T4,$T4         # padbit, yes, always
2625          vpaddq         $H1,$T1,$H1             # accumulate input
2626          vpaddq         $H3,$T3,$H3
2627          vpaddq         $H4,$T4,$H4
2628
2629           vmovdqu       16*0($inp),%x#$T0
2630         vpmuludq        $H0,$R3,$M3
2631         vpmuludq        $H0,$R4,$M4
2632         vpmuludq        $H0,$R0,$M0
2633         vpmuludq        $H0,$R1,$M1
2634         vpaddq          $M3,$D3,$D3             # d3 += h0*r3
2635         vpaddq          $M4,$D4,$D4             # d4 += h0*r4
2636         vpaddq          $M0,$D0,$D0             # d0 += h0*r0
2637         vpaddq          $M1,$D1,$D1             # d1 += h0*r1
2638
2639           vmovdqu       16*1($inp),%x#$T1
2640         vpmuludq        $H1,$R2,$M3
2641         vpmuludq        $H1,$R3,$M4
2642         vpmuludq        $H1,$S4,$M0
2643         vpmuludq        $H0,$R2,$M2
2644         vpaddq          $M3,$D3,$D3             # d3 += h1*r2
2645         vpaddq          $M4,$D4,$D4             # d4 += h1*r3
2646         vpaddq          $M0,$D0,$D0             # d0 += h1*s4
2647         vpaddq          $M2,$D2,$D2             # d2 += h0*r2
2648
2649           vinserti128   \$1,16*2($inp),%y#$T0,%y#$T0
2650         vpmuludq        $H3,$R0,$M3
2651         vpmuludq        $H3,$R1,$M4
2652         vpmuludq        $H1,$R0,$M1
2653         vpmuludq        $H1,$R1,$M2
2654         vpaddq          $M3,$D3,$D3             # d3 += h3*r0
2655         vpaddq          $M4,$D4,$D4             # d4 += h3*r1
2656         vpaddq          $M1,$D1,$D1             # d1 += h1*r0
2657         vpaddq          $M2,$D2,$D2             # d2 += h1*r1
2658
2659           vinserti128   \$1,16*3($inp),%y#$T1,%y#$T1
2660         vpmuludq        $H4,$S4,$M3
2661         vpmuludq        $H4,$R0,$M4
2662         vpmuludq        $H3,$S2,$M0
2663         vpmuludq        $H3,$S3,$M1
2664         vpmuludq        $H3,$S4,$M2
2665         vpaddq          $M3,$D3,$H3             # h3 = d3 + h4*s4
2666         vpaddq          $M4,$D4,$D4             # d4 += h4*r0
2667         vpaddq          $M0,$D0,$D0             # d0 += h3*s2
2668         vpaddq          $M1,$D1,$D1             # d1 += h3*s3
2669         vpaddq          $M2,$D2,$D2             # d2 += h3*s4
2670
2671         vpmuludq        $H4,$S1,$M0
2672         vpmuludq        $H4,$S2,$M1
2673         vpmuludq        $H4,$S3,$M2
2674         vpaddq          $M0,$D0,$H0             # h0 = d0 + h4*s1
2675         vpaddq          $M1,$D1,$H1             # h1 = d1 + h4*s2
2676         vpaddq          $M2,$D2,$H2             # h2 = d2 + h4*s3
2677
2678         ################################################################
2679         # horizontal addition
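        # Note (illustration only): each accumulator holds eight 64-bit
        # partial sums; vpermq 0xb1 plus vpaddq folds neighbouring qwords,
        # vpermq 0x2 plus vpaddq folds the two halves of each 256-bit lane,
        # and vextracti64x4 plus the masked vpaddq folds the upper 256 bits,
        # leaving the total in qword 0 only (%k3 = 1 zeroes the other lanes).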
2680
2681         mov             \$1,%eax
2682         vpermq          \$0xb1,$H3,$D3
2683         vpermq          \$0xb1,$D4,$H4
2684         vpermq          \$0xb1,$H0,$D0
2685         vpermq          \$0xb1,$H1,$D1
2686         vpermq          \$0xb1,$H2,$D2
2687         vpaddq          $D3,$H3,$H3
2688         vpaddq          $D4,$H4,$H4
2689         vpaddq          $D0,$H0,$H0
2690         vpaddq          $D1,$H1,$H1
2691         vpaddq          $D2,$H2,$H2
2692
2693         kmovw           %eax,%k3
2694         vpermq          \$0x2,$H3,$D3
2695         vpermq          \$0x2,$H4,$D4
2696         vpermq          \$0x2,$H0,$D0
2697         vpermq          \$0x2,$H1,$D1
2698         vpermq          \$0x2,$H2,$D2
2699         vpaddq          $D3,$H3,$H3
2700         vpaddq          $D4,$H4,$H4
2701         vpaddq          $D0,$H0,$H0
2702         vpaddq          $D1,$H1,$H1
2703         vpaddq          $D2,$H2,$H2
2704
2705         vextracti64x4   \$0x1,$H3,%y#$D3
2706         vextracti64x4   \$0x1,$H4,%y#$D4
2707         vextracti64x4   \$0x1,$H0,%y#$D0
2708         vextracti64x4   \$0x1,$H1,%y#$D1
2709         vextracti64x4   \$0x1,$H2,%y#$D2
2710         vpaddq          $D3,$H3,${H3}{%k3}{z}   # keep single qword in case
2711         vpaddq          $D4,$H4,${H4}{%k3}{z}   # it's passed to .Ltail_avx2
2712         vpaddq          $D0,$H0,${H0}{%k3}{z}
2713         vpaddq          $D1,$H1,${H1}{%k3}{z}
2714         vpaddq          $D2,$H2,${H2}{%k3}{z}
2715 ___
2716 map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
2717 map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
2718 $code.=<<___;
2719         ################################################################
2720         # lazy reduction (interleaved with input splat)

	vpsrlq		\$26,$H3,$D3
	vpand		$MASK,$H3,$H3
	 vpsrldq	\$6,$T0,$T2		# splat input
	 vpsrldq	\$6,$T1,$T3
	 vpunpckhqdq	$T1,$T0,$T4		# 4
	vpaddq		$D3,$H4,$H4		# h3 -> h4

	vpsrlq		\$26,$H0,$D0
	vpand		$MASK,$H0,$H0
	 vpunpcklqdq	$T3,$T2,$T2		# 2:3
	 vpunpcklqdq	$T1,$T0,$T0		# 0:1
	vpaddq		$D0,$H1,$H1		# h0 -> h1

	vpsrlq		\$26,$H4,$D4
	vpand		$MASK,$H4,$H4

	vpsrlq		\$26,$H1,$D1
	vpand		$MASK,$H1,$H1
	 vpsrlq		\$30,$T2,$T3
	 vpsrlq		\$4,$T2,$T2
	vpaddq		$D1,$H2,$H2		# h1 -> h2

	vpaddq		$D4,$H0,$H0
	vpsllq		\$2,$D4,$D4
	 vpsrlq		\$26,$T0,$T1
	 vpsrlq		\$40,$T4,$T4		# 4
	vpaddq		$D4,$H0,$H0		# h4 -> h0

	vpsrlq		\$26,$H2,$D2
	vpand		$MASK,$H2,$H2
	 vpand		$MASK,$T2,$T2		# 2
	 vpand		$MASK,$T0,$T0		# 0
	vpaddq		$D2,$H3,$H3		# h2 -> h3

	vpsrlq		\$26,$H0,$D0
	vpand		$MASK,$H0,$H0
	 vpaddq		$H2,$T2,$H2		# accumulate input for .Ltail_avx2
	 vpand		$MASK,$T1,$T1		# 1
	vpaddq		$D0,$H1,$H1		# h0 -> h1

	vpsrlq		\$26,$H3,$D3
	vpand		$MASK,$H3,$H3
	 vpand		$MASK,$T3,$T3		# 3
	 vpor		32(%rcx),$T4,$T4	# padbit, yes, always
	vpaddq		$D3,$H4,$H4		# h3 -> h4

	lea		0x90(%rsp),%rax		# size optimization for .Ltail_avx2
	add		\$64,$len
	jnz		.Ltail_avx2$suffix

	vpsubq		$T2,$H2,$H2		# undo input accumulation
	vmovd		%x#$H0,`4*0-48-64`($ctx)# save partially reduced
	vmovd		%x#$H1,`4*1-48-64`($ctx)
	vmovd		%x#$H2,`4*2-48-64`($ctx)
	vmovd		%x#$H3,`4*3-48-64`($ctx)
	vmovd		%x#$H4,`4*4-48-64`($ctx)
	vzeroall
___
$code.=<<___	if ($win64);
	movdqa		-0xb0(%r10),%xmm6
	movdqa		-0xa0(%r10),%xmm7
	movdqa		-0x90(%r10),%xmm8
	movdqa		-0x80(%r10),%xmm9
	movdqa		-0x70(%r10),%xmm10
	movdqa		-0x60(%r10),%xmm11
	movdqa		-0x50(%r10),%xmm12
	movdqa		-0x40(%r10),%xmm13
	movdqa		-0x30(%r10),%xmm14
	movdqa		-0x20(%r10),%xmm15
	lea		-8(%r10),%rsp
.Ldo_avx512_epilogue:
___
$code.=<<___	if (!$win64);
	lea		-8(%r10),%rsp
.cfi_def_cfa_register	%rsp
___
$code.=<<___;
	ret
.cfi_endproc
___

}

}

&declare_function("poly1305_blocks_avx2", 32, 4);
poly1305_blocks_avxN(0);
&end_function("poly1305_blocks_avx2");

if($kernel) {
	$code .= "#endif\n";
}

#######################################################################
if ($avx>2) {
# On entry we have input length divisible by 64. But since the inner
# loop processes 128 bytes per iteration, cases when the length is not
# divisible by 128 are handled by passing the tail 64 bytes to
# .Ltail_avx2. For this reason the stack layout is kept identical to
# poly1305_blocks_avx2. If not for this tail, we wouldn't even have to
# allocate a stack frame...
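#
# (A worked example of the above, informal: for 192 bytes of input the
# inner loop runs once over 128 bytes, and the remaining 64 bytes are
# processed by jumping into .Ltail_avx2 of the AVX2 code path.)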

if($kernel) {
	$code .= "#ifdef CONFIG_AS_AVX512\n";
}

&declare_function("poly1305_blocks_avx512", 32, 4);
poly1305_blocks_avxN(1);
&end_function("poly1305_blocks_avx512");

if ($kernel) {
	$code .= "#endif\n";
}

if (!$kernel && $avx>3) {
########################################################################
# VPMADD52 version using 2^44 radix.
#
# One can argue that base 2^52 would be more natural. Well, even though
# some operations would be more natural, one has to recognize a couple
# of things. First, base 2^52 doesn't provide an advantage over base
# 2^44 if you look at the amount of multiply-and-accumulate operations.
# Secondly, it makes it impossible to pre-compute multiples of 5
# [referred to as s[]/sN in reference implementations], which means that
# more such operations would have to be performed in the inner loop,
# which in turn makes the critical path longer. In other words, even
# though base 2^44 reduction might look less elegant, the overall
# critical path is actually shorter...
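#
# (Informal sketch of the arithmetic behind the remark above: with
# limbs of 44+44+42 bits, a cross term at weight 2^132 wraps around as
# 2^132 = 4*2^130 = 4*5 = 20 mod 2^130-5, hence the pre-computed s[]
# values are r[]*20 rather than r[]*5. Since r[] < 2^44, 20*r[] < 2^49
# still fits VPMADD52's 52-bit multiplier domain, whereas with 2^52
# limbs even 5*r[] could exceed 52 bits, which is what makes the
# pre-computation "impossible" there.)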

########################################################################
# The layout of the opaque area is as follows.
#
#	unsigned __int64 h[3];		# current hash value base 2^44
#	unsigned __int64 s[2];		# key value*20 base 2^44
#	unsigned __int64 r[3];		# key value base 2^44
#	struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4];
#					# r^n positions reflect
#					# placement in register, not
#					# memory, R[3] is R[1]*20
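#
# (For orientation, an informal byte-offset map derived from the loads
# and stores below: h[] at 0/8/16, s[] at 24/32, r[] at 40/48/56, and
# R[] at 64/96/128/160; the qword at offset 64 doubles as the "are the
# key powers initialized" flag and holds -1 until
# poly1305_blocks_vpmadd52 computes the powers.)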
$code.=<<___;
.type	poly1305_init_base2_44,\@function,3
.align	32
poly1305_init_base2_44:
	xor	%rax,%rax
	mov	%rax,0($ctx)		# initialize hash value
	mov	%rax,8($ctx)
	mov	%rax,16($ctx)

.Linit_base2_44:
	lea	poly1305_blocks_vpmadd52(%rip),%r10
	lea	poly1305_emit_base2_44(%rip),%r11

	mov	\$0x0ffffffc0fffffff,%rax
	mov	\$0x0ffffffc0ffffffc,%rcx
	and	0($inp),%rax
	mov	\$0x00000fffffffffff,%r8
	and	8($inp),%rcx
	mov	\$0x00000fffffffffff,%r9
	and	%rax,%r8
	shrd	\$44,%rcx,%rax
	mov	%r8,40($ctx)		# r0
	and	%r9,%rax
	shr	\$24,%rcx
	mov	%rax,48($ctx)		# r1
	lea	(%rax,%rax,4),%rax	# *5
	mov	%rcx,56($ctx)		# r2
	shl	\$2,%rax		# magic <<2
	lea	(%rcx,%rcx,4),%rcx	# *5
	shl	\$2,%rcx		# magic <<2
	mov	%rax,24($ctx)		# s1
	mov	%rcx,32($ctx)		# s2
	movq	\$-1,64($ctx)		# write impossible value
___
$code.=<<___	if ($flavour !~ /elf32/);
	mov	%r10,0(%rdx)
	mov	%r11,8(%rdx)
___
$code.=<<___	if ($flavour =~ /elf32/);
	mov	%r10d,0(%rdx)
	mov	%r11d,4(%rdx)
___
$code.=<<___;
	mov	\$1,%eax
	ret
.size	poly1305_init_base2_44,.-poly1305_init_base2_44
___
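#
# Informal C sketch of the radix conversion done above (illustrative
# only, helper names are not part of this file):
#
#	unsigned __int64 lo = le64(key + 0) & 0x0ffffffc0fffffff;
#	unsigned __int64 hi = le64(key + 8) & 0x0ffffffc0ffffffc;
#	r0 = lo & ((1ULL<<44)-1);			/* bits  0..43  */
#	r1 = ((lo>>44) | (hi<<20)) & ((1ULL<<44)-1);	/* bits 44..87  */
#	r2 = hi >> 24;					/* bits 88..127 */
#	s1 = r1 * 20;	s2 = r2 * 20;	/* see the *20 note above */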
{
my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17));
my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21));
my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25));

$code.=<<___;
.type	poly1305_blocks_vpmadd52,\@function,4
.align	32
poly1305_blocks_vpmadd52:
	shr	\$4,$len
	jz	.Lno_data_vpmadd52		# too short

	shl	\$40,$padbit
	mov	64($ctx),%r8			# peek on power of the key

	# if the powers of the key have not been calculated yet, process
	# up to 3 blocks with this single-block subroutine; otherwise
	# ensure that the length is divisible by 2 blocks and pass the
	# rest down to the next subroutine...
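	#
	# In C-ish pseudo-code (informal, names illustrative) the
	# dispatch below computes
	#	blocks = (power_known || len >= 4) ? (len & 1) : (len & 3);
	# and runs that many single-block iterations, so that the
	# remaining length is even (or zero).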

	mov	\$3,%rax
	mov	\$1,%r10
	cmp	\$4,$len			# is input long
	cmovae	%r10,%rax
	test	%r8,%r8				# is power value impossible?
	cmovns	%r10,%rax

	and	$len,%rax			# is input of favourable length?
	jz	.Lblocks_vpmadd52_4x

	sub		%rax,$len
	mov		\$7,%r10d
	mov		\$1,%r11d
	kmovw		%r10d,%k7
	lea		.L2_44_inp_permd(%rip),%r10
	kmovw		%r11d,%k1

	vmovq		$padbit,%x#$PAD
	vmovdqa64	0(%r10),$inp_permd	# .L2_44_inp_permd
	vmovdqa64	32(%r10),$inp_shift	# .L2_44_inp_shift
	vpermq		\$0xcf,$PAD,$PAD
	vmovdqa64	64(%r10),$reduc_mask	# .L2_44_mask

	vmovdqu64	0($ctx),${Dlo}{%k7}{z}		# load hash value
	vmovdqu64	40($ctx),${r2r1r0}{%k7}{z}	# load keys
	vmovdqu64	32($ctx),${r1r0s2}{%k7}{z}
	vmovdqu64	24($ctx),${r0s2s1}{%k7}{z}

	vmovdqa64	96(%r10),$reduc_rght	# .L2_44_shift_rgt
	vmovdqa64	128(%r10),$reduc_left	# .L2_44_shift_lft

	jmp		.Loop_vpmadd52

.align	32
.Loop_vpmadd52:
	vmovdqu32	0($inp),%x#$T0		# load input as ----3210
	lea		16($inp),$inp

	vpermd		$T0,$inp_permd,$T0	# ----3210 -> --322110
	vpsrlvq		$inp_shift,$T0,$T0
	vpandq		$reduc_mask,$T0,$T0
	vporq		$PAD,$T0,$T0

	vpaddq		$T0,$Dlo,$Dlo		# accumulate input

	vpermq		\$0,$Dlo,${H0}{%k7}{z}	# smash hash value
	vpermq		\$0b01010101,$Dlo,${H1}{%k7}{z}
	vpermq		\$0b10101010,$Dlo,${H2}{%k7}{z}

	vpxord		$Dlo,$Dlo,$Dlo
	vpxord		$Dhi,$Dhi,$Dhi

	vpmadd52luq	$r2r1r0,$H0,$Dlo
	vpmadd52huq	$r2r1r0,$H0,$Dhi

	vpmadd52luq	$r1r0s2,$H1,$Dlo
	vpmadd52huq	$r1r0s2,$H1,$Dhi

	vpmadd52luq	$r0s2s1,$H2,$Dlo
	vpmadd52huq	$r0s2s1,$H2,$Dhi

	vpsrlvq		$reduc_rght,$Dlo,$T0	# 0 in topmost qword
	vpsllvq		$reduc_left,$Dhi,$Dhi	# 0 in topmost qword
	vpandq		$reduc_mask,$Dlo,$Dlo

	vpaddq		$T0,$Dhi,$Dhi

	vpermq		\$0b10010011,$Dhi,$Dhi	# 0 in lowest qword

	vpaddq		$Dhi,$Dlo,$Dlo		# note topmost qword :-)

	vpsrlvq		$reduc_rght,$Dlo,$T0	# 0 in topmost word
	vpandq		$reduc_mask,$Dlo,$Dlo

	vpermq		\$0b10010011,$T0,$T0

	vpaddq		$T0,$Dlo,$Dlo

	vpermq		\$0b10010011,$Dlo,${T0}{%k1}{z}

	vpaddq		$T0,$Dlo,$Dlo
	vpsllq		\$2,$T0,$T0

	vpaddq		$T0,$Dlo,$Dlo
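	# (informal note: the 2^44/2^44/2^42 digits live in qword lanes
	# 0..2, so carries are propagated across lanes by the vpermq
	# rotations above; the carry out of the top digit is isolated
	# by the k1-masked rotate and has just been folded back into
	# digit 0 multiplied by 5 via add + shl 2 + add)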

	dec		%rax			# len-=16
	jnz		.Loop_vpmadd52

	vmovdqu64	$Dlo,0($ctx){%k7}	# store hash value

	test		$len,$len
	jnz		.Lblocks_vpmadd52_4x

.Lno_data_vpmadd52:
	ret
.size	poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
___
}
{
########################################################################
# As implied by its name, the 4x subroutine processes 4 blocks in
# parallel (but also handles lengths of 4*n+2 blocks). It takes up to
# the 4th power of the key and operates on 256-bit %ymm registers.
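#
# (Informally, the usual data-parallel schedule: four interleaved
# partial sums are each multiplied by r^4 per iteration, and the tail
# multiplies them by r^4, r^3, r^2 and r^1 respectively before summing
# them; the 1-3-2-4 ordering of the stored powers mirrors the in-lane
# placement noted in the layout comment above.)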

my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));

$code.=<<___;
.type	poly1305_blocks_vpmadd52_4x,\@function,4
.align	32
poly1305_blocks_vpmadd52_4x:
	shr	\$4,$len
	jz	.Lno_data_vpmadd52_4x		# too short

	shl	\$40,$padbit
	mov	64($ctx),%r8			# peek on power of the key

.Lblocks_vpmadd52_4x:
	vpbroadcastq	$padbit,$PAD

	vmovdqa64	.Lx_mask44(%rip),$mask44
	mov		\$5,%eax
	vmovdqa64	.Lx_mask42(%rip),$mask42
	kmovw		%eax,%k1		# used in 2x path

	test		%r8,%r8			# is power value impossible?
	js		.Linit_vpmadd52		# if it is, then init R[4]

	vmovq		0($ctx),%x#$H0		# load current hash value
	vmovq		8($ctx),%x#$H1
	vmovq		16($ctx),%x#$H2

	test		\$3,$len		# is length 4*n+2?
	jnz		.Lblocks_vpmadd52_2x_do

.Lblocks_vpmadd52_4x_do:
	vpbroadcastq	64($ctx),$R0		# load 4th power of the key
	vpbroadcastq	96($ctx),$R1
	vpbroadcastq	128($ctx),$R2
	vpbroadcastq	160($ctx),$S1

.Lblocks_vpmadd52_4x_key_loaded:
	vpsllq		\$2,$R2,$S2		# S2 = R2*5*4
	vpaddq		$R2,$S2,$S2
	vpsllq		\$2,$S2,$S2

	test		\$7,$len		# is len 8*n?
	jz		.Lblocks_vpmadd52_8x

	vmovdqu64	16*0($inp),$T2		# load data
	vmovdqu64	16*2($inp),$T3
	lea		16*4($inp),$inp

	vpunpcklqdq	$T3,$T2,$T1		# transpose data
	vpunpckhqdq	$T3,$T2,$T3

	# at this point 64-bit lanes are ordered as 3-1-2-0

	vpsrlq		\$24,$T3,$T2		# splat the data
	vporq		$PAD,$T2,$T2
	 vpaddq		$T2,$H2,$H2		# accumulate input
	vpandq		$mask44,$T1,$T0
	vpsrlq		\$44,$T1,$T1
	vpsllq		\$20,$T3,$T3
	vporq		$T3,$T1,$T1
	vpandq		$mask44,$T1,$T1

	sub		\$4,$len
	jz		.Ltail_vpmadd52_4x
	jmp		.Loop_vpmadd52_4x
	ud2

.align	32
.Linit_vpmadd52:
	vmovq		24($ctx),%x#$S1		# load key
	vmovq		56($ctx),%x#$H2
	vmovq		32($ctx),%x#$S2
	vmovq		40($ctx),%x#$R0
	vmovq		48($ctx),%x#$R1

	vmovdqa		$R0,$H0
	vmovdqa		$R1,$H1
	vmovdqa		$H2,$R2

	mov		\$2,%eax

.Lmul_init_vpmadd52:
	vpxorq		$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq		$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq		$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq		$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq		$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq		$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi

	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi

	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi

	################################################################
	# partial reduction
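	# (informal sketch: VPMADD52 keeps each digit as lo + hi*2^52;
	# shifting hi left by 8 (10 for the 2^42 digit) rescales it to
	# the 2^44 (2^42) radix so it can be added, together with lo's
	# carry, into the next digit; the carry out of the top digit is
	# folded back into digit 0 times 5 via add + shl 2 + add, and
	# one extra 44-bit carry step keeps all digits bounded)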
	vpsrlq		\$44,$D0lo,$tmp
	vpsllq		\$8,$D0hi,$D0hi
	vpandq		$mask44,$D0lo,$H0
	vpaddq		$tmp,$D0hi,$D0hi

	vpaddq		$D0hi,$D1lo,$D1lo

	vpsrlq		\$44,$D1lo,$tmp
	vpsllq		\$8,$D1hi,$D1hi
	vpandq		$mask44,$D1lo,$H1
	vpaddq		$tmp,$D1hi,$D1hi

	vpaddq		$D1hi,$D2lo,$D2lo

	vpsrlq		\$42,$D2lo,$tmp
	vpsllq		\$10,$D2hi,$D2hi
	vpandq		$mask42,$D2lo,$H2
	vpaddq		$tmp,$D2hi,$D2hi

	vpaddq		$D2hi,$H0,$H0
	vpsllq		\$2,$D2hi,$D2hi

	vpaddq		$D2hi,$H0,$H0

	vpsrlq		\$44,$H0,$tmp		# additional step
	vpandq		$mask44,$H0,$H0

	vpaddq		$tmp,$H1,$H1

	dec		%eax
	jz		.Ldone_init_vpmadd52

	vpunpcklqdq	$R1,$H1,$R1		# 1,2
	vpbroadcastq	%x#$H1,%x#$H1		# 2,2
	vpunpcklqdq	$R2,$H2,$R2
	vpbroadcastq	%x#$H2,%x#$H2
	vpunpcklqdq	$R0,$H0,$R0
	vpbroadcastq	%x#$H0,%x#$H0

	vpsllq		\$2,$R1,$S1		# S1 = R1*5*4
	vpsllq		\$2,$R2,$S2		# S2 = R2*5*4
	vpaddq		$R1,$S1,$S1
	vpaddq		$R2,$S2,$S2
	vpsllq		\$2,$S1,$S1
	vpsllq		\$2,$S2,$S2

	jmp		.Lmul_init_vpmadd52
	ud2

.align	32
.Ldone_init_vpmadd52:
	vinserti128	\$1,%x#$R1,$H1,$R1	# 1,2,3,4
	vinserti128	\$1,%x#$R2,$H2,$R2
	vinserti128	\$1,%x#$R0,$H0,$R0

	vpermq		\$0b11011000,$R1,$R1	# 1,3,2,4
	vpermq		\$0b11011000,$R2,$R2
	vpermq		\$0b11011000,$R0,$R0

	vpsllq		\$2,$R1,$S1		# S1 = R1*5*4
	vpaddq		$R1,$S1,$S1
	vpsllq		\$2,$S1,$S1

	vmovq		0($ctx),%x#$H0		# load current hash value
	vmovq		8($ctx),%x#$H1
	vmovq		16($ctx),%x#$H2

	test		\$3,$len		# is length 4*n+2?
	jnz		.Ldone_init_vpmadd52_2x

	vmovdqu64	$R0,64($ctx)		# save key powers
	vpbroadcastq	%x#$R0,$R0		# broadcast 4th power
	vmovdqu64	$R1,96($ctx)
	vpbroadcastq	%x#$R1,$R1
	vmovdqu64	$R2,128($ctx)
	vpbroadcastq	%x#$R2,$R2
	vmovdqu64	$S1,160($ctx)
	vpbroadcastq	%x#$S1,$S1

	jmp		.Lblocks_vpmadd52_4x_key_loaded
	ud2

.align	32
.Ldone_init_vpmadd52_2x:
	vmovdqu64	$R0,64($ctx)		# save key powers
	vpsrldq		\$8,$R0,$R0		# 0-1-0-2
	vmovdqu64	$R1,96($ctx)
	vpsrldq		\$8,$R1,$R1
	vmovdqu64	$R2,128($ctx)
	vpsrldq		\$8,$R2,$R2
	vmovdqu64	$S1,160($ctx)
	vpsrldq		\$8,$S1,$S1
	jmp		.Lblocks_vpmadd52_2x_key_loaded
	ud2

.align	32
.Lblocks_vpmadd52_2x_do:
	vmovdqu64	128+8($ctx),${R2}{%k1}{z}# load 2nd and 1st key powers
	vmovdqu64	160+8($ctx),${S1}{%k1}{z}
	vmovdqu64	64+8($ctx),${R0}{%k1}{z}
	vmovdqu64	96+8($ctx),${R1}{%k1}{z}

.Lblocks_vpmadd52_2x_key_loaded:
	vmovdqu64	16*0($inp),$T2		# load data
	vpxorq		$T3,$T3,$T3
	lea		16*2($inp),$inp

	vpunpcklqdq	$T3,$T2,$T1		# transpose data
	vpunpckhqdq	$T3,$T2,$T3

	# at this point 64-bit lanes are ordered as x-1-x-0

	vpsrlq		\$24,$T3,$T2		# splat the data
	vporq		$PAD,$T2,$T2
	 vpaddq		$T2,$H2,$H2		# accumulate input
	vpandq		$mask44,$T1,$T0
	vpsrlq		\$44,$T1,$T1
	vpsllq		\$20,$T3,$T3
	vporq		$T3,$T1,$T1
	vpandq		$mask44,$T1,$T1

	jmp		.Ltail_vpmadd52_2x
	ud2

.align	32
.Loop_vpmadd52_4x:
	#vpaddq		$T2,$H2,$H2		# accumulate input
	vpaddq		$T0,$H0,$H0
	vpaddq		$T1,$H1,$H1

	vpxorq		$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq		$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq		$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq		$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq		$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq		$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi

	 vmovdqu64	16*0($inp),$T2		# load data
	 vmovdqu64	16*2($inp),$T3
	 lea		16*4($inp),$inp
	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi

	 vpunpcklqdq	$T3,$T2,$T1		# transpose data
	 vpunpckhqdq	$T3,$T2,$T3
	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi

	################################################################
	# partial reduction (interleaved with data splat)
	vpsrlq		\$44,$D0lo,$tmp
	vpsllq		\$8,$D0hi,$D0hi
	vpandq		$mask44,$D0lo,$H0
	vpaddq		$tmp,$D0hi,$D0hi

	 vpsrlq		\$24,$T3,$T2
	 vporq		$PAD,$T2,$T2
	vpaddq		$D0hi,$D1lo,$D1lo

	vpsrlq		\$44,$D1lo,$tmp
	vpsllq		\$8,$D1hi,$D1hi
	vpandq		$mask44,$D1lo,$H1
	vpaddq		$tmp,$D1hi,$D1hi

	 vpandq		$mask44,$T1,$T0
	 vpsrlq		\$44,$T1,$T1
	 vpsllq		\$20,$T3,$T3
	vpaddq		$D1hi,$D2lo,$D2lo

	vpsrlq		\$42,$D2lo,$tmp
	vpsllq		\$10,$D2hi,$D2hi
	vpandq		$mask42,$D2lo,$H2
	vpaddq		$tmp,$D2hi,$D2hi

	  vpaddq	$T2,$H2,$H2		# accumulate input
	vpaddq		$D2hi,$H0,$H0
	vpsllq		\$2,$D2hi,$D2hi

	vpaddq		$D2hi,$H0,$H0
	 vporq		$T3,$T1,$T1
	 vpandq		$mask44,$T1,$T1

	vpsrlq		\$44,$H0,$tmp		# additional step
	vpandq		$mask44,$H0,$H0

	vpaddq		$tmp,$H1,$H1

	sub		\$4,$len		# len-=64
	jnz		.Loop_vpmadd52_4x

.Ltail_vpmadd52_4x:
	vmovdqu64	128($ctx),$R2		# load all key powers
	vmovdqu64	160($ctx),$S1
	vmovdqu64	64($ctx),$R0
	vmovdqu64	96($ctx),$R1

.Ltail_vpmadd52_2x:
	vpsllq		\$2,$R2,$S2		# S2 = R2*5*4
	vpaddq		$R2,$S2,$S2
	vpsllq		\$2,$S2,$S2

	#vpaddq		$T2,$H2,$H2		# accumulate input
	vpaddq		$T0,$H0,$H0
	vpaddq		$T1,$H1,$H1

	vpxorq		$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq		$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq		$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq		$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq		$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq		$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi

	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi

	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi

	################################################################
	# horizontal addition

	mov		\$1,%eax
	kmovw		%eax,%k1
	vpsrldq		\$8,$D0lo,$T0
	vpsrldq		\$8,$D0hi,$H0
	vpsrldq		\$8,$D1lo,$T1
	vpsrldq		\$8,$D1hi,$H1
	vpaddq		$T0,$D0lo,$D0lo
	vpaddq		$H0,$D0hi,$D0hi
	vpsrldq		\$8,$D2lo,$T2
	vpsrldq		\$8,$D2hi,$H2
	vpaddq		$T1,$D1lo,$D1lo
	vpaddq		$H1,$D1hi,$D1hi
	 vpermq		\$0x2,$D0lo,$T0
	 vpermq		\$0x2,$D0hi,$H0
	vpaddq		$T2,$D2lo,$D2lo
	vpaddq		$H2,$D2hi,$D2hi

	vpermq		\$0x2,$D1lo,$T1
	vpermq		\$0x2,$D1hi,$H1
	vpaddq		$T0,$D0lo,${D0lo}{%k1}{z}
	vpaddq		$H0,$D0hi,${D0hi}{%k1}{z}
	vpermq		\$0x2,$D2lo,$T2
	vpermq		\$0x2,$D2hi,$H2
	vpaddq		$T1,$D1lo,${D1lo}{%k1}{z}
	vpaddq		$H1,$D1hi,${D1hi}{%k1}{z}
	vpaddq		$T2,$D2lo,${D2lo}{%k1}{z}
	vpaddq		$H2,$D2hi,${D2hi}{%k1}{z}

	################################################################
	# partial reduction
	vpsrlq		\$44,$D0lo,$tmp
	vpsllq		\$8,$D0hi,$D0hi
	vpandq		$mask44,$D0lo,$H0
	vpaddq		$tmp,$D0hi,$D0hi

	vpaddq		$D0hi,$D1lo,$D1lo

	vpsrlq		\$44,$D1lo,$tmp
	vpsllq		\$8,$D1hi,$D1hi
	vpandq		$mask44,$D1lo,$H1
	vpaddq		$tmp,$D1hi,$D1hi

	vpaddq		$D1hi,$D2lo,$D2lo

	vpsrlq		\$42,$D2lo,$tmp
	vpsllq		\$10,$D2hi,$D2hi
	vpandq		$mask42,$D2lo,$H2
	vpaddq		$tmp,$D2hi,$D2hi

	vpaddq		$D2hi,$H0,$H0
	vpsllq		\$2,$D2hi,$D2hi

	vpaddq		$D2hi,$H0,$H0

	vpsrlq		\$44,$H0,$tmp		# additional step
	vpandq		$mask44,$H0,$H0

	vpaddq		$tmp,$H1,$H1
						# at this point $len is
						# either 4*n+2 or 0...
	sub		\$2,$len		# len-=32
	ja		.Lblocks_vpmadd52_4x_do

	vmovq		%x#$H0,0($ctx)
	vmovq		%x#$H1,8($ctx)
	vmovq		%x#$H2,16($ctx)
	vzeroall

.Lno_data_vpmadd52_4x:
	ret
.size	poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
___
}
{
########################################################################
# As implied by its name, the 8x subroutine processes 8 blocks in
# parallel... This is an intermediate version, used only when the input
# length is 8*n, 8*n+1 or 8*n+2...
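#
# (On entry it extends the stored 1-3-2-4 powers to r^5..r^8 by
# multiplying them with a broadcast of r^4, the "calculate more key
# powers" step below, and then runs eight lanes in %zmm registers.)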

my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
my ($RR0,$RR1,$RR2,$SS1,$SS2) = map("%ymm$_",(6..10));

$code.=<<___;
.type	poly1305_blocks_vpmadd52_8x,\@function,4
.align	32
poly1305_blocks_vpmadd52_8x:
	shr	\$4,$len
	jz	.Lno_data_vpmadd52_8x		# too short

	shl	\$40,$padbit
	mov	64($ctx),%r8			# peek on power of the key

	vmovdqa64	.Lx_mask44(%rip),$mask44
	vmovdqa64	.Lx_mask42(%rip),$mask42

	test	%r8,%r8				# is power value impossible?
	js	.Linit_vpmadd52			# if it is, then init R[4]

	vmovq	0($ctx),%x#$H0			# load current hash value
	vmovq	8($ctx),%x#$H1
	vmovq	16($ctx),%x#$H2

.Lblocks_vpmadd52_8x:
	################################################################
	# first we calculate more key powers

	vmovdqu64	128($ctx),$R2		# load 1-3-2-4 powers
	vmovdqu64	160($ctx),$S1
	vmovdqu64	64($ctx),$R0
	vmovdqu64	96($ctx),$R1

	vpsllq		\$2,$R2,$S2		# S2 = R2*5*4
	vpaddq		$R2,$S2,$S2
	vpsllq		\$2,$S2,$S2

	vpbroadcastq	%x#$R2,$RR2		# broadcast 4th power
	vpbroadcastq	%x#$R0,$RR0
	vpbroadcastq	%x#$R1,$RR1

	vpxorq		$D0lo,$D0lo,$D0lo
	vpmadd52luq	$RR2,$S1,$D0lo
	vpxorq		$D0hi,$D0hi,$D0hi
	vpmadd52huq	$RR2,$S1,$D0hi
	vpxorq		$D1lo,$D1lo,$D1lo
	vpmadd52luq	$RR2,$S2,$D1lo
	vpxorq		$D1hi,$D1hi,$D1hi
	vpmadd52huq	$RR2,$S2,$D1hi
	vpxorq		$D2lo,$D2lo,$D2lo
	vpmadd52luq	$RR2,$R0,$D2lo
	vpxorq		$D2hi,$D2hi,$D2hi
	vpmadd52huq	$RR2,$R0,$D2hi

	vpmadd52luq	$RR0,$R0,$D0lo
	vpmadd52huq	$RR0,$R0,$D0hi
	vpmadd52luq	$RR0,$R1,$D1lo
	vpmadd52huq	$RR0,$R1,$D1hi
	vpmadd52luq	$RR0,$R2,$D2lo
	vpmadd52huq	$RR0,$R2,$D2hi

	vpmadd52luq	$RR1,$S2,$D0lo
	vpmadd52huq	$RR1,$S2,$D0hi
	vpmadd52luq	$RR1,$R0,$D1lo
	vpmadd52huq	$RR1,$R0,$D1hi
	vpmadd52luq	$RR1,$R1,$D2lo
	vpmadd52huq	$RR1,$R1,$D2hi

	################################################################
	# partial reduction
	vpsrlq		\$44,$D0lo,$tmp
	vpsllq		\$8,$D0hi,$D0hi
	vpandq		$mask44,$D0lo,$RR0
	vpaddq		$tmp,$D0hi,$D0hi

	vpaddq		$D0hi,$D1lo,$D1lo

	vpsrlq		\$44,$D1lo,$tmp
	vpsllq		\$8,$D1hi,$D1hi
	vpandq		$mask44,$D1lo,$RR1
	vpaddq		$tmp,$D1hi,$D1hi

	vpaddq		$D1hi,$D2lo,$D2lo

	vpsrlq		\$42,$D2lo,$tmp
	vpsllq		\$10,$D2hi,$D2hi
	vpandq		$mask42,$D2lo,$RR2
	vpaddq		$tmp,$D2hi,$D2hi

	vpaddq		$D2hi,$RR0,$RR0
	vpsllq		\$2,$D2hi,$D2hi

	vpaddq		$D2hi,$RR0,$RR0

	vpsrlq		\$44,$RR0,$tmp		# additional step
	vpandq		$mask44,$RR0,$RR0

	vpaddq		$tmp,$RR1,$RR1

	################################################################
	# At this point Rx holds powers 1-3-2-4, RRx holds 5-7-6-8, and
	# the goal is the 1-5-2-6-3-7-4-8 order, which reflects how the
	# data is loaded...

	vpunpcklqdq	$R2,$RR2,$T2		# 3748
	vpunpckhqdq	$R2,$RR2,$R2		# 1526
	vpunpcklqdq	$R0,$RR0,$T0
	vpunpckhqdq	$R0,$RR0,$R0
	vpunpcklqdq	$R1,$RR1,$T1
	vpunpckhqdq	$R1,$RR1,$R1
___
######## switch to %zmm
map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
map(s/%y/%z/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2);

$code.=<<___;
	vshufi64x2	\$0x44,$R2,$T2,$RR2	# 15263748
	vshufi64x2	\$0x44,$R0,$T0,$RR0
	vshufi64x2	\$0x44,$R1,$T1,$RR1

	vmovdqu64	16*0($inp),$T2		# load data
	vmovdqu64	16*4($inp),$T3
	lea		16*8($inp),$inp

	vpsllq		\$2,$RR2,$SS2		# S2 = R2*5*4
	vpsllq		\$2,$RR1,$SS1		# S1 = R1*5*4
	vpaddq		$RR2,$SS2,$SS2
	vpaddq		$RR1,$SS1,$SS1
	vpsllq		\$2,$SS2,$SS2
	vpsllq		\$2,$SS1,$SS1

	vpbroadcastq	$padbit,$PAD
	vpbroadcastq	%x#$mask44,$mask44
	vpbroadcastq	%x#$mask42,$mask42

	vpbroadcastq	%x#$SS1,$S1		# broadcast 8th power
	vpbroadcastq	%x#$SS2,$S2
	vpbroadcastq	%x#$RR0,$R0
	vpbroadcastq	%x#$RR1,$R1
	vpbroadcastq	%x#$RR2,$R2

	vpunpcklqdq	$T3,$T2,$T1		# transpose data
	vpunpckhqdq	$T3,$T2,$T3

	# at this point 64-bit lanes are ordered as 73625140

	vpsrlq		\$24,$T3,$T2		# splat the data
	vporq		$PAD,$T2,$T2
	 vpaddq		$T2,$H2,$H2		# accumulate input
	vpandq		$mask44,$T1,$T0
	vpsrlq		\$44,$T1,$T1
	vpsllq		\$20,$T3,$T3
	vporq		$T3,$T1,$T1
	vpandq		$mask44,$T1,$T1

	sub		\$8,$len
	jz		.Ltail_vpmadd52_8x
	jmp		.Loop_vpmadd52_8x

.align	32
.Loop_vpmadd52_8x:
	#vpaddq		$T2,$H2,$H2		# accumulate input
	vpaddq		$T0,$H0,$H0
	vpaddq		$T1,$H1,$H1

	vpxorq		$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq		$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq		$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq		$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq		$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq		$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi

	 vmovdqu64	16*0($inp),$T2		# load data
	 vmovdqu64	16*4($inp),$T3
	 lea		16*8($inp),$inp
	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi

	 vpunpcklqdq	$T3,$T2,$T1		# transpose data
	 vpunpckhqdq	$T3,$T2,$T3
	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi

	################################################################
	# partial reduction (interleaved with data splat)
	vpsrlq		\$44,$D0lo,$tmp
	vpsllq		\$8,$D0hi,$D0hi
	vpandq		$mask44,$D0lo,$H0
	vpaddq		$tmp,$D0hi,$D0hi

	 vpsrlq		\$24,$T3,$T2
	 vporq		$PAD,$T2,$T2
	vpaddq		$D0hi,$D1lo,$D1lo

	vpsrlq		\$44,$D1lo,$tmp
	vpsllq		\$8,$D1hi,$D1hi
	vpandq		$mask44,$D1lo,$H1
	vpaddq		$tmp,$D1hi,$D1hi

	 vpandq		$mask44,$T1,$T0
	 vpsrlq		\$44,$T1,$T1
	 vpsllq		\$20,$T3,$T3
	vpaddq		$D1hi,$D2lo,$D2lo

	vpsrlq		\$42,$D2lo,$tmp
	vpsllq		\$10,$D2hi,$D2hi
	vpandq		$mask42,$D2lo,$H2
	vpaddq		$tmp,$D2hi,$D2hi

	  vpaddq	$T2,$H2,$H2		# accumulate input
	vpaddq		$D2hi,$H0,$H0
	vpsllq		\$2,$D2hi,$D2hi

	vpaddq		$D2hi,$H0,$H0
	 vporq		$T3,$T1,$T1
	 vpandq		$mask44,$T1,$T1

	vpsrlq		\$44,$H0,$tmp		# additional step
	vpandq		$mask44,$H0,$H0

	vpaddq		$tmp,$H1,$H1

	sub		\$8,$len		# len-=128
	jnz		.Loop_vpmadd52_8x

.Ltail_vpmadd52_8x:
	#vpaddq		$T2,$H2,$H2		# accumulate input
	vpaddq		$T0,$H0,$H0
	vpaddq		$T1,$H1,$H1

	vpxorq		$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$SS1,$D0lo
	vpxorq		$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$SS1,$D0hi
	vpxorq		$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$SS2,$D1lo
	vpxorq		$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$SS2,$D1hi
	vpxorq		$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$RR0,$D2lo
	vpxorq		$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$RR0,$D2hi

	vpmadd52luq	$H0,$RR0,$D0lo
	vpmadd52huq	$H0,$RR0,$D0hi
	vpmadd52luq	$H0,$RR1,$D1lo
	vpmadd52huq	$H0,$RR1,$D1hi
	vpmadd52luq	$H0,$RR2,$D2lo
	vpmadd52huq	$H0,$RR2,$D2hi

	vpmadd52luq	$H1,$SS2,$D0lo
	vpmadd52huq	$H1,$SS2,$D0hi
	vpmadd52luq	$H1,$RR0,$D1lo
	vpmadd52huq	$H1,$RR0,$D1hi
	vpmadd52luq	$H1,$RR1,$D2lo
	vpmadd52huq	$H1,$RR1,$D2hi

	################################################################
	# horizontal addition

	mov		\$1,%eax
	kmovw		%eax,%k1
	vpsrldq		\$8,$D0lo,$T0
	vpsrldq		\$8,$D0hi,$H0
	vpsrldq		\$8,$D1lo,$T1
	vpsrldq		\$8,$D1hi,$H1
	vpaddq		$T0,$D0lo,$D0lo
	vpaddq		$H0,$D0hi,$D0hi
	vpsrldq		\$8,$D2lo,$T2
	vpsrldq		\$8,$D2hi,$H2
	vpaddq		$T1,$D1lo,$D1lo
	vpaddq		$H1,$D1hi,$D1hi
	 vpermq		\$0x2,$D0lo,$T0
	 vpermq		\$0x2,$D0hi,$H0
	vpaddq		$T2,$D2lo,$D2lo
	vpaddq		$H2,$D2hi,$D2hi

	vpermq		\$0x2,$D1lo,$T1
	vpermq		\$0x2,$D1hi,$H1
	vpaddq		$T0,$D0lo,$D0lo
	vpaddq		$H0,$D0hi,$D0hi
	vpermq		\$0x2,$D2lo,$T2
	vpermq		\$0x2,$D2hi,$H2
	vpaddq		$T1,$D1lo,$D1lo
	vpaddq		$H1,$D1hi,$D1hi
	 vextracti64x4	\$1,$D0lo,%y#$T0
	 vextracti64x4	\$1,$D0hi,%y#$H0
	vpaddq		$T2,$D2lo,$D2lo
	vpaddq		$H2,$D2hi,$D2hi

	vextracti64x4	\$1,$D1lo,%y#$T1
	vextracti64x4	\$1,$D1hi,%y#$H1
	vextracti64x4	\$1,$D2lo,%y#$T2
	vextracti64x4	\$1,$D2hi,%y#$H2
___
######## switch back to %ymm
map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
map(s/%z/%y/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
map(s/%z/%y/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);

$code.=<<___;
	vpaddq		$T0,$D0lo,${D0lo}{%k1}{z}
	vpaddq		$H0,$D0hi,${D0hi}{%k1}{z}
	vpaddq		$T1,$D1lo,${D1lo}{%k1}{z}
	vpaddq		$H1,$D1hi,${D1hi}{%k1}{z}
	vpaddq		$T2,$D2lo,${D2lo}{%k1}{z}
	vpaddq		$H2,$D2hi,${D2hi}{%k1}{z}

	################################################################
	# partial reduction
	vpsrlq		\$44,$D0lo,$tmp
	vpsllq		\$8,$D0hi,$D0hi
	vpandq		$mask44,$D0lo,$H0
	vpaddq		$tmp,$D0hi,$D0hi

	vpaddq		$D0hi,$D1lo,$D1lo

	vpsrlq		\$44,$D1lo,$tmp
	vpsllq		\$8,$D1hi,$D1hi
	vpandq		$mask44,$D1lo,$H1
	vpaddq		$tmp,$D1hi,$D1hi

	vpaddq		$D1hi,$D2lo,$D2lo

	vpsrlq		\$42,$D2lo,$tmp
	vpsllq		\$10,$D2hi,$D2hi
	vpandq		$mask42,$D2lo,$H2
	vpaddq		$tmp,$D2hi,$D2hi

	vpaddq		$D2hi,$H0,$H0
	vpsllq		\$2,$D2hi,$D2hi

	vpaddq		$D2hi,$H0,$H0

	vpsrlq		\$44,$H0,$tmp		# additional step
	vpandq		$mask44,$H0,$H0

	vpaddq		$tmp,$H1,$H1

	################################################################

	vmovq		%x#$H0,0($ctx)
	vmovq		%x#$H1,8($ctx)
	vmovq		%x#$H2,16($ctx)
	vzeroall

.Lno_data_vpmadd52_8x:
	ret
.size	poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
___
}
$code.=<<___;
.type	poly1305_emit_base2_44,\@function,3
.align	32
poly1305_emit_base2_44:
	mov	0($ctx),%r8	# load hash value
	mov	8($ctx),%r9
	mov	16($ctx),%r10

	mov	%r9,%rax
	shr	\$20,%r9
	shl	\$44,%rax
	mov	%r10,%rcx
	shr	\$40,%r10
	shl	\$24,%rcx

	add	%rax,%r8
	adc	%rcx,%r9
	adc	\$0,%r10

	mov	%r8,%rax
	add	\$5,%r8		# compare to modulus
	mov	%r9,%rcx
	adc	\$0,%r9
	adc	\$0,%r10
	shr	\$2,%r10	# did 130-bit value overflow?
	cmovnz	%r8,%rax
	cmovnz	%r9,%rcx

	add	0($nonce),%rax	# accumulate nonce
	adc	8($nonce),%rcx
	mov	%rax,0($mac)	# write result
	mov	%rcx,8($mac)

	ret
.size	poly1305_emit_base2_44,.-poly1305_emit_base2_44
___
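#
# Informal C-ish equivalent of the emit path above (illustrative only):
#
#	/* base 2^44 -> two 64-bit words plus a 2-bit top, with carries */
#	(lo, hi, top) = h0 + (h1 << 44) + (h2 << 88);
#	/* conditionally subtract p = 2^130-5: add 5 and keep the sum
#	   iff it carries out of bit 130 */
#	(lo', hi', top') = (lo, hi, top) + 5;
#	if (top' >> 2) { lo = lo'; hi = hi'; }
#	/* accumulate the nonce mod 2^128 and store the tag */
#	(mac[0], mac[1]) = (lo, hi) + (nonce[0], nonce[1]);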
}	}	}
}

if (!$kernel)
{	# chacha20-poly1305 helpers
my ($out,$inp,$otp,$len)=$win64 ? ("%rcx","%rdx","%r8", "%r9") :  # Win64 order
                                  ("%rdi","%rsi","%rdx","%rcx");  # Unix order
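#
# Both helpers XOR the input with the one-time pad at $otp, write the
# result to $out, leave a zero-padded copy of the ciphertext in the pad
# buffer (ready to be fed to Poly1305) and return a pointer past the
# padding. Informal C sketch of the encrypt case (decrypt instead
# copies the incoming ciphertext into the pad buffer):
#
#	size_t i;
#	for (i = 0; i < len; i++) {
#		otp[i] ^= inp[i];	/* ciphertext           */
#		out[i]  = otp[i];
#	}
#	while (i % 16) otp[i++] = 0;	/* zero-pad to 16 bytes */
#	return otp + i;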
$code.=<<___;
.globl	xor128_encrypt_n_pad
.type	xor128_encrypt_n_pad,\@abi-omnipotent
.align	16
xor128_encrypt_n_pad:
	sub	$otp,$inp
	sub	$otp,$out
	mov	$len,%r10		# put len aside
	shr	\$4,$len		# len / 16
	jz	.Ltail_enc
	nop
.Loop_enc_xmm:
	movdqu	($inp,$otp),%xmm0
	pxor	($otp),%xmm0
	movdqu	%xmm0,($out,$otp)
	movdqa	%xmm0,($otp)
	lea	16($otp),$otp
	dec	$len
	jnz	.Loop_enc_xmm

	and	\$15,%r10		# len % 16
	jz	.Ldone_enc

.Ltail_enc:
	mov	\$16,$len
	sub	%r10,$len
	xor	%eax,%eax
.Loop_enc_byte:
	mov	($inp,$otp),%al
	xor	($otp),%al
	mov	%al,($out,$otp)
	mov	%al,($otp)
	lea	1($otp),$otp
	dec	%r10
	jnz	.Loop_enc_byte

	xor	%eax,%eax
.Loop_enc_pad:
	mov	%al,($otp)
	lea	1($otp),$otp
	dec	$len
	jnz	.Loop_enc_pad

.Ldone_enc:
	mov	$otp,%rax
	ret
.size	xor128_encrypt_n_pad,.-xor128_encrypt_n_pad

.globl	xor128_decrypt_n_pad
.type	xor128_decrypt_n_pad,\@abi-omnipotent
.align	16
xor128_decrypt_n_pad:
	sub	$otp,$inp
	sub	$otp,$out
	mov	$len,%r10		# put len aside
	shr	\$4,$len		# len / 16
	jz	.Ltail_dec
	nop
.Loop_dec_xmm:
	movdqu	($inp,$otp),%xmm0
	movdqa	($otp),%xmm1
	pxor	%xmm0,%xmm1
	movdqu	%xmm1,($out,$otp)
	movdqa	%xmm0,($otp)
	lea	16($otp),$otp
	dec	$len
	jnz	.Loop_dec_xmm

	pxor	%xmm1,%xmm1
	and	\$15,%r10		# len % 16
	jz	.Ldone_dec

.Ltail_dec:
	mov	\$16,$len
	sub	%r10,$len
	xor	%eax,%eax
	xor	%r11,%r11
.Loop_dec_byte:
	mov	($inp,$otp),%r11b
	mov	($otp),%al
	xor	%r11b,%al
	mov	%al,($out,$otp)
	mov	%r11b,($otp)
	lea	1($otp),$otp
	dec	%r10
	jnz	.Loop_dec_byte

	xor	%eax,%eax
.Loop_dec_pad:
	mov	%al,($otp)
	lea	1($otp),$otp
	dec	$len
	jnz	.Loop_dec_pad

.Ldone_dec:
	mov	$otp,%rax
	ret
.size	xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
___
}

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	se_handler,.-se_handler

.type	avx_handler,\@abi-omnipotent
.align	16
avx_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	208($context),%rax	# pull context->R11

	lea	0x50(%rax),%rsi
	lea	0xf8(%rax),%rax
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx
	.long	0xa548f3fc		# cld; rep movsq

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	avx_handler,.-avx_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_poly1305_init_x86_64
	.rva	.LSEH_end_poly1305_init_x86_64
	.rva	.LSEH_info_poly1305_init_x86_64

	.rva	.LSEH_begin_poly1305_blocks_x86_64
	.rva	.LSEH_end_poly1305_blocks_x86_64
	.rva	.LSEH_info_poly1305_blocks_x86_64

	.rva	.LSEH_begin_poly1305_emit_x86_64
	.rva	.LSEH_end_poly1305_emit_x86_64
	.rva	.LSEH_info_poly1305_emit_x86_64
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_poly1305_blocks_avx
	.rva	.Lbase2_64_avx
	.rva	.LSEH_info_poly1305_blocks_avx_1

	.rva	.Lbase2_64_avx
	.rva	.Leven_avx
	.rva	.LSEH_info_poly1305_blocks_avx_2

	.rva	.Leven_avx
	.rva	.LSEH_end_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_3

	.rva	.LSEH_begin_poly1305_emit_avx
	.rva	.LSEH_end_poly1305_emit_avx
	.rva	.LSEH_info_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_poly1305_blocks_avx2
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_1

	.rva	.Lbase2_64_avx2
	.rva	.Leven_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_2

	.rva	.Leven_avx2
	.rva	.LSEH_end_poly1305_blocks_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_3
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_poly1305_blocks_avx512
	.rva	.LSEH_end_poly1305_blocks_avx512
	.rva	.LSEH_info_poly1305_blocks_avx512
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_poly1305_init_x86_64:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_init_x86_64,.LSEH_begin_poly1305_init_x86_64

.LSEH_info_poly1305_blocks_x86_64:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_body,.Lblocks_epilogue

.LSEH_info_poly1305_emit_x86_64:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_emit_x86_64,.LSEH_begin_poly1305_emit_x86_64
___
$code.=<<___ if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_avx_body,.Lblocks_avx_epilogue		# HandlerData[]

.LSEH_info_poly1305_blocks_avx_2:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lbase2_64_avx_body,.Lbase2_64_avx_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx_3:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx_body,.Ldo_avx_epilogue			# HandlerData[]

.LSEH_info_poly1305_emit_avx:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_avx2_body,.Lblocks_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_2:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_3:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx2_body,.Ldo_avx2_epilogue		# HandlerData[]
___
$code.=<<___ if ($avx>2);
.LSEH_info_poly1305_blocks_avx512:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx512_body,.Ldo_avx512_epilogue		# HandlerData[]
___
}

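# Copy this script's own leading comment block (the license header) to
# the output, rewriting '#' comments as '//'.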
open SELF,$0;
while(<SELF>) {
	next if (/^#!/);
	last if (!s/^#/\/\// and !/^$/);
	print;
}
close SELF;

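# Post-process $code before printing: evaluate `...` arithmetic,
# resolve the #d (dword) and %x/%y/%z register-size annotations used
# throughout, and, for kernel builds, normalize .type directives and
# drop .cfi annotations.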
foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/%r([a-z]+)#d/%e$1/g;
	s/%r([0-9]+)#d/%r$1d/g;
	s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;

	if ($kernel) {
		s/(^\.type.*),[0-9]+$/\1/;
		s/(^\.type.*),\@abi-omnipotent+$/\1,\@function/;
		next if /^\.cfi.*/;
	}

	print $_,"\n";
}
close STDOUT;