# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
# Copyright (C) 2017-2018 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved.
# Copyright (C) 2017-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
# Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved.
#
# This code is taken from the OpenSSL project but the author, Andy Polyakov,
# has relicensed it under the licenses specified in the SPDX header above.
# The original headers, including the original license headers, are
# included below for completeness.
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# This module implements Poly1305 hash for x86_64.
#
# Add AVX512F+VL+BW code path.
#
# Convert AVX512F+VL+BW code path to pure AVX512F, so that it can be
# executed even on Knights Landing. The trigger for the modification was
# the observation that AVX512 code paths can negatively affect overall
# Skylake-X system performance. Since we are likely to suppress the
# AVX512F capability flag [at least on Skylake-X], the conversion serves
# as a kind of "investment protection". Note that the next *lake
# processor, Cannonlake, has an AVX512IFMA code path to execute...
#
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#               IALU/gcc-4.8(*) AVX(**)         AVX2    AVX-512
# Westmere      1.88/+120%      -
# Sandy Bridge  1.39/+140%      1.10
# Haswell       1.14/+175%      1.11            0.65
# Skylake[-X]   1.13/+120%      0.96            0.51    [0.35]
# Silvermont    2.83/+95%       -
# Knights L     3.60/?          1.65            1.10    0.41(***)
# Goldmont      1.70/+180%      -
# VIA Nano      1.82/+150%      -
# Sledgehammer  1.38/+160%      -
# Bulldozer     2.30/+130%      0.97
# Ryzen         1.15/+200%      1.08            1.18
# (*)   improvement coefficients relative to clang are more modest and
#       are ~50% on most processors; in both cases we are comparing to
#       __int128 code;
# (**)  an SSE2 implementation was attempted, but among non-AVX processors
#       it was faster than the integer-only code only on older Intel P4
#       and Core processors, by 30-50% (less so the newer the processor
#       is), while being slower on contemporary ones, for example almost
#       2x slower on Atom; as the former are naturally disappearing,
#       SSE2 is deemed unnecessary;
# (***) strangely enough, performance seems to vary from core to core;
#       the listed result is the best case;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$kernel=0; $kernel=1 if (!$flavour && !$output);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
    =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
    $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
    $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
    $avx += 1 if ($1==2.11 && $2>=8);

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
    $avx = ($1>=10) + ($1>=11);

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
    $avx = ($2>=3.0) + ($2>3.0);

$avx = 4; # The kernel uses ifdefs for this.

sub declare_function() {
    my ($name, $align, $nargs) = @_;
    $code .= ".align $align\n";
    $code .= "SYM_FUNC_START($name)\n";
    $code .= ".L$name:\n";
    $code .= ".globl $name\n";
    $code .= ".type  $name,\@function,$nargs\n";
    $code .= ".align $align\n";
    $code .= "SYM_FUNC_END($name)\n";
    $code .= ".size  $name,.-$name\n";

$code.=<<___ if $kernel;
#include <linux/linkage.h>
$code.=<<___ if $kernel;
.long   0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
.long   `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
.long   0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
.long   2,2,2,3,2,0,2,1
.long   0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7
.long   0,1,1,2,2,3,7,7
.quad   0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff
.quad   0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
.quad   0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff
.quad   0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
.quad   0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff
$code.=<<___ if (!$kernel);
.asciz  "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"

my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
my ($mac,$nonce)=($inp,$len);   # *_emit arguments
my ($d1,$d2,$d3, $r0,$r1,$s1)=("%r8","%r9","%rdi","%r11","%r12","%r13");
my ($h0,$h1,$h2)=("%r14","%rbx","%r10");

sub poly1305_iteration {
# input:        copy of $r1 in %rax, $h0-$h2, $r0-$r1
# output:       $h0-$h2 *= $r0-$r1
        mov     %rax,$h0                # future $h0
        mov     $h2,$h1                 # borrow $h1
        imulq   $s1,$h1                 # h2*s1
        imulq   $r0,$h2                 # h2*r0
        mov     \$-4,%rax               # mask value
        and     $d3,%rax                # last reduction step
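        # Final reduction sketch: write the 131+-bit result as l + c*2^130
        # with l < 2^130. Since 2^130 = 5 (mod 2^130-5), the excess folds
        # back as 5*c = 4*c + c: the "and" with -4 extracts 4*c from the
        # top limb, and the subsequent shift right by 2 recovers c; both
        # are added back into the low limbs.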
########################################################################
# The layout of the opaque area is as follows:
#
#       unsigned __int64 h[3];          # current hash value base 2^64
#       unsigned __int64 r[2];          # key value base 2^64
$code.=<<___ if (!$kernel);
.extern OPENSSL_ia32cap_P

.globl  poly1305_init_x86_64
.hidden poly1305_init_x86_64
.globl  poly1305_blocks_x86_64
.hidden poly1305_blocks_x86_64
.globl  poly1305_emit_x86_64
.hidden poly1305_emit_x86_64

&declare_function("poly1305_init_x86_64", 32, 3);
        mov     %rax,0($ctx)            # initialize hash value
$code.=<<___ if (!$kernel);
        lea     poly1305_blocks_x86_64(%rip),%r10
        lea     poly1305_emit_x86_64(%rip),%r11
$code.=<<___ if (!$kernel && $avx);
        mov     OPENSSL_ia32cap_P+4(%rip),%r9
        lea     poly1305_blocks_avx(%rip),%rax
        lea     poly1305_emit_avx(%rip),%rcx
        bt      \$`60-32`,%r9           # AVX?
$code.=<<___ if (!$kernel && $avx>1);
        lea     poly1305_blocks_avx2(%rip),%rax
        bt      \$`5+32`,%r9            # AVX2?
$code.=<<___ if (!$kernel && $avx>3);
        mov     \$`(1<<31|1<<21|1<<16)`,%rax
        mov     \$0x0ffffffc0fffffff,%rax
        mov     \$0x0ffffffc0ffffffc,%rcx
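        # The two masks above implement the Poly1305 "clamping" of the
        # key half r: the top four bits of every 32-bit word of r are
        # cleared, as are the bottom two bits of the upper three words.
        # In particular the high 64 bits of the clamped r are divisible
        # by 4, which the s1 = r1 + (r1 >> 2) computation below relies on.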
$code.=<<___ if (!$kernel && $flavour !~ /elf32/);
$code.=<<___ if (!$kernel && $flavour =~ /elf32/);
&end_function("poly1305_init_x86_64");

&declare_function("poly1305_blocks_x86_64", 32, 4);
        jz      .Lno_data               # too short
        mov     $len,%r15               # reassign $len
        mov     24($ctx),$r0            # load r
        mov     0($ctx),$h0             # load hash value
        add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
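        # Why s1 works: clamping leaves r1 divisible by 4, so
        # s1 = r1 + (r1 >> 2) equals (5*r1)/4 exactly. The cross product
        # h2*r1 carries weight 2^192 = 2^62 * 2^130, and since
        # 2^130 = 5 (mod 2^130-5) it reduces to h2*r1*5*2^62 = h2*s1*2^64,
        # i.e. h2*s1 accumulated into the middle limb.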
        add     0($inp),$h0             # accumulate input
&poly1305_iteration();
        mov     $h0,0($ctx)             # store hash value
.cfi_adjust_cfa_offset  -48
&end_function("poly1305_blocks_x86_64");

&declare_function("poly1305_emit_x86_64", 32, 3);
        mov     0($ctx),%r8             # load hash value
        add     \$5,%r8                 # compare to modulus
        shr     \$2,%r10                # did 130-bit value overflow?
        add     0($nonce),%rax          # accumulate nonce
        mov     %rax,0($mac)            # write result
&end_function("poly1305_emit_x86_64");
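# A sketch of the final reduction performed by the emit path: compute
# t = h + 5 and test whether the sum spills past bit 130 (the shift
# right by 2 of the top limb). If it does, h >= 2^130-5 and the
# canonical residue is t mod 2^128; otherwise it is h itself. The
# selected value plus the 128-bit nonce, reduced modulo 2^128, is the
# authentication tag.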
########################################################################
# The layout of the opaque area is as follows:
#
#       unsigned __int32 h[5];          # current hash value base 2^26
#       unsigned __int32 is_base2_26;
#       unsigned __int64 r[2];          # key value base 2^64
#       unsigned __int64 pad;
#       struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
#
# where r^n are the base 2^26 digits of the powers of the multiplier
# key. There are 5 digits, but the last four are interleaved with their
# multiples of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3,
# 5*r3, r4, 5*r4.
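#
# To illustrate the base 2^26 form: a 130-bit value h is kept as
#
#       h = h0 + h1*2^26 + h2*2^52 + h3*2^78 + h4*2^104,  0 <= h[i] < 2^26,
#
# so every digit fits a 32-bit lane with headroom for carries, and each
# 26x26-bit lane product fits comfortably within 64 bits.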
my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    map("%xmm$_",(0..15));

.type   __poly1305_block,\@abi-omnipotent
&poly1305_iteration();
.size   __poly1305_block,.-__poly1305_block

.type   __poly1305_init_avx,\@abi-omnipotent
        lea     48+64($ctx),$ctx        # size optimization
        call    __poly1305_block        # r^2

        mov     \$0x3ffffff,%eax        # save interleaved r^2 and r base 2^26
        mov     %eax,`16*0+0-64`($ctx)
        mov     %edx,`16*0+4-64`($ctx)
        mov     %eax,`16*1+0-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     %edx,`16*1+4-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %eax,`16*2+0-64`($ctx)
        mov     %edx,`16*2+4-64`($ctx)
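        # Note the lea idiom: lea (reg,reg,4) computes reg + 4*reg =
        # 5*reg in a single instruction, which is how the interleaved
        # 5*r_i table entries are produced throughout this routine.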
        mov     %eax,`16*3+0-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     %edx,`16*3+4-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %eax,`16*4+0-64`($ctx)
        mov     %edx,`16*4+4-64`($ctx)
        mov     %eax,`16*5+0-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     %edx,`16*5+4-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %eax,`16*6+0-64`($ctx)
        mov     %edx,`16*6+4-64`($ctx)
        mov     $d1#d,`16*7+0-64`($ctx)
        lea     ($d1,$d1,4),$d1         # *5
        mov     $d2#d,`16*7+4-64`($ctx)
        lea     ($d2,$d2,4),$d2         # *5
        mov     $d1#d,`16*8+0-64`($ctx)
        mov     $d2#d,`16*8+4-64`($ctx)

        call    __poly1305_block        # r^3

        mov     \$0x3ffffff,%eax        # save r^3 base 2^26
        mov     %eax,`16*0+12-64`($ctx)
        mov     %edx,`16*1+12-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %edx,`16*2+12-64`($ctx)
        mov     %eax,`16*3+12-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     %eax,`16*4+12-64`($ctx)
        mov     %edx,`16*5+12-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %edx,`16*6+12-64`($ctx)
        mov     $d1#d,`16*7+12-64`($ctx)
        lea     ($d1,$d1,4),$d1         # *5
        mov     $d1#d,`16*8+12-64`($ctx)

        call    __poly1305_block        # r^4

        mov     \$0x3ffffff,%eax        # save r^4 base 2^26
        mov     %eax,`16*0+8-64`($ctx)
        mov     %edx,`16*1+8-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %edx,`16*2+8-64`($ctx)
        mov     %eax,`16*3+8-64`($ctx)
        lea     (%rax,%rax,4),%eax      # *5
        mov     %eax,`16*4+8-64`($ctx)
        mov     %edx,`16*5+8-64`($ctx)
        lea     (%rdx,%rdx,4),%edx      # *5
        mov     %edx,`16*6+8-64`($ctx)
        mov     $d1#d,`16*7+8-64`($ctx)
        lea     ($d1,$d1,4),$d1         # *5
        mov     $d1#d,`16*8+8-64`($ctx)

        lea     -48-64($ctx),$ctx       # size [de-]optimization
.size   __poly1305_init_avx,.-__poly1305_init_avx

&declare_function("poly1305_blocks_avx", 32, 4);
        mov     20($ctx),%r8d           # is_base2_26
        mov     $len,%r15               # reassign $len
        mov     0($ctx),$d1             # load hash value
        mov     24($ctx),$r0            # load r

        ################################# base 2^26 -> base 2^64
        and     \$`-1*(1<<31)`,$d1
        mov     $d2,$r1                 # borrow $r1
        and     \$`-1*(1<<31)`,$d2
        adc     \$0,$h2                 # can be partially reduced...
        mov     \$-4,$d2                # ... so reduce
        add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
        add     0($inp),$h0             # accumulate input
        call    __poly1305_block
        test    $padbit,$padbit         # if $padbit is zero,
        jz      .Lstore_base2_64_avx    # store hash in base 2^64 format

        ################################# base 2^64 -> base 2^26
        and     \$0x3ffffff,%rax        # h[0]
        and     \$0x3ffffff,%rdx        # h[1]
        and     \$0x3ffffff,$h0         # h[2]
        and     \$0x3ffffff,$h1         # h[3]
        jz      .Lstore_base2_26_avx
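        # The base 2^64 -> base 2^26 conversion above follows the pattern
        #
        #       h[0] =  h0          & 0x3ffffff
        #       h[1] = (h0 >> 26)   & 0x3ffffff
        #       h[2] = (h0 >> 52 | h1 << 12) & 0x3ffffff
        #       h[3] = (h1 >> 14)   & 0x3ffffff
        #       h[4] =  h1 >> 40    | h2 << 24
        #
        # i.e. the three 64-bit limbs are resliced into five 26-bit
        # digits for the vector code paths.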
.Lstore_base2_64_avx:
        mov     $h2,16($ctx)            # note that is_base2_26 is zeroed

.Lstore_base2_26_avx:
        mov     %rax#d,0($ctx)          # store hash value base 2^26

.Lblocks_avx_epilogue:
        mov     $len,%r15               # reassign $len
        mov     24($ctx),$r0            # load r
        mov     0($ctx),$h0             # load hash value
        add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
        add     0($inp),$h0             # accumulate input
        call    __poly1305_block

        ################################# base 2^64 -> base 2^26
        and     \$0x3ffffff,%rax        # h[0]
        and     \$0x3ffffff,%rdx        # h[1]
        and     \$0x3ffffff,$h0         # h[2]
        and     \$0x3ffffff,$h1         # h[3]
        movl    \$1,20($ctx)            # set is_base2_26
        call    __poly1305_init_avx

.Lbase2_64_avx_epilogue:
        vmovd   4*0($ctx),$H0           # load hash value
$code.=<<___ if (!$win64);
.cfi_def_cfa_register   %r10
$code.=<<___ if ($win64);
        vmovdqa %xmm6,0x50(%r11)
        vmovdqa %xmm7,0x60(%r11)
        vmovdqa %xmm8,0x70(%r11)
        vmovdqa %xmm9,0x80(%r11)
        vmovdqa %xmm10,0x90(%r11)
        vmovdqa %xmm11,0xa0(%r11)
        vmovdqa %xmm12,0xb0(%r11)
        vmovdqa %xmm13,0xc0(%r11)
        vmovdqa %xmm14,0xd0(%r11)
        vmovdqa %xmm15,0xe0(%r11)

        vmovdqu `16*3`($ctx),$D4        # preload r0^2
        lea     `16*3+64`($ctx),$ctx    # size optimization
        lea     .Lconst(%rip),%rcx

        ################################################################
        vmovdqu 16*2($inp),$T0
        vmovdqu 16*3($inp),$T1
        vmovdqa 64(%rcx),$MASK          # .Lmask26

        vpsrldq \$6,$T0,$T2             # splat input
        vpunpckhqdq     $T1,$T0,$T4     # 4
        vpunpcklqdq     $T1,$T0,$T0     # 0:1
        vpunpcklqdq     $T3,$T2,$T3     # 2:3
        vpsrlq  \$40,$T4,$T4            # 4
        vpand   $MASK,$T0,$T0           # 0
        vpand   $MASK,$T1,$T1           # 1
        vpand   $MASK,$T2,$T2           # 2
        vpand   $MASK,$T3,$T3           # 3
        vpor    32(%rcx),$T4,$T4        # padbit, yes, always

        # expand and copy pre-calculated table to stack
        vmovdqu `16*1-64`($ctx),$D1
        vmovdqu `16*2-64`($ctx),$D2
        vpshufd \$0xEE,$D4,$D3          # 34xx -> 3434
        vpshufd \$0x44,$D4,$D0          # xx12 -> 1212
        vmovdqa $D3,-0x90(%r11)
        vmovdqa $D0,0x00(%rsp)
        vpshufd \$0xEE,$D1,$D4
        vmovdqu `16*3-64`($ctx),$D0
        vpshufd \$0x44,$D1,$D1
        vmovdqa $D4,-0x80(%r11)
        vmovdqa $D1,0x10(%rsp)
        vpshufd \$0xEE,$D2,$D3
        vmovdqu `16*4-64`($ctx),$D1
        vpshufd \$0x44,$D2,$D2
        vmovdqa $D3,-0x70(%r11)
        vmovdqa $D2,0x20(%rsp)
        vpshufd \$0xEE,$D0,$D4
        vmovdqu `16*5-64`($ctx),$D2
        vpshufd \$0x44,$D0,$D0
        vmovdqa $D4,-0x60(%r11)
        vmovdqa $D0,0x30(%rsp)
        vpshufd \$0xEE,$D1,$D3
        vmovdqu `16*6-64`($ctx),$D0
        vpshufd \$0x44,$D1,$D1
        vmovdqa $D3,-0x50(%r11)
        vmovdqa $D1,0x40(%rsp)
        vpshufd \$0xEE,$D2,$D4
        vmovdqu `16*7-64`($ctx),$D1
        vpshufd \$0x44,$D2,$D2
        vmovdqa $D4,-0x40(%r11)
        vmovdqa $D2,0x50(%rsp)
        vpshufd \$0xEE,$D0,$D3
        vmovdqu `16*8-64`($ctx),$D2
        vpshufd \$0x44,$D0,$D0
        vmovdqa $D3,-0x30(%r11)
        vmovdqa $D0,0x60(%rsp)
        vpshufd \$0xEE,$D1,$D4
        vpshufd \$0x44,$D1,$D1
        vmovdqa $D4,-0x20(%r11)
        vmovdqa $D1,0x70(%rsp)
        vpshufd \$0xEE,$D2,$D3
        vmovdqa 0x00(%rsp),$D4          # preload r0^2
        vpshufd \$0x44,$D2,$D2
        vmovdqa $D3,-0x10(%r11)
        vmovdqa $D2,0x80(%rsp)

        ################################################################
        # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
        # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
        #   \___________________/
        # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
        # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
        #   \___________________/ \____________________/
        #
        # Note that we start with inp[2:3]*r^2. This is because this
        # chunk doesn't depend on the reduction from the previous
        # iteration.
        ################################################################
        # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
        # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
        # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
        # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
        # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
        #
        # though note that $Tx and $Hx are "reversed" in this section,
        # and $D4 is preloaded with r0^2...
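        #
        # The 5*r terms come from the wraparound: a product digit with
        # weight 2^130 or more is reduced via 2^130 = 5 (mod 2^130-5).
        # For example h4*r1 has weight 2^104 * 2^26 = 2^130, so it
        # contributes 5*h4*r1 to d0; the table therefore stores
        # 5*r1..5*r4 next to r1..r4 so the factor costs no extra multiply.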
        vpmuludq        $T0,$D4,$D0     # d0 = h0*r0
        vpmuludq        $T1,$D4,$D1     # d1 = h1*r0
        vmovdqa $H2,0x20(%r11)          # offload hash
        vpmuludq        $T2,$D4,$D2     # d2 = h2*r0
        vmovdqa 0x10(%rsp),$H2          # r1^2
        vpmuludq        $T3,$D4,$D3     # d3 = h3*r0
        vpmuludq        $T4,$D4,$D4     # d4 = h4*r0

        vmovdqa $H0,0x00(%r11)          #
        vpmuludq        0x20(%rsp),$T4,$H0      # h4*s1
        vmovdqa $H1,0x10(%r11)          #
        vpmuludq        $T3,$H2,$H1     # h3*r1
        vpaddq  $H0,$D0,$D0             # d0 += h4*s1
        vpaddq  $H1,$D4,$D4             # d4 += h3*r1
        vmovdqa $H3,0x30(%r11)          #
        vpmuludq        $T2,$H2,$H0     # h2*r1
        vpmuludq        $T1,$H2,$H1     # h1*r1
        vpaddq  $H0,$D3,$D3             # d3 += h2*r1
        vmovdqa 0x30(%rsp),$H3          # r2^2
        vpaddq  $H1,$D2,$D2             # d2 += h1*r1
        vmovdqa $H4,0x40(%r11)          #
        vpmuludq        $T0,$H2,$H2     # h0*r1
        vpmuludq        $T2,$H3,$H0     # h2*r2
        vpaddq  $H2,$D1,$D1             # d1 += h0*r1

        vmovdqa 0x40(%rsp),$H4          # s2^2
        vpaddq  $H0,$D4,$D4             # d4 += h2*r2
        vpmuludq        $T1,$H3,$H1     # h1*r2
        vpmuludq        $T0,$H3,$H3     # h0*r2
        vpaddq  $H1,$D3,$D3             # d3 += h1*r2
        vmovdqa 0x50(%rsp),$H2          # r3^2
        vpaddq  $H3,$D2,$D2             # d2 += h0*r2
        vpmuludq        $T4,$H4,$H0     # h4*s2
        vpmuludq        $T3,$H4,$H4     # h3*s2
        vpaddq  $H0,$D1,$D1             # d1 += h4*s2
        vmovdqa 0x60(%rsp),$H3          # s3^2
        vpaddq  $H4,$D0,$D0             # d0 += h3*s2

        vmovdqa 0x80(%rsp),$H4          # s4^2
        vpmuludq        $T1,$H2,$H1     # h1*r3
        vpmuludq        $T0,$H2,$H2     # h0*r3
        vpaddq  $H1,$D4,$D4             # d4 += h1*r3
        vpaddq  $H2,$D3,$D3             # d3 += h0*r3
        vpmuludq        $T4,$H3,$H0     # h4*s3
        vpmuludq        $T3,$H3,$H1     # h3*s3
        vpaddq  $H0,$D2,$D2             # d2 += h4*s3
        vmovdqu 16*0($inp),$H0          # load input
        vpaddq  $H1,$D1,$D1             # d1 += h3*s3
        vpmuludq        $T2,$H3,$H3     # h2*s3
        vpmuludq        $T2,$H4,$T2     # h2*s4
        vpaddq  $H3,$D0,$D0             # d0 += h2*s3

        vmovdqu 16*1($inp),$H1          #
        vpaddq  $T2,$D1,$D1             # d1 += h2*s4
        vpmuludq        $T3,$H4,$T3     # h3*s4
        vpmuludq        $T4,$H4,$T4     # h4*s4
        vpsrldq \$6,$H0,$H2             # splat input
        vpaddq  $T3,$D2,$D2             # d2 += h3*s4
        vpaddq  $T4,$D3,$D3             # d3 += h4*s4
        vpsrldq \$6,$H1,$H3             #
        vpmuludq        0x70(%rsp),$T0,$T4      # h0*r4
        vpmuludq        $T1,$H4,$T0     # h1*s4
        vpunpckhqdq     $H1,$H0,$H4     # 4
        vpaddq  $T4,$D4,$D4             # d4 += h0*r4
        vmovdqa -0x90(%r11),$T4         # r0^4
        vpaddq  $T0,$D0,$D0             # d0 += h1*s4

        vpunpcklqdq     $H1,$H0,$H0     # 0:1
        vpunpcklqdq     $H3,$H2,$H3     # 2:3

        #vpsrlq \$40,$H4,$H4            # 4
        vpsrldq \$`40/8`,$H4,$H4        # 4
        vpand   $MASK,$H0,$H0           # 0
        vpand   $MASK,$H1,$H1           # 1
        vpand   0(%rcx),$H4,$H4         # .Lmask24
        vpand   $MASK,$H2,$H2           # 2
        vpand   $MASK,$H3,$H3           # 3
        vpor    32(%rcx),$H4,$H4        # padbit, yes, always

        vpaddq  0x00(%r11),$H0,$H0      # add hash value
        vpaddq  0x10(%r11),$H1,$H1
        vpaddq  0x20(%r11),$H2,$H2
        vpaddq  0x30(%r11),$H3,$H3
        vpaddq  0x40(%r11),$H4,$H4

        ################################################################
        # Now we accumulate (inp[0:1]+hash)*r^4
        ################################################################
        # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
        # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
        # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
        # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
        # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

        vpmuludq        $H0,$T4,$T0     # h0*r0
        vpmuludq        $H1,$T4,$T1     # h1*r0
        vmovdqa -0x80(%r11),$T2         # r1^4
        vpmuludq        $H2,$T4,$T0     # h2*r0
        vpmuludq        $H3,$T4,$T1     # h3*r0
        vpmuludq        $H4,$T4,$T4     # h4*r0
        vpmuludq        -0x70(%r11),$H4,$T0     # h4*s1
        vpaddq  $T0,$D0,$D0             # d0 += h4*s1
        vpmuludq        $H2,$T2,$T1     # h2*r1
        vpmuludq        $H3,$T2,$T0     # h3*r1
        vpaddq  $T1,$D3,$D3             # d3 += h2*r1
        vmovdqa -0x60(%r11),$T3         # r2^4
        vpaddq  $T0,$D4,$D4             # d4 += h3*r1
        vpmuludq        $H1,$T2,$T1     # h1*r1
        vpmuludq        $H0,$T2,$T2     # h0*r1
        vpaddq  $T1,$D2,$D2             # d2 += h1*r1
        vpaddq  $T2,$D1,$D1             # d1 += h0*r1

        vmovdqa -0x50(%r11),$T4         # s2^4
        vpmuludq        $H2,$T3,$T0     # h2*r2
        vpmuludq        $H1,$T3,$T1     # h1*r2
        vpaddq  $T0,$D4,$D4             # d4 += h2*r2
        vpaddq  $T1,$D3,$D3             # d3 += h1*r2
        vmovdqa -0x40(%r11),$T2         # r3^4
        vpmuludq        $H0,$T3,$T3     # h0*r2
        vpmuludq        $H4,$T4,$T0     # h4*s2
        vpaddq  $T3,$D2,$D2             # d2 += h0*r2
        vpaddq  $T0,$D1,$D1             # d1 += h4*s2
        vmovdqa -0x30(%r11),$T3         # s3^4
        vpmuludq        $H3,$T4,$T4     # h3*s2
        vpmuludq        $H1,$T2,$T1     # h1*r3
        vpaddq  $T4,$D0,$D0             # d0 += h3*s2

        vmovdqa -0x10(%r11),$T4         # s4^4
        vpaddq  $T1,$D4,$D4             # d4 += h1*r3
        vpmuludq        $H0,$T2,$T2     # h0*r3
        vpmuludq        $H4,$T3,$T0     # h4*s3
        vpaddq  $T2,$D3,$D3             # d3 += h0*r3
        vpaddq  $T0,$D2,$D2             # d2 += h4*s3
        vmovdqu 16*2($inp),$T0          # load input
        vpmuludq        $H3,$T3,$T2     # h3*s3
        vpmuludq        $H2,$T3,$T3     # h2*s3
        vpaddq  $T2,$D1,$D1             # d1 += h3*s3
        vmovdqu 16*3($inp),$T1          #
        vpaddq  $T3,$D0,$D0             # d0 += h2*s3

        vpmuludq        $H2,$T4,$H2     # h2*s4
        vpmuludq        $H3,$T4,$H3     # h3*s4
        vpsrldq \$6,$T0,$T2             # splat input
        vpaddq  $H2,$D1,$D1             # d1 += h2*s4
        vpmuludq        $H4,$T4,$H4     # h4*s4
        vpsrldq \$6,$T1,$T3             #
        vpaddq  $H3,$D2,$H2             # h2 = d2 + h3*s4
        vpaddq  $H4,$D3,$H3             # h3 = d3 + h4*s4
        vpmuludq        -0x20(%r11),$H0,$H4     # h0*r4
        vpmuludq        $H1,$T4,$H0
        vpunpckhqdq     $T1,$T0,$T4     # 4
        vpaddq  $H4,$D4,$H4             # h4 = d4 + h0*r4
        vpaddq  $H0,$D0,$H0             # h0 = d0 + h1*s4

        vpunpcklqdq     $T1,$T0,$T0     # 0:1
        vpunpcklqdq     $T3,$T2,$T3     # 2:3

        #vpsrlq \$40,$T4,$T4            # 4
        vpsrldq \$`40/8`,$T4,$T4        # 4
        vmovdqa 0x00(%rsp),$D4          # preload r0^2
        vpand   $MASK,$T0,$T0           # 0
        vpand   $MASK,$T1,$T1           # 1
        vpand   0(%rcx),$T4,$T4         # .Lmask24
        vpand   $MASK,$T2,$T2           # 2
        vpand   $MASK,$T3,$T3           # 3
        vpor    32(%rcx),$T4,$T4        # padbit, yes, always

        ################################################################
        # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein

        vpaddq  $D3,$H4,$H4             # h3 -> h4
        vpaddq  $D0,$D1,$H1             # h0 -> h1
        vpaddq  $D1,$H2,$H2             # h1 -> h2
        vpaddq  $D0,$H0,$H0             # h4 -> h0
        vpaddq  $D2,$H3,$H3             # h2 -> h3
        vpaddq  $D0,$H1,$H1             # h0 -> h1
        vpaddq  $D3,$H4,$H4             # h3 -> h4
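        # A note on the lazy reduction: it runs two interleaved carry
        # chains, one starting at h0 and one at h3 (h3->h4, h0->h1,
        # h1->h2, h4->h0, h2->h3, then a final h0->h1 and h3->h4). The
        # h4->h0 step multiplies the carry by 5, again because
        # 2^130 = 5 (mod 2^130-5). Digits are not normalized all the way
        # below 2^26; they may exceed it by a couple of bits, which the
        # next round of 26x26-bit products tolerates without overflow.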
        ################################################################
        # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

        vpshufd \$0x10,$D4,$D4          # r0^n, xx12 -> x1x2
        vmovdqa $H2,0x20(%r11)
        vmovdqa $H0,0x00(%r11)
        vmovdqa $H1,0x10(%r11)
        vmovdqa $H3,0x30(%r11)
        vmovdqa $H4,0x40(%r11)

        # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
        # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
        # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
        # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
        # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

        vpmuludq        $T2,$D4,$D2     # d2 = h2*r0
        vpmuludq        $T0,$D4,$D0     # d0 = h0*r0
        vpshufd \$0x10,`16*1-64`($ctx),$H2      # r1^n
        vpmuludq        $T1,$D4,$D1     # d1 = h1*r0
        vpmuludq        $T3,$D4,$D3     # d3 = h3*r0
        vpmuludq        $T4,$D4,$D4     # d4 = h4*r0

        vpmuludq        $T3,$H2,$H0     # h3*r1
        vpaddq  $H0,$D4,$D4             # d4 += h3*r1
        vpshufd \$0x10,`16*2-64`($ctx),$H3      # s1^n
        vpmuludq        $T2,$H2,$H1     # h2*r1
        vpaddq  $H1,$D3,$D3             # d3 += h2*r1
        vpshufd \$0x10,`16*3-64`($ctx),$H4      # r2^n
        vpmuludq        $T1,$H2,$H0     # h1*r1
        vpaddq  $H0,$D2,$D2             # d2 += h1*r1
        vpmuludq        $T0,$H2,$H2     # h0*r1
        vpaddq  $H2,$D1,$D1             # d1 += h0*r1
        vpmuludq        $T4,$H3,$H3     # h4*s1
        vpaddq  $H3,$D0,$D0             # d0 += h4*s1

        vpshufd \$0x10,`16*4-64`($ctx),$H2      # s2^n
        vpmuludq        $T2,$H4,$H1     # h2*r2
        vpaddq  $H1,$D4,$D4             # d4 += h2*r2
        vpmuludq        $T1,$H4,$H0     # h1*r2
        vpaddq  $H0,$D3,$D3             # d3 += h1*r2
        vpshufd \$0x10,`16*5-64`($ctx),$H3      # r3^n
        vpmuludq        $T0,$H4,$H4     # h0*r2
        vpaddq  $H4,$D2,$D2             # d2 += h0*r2
        vpmuludq        $T4,$H2,$H1     # h4*s2
        vpaddq  $H1,$D1,$D1             # d1 += h4*s2
        vpshufd \$0x10,`16*6-64`($ctx),$H4      # s3^n
        vpmuludq        $T3,$H2,$H2     # h3*s2
        vpaddq  $H2,$D0,$D0             # d0 += h3*s2

        vpmuludq        $T1,$H3,$H0     # h1*r3
        vpaddq  $H0,$D4,$D4             # d4 += h1*r3
        vpmuludq        $T0,$H3,$H3     # h0*r3
        vpaddq  $H3,$D3,$D3             # d3 += h0*r3
        vpshufd \$0x10,`16*7-64`($ctx),$H2      # r4^n
        vpmuludq        $T4,$H4,$H1     # h4*s3
        vpaddq  $H1,$D2,$D2             # d2 += h4*s3
        vpshufd \$0x10,`16*8-64`($ctx),$H3      # s4^n
        vpmuludq        $T3,$H4,$H0     # h3*s3
        vpaddq  $H0,$D1,$D1             # d1 += h3*s3
        vpmuludq        $T2,$H4,$H4     # h2*s3
        vpaddq  $H4,$D0,$D0             # d0 += h2*s3

        vpmuludq        $T0,$H2,$H2     # h0*r4
        vpaddq  $H2,$D4,$D4             # h4 = d4 + h0*r4
        vpmuludq        $T4,$H3,$H1     # h4*s4
        vpaddq  $H1,$D3,$D3             # h3 = d3 + h4*s4
        vpmuludq        $T3,$H3,$H0     # h3*s4
        vpaddq  $H0,$D2,$D2             # h2 = d2 + h3*s4
        vpmuludq        $T2,$H3,$H1     # h2*s4
        vpaddq  $H1,$D1,$D1             # h1 = d1 + h2*s4
        vpmuludq        $T1,$H3,$H3     # h1*s4
        vpaddq  $H3,$D0,$D0             # h0 = d0 + h1*s4

        vmovdqu 16*0($inp),$H0          # load input
        vmovdqu 16*1($inp),$H1

        vpsrldq \$6,$H0,$H2             # splat input
        vpunpckhqdq     $H1,$H0,$H4     # 4
        vpunpcklqdq     $H1,$H0,$H0     # 0:1
        vpunpcklqdq     $H3,$H2,$H3     # 2:3

        vpsrlq  \$40,$H4,$H4            # 4
        vpand   $MASK,$H0,$H0           # 0
        vpand   $MASK,$H1,$H1           # 1
        vpand   $MASK,$H2,$H2           # 2
        vpand   $MASK,$H3,$H3           # 3
        vpor    32(%rcx),$H4,$H4        # padbit, yes, always

        vpshufd \$0x32,`16*0-64`($ctx),$T4      # r0^n, 34xx -> x3x4
        vpaddq  0x00(%r11),$H0,$H0
        vpaddq  0x10(%r11),$H1,$H1
        vpaddq  0x20(%r11),$H2,$H2
        vpaddq  0x30(%r11),$H3,$H3
        vpaddq  0x40(%r11),$H4,$H4

        ################################################################
        # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate

        vpmuludq        $H0,$T4,$T0     # h0*r0
        vpaddq  $T0,$D0,$D0             # d0 += h0*r0
        vpmuludq        $H1,$T4,$T1     # h1*r0
        vpaddq  $T1,$D1,$D1             # d1 += h1*r0
        vpmuludq        $H2,$T4,$T0     # h2*r0
        vpaddq  $T0,$D2,$D2             # d2 += h2*r0
        vpshufd \$0x32,`16*1-64`($ctx),$T2      # r1^n
        vpmuludq        $H3,$T4,$T1     # h3*r0
        vpaddq  $T1,$D3,$D3             # d3 += h3*r0
        vpmuludq        $H4,$T4,$T4     # h4*r0
        vpaddq  $T4,$D4,$D4             # d4 += h4*r0

        vpmuludq        $H3,$T2,$T0     # h3*r1
        vpaddq  $T0,$D4,$D4             # d4 += h3*r1
        vpshufd \$0x32,`16*2-64`($ctx),$T3      # s1
        vpmuludq        $H2,$T2,$T1     # h2*r1
        vpaddq  $T1,$D3,$D3             # d3 += h2*r1
        vpshufd \$0x32,`16*3-64`($ctx),$T4      # r2
        vpmuludq        $H1,$T2,$T0     # h1*r1
        vpaddq  $T0,$D2,$D2             # d2 += h1*r1
        vpmuludq        $H0,$T2,$T2     # h0*r1
        vpaddq  $T2,$D1,$D1             # d1 += h0*r1
        vpmuludq        $H4,$T3,$T3     # h4*s1
        vpaddq  $T3,$D0,$D0             # d0 += h4*s1

        vpshufd \$0x32,`16*4-64`($ctx),$T2      # s2
        vpmuludq        $H2,$T4,$T1     # h2*r2
        vpaddq  $T1,$D4,$D4             # d4 += h2*r2
        vpmuludq        $H1,$T4,$T0     # h1*r2
        vpaddq  $T0,$D3,$D3             # d3 += h1*r2
        vpshufd \$0x32,`16*5-64`($ctx),$T3      # r3
        vpmuludq        $H0,$T4,$T4     # h0*r2
        vpaddq  $T4,$D2,$D2             # d2 += h0*r2
        vpmuludq        $H4,$T2,$T1     # h4*s2
        vpaddq  $T1,$D1,$D1             # d1 += h4*s2
        vpshufd \$0x32,`16*6-64`($ctx),$T4      # s3
        vpmuludq        $H3,$T2,$T2     # h3*s2
        vpaddq  $T2,$D0,$D0             # d0 += h3*s2

        vpmuludq        $H1,$T3,$T0     # h1*r3
        vpaddq  $T0,$D4,$D4             # d4 += h1*r3
        vpmuludq        $H0,$T3,$T3     # h0*r3
        vpaddq  $T3,$D3,$D3             # d3 += h0*r3
        vpshufd \$0x32,`16*7-64`($ctx),$T2      # r4
        vpmuludq        $H4,$T4,$T1     # h4*s3
        vpaddq  $T1,$D2,$D2             # d2 += h4*s3
        vpshufd \$0x32,`16*8-64`($ctx),$T3      # s4
        vpmuludq        $H3,$T4,$T0     # h3*s3
        vpaddq  $T0,$D1,$D1             # d1 += h3*s3
        vpmuludq        $H2,$T4,$T4     # h2*s3
        vpaddq  $T4,$D0,$D0             # d0 += h2*s3

        vpmuludq        $H0,$T2,$T2     # h0*r4
        vpaddq  $T2,$D4,$D4             # d4 += h0*r4
        vpmuludq        $H4,$T3,$T1     # h4*s4
        vpaddq  $T1,$D3,$D3             # d3 += h4*s4
        vpmuludq        $H3,$T3,$T0     # h3*s4
        vpaddq  $T0,$D2,$D2             # d2 += h3*s4
        vpmuludq        $H2,$T3,$T1     # h2*s4
        vpaddq  $T1,$D1,$D1             # d1 += h2*s4
        vpmuludq        $H1,$T3,$T3     # h1*s4
        vpaddq  $T3,$D0,$D0             # d0 += h1*s4

        ################################################################
        # horizontal addition
        ################################################################
        vpaddq  $H3,$D4,$D4             # h3 -> h4
        vpaddq  $H0,$D1,$D1             # h0 -> h1
        vpaddq  $H1,$D2,$D2             # h1 -> h2
        vpaddq  $H4,$D0,$D0             # h4 -> h0
        vpaddq  $H2,$D3,$D3             # h2 -> h3
        vpaddq  $H0,$D1,$D1             # h0 -> h1
        vpaddq  $H3,$D4,$D4             # h3 -> h4

        vmovd   $D0,`4*0-48-64`($ctx)   # save partially reduced
        vmovd   $D1,`4*1-48-64`($ctx)
        vmovd   $D2,`4*2-48-64`($ctx)
        vmovd   $D3,`4*3-48-64`($ctx)
        vmovd   $D4,`4*4-48-64`($ctx)
$code.=<<___ if ($win64);
        vmovdqa 0x50(%r11),%xmm6
        vmovdqa 0x60(%r11),%xmm7
        vmovdqa 0x70(%r11),%xmm8
        vmovdqa 0x80(%r11),%xmm9
        vmovdqa 0x90(%r11),%xmm10
        vmovdqa 0xa0(%r11),%xmm11
        vmovdqa 0xb0(%r11),%xmm12
        vmovdqa 0xc0(%r11),%xmm13
        vmovdqa 0xd0(%r11),%xmm14
        vmovdqa 0xe0(%r11),%xmm15
$code.=<<___ if (!$win64);
.cfi_def_cfa_register   %rsp
&end_function("poly1305_blocks_avx");

&declare_function("poly1305_emit_avx", 32, 3);
        cmpl    \$0,20($ctx)            # is_base2_26?
        mov     0($ctx),%eax            # load hash value base 2^26
        shl     \$26,%rcx               # base 2^26 -> base 2^64
        mov     %r10,%rax               # could be partially reduced, so reduce
        add     \$5,%r8                 # compare to modulus
        shr     \$2,%r10                # did 130-bit value overflow?
        add     0($nonce),%rax          # accumulate nonce
        mov     %rax,0($mac)            # write result
&end_function("poly1305_emit_avx");
$code .= "#ifdef CONFIG_AS_AVX2\n";

my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
    map("%ymm$_",(0..15));

sub poly1305_blocks_avxN {
my $suffix = $avx512 ? "_avx512" : "";
        mov     20($ctx),%r8d           # is_base2_26
        jae     .Lblocks_avx2$suffix

.Lblocks_avx2$suffix:
        jz      .Lno_data_avx2$suffix
        jz      .Lbase2_64_avx2$suffix
        jz      .Leven_avx2$suffix

.Lblocks_avx2_body$suffix:
        mov     $len,%r15               # reassign $len
        mov     0($ctx),$d1             # load hash value
        mov     24($ctx),$r0            # load r

        ################################# base 2^26 -> base 2^64
        and     \$`-1*(1<<31)`,$d1
        mov     $d2,$r1                 # borrow $r1
        and     \$`-1*(1<<31)`,$d2
        adc     \$0,$h2                 # can be partially reduced...
        mov     \$-4,$d2                # ... so reduce
        add     $r1,$s1                 # s1 = r1 + (r1 >> 2)

.Lbase2_26_pre_avx2$suffix:
        add     0($inp),$h0             # accumulate input
        call    __poly1305_block
        jnz     .Lbase2_26_pre_avx2$suffix

        test    $padbit,$padbit         # if $padbit is zero,
        jz      .Lstore_base2_64_avx2$suffix    # store hash in base 2^64 format

        ################################# base 2^64 -> base 2^26
        and     \$0x3ffffff,%rax        # h[0]
        and     \$0x3ffffff,%rdx        # h[1]
        and     \$0x3ffffff,$h0         # h[2]
        and     \$0x3ffffff,$h1         # h[3]
        jz      .Lstore_base2_26_avx2$suffix
        jmp     .Lproceed_avx2$suffix

.Lstore_base2_64_avx2$suffix:
        mov     $h2,16($ctx)            # note that is_base2_26 is zeroed
        jmp     .Ldone_avx2$suffix

.Lstore_base2_26_avx2$suffix:
        mov     %rax#d,0($ctx)          # store hash value base 2^26

.Lno_data_avx2$suffix:
.Lblocks_avx2_epilogue$suffix:

.Lbase2_64_avx2$suffix:
.Lbase2_64_avx2_body$suffix:
        mov     $len,%r15               # reassign $len
        mov     24($ctx),$r0            # load r
        mov     0($ctx),$h0             # load hash value
        add     $r1,$s1                 # s1 = r1 + (r1 >> 2)
        jz      .Linit_avx2$suffix

.Lbase2_64_pre_avx2$suffix:
        add     0($inp),$h0             # accumulate input
        call    __poly1305_block
        jnz     .Lbase2_64_pre_avx2$suffix

        ################################# base 2^64 -> base 2^26
        and     \$0x3ffffff,%rax        # h[0]
        and     \$0x3ffffff,%rdx        # h[1]
        and     \$0x3ffffff,$h0         # h[2]
        and     \$0x3ffffff,$h1         # h[3]
        movl    \$1,20($ctx)            # set is_base2_26
        call    __poly1305_init_avx

.Lproceed_avx2$suffix:
        mov     %r15,$len               # restore $len
$code.=<<___ if (!$kernel);
        mov     OPENSSL_ia32cap_P+8(%rip),%r9d
        mov     \$`(1<<31|1<<30|1<<16)`,%r11d

.Lbase2_64_avx2_epilogue$suffix:
        jmp     .Ldo_avx2$suffix
$code.=<<___ if (!$kernel);
        mov     OPENSSL_ia32cap_P+8(%rip),%r9d
        vmovd   4*0($ctx),%x#$H0        # load hash value base 2^26
        vmovd   4*1($ctx),%x#$H1
        vmovd   4*2($ctx),%x#$H2
        vmovd   4*3($ctx),%x#$H3
        vmovd   4*4($ctx),%x#$H4
$code.=<<___ if (!$kernel && $avx>2);
        test    \$`1<<16`,%r9d          # check for AVX512F
.Lskip_avx512$suffix:
$code.=<<___ if ($avx > 2 && $avx512 && $kernel);
$code.=<<___ if (!$win64);
.cfi_def_cfa_register   %r10
$code.=<<___ if ($win64);
        vmovdqa %xmm6,-0xb0(%r10)
        vmovdqa %xmm7,-0xa0(%r10)
        vmovdqa %xmm8,-0x90(%r10)
        vmovdqa %xmm9,-0x80(%r10)
        vmovdqa %xmm10,-0x70(%r10)
        vmovdqa %xmm11,-0x60(%r10)
        vmovdqa %xmm12,-0x50(%r10)
        vmovdqa %xmm13,-0x40(%r10)
        vmovdqa %xmm14,-0x30(%r10)
        vmovdqa %xmm15,-0x20(%r10)
.Ldo_avx2_body$suffix:
        lea     .Lconst(%rip),%rcx
        lea     48+64($ctx),$ctx        # size optimization
        vmovdqa 96(%rcx),$T0            # .Lpermd_avx2

        # expand and copy pre-calculated table to stack
        vmovdqu `16*0-64`($ctx),%x#$T2
        vmovdqu `16*1-64`($ctx),%x#$T3
        vmovdqu `16*2-64`($ctx),%x#$T4
        vmovdqu `16*3-64`($ctx),%x#$D0
        vmovdqu `16*4-64`($ctx),%x#$D1
        vmovdqu `16*5-64`($ctx),%x#$D2
        lea     0x90(%rsp),%rax         # size optimization
        vmovdqu `16*6-64`($ctx),%x#$D3
        vpermd  $T2,$T0,$T2             # 00003412 -> 14243444
        vmovdqu `16*7-64`($ctx),%x#$D4
        vmovdqu `16*8-64`($ctx),%x#$MASK
        vmovdqa $T2,0x00(%rsp)
        vmovdqa $T3,0x20-0x90(%rax)
        vmovdqa $T4,0x40-0x90(%rax)
        vmovdqa $D0,0x60-0x90(%rax)
        vmovdqa $D1,0x80-0x90(%rax)
        vmovdqa $D2,0xa0-0x90(%rax)
        vpermd  $MASK,$T0,$MASK
        vmovdqa $D3,0xc0-0x90(%rax)
        vmovdqa $D4,0xe0-0x90(%rax)
        vmovdqa $MASK,0x100-0x90(%rax)
        vmovdqa 64(%rcx),$MASK          # .Lmask26

        ################################################################
        vmovdqu 16*0($inp),%x#$T0
        vmovdqu 16*1($inp),%x#$T1
        vinserti128     \$1,16*2($inp),$T0,$T0
        vinserti128     \$1,16*3($inp),$T1,$T1

        vpsrldq \$6,$T0,$T2             # splat input
        vpunpckhqdq     $T1,$T0,$T4     # 4
        vpunpcklqdq     $T3,$T2,$T2     # 2:3
        vpunpcklqdq     $T1,$T0,$T0     # 0:1

        vpsrlq  \$40,$T4,$T4            # 4
        vpand   $MASK,$T2,$T2           # 2
        vpand   $MASK,$T0,$T0           # 0
        vpand   $MASK,$T1,$T1           # 1
        vpand   $MASK,$T3,$T3           # 3
        vpor    32(%rcx),$T4,$T4        # padbit, yes, always

        vpaddq  $H2,$T2,$H2             # accumulate input
        jz      .Ltail_avx2$suffix
        jmp     .Loop_avx2$suffix

        ################################################################
        # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
        # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
        # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
        # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
        #   \________/\__________/
        ################################################################
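        # In other words: the four 128-bit lanes run four independent
        # Horner evaluations, each stepped by r^4. For 12 blocks m1..m12,
        # lane j accumulates mj, m(j+4), m(j+8), and the tail multiplies
        # the lanes by r^4, r^3, r^2, r^1 respectively, so the lane sum
        # equals m1*r^12 + m2*r^11 + ... + m12*r, the sequential
        # Poly1305 result.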
        #vpaddq $H2,$T2,$H2             # accumulate input
        vmovdqa `32*0`(%rsp),$T0        # r0^4
        vmovdqa `32*1`(%rsp),$T1        # r1^4
        vmovdqa `32*3`(%rsp),$T2        # r2^4
        vmovdqa `32*6-0x90`(%rax),$T3   # s3^4
        vmovdqa `32*8-0x90`(%rax),$S4   # s4^4

        # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
        # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
        # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
        # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
        # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
        #
        # however, as h2 is "chronologically" the first one available,
        # pull the corresponding operations up, so it's
        #
        # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
        # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
        # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
        # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
        # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
        vpmuludq        $H2,$T0,$D2     # d2 = h2*r0
        vpmuludq        $H2,$T1,$D3     # d3 = h2*r1
        vpmuludq        $H2,$T2,$D4     # d4 = h2*r2
        vpmuludq        $H2,$T3,$D0     # d0 = h2*s3
        vpmuludq        $H2,$S4,$D1     # d1 = h2*s4

        vpmuludq        $H0,$T1,$T4     # h0*r1
        vpmuludq        $H1,$T1,$H2     # h1*r1, borrow $H2 as temp
        vpaddq  $T4,$D1,$D1             # d1 += h0*r1
        vpaddq  $H2,$D2,$D2             # d2 += h1*r1
        vpmuludq        $H3,$T1,$T4     # h3*r1
        vpmuludq        `32*2`(%rsp),$H4,$H2    # h4*s1
        vpaddq  $T4,$D4,$D4             # d4 += h3*r1
        vpaddq  $H2,$D0,$D0             # d0 += h4*s1
        vmovdqa `32*4-0x90`(%rax),$T1   # s2

        vpmuludq        $H0,$T0,$T4     # h0*r0
        vpmuludq        $H1,$T0,$H2     # h1*r0
        vpaddq  $T4,$D0,$D0             # d0 += h0*r0
        vpaddq  $H2,$D1,$D1             # d1 += h1*r0
        vpmuludq        $H3,$T0,$T4     # h3*r0
        vpmuludq        $H4,$T0,$H2     # h4*r0
        vmovdqu 16*0($inp),%x#$T0       # load input
        vpaddq  $T4,$D3,$D3             # d3 += h3*r0
        vpaddq  $H2,$D4,$D4             # d4 += h4*r0
        vinserti128     \$1,16*2($inp),$T0,$T0

        vpmuludq        $H3,$T1,$T4     # h3*s2
        vpmuludq        $H4,$T1,$H2     # h4*s2
        vmovdqu 16*1($inp),%x#$T1
        vpaddq  $T4,$D0,$D0             # d0 += h3*s2
        vpaddq  $H2,$D1,$D1             # d1 += h4*s2
        vmovdqa `32*5-0x90`(%rax),$H2   # r3
        vpmuludq        $H1,$T2,$T4     # h1*r2
        vpmuludq        $H0,$T2,$T2     # h0*r2
        vpaddq  $T4,$D3,$D3             # d3 += h1*r2
        vpaddq  $T2,$D2,$D2             # d2 += h0*r2
        vinserti128     \$1,16*3($inp),$T1,$T1

        vpmuludq        $H1,$H2,$T4     # h1*r3
        vpmuludq        $H0,$H2,$H2     # h0*r3
        vpsrldq \$6,$T0,$T2             # splat input
        vpaddq  $T4,$D4,$D4             # d4 += h1*r3
        vpaddq  $H2,$D3,$D3             # d3 += h0*r3
        vpmuludq        $H3,$T3,$T4     # h3*s3
        vpmuludq        $H4,$T3,$H2     # h4*s3
        vpaddq  $T4,$D1,$D1             # d1 += h3*s3
        vpaddq  $H2,$D2,$D2             # d2 += h4*s3
        vpunpckhqdq     $T1,$T0,$T4     # 4

        vpmuludq        $H3,$S4,$H3     # h3*s4
        vpmuludq        $H4,$S4,$H4     # h4*s4
        vpunpcklqdq     $T1,$T0,$T0     # 0:1
        vpaddq  $H3,$D2,$H2             # h2 = d2 + h3*r4
        vpaddq  $H4,$D3,$H3             # h3 = d3 + h4*r4
        vpunpcklqdq     $T3,$T2,$T3     # 2:3
        vpmuludq        `32*7-0x90`(%rax),$H0,$H4       # h0*r4
        vpmuludq        $H1,$S4,$H0     # h1*s4
        vmovdqa 64(%rcx),$MASK          # .Lmask26
        vpaddq  $H4,$D4,$H4             # h4 = d4 + h0*r4
        vpaddq  $H0,$D0,$H0             # h0 = d0 + h1*s4

        ################################################################
        # lazy reduction (interleaved with tail of input splat)

        vpaddq  $D3,$H4,$H4             # h3 -> h4
        vpaddq  $D0,$D1,$H1             # h0 -> h1
        vpaddq  $D1,$H2,$H2             # h1 -> h2
        vpaddq  $D4,$H0,$H0             # h4 -> h0
        vpand   $MASK,$T2,$T2           # 2
        vpaddq  $D2,$H3,$H3             # h2 -> h3
        vpaddq  $T2,$H2,$H2             # modulo-scheduled
        vpaddq  $D0,$H1,$H1             # h0 -> h1
        vpsrlq  \$40,$T4,$T4            # 4
        vpaddq  $D3,$H4,$H4             # h3 -> h4
        vpand   $MASK,$T0,$T0           # 0
        vpand   $MASK,$T1,$T1           # 1
        vpand   $MASK,$T3,$T3           # 3
        vpor    32(%rcx),$T4,$T4        # padbit, yes, always

        jnz     .Loop_avx2$suffix
        ################################################################
        # While the multiplications above were by r^4 in all lanes, in
        # the last iteration we multiply the least significant lane by
        # r^4 and the most significant one by r, so this is a copy of
        # the above except that references to the precomputed table are
        # displaced by 4...
        #vpaddq $H2,$T2,$H2             # accumulate input
        vmovdqu `32*0+4`(%rsp),$T0      # r0^4
        vmovdqu `32*1+4`(%rsp),$T1      # r1^4
        vmovdqu `32*3+4`(%rsp),$T2      # r2^4
        vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4
        vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4

        vpmuludq        $H2,$T0,$D2     # d2 = h2*r0
        vpmuludq        $H2,$T1,$D3     # d3 = h2*r1
        vpmuludq        $H2,$T2,$D4     # d4 = h2*r2
        vpmuludq        $H2,$T3,$D0     # d0 = h2*s3
        vpmuludq        $H2,$S4,$D1     # d1 = h2*s4

        vpmuludq        $H0,$T1,$T4     # h0*r1
        vpmuludq        $H1,$T1,$H2     # h1*r1
        vpaddq  $T4,$D1,$D1             # d1 += h0*r1
        vpaddq  $H2,$D2,$D2             # d2 += h1*r1
        vpmuludq        $H3,$T1,$T4     # h3*r1
        vpmuludq        `32*2+4`(%rsp),$H4,$H2  # h4*s1
        vpaddq  $T4,$D4,$D4             # d4 += h3*r1
        vpaddq  $H2,$D0,$D0             # d0 += h4*s1

        vpmuludq        $H0,$T0,$T4     # h0*r0
        vpmuludq        $H1,$T0,$H2     # h1*r0
        vpaddq  $T4,$D0,$D0             # d0 += h0*r0
        vmovdqu `32*4+4-0x90`(%rax),$T1 # s2
        vpaddq  $H2,$D1,$D1             # d1 += h1*r0
        vpmuludq        $H3,$T0,$T4     # h3*r0
        vpmuludq        $H4,$T0,$H2     # h4*r0
        vpaddq  $T4,$D3,$D3             # d3 += h3*r0
        vpaddq  $H2,$D4,$D4             # d4 += h4*r0

        vpmuludq        $H3,$T1,$T4     # h3*s2
        vpmuludq        $H4,$T1,$H2     # h4*s2
        vpaddq  $T4,$D0,$D0             # d0 += h3*s2
        vpaddq  $H2,$D1,$D1             # d1 += h4*s2
        vmovdqu `32*5+4-0x90`(%rax),$H2 # r3
        vpmuludq        $H1,$T2,$T4     # h1*r2
        vpmuludq        $H0,$T2,$T2     # h0*r2
        vpaddq  $T4,$D3,$D3             # d3 += h1*r2
        vpaddq  $T2,$D2,$D2             # d2 += h0*r2

        vpmuludq        $H1,$H2,$T4     # h1*r3
        vpmuludq        $H0,$H2,$H2     # h0*r3
        vpaddq  $T4,$D4,$D4             # d4 += h1*r3
        vpaddq  $H2,$D3,$D3             # d3 += h0*r3
        vpmuludq        $H3,$T3,$T4     # h3*s3
        vpmuludq        $H4,$T3,$H2     # h4*s3
        vpaddq  $T4,$D1,$D1             # d1 += h3*s3
        vpaddq  $H2,$D2,$D2             # d2 += h4*s3

        vpmuludq        $H3,$S4,$H3     # h3*s4
        vpmuludq        $H4,$S4,$H4     # h4*s4
        vpaddq  $H3,$D2,$H2             # h2 = d2 + h3*r4
        vpaddq  $H4,$D3,$H3             # h3 = d3 + h4*r4
        vpmuludq        `32*7+4-0x90`(%rax),$H0,$H4     # h0*r4
        vpmuludq        $H1,$S4,$H0     # h1*s4
        vmovdqa 64(%rcx),$MASK          # .Lmask26
        vpaddq  $H4,$D4,$H4             # h4 = d4 + h0*r4
        vpaddq  $H0,$D0,$H0             # h0 = d0 + h1*s4

        ################################################################
        # horizontal addition
        vpermq  \$0x2,$H3,$T3
        vpermq  \$0x2,$H4,$T4
        vpermq  \$0x2,$H0,$T0
        vpermq  \$0x2,$D1,$T1
        vpermq  \$0x2,$H2,$T2

        ################################################################
        vpaddq  $D3,$H4,$H4             # h3 -> h4
        vpaddq  $D0,$D1,$H1             # h0 -> h1
        vpaddq  $D1,$H2,$H2             # h1 -> h2
        vpaddq  $D4,$H0,$H0             # h4 -> h0
        vpaddq  $D2,$H3,$H3             # h2 -> h3
        vpaddq  $D0,$H1,$H1             # h0 -> h1
        vpaddq  $D3,$H4,$H4             # h3 -> h4

        vmovd   %x#$H0,`4*0-48-64`($ctx)        # save partially reduced
        vmovd   %x#$H1,`4*1-48-64`($ctx)
        vmovd   %x#$H2,`4*2-48-64`($ctx)
        vmovd   %x#$H3,`4*3-48-64`($ctx)
        vmovd   %x#$H4,`4*4-48-64`($ctx)
$code.=<<___ if ($win64);
        vmovdqa -0xb0(%r10),%xmm6
        vmovdqa -0xa0(%r10),%xmm7
        vmovdqa -0x90(%r10),%xmm8
        vmovdqa -0x80(%r10),%xmm9
        vmovdqa -0x70(%r10),%xmm10
        vmovdqa -0x60(%r10),%xmm11
        vmovdqa -0x50(%r10),%xmm12
        vmovdqa -0x40(%r10),%xmm13
        vmovdqa -0x30(%r10),%xmm14
        vmovdqa -0x20(%r10),%xmm15
.Ldo_avx2_epilogue$suffix:
$code.=<<___ if (!$win64);
.cfi_def_cfa_register   %rsp

if ($avx > 2 && $avx512) {
my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24));
my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29));
my $PADBIT="%zmm30";

map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));    # switch to %zmm domain
map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
map(s/%y/%z/,($MASK));
$code.=<<___ if (!$win64);
.cfi_def_cfa_register   %r10
$code.=<<___ if ($win64);
        vmovdqa %xmm6,-0xb0(%r10)
        vmovdqa %xmm7,-0xa0(%r10)
        vmovdqa %xmm8,-0x90(%r10)
        vmovdqa %xmm9,-0x80(%r10)
        vmovdqa %xmm10,-0x70(%r10)
        vmovdqa %xmm11,-0x60(%r10)
        vmovdqa %xmm12,-0x50(%r10)
        vmovdqa %xmm13,-0x40(%r10)
        vmovdqa %xmm14,-0x30(%r10)
        vmovdqa %xmm15,-0x20(%r10)

        lea     .Lconst(%rip),%rcx
        lea     48+64($ctx),$ctx        # size optimization
        vmovdqa 96(%rcx),%y#$T2         # .Lpermd_avx2

        # expand pre-calculated table
        vmovdqu `16*0-64`($ctx),%x#$D0  # will become expanded ${R0}
        vmovdqu `16*1-64`($ctx),%x#$D1  # will become ... ${R1}
        vmovdqu `16*2-64`($ctx),%x#$T0  # ... ${S1}
        vmovdqu `16*3-64`($ctx),%x#$D2  # ... ${R2}
        vmovdqu `16*4-64`($ctx),%x#$T1  # ... ${S2}
        vmovdqu `16*5-64`($ctx),%x#$D3  # ... ${R3}
        vmovdqu `16*6-64`($ctx),%x#$T3  # ... ${S3}
        vmovdqu `16*7-64`($ctx),%x#$D4  # ... ${R4}
        vmovdqu `16*8-64`($ctx),%x#$T4  # ... ${S4}
        vpermd  $D0,$T2,$R0             # 00003412 -> 14243444
        vpbroadcastq    64(%rcx),$MASK  # .Lmask26
        vmovdqa64       $R0,0x00(%rsp){%k2}     # save in case $len%128 != 0
        vpsrlq  \$32,$R0,$T0            # 14243444 -> 01020304
        vmovdqu64       $R1,0x00(%rsp,%rax){%k2}
        vmovdqa64       $S1,0x40(%rsp){%k2}
        vmovdqu64       $R2,0x40(%rsp,%rax){%k2}
        vmovdqa64       $S2,0x80(%rsp){%k2}
        vmovdqu64       $R3,0x80(%rsp,%rax){%k2}
        vmovdqa64       $S3,0xc0(%rsp){%k2}
        vmovdqu64       $R4,0xc0(%rsp,%rax){%k2}
        vmovdqa64       $S4,0x100(%rsp){%k2}

        ################################################################
        # calculate 5th through 8th powers of the key
        #
        # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
        # d1 = r0'*r1 + r1'*r0   + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
        # d2 = r0'*r2 + r1'*r1   + r2'*r0   + r3'*5*r4 + r4'*5*r3
        # d3 = r0'*r3 + r1'*r2   + r2'*r1   + r3'*r0   + r4'*5*r4
        # d4 = r0'*r4 + r1'*r3   + r2'*r2   + r3'*r1   + r4'*r0

        vpmuludq        $T0,$R0,$D0     # d0 = r0'*r0
        vpmuludq        $T0,$R1,$D1     # d1 = r0'*r1
        vpmuludq        $T0,$R2,$D2     # d2 = r0'*r2
        vpmuludq        $T0,$R3,$D3     # d3 = r0'*r3
        vpmuludq        $T0,$R4,$D4     # d4 = r0'*r4

        vpmuludq        $T1,$S4,$M0
        vpmuludq        $T1,$R0,$M1
        vpmuludq        $T1,$R1,$M2
        vpmuludq        $T1,$R2,$M3
        vpmuludq        $T1,$R3,$M4
        vpaddq  $M0,$D0,$D0             # d0 += r1'*5*r4
        vpaddq  $M1,$D1,$D1             # d1 += r1'*r0
        vpaddq  $M2,$D2,$D2             # d2 += r1'*r1
        vpaddq  $M3,$D3,$D3             # d3 += r1'*r2
        vpaddq  $M4,$D4,$D4             # d4 += r1'*r3

        vpmuludq        $T2,$S3,$M0
        vpmuludq        $T2,$S4,$M1
        vpmuludq        $T2,$R1,$M3
        vpmuludq        $T2,$R2,$M4
        vpmuludq        $T2,$R0,$M2
        vpaddq  $M0,$D0,$D0             # d0 += r2'*5*r3
        vpaddq  $M1,$D1,$D1             # d1 += r2'*5*r4
        vpaddq  $M3,$D3,$D3             # d3 += r2'*r1
        vpaddq  $M4,$D4,$D4             # d4 += r2'*r2
        vpaddq  $M2,$D2,$D2             # d2 += r2'*r0

        vpmuludq        $T3,$S2,$M0
        vpmuludq        $T3,$R0,$M3
        vpmuludq        $T3,$R1,$M4
        vpmuludq        $T3,$S3,$M1
        vpmuludq        $T3,$S4,$M2
        vpaddq  $M0,$D0,$D0             # d0 += r3'*5*r2
        vpaddq  $M3,$D3,$D3             # d3 += r3'*r0
        vpaddq  $M4,$D4,$D4             # d4 += r3'*r1
        vpaddq  $M1,$D1,$D1             # d1 += r3'*5*r3
        vpaddq  $M2,$D2,$D2             # d2 += r3'*5*r4
        vpmuludq        $T4,$S4,$M3
        vpmuludq        $T4,$R0,$M4
        vpmuludq        $T4,$S1,$M0
        vpmuludq        $T4,$S2,$M1
        vpmuludq        $T4,$S3,$M2
        vpaddq  $M3,$D3,$D3             # d3 += r4'*5*r4
        vpaddq  $M4,$D4,$D4             # d4 += r4'*r0
        vpaddq  $M0,$D0,$D0             # d0 += r4'*5*r1
        vpaddq  $M1,$D1,$D1             # d1 += r4'*5*r2
        vpaddq  $M2,$D2,$D2             # d2 += r4'*5*r3
        ################################################################
        vmovdqu64       16*0($inp),%z#$T3
        vmovdqu64       16*4($inp),%z#$T4

        ################################################################
        vpandq  $MASK,$D3,$D3
        vpaddq  $M3,$D4,$D4             # d3 -> d4
        vpandq  $MASK,$D0,$D0
        vpaddq  $M0,$D1,$D1             # d0 -> d1
        vpandq  $MASK,$D4,$D4
        vpandq  $MASK,$D1,$D1
        vpaddq  $M1,$D2,$D2             # d1 -> d2
        vpaddq  $M4,$D0,$D0             # d4 -> d0
        vpandq  $MASK,$D2,$D2
        vpaddq  $M2,$D3,$D3             # d2 -> d3
        vpandq  $MASK,$D0,$D0
        vpaddq  $M0,$D1,$D1             # d0 -> d1
        vpandq  $MASK,$D3,$D3
        vpaddq  $M3,$D4,$D4             # d3 -> d4

        ################################################################
        # at this point we have 14243444 in $R0-$S4 and 05060708 in
        vpunpcklqdq     $T4,$T3,$T0     # transpose input
        vpunpckhqdq     $T4,$T3,$T4

        # ... since input 64-bit lanes are ordered as 73625140, we could
        # "vperm" it to 76543210 (here and in each loop iteration), *or*
        # we could just flow along, hence the goal for $R0-$S4 is
        # 1858286838784888 ...

        vmovdqa32       128(%rcx),$M0   # .Lpermd_avx512:
        vpermd  $R0,$M0,$R0             # 14243444 -> 1---2---3---4---
        vpermd  $D0,$M0,${R0}{%k1}      # 05060708 -> 1858286838784888
        vpermd  $D1,$M0,${R1}{%k1}
        vpermd  $D2,$M0,${R2}{%k1}
        vpermd  $D3,$M0,${R3}{%k1}
        vpermd  $D4,$M0,${R4}{%k1}

        vpslld  \$2,$R1,$S1             # *5
        vpbroadcastq    32(%rcx),$PADBIT        # .L129

        vpsrlq  \$52,$T0,$T2            # splat input
        vpsrlq  \$40,$T4,$T4            # 4
        vpandq  $MASK,$T2,$T2           # 2
        vpandq  $MASK,$T0,$T0           # 0
        #vpandq $MASK,$T1,$T1           # 1
        #vpandq $MASK,$T3,$T3           # 3
        #vporq  $PADBIT,$T4,$T4         # padbit, yes, always

        vpaddq  $H2,$T2,$H2             # accumulate input

        ################################################################
        # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
        # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
        # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
        # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
        # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
        # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
        # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
        # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
        #   \________/\___________/
        ################################################################
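        # Same idea as the AVX2 loop, only eight lanes wide: each lane
        # runs an independent Horner evaluation stepped by r^8, and the
        # tail multiplies the lanes by r^8 down to r^1 so that the lane
        # sum equals the sequential Poly1305 result.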
        #vpaddq $H2,$T2,$H2             # accumulate input

        # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
        # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
        # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
        # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
        # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
        #
        # however, as h2 is "chronologically" the first one available,
        # pull the corresponding operations up, so it's
        #
        # d3 = h2*r1 + h0*r3 + h1*r2   + h3*r0   + h4*5*r4
        # d4 = h2*r2 + h0*r4 + h1*r3   + h3*r1   + h4*r0
        # d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
        # d1 = h2*5*r4 + h0*r1 + h1*r0   + h3*5*r3 + h4*5*r2
        # d2 = h2*r0 + h0*r2 + h1*r1   + h3*5*r4 + h4*5*r3
        vpmuludq        $H2,$R1,$D3     # d3 = h2*r1
        vpmuludq        $H2,$R2,$D4     # d4 = h2*r2
        vpandq  $MASK,$T1,$T1           # 1
        vpmuludq        $H2,$S3,$D0     # d0 = h2*s3
        vpandq  $MASK,$T3,$T3           # 3
        vpmuludq        $H2,$S4,$D1     # d1 = h2*s4
        vporq   $PADBIT,$T4,$T4         # padbit, yes, always
        vpmuludq        $H2,$R0,$D2     # d2 = h2*r0
        vpaddq  $H1,$T1,$H1             # accumulate input

        vmovdqu64       16*0($inp),$T3  # load input
        vmovdqu64       16*4($inp),$T4

        vpmuludq        $H0,$R3,$M3
        vpmuludq        $H0,$R4,$M4
        vpmuludq        $H0,$R0,$M0
        vpmuludq        $H0,$R1,$M1
        vpaddq  $M3,$D3,$D3             # d3 += h0*r3
        vpaddq  $M4,$D4,$D4             # d4 += h0*r4
        vpaddq  $M0,$D0,$D0             # d0 += h0*r0
        vpaddq  $M1,$D1,$D1             # d1 += h0*r1

        vpmuludq        $H1,$R2,$M3
        vpmuludq        $H1,$R3,$M4
        vpmuludq        $H1,$S4,$M0
        vpmuludq        $H0,$R2,$M2
        vpaddq  $M3,$D3,$D3             # d3 += h1*r2
        vpaddq  $M4,$D4,$D4             # d4 += h1*r3
        vpaddq  $M0,$D0,$D0             # d0 += h1*s4
        vpaddq  $M2,$D2,$D2             # d2 += h0*r2

        vpunpcklqdq     $T4,$T3,$T0     # transpose input
        vpunpckhqdq     $T4,$T3,$T4

        vpmuludq        $H3,$R0,$M3
        vpmuludq        $H3,$R1,$M4
        vpmuludq        $H1,$R0,$M1
        vpmuludq        $H1,$R1,$M2
        vpaddq  $M3,$D3,$D3             # d3 += h3*r0
        vpaddq  $M4,$D4,$D4             # d4 += h3*r1
        vpaddq  $M1,$D1,$D1             # d1 += h1*r0
        vpaddq  $M2,$D2,$D2             # d2 += h1*r1

        vpmuludq        $H4,$S4,$M3
        vpmuludq        $H4,$R0,$M4
        vpmuludq        $H3,$S2,$M0
        vpmuludq        $H3,$S3,$M1
        vpaddq  $M3,$D3,$D3             # d3 += h4*s4
        vpmuludq        $H3,$S4,$M2
        vpaddq  $M4,$D4,$D4             # d4 += h4*r0
        vpaddq  $M0,$D0,$D0             # d0 += h3*s2
        vpaddq  $M1,$D1,$D1             # d1 += h3*s3
        vpaddq  $M2,$D2,$D2             # d2 += h3*s4

        vpmuludq        $H4,$S1,$M0
        vpmuludq        $H4,$S2,$M1
        vpmuludq        $H4,$S3,$M2
        vpaddq  $M0,$D0,$H0             # h0 = d0 + h4*s1
        vpaddq  $M1,$D1,$H1             # h1 = d1 + h4*s2
        vpaddq  $M2,$D2,$H2             # h2 = d2 + h4*s3

        ################################################################
        # lazy reduction (interleaved with input splat)

        vpsrlq  \$52,$T0,$T2            # splat input
        vpandq  $MASK,$D3,$D3
        vpaddq  $H3,$D4,$H4             # h3 -> h4
        vpandq  $MASK,$H0,$H0
        vpaddq  $D0,$H1,$H1             # h0 -> h1
        vpandq  $MASK,$T2,$T2           # 2
        vpandq  $MASK,$H4,$H4
        vpandq  $MASK,$H1,$H1
        vpaddq  $D1,$H2,$H2             # h1 -> h2
        vpaddq  $D4,$H0,$H0             # h4 -> h0
        vpaddq  $T2,$H2,$H2             # modulo-scheduled
        vpandq  $MASK,$H2,$H2
        vpaddq  $D2,$D3,$H3             # h2 -> h3
        vpandq  $MASK,$H0,$H0
        vpaddq  $D0,$H1,$H1             # h0 -> h1
        vpsrlq  \$40,$T4,$T4            # 4
        vpandq  $MASK,$H3,$H3
        vpaddq  $D3,$H4,$H4             # h3 -> h4
        vpandq  $MASK,$T0,$T0           # 0
        #vpandq $MASK,$T1,$T1           # 1
        #vpandq $MASK,$T3,$T3           # 3
        #vporq  $PADBIT,$T4,$T4         # padbit, yes, always
        ################################################################
        # While the multiplications above were by r^8 in all lanes, in
        # the last iteration we multiply the least significant lane by
        # r^8 and the most significant one by r, which is why the table
        # gets shifted...
        vpsrlq  \$32,$R0,$R0            # 0105020603070408

        ################################################################
        # load either the next or the last 64 bytes of input
        lea     ($inp,$len),$inp
        #vpaddq $H2,$T2,$H2             # accumulate input
        vpmuludq        $H2,$R1,$D3     # d3 = h2*r1
        vpmuludq        $H2,$R2,$D4     # d4 = h2*r2
        vpmuludq        $H2,$S3,$D0     # d0 = h2*s3
        vpandq  $MASK,$T1,$T1           # 1
        vpmuludq        $H2,$S4,$D1     # d1 = h2*s4
        vpandq  $MASK,$T3,$T3           # 3
        vpmuludq        $H2,$R0,$D2     # d2 = h2*r0
        vporq   $PADBIT,$T4,$T4         # padbit, yes, always
        vpaddq  $H1,$T1,$H1             # accumulate input

        vmovdqu 16*0($inp),%x#$T0
        vpmuludq        $H0,$R3,$M3
        vpmuludq        $H0,$R4,$M4
        vpmuludq        $H0,$R0,$M0
        vpmuludq        $H0,$R1,$M1
        vpaddq  $M3,$D3,$D3             # d3 += h0*r3
        vpaddq  $M4,$D4,$D4             # d4 += h0*r4
        vpaddq  $M0,$D0,$D0             # d0 += h0*r0
        vpaddq  $M1,$D1,$D1             # d1 += h0*r1

        vmovdqu 16*1($inp),%x#$T1
        vpmuludq        $H1,$R2,$M3
        vpmuludq        $H1,$R3,$M4
        vpmuludq        $H1,$S4,$M0
        vpmuludq        $H0,$R2,$M2
        vpaddq  $M3,$D3,$D3             # d3 += h1*r2
        vpaddq  $M4,$D4,$D4             # d4 += h1*r3
        vpaddq  $M0,$D0,$D0             # d0 += h1*s4
        vpaddq  $M2,$D2,$D2             # d2 += h0*r2

        vinserti128     \$1,16*2($inp),%y#$T0,%y#$T0
        vpmuludq        $H3,$R0,$M3
        vpmuludq        $H3,$R1,$M4
        vpmuludq        $H1,$R0,$M1
        vpmuludq        $H1,$R1,$M2
        vpaddq  $M3,$D3,$D3             # d3 += h3*r0
        vpaddq  $M4,$D4,$D4             # d4 += h3*r1
        vpaddq  $M1,$D1,$D1             # d1 += h1*r0
        vpaddq  $M2,$D2,$D2             # d2 += h1*r1

        vinserti128     \$1,16*3($inp),%y#$T1,%y#$T1
        vpmuludq        $H4,$S4,$M3
        vpmuludq        $H4,$R0,$M4
        vpmuludq        $H3,$S2,$M0
        vpmuludq        $H3,$S3,$M1
        vpmuludq        $H3,$S4,$M2
        vpaddq  $M3,$D3,$H3             # h3 = d3 + h4*s4
        vpaddq  $M4,$D4,$D4             # d4 += h4*r0
        vpaddq  $M0,$D0,$D0             # d0 += h3*s2
        vpaddq  $M1,$D1,$D1             # d1 += h3*s3
        vpaddq  $M2,$D2,$D2             # d2 += h3*s4

        vpmuludq        $H4,$S1,$M0
        vpmuludq        $H4,$S2,$M1
        vpmuludq        $H4,$S3,$M2
        vpaddq  $M0,$D0,$H0             # h0 = d0 + h4*s1
        vpaddq  $M1,$D1,$H1             # h1 = d1 + h4*s2
        vpaddq  $M2,$D2,$H2             # h2 = d2 + h4*s3

        ################################################################
        # horizontal addition

        vpermq  \$0xb1,$H3,$D3
        vpermq  \$0xb1,$D4,$H4
        vpermq  \$0xb1,$H0,$D0
        vpermq  \$0xb1,$H1,$D1
        vpermq  \$0xb1,$H2,$D2
        vpermq  \$0x2,$H3,$D3
        vpermq  \$0x2,$H4,$D4
        vpermq  \$0x2,$H0,$D0
        vpermq  \$0x2,$H1,$D1
        vpermq  \$0x2,$H2,$D2
        vextracti64x4   \$0x1,$H3,%y#$D3
        vextracti64x4   \$0x1,$H4,%y#$D4
        vextracti64x4   \$0x1,$H0,%y#$D0
        vextracti64x4   \$0x1,$H1,%y#$D1
        vextracti64x4   \$0x1,$H2,%y#$D2
        vpaddq  $D3,$H3,${H3}{%k3}{z}   # keep single qword in case
        vpaddq  $D4,$H4,${H4}{%k3}{z}   # it's passed to .Ltail_avx2
        vpaddq  $D0,$H0,${H0}{%k3}{z}
        vpaddq  $D1,$H1,${H1}{%k3}{z}
        vpaddq  $D2,$H2,${H2}{%k3}{z}

map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
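# The two substitutions above rename the working registers from their
# zmm forms back to ymm, so the code emitted from this point on operates
# on 256-bit registers and the 64-byte tail can fall through into the
# AVX2-style epilogue.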
        ################################################################
        # lazy reduction (interleaved with input splat)

        vpsrldq \$6,$T0,$T2             # splat input
        vpunpckhqdq     $T1,$T0,$T4     # 4
        vpaddq  $D3,$H4,$H4             # h3 -> h4
        vpunpcklqdq     $T3,$T2,$T2     # 2:3
        vpunpcklqdq     $T1,$T0,$T0     # 0:1
        vpaddq  $D0,$H1,$H1             # h0 -> h1
        vpaddq  $D1,$H2,$H2             # h1 -> h2
        vpsrlq  \$40,$T4,$T4            # 4
        vpaddq  $D4,$H0,$H0             # h4 -> h0
        vpand   $MASK,$T2,$T2           # 2
        vpand   $MASK,$T0,$T0           # 0
        vpaddq  $D2,$H3,$H3             # h2 -> h3
        vpaddq  $H2,$T2,$H2             # accumulate input for .Ltail_avx2
        vpand   $MASK,$T1,$T1           # 1
        vpaddq  $D0,$H1,$H1             # h0 -> h1
        vpand   $MASK,$T3,$T3           # 3
        vpor    32(%rcx),$T4,$T4        # padbit, yes, always
        vpaddq  $D3,$H4,$H4             # h3 -> h4

        lea     0x90(%rsp),%rax         # size optimization for .Ltail_avx2
        jnz     .Ltail_avx2$suffix

        vpsubq  $T2,$H2,$H2             # undo input accumulation
        vmovd   %x#$H0,`4*0-48-64`($ctx)        # save partially reduced
        vmovd   %x#$H1,`4*1-48-64`($ctx)
        vmovd   %x#$H2,`4*2-48-64`($ctx)
        vmovd   %x#$H3,`4*3-48-64`($ctx)
        vmovd   %x#$H4,`4*4-48-64`($ctx)
$code.=<<___ if ($win64);
        movdqa  -0xb0(%r10),%xmm6
        movdqa  -0xa0(%r10),%xmm7
        movdqa  -0x90(%r10),%xmm8
        movdqa  -0x80(%r10),%xmm9
        movdqa  -0x70(%r10),%xmm10
        movdqa  -0x60(%r10),%xmm11
        movdqa  -0x50(%r10),%xmm12
        movdqa  -0x40(%r10),%xmm13
        movdqa  -0x30(%r10),%xmm14
        movdqa  -0x20(%r10),%xmm15
.Ldo_avx512_epilogue:
$code.=<<___ if (!$win64);
.cfi_def_cfa_register   %rsp

&declare_function("poly1305_blocks_avx2", 32, 4);
poly1305_blocks_avxN(0);
&end_function("poly1305_blocks_avx2");
$code .= "#endif\n";

#######################################################################
# On entry we have input length divisible by 64. But since the inner
# loop processes 128 bytes per iteration, cases when the length is not
# divisible by 128 are handled by passing the tail 64 bytes to
# .Ltail_avx2. For this reason the stack layout is kept identical to
# that of poly1305_blocks_avx2. If not for this tail, we wouldn't even
# have to allocate the stack frame...
$code .= "#ifdef CONFIG_AS_AVX512\n";

&declare_function("poly1305_blocks_avx512", 32, 4);
poly1305_blocks_avxN(1);
&end_function("poly1305_blocks_avx512");

$code .= "#endif\n";

if (!$kernel && $avx>3) {
########################################################################
# VPMADD52 version using 2^44 radix.
#
# One can argue that base 2^52 would be more natural. Well, even though
# some operations would be more natural, one has to recognize a couple
# of things. Base 2^52 doesn't provide an advantage over base 2^44 if
# you look at the amount of multiply-and-accumulate operations.
# Secondly, it makes it impossible to pre-compute multiples of 5
# [referred to as s[]/sN in reference implementations], which means
# that more such operations would have to be performed in the inner
# loop, which in turn makes the critical path longer. In other words,
# even though base 2^44 reduction might look less elegant, the overall
# critical path is actually shorter...
########################################################################
# The layout of the opaque area is as follows:
#
#	unsigned __int64 h[3];		# current hash value base 2^44
#	unsigned __int64 s[2];		# key value*20 base 2^44
#	unsigned __int64 r[3];		# key value base 2^44
#	struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4];
#					# r^n positions reflect
#					# placement in register, not
#					# memory, R[3] is R[1]*20
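#
# Expressed as a C struct (a sketch; no such type is declared anywhere,
# but the offsets match the 8-byte slots the code addresses):
#
#	struct poly1305_base2_44_ctx {
#		uint64_t h[3];		/* 0:  hash value, base 2^44 */
#		uint64_t s[2];		/* 24: 20*r[1], 20*r[2] */
#		uint64_t r[3];		/* 40: key, base 2^44 */
#		uint64_t R[4][4];	/* 64: key powers; -1 in the   */
#	};				/* first slot means "no powers */
#					/* computed yet"               */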
.type	poly1305_init_base2_44,\@function,3
poly1305_init_base2_44:
	mov	%rax,0($ctx)		# initialize hash value
	lea	poly1305_blocks_vpmadd52(%rip),%r10
	lea	poly1305_emit_base2_44(%rip),%r11
	mov	\$0x0ffffffc0fffffff,%rax
	mov	\$0x0ffffffc0ffffffc,%rcx
	mov	\$0x00000fffffffffff,%r8
	mov	\$0x00000fffffffffff,%r9
	mov	%r8,40($ctx)		# r0
	mov	%rax,48($ctx)		# r1
	lea	(%rax,%rax,4),%rax	# *5
	mov	%rcx,56($ctx)		# r2
	shl	\$2,%rax		# magic <<2
	lea	(%rcx,%rcx,4),%rcx	# *5
	shl	\$2,%rcx		# magic <<2
	mov	%rax,24($ctx)		# s1
	mov	%rcx,32($ctx)		# s2
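	# (i.e. s1 = ((r1*5)<<2) = 20*r1 and s2 = 20*r2, pre-computed
	# here so that the reduction never multiplies by 5 at run time)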
	movq	\$-1,64($ctx)		# write impossible value
$code.=<<___ if ($flavour !~ /elf32/);
$code.=<<___ if ($flavour =~ /elf32/);
.size	poly1305_init_base2_44,.-poly1305_init_base2_44
my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17));
my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21));
my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25));
.type	poly1305_blocks_vpmadd52,\@function,4
poly1305_blocks_vpmadd52:
	jz	.Lno_data_vpmadd52	# too short
	mov	64($ctx),%r8		# peek on power of the key
	# if powers of the key are not calculated yet, process up to 3
	# blocks with this single-block subroutine, otherwise ensure that
	# length is divisible by 2 blocks and pass the rest down to next
	cmp	\$4,$len		# is input long
	test	%r8,%r8			# is power value impossible?
	and	$len,%rax		# is input of favourable length?
	jz	.Lblocks_vpmadd52_4x
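	# in other words (a sketch of the dispatch around the elided
	# cmov steps: rax ends up as 3 when the key powers still have to
	# be computed and the input is short, as 1 otherwise):
	#
	#	if ((len & ((powers_ready || len >= 4) ? 1 : 3)) == 0)
	#		goto blocks_vpmadd52_4x; /* multi-block path */
	#	/* else peel off up to 3 blocks with the code below */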
	lea	.L2_44_inp_permd(%rip),%r10
	vmovq	$padbit,%x#$PAD
	vmovdqa64	0(%r10),$inp_permd	# .L2_44_inp_permd
	vmovdqa64	32(%r10),$inp_shift	# .L2_44_inp_shift
	vpermq	\$0xcf,$PAD,$PAD
	vmovdqa64	64(%r10),$reduc_mask	# .L2_44_mask
	vmovdqu64	0($ctx),${Dlo}{%k7}{z}		# load hash value
	vmovdqu64	40($ctx),${r2r1r0}{%k7}{z}	# load keys
	vmovdqu64	32($ctx),${r1r0s2}{%k7}{z}
	vmovdqu64	24($ctx),${r0s2s1}{%k7}{z}
	vmovdqa64	96(%r10),$reduc_rght	# .L2_44_shift_rgt
	vmovdqa64	128(%r10),$reduc_left	# .L2_44_shift_lft
	vmovdqu32	0($inp),%x#$T0		# load input as ----3210
	vpermd	$T0,$inp_permd,$T0	# ----3210 -> --322110
	vpsrlvq	$inp_shift,$T0,$T0
	vpandq	$reduc_mask,$T0,$T0
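	# scalar equivalent of the splat above (a sketch): for the
	# 128-bit block m,
	#	t0 = m & (2^44-1);
	#	t1 = (m >> 44) & (2^44-1);
	#	t2 = m >> 88;		/* 40 bits */
	# with the padbit, pre-shifted left by 40, subsequently OR-ed
	# into the t2 lane via PAD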
	vpaddq	$T0,$Dlo,$Dlo		# accumulate input
	vpermq	\$0,$Dlo,${H0}{%k7}{z}	# smash hash value
	vpermq	\$0b01010101,$Dlo,${H1}{%k7}{z}
	vpermq	\$0b10101010,$Dlo,${H2}{%k7}{z}
	vpxord	$Dlo,$Dlo,$Dlo
	vpxord	$Dhi,$Dhi,$Dhi
	vpmadd52luq	$r2r1r0,$H0,$Dlo
	vpmadd52huq	$r2r1r0,$H0,$Dhi
	vpmadd52luq	$r1r0s2,$H1,$Dlo
	vpmadd52huq	$r1r0s2,$H1,$Dhi
	vpmadd52luq	$r0s2s1,$H2,$Dlo
	vpmadd52huq	$r0s2s1,$H2,$Dhi
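	# (vpmadd52luq accumulates the low 52 bits of the 104-bit
	# product of the low 52 bits of its sources into the 64-bit
	# destination lanes, vpmadd52huq the high 52 bits, so Dlo:Dhi
	# collect all three columns d0,d1,d2 of h*r in one sweep)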
	vpsrlvq	$reduc_rght,$Dlo,$T0	# 0 in topmost qword
	vpsllvq	$reduc_left,$Dhi,$Dhi	# 0 in topmost qword
	vpandq	$reduc_mask,$Dlo,$Dlo
	vpaddq	$T0,$Dhi,$Dhi
	vpermq	\$0b10010011,$Dhi,$Dhi	# 0 in lowest qword
	vpaddq	$Dhi,$Dlo,$Dlo		# note topmost qword :-)
	vpsrlvq	$reduc_rght,$Dlo,$T0	# 0 in topmost word
	vpandq	$reduc_mask,$Dlo,$Dlo
	vpermq	\$0b10010011,$T0,$T0
	vpaddq	$T0,$Dlo,$Dlo
	vpermq	\$0b10010011,$Dlo,${T0}{%k1}{z}
	vpaddq	$T0,$Dlo,$Dlo
	vpaddq	$T0,$Dlo,$Dlo
	vmovdqu64	$Dlo,0($ctx){%k7}	# store hash value
	jnz	.Lblocks_vpmadd52_4x
.size	poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
########################################################################
# As implied by its name, the 4x subroutine processes 4 blocks in
# parallel (but also handles lengths of 4*n+2 blocks). It uses key
# powers up to the 4th and keeps the data in 256-bit %ymm registers.
my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
.type	poly1305_blocks_vpmadd52_4x,\@function,4
poly1305_blocks_vpmadd52_4x:
	jz	.Lno_data_vpmadd52_4x	# too short
	mov	64($ctx),%r8		# peek on power of the key
.Lblocks_vpmadd52_4x:
	vpbroadcastq	$padbit,$PAD
	vmovdqa64	.Lx_mask44(%rip),$mask44
	vmovdqa64	.Lx_mask42(%rip),$mask42
	kmovw	%eax,%k1		# used in 2x path
	test	%r8,%r8			# is power value impossible?
	js	.Linit_vpmadd52		# if it is, then init R[4]
	vmovq	0($ctx),%x#$H0		# load current hash value
	vmovq	8($ctx),%x#$H1
	vmovq	16($ctx),%x#$H2
	test	\$3,$len		# is length 4*n+2?
	jnz	.Lblocks_vpmadd52_2x_do
.Lblocks_vpmadd52_4x_do:
	vpbroadcastq	64($ctx),$R0	# load 4th power of the key
	vpbroadcastq	96($ctx),$R1
	vpbroadcastq	128($ctx),$R2
	vpbroadcastq	160($ctx),$S1
.Lblocks_vpmadd52_4x_key_loaded:
	vpsllq	\$2,$R2,$S2		# S2 = R2*5*4
	test	\$7,$len		# is len 8*n?
	jz	.Lblocks_vpmadd52_8x
	vmovdqu64	16*0($inp),$T2	# load data
	vmovdqu64	16*2($inp),$T3
	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3
	# at this point 64-bit lanes are ordered as 3-1-2-0
	vpsrlq	\$24,$T3,$T2		# splat the data
	vpaddq	$T2,$H2,$H2		# accumulate input
	vpandq	$mask44,$T1,$T0
	vpandq	$mask44,$T1,$T1
	jz	.Ltail_vpmadd52_4x
	jmp	.Loop_vpmadd52_4x
	vmovq	24($ctx),%x#$S1		# load key
	vmovq	56($ctx),%x#$H2
	vmovq	32($ctx),%x#$S2
	vmovq	40($ctx),%x#$R0
	vmovq	48($ctx),%x#$R1
.Lmul_init_vpmadd52:
	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi
	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi
	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi
################################################################
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi
	vpaddq	$D0hi,$D1lo,$D1lo
	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi
	vpaddq	$D1hi,$D2lo,$D2lo
	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi
	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi
	vpaddq	$D2hi,$H0,$H0
	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0
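	# scalar sketch of this partial reduction, with each column d
	# assembled as dlo + dhi*2^52:
	#	d1 += d0 >> 44;		h0 = d0 & mask44;
	#	d2 += d1 >> 44;		h1 = d1 & mask44;
	#	c   = d2 >> 42;		h2 = d2 & mask42;
	#	h0 += c*5;		/* 2^130 == 5 */
	#	h1 += h0 >> 44;		h0 &= mask44;
	# the vector code keeps the lo/hi halves apart, hence the paired
	# shift left by 8 resp. 10 on the hi words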
	jz	.Ldone_init_vpmadd52
	vpunpcklqdq	$R1,$H1,$R1	# 1,2
	vpbroadcastq	%x#$H1,%x#$H1	# 2,2
	vpunpcklqdq	$R2,$H2,$R2
	vpbroadcastq	%x#$H2,%x#$H2
	vpunpcklqdq	$R0,$H0,$R0
	vpbroadcastq	%x#$H0,%x#$H0
	vpsllq	\$2,$R1,$S1		# S1 = R1*5*4
	vpsllq	\$2,$R2,$S2		# S2 = R2*5*4
	jmp	.Lmul_init_vpmadd52
.Ldone_init_vpmadd52:
	vinserti128	\$1,%x#$R1,$H1,$R1	# 1,2,3,4
	vinserti128	\$1,%x#$R2,$H2,$R2
	vinserti128	\$1,%x#$R0,$H0,$R0
	vpermq	\$0b11011000,$R1,$R1	# 1,3,2,4
	vpermq	\$0b11011000,$R2,$R2
	vpermq	\$0b11011000,$R0,$R0
	vpsllq	\$2,$R1,$S1		# S1 = R1*5*4
	vmovq	0($ctx),%x#$H0		# load current hash value
	vmovq	8($ctx),%x#$H1
	vmovq	16($ctx),%x#$H2
	test	\$3,$len		# is length 4*n+2?
	jnz	.Ldone_init_vpmadd52_2x
	vmovdqu64	$R0,64($ctx)	# save key powers
	vpbroadcastq	%x#$R0,$R0	# broadcast 4th power
	vmovdqu64	$R1,96($ctx)
	vpbroadcastq	%x#$R1,$R1
	vmovdqu64	$R2,128($ctx)
	vpbroadcastq	%x#$R2,$R2
	vmovdqu64	$S1,160($ctx)
	vpbroadcastq	%x#$S1,$S1
	jmp	.Lblocks_vpmadd52_4x_key_loaded
.Ldone_init_vpmadd52_2x:
	vmovdqu64	$R0,64($ctx)	# save key powers
	vpsrldq	\$8,$R0,$R0		# 0-1-0-2
	vmovdqu64	$R1,96($ctx)
	vmovdqu64	$R2,128($ctx)
	vmovdqu64	$S1,160($ctx)
	jmp	.Lblocks_vpmadd52_2x_key_loaded
.Lblocks_vpmadd52_2x_do:
	vmovdqu64	128+8($ctx),${R2}{%k1}{z}	# load 2nd and 1st key powers
	vmovdqu64	160+8($ctx),${S1}{%k1}{z}
	vmovdqu64	64+8($ctx),${R0}{%k1}{z}
	vmovdqu64	96+8($ctx),${R1}{%k1}{z}
.Lblocks_vpmadd52_2x_key_loaded:
	vmovdqu64	16*0($inp),$T2	# load data
	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3
	# at this point 64-bit lanes are ordered as x-1-x-0
	vpsrlq	\$24,$T3,$T2		# splat the data
	vpaddq	$T2,$H2,$H2		# accumulate input
	vpandq	$mask44,$T1,$T0
	vpandq	$mask44,$T1,$T1
	jmp	.Ltail_vpmadd52_2x
	#vpaddq	$T2,$H2,$H2		# accumulate input
	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi
	vmovdqu64	16*0($inp),$T2	# load data
	vmovdqu64	16*2($inp),$T3
	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi
	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3
	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi
################################################################
# partial reduction (interleaved with data splat)
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi
	vpaddq	$D0hi,$D1lo,$D1lo
	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi
	vpandq	$mask44,$T1,$T0
	vpaddq	$D1hi,$D2lo,$D2lo
	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi
	vpaddq	$T2,$H2,$H2		# accumulate input
	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi
	vpaddq	$D2hi,$H0,$H0
	vpandq	$mask44,$T1,$T1
	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0
	sub	\$4,$len		# len-=64
	jnz	.Loop_vpmadd52_4x
	vmovdqu64	128($ctx),$R2	# load all key powers
	vmovdqu64	160($ctx),$S1
	vmovdqu64	64($ctx),$R0
	vmovdqu64	96($ctx),$R1
	vpsllq	\$2,$R2,$S2		# S2 = R2*5*4
	#vpaddq	$T2,$H2,$H2		# accumulate input
	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi
	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi
	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi
################################################################
# horizontal addition
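	# (fold the per-lane column sums into lane 0: vpsrldq \$8 adds
	# the odd qword onto the even one within each 128-bit half, and
	# vpermq \$0x2 then brings the upper half down onto the lower
	# one, so each of d0,d1,d2 ends up whole in the low qword)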
	vpsrldq	\$8,$D0lo,$T0
	vpsrldq	\$8,$D0hi,$H0
	vpsrldq	\$8,$D1lo,$T1
	vpsrldq	\$8,$D1hi,$H1
	vpaddq	$T0,$D0lo,$D0lo
	vpaddq	$H0,$D0hi,$D0hi
	vpsrldq	\$8,$D2lo,$T2
	vpsrldq	\$8,$D2hi,$H2
	vpaddq	$T1,$D1lo,$D1lo
	vpaddq	$H1,$D1hi,$D1hi
	vpermq	\$0x2,$D0lo,$T0
	vpermq	\$0x2,$D0hi,$H0
	vpaddq	$T2,$D2lo,$D2lo
	vpaddq	$H2,$D2hi,$D2hi
	vpermq	\$0x2,$D1lo,$T1
	vpermq	\$0x2,$D1hi,$H1
	vpaddq	$T0,$D0lo,${D0lo}{%k1}{z}
	vpaddq	$H0,$D0hi,${D0hi}{%k1}{z}
	vpermq	\$0x2,$D2lo,$T2
	vpermq	\$0x2,$D2hi,$H2
	vpaddq	$T1,$D1lo,${D1lo}{%k1}{z}
	vpaddq	$H1,$D1hi,${D1hi}{%k1}{z}
	vpaddq	$T2,$D2lo,${D2lo}{%k1}{z}
	vpaddq	$H2,$D2hi,${D2hi}{%k1}{z}
################################################################
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi
	vpaddq	$D0hi,$D1lo,$D1lo
	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi
	vpaddq	$D1hi,$D2lo,$D2lo
	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi
	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi
	vpaddq	$D2hi,$H0,$H0
	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0
					# at this point $len is
					# either 4*n+2 or 0...
	sub	\$2,$len		# len-=32
	ja	.Lblocks_vpmadd52_4x_do
	vmovq	%x#$H0,0($ctx)
	vmovq	%x#$H1,8($ctx)
	vmovq	%x#$H2,16($ctx)
.Lno_data_vpmadd52_4x:
.size	poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
########################################################################
# As implied by its name, the 8x subroutine processes 8 blocks in
# parallel... This is an intermediate version, as it's used only in
# cases when the input length is either 8*n, 8*n+1 or 8*n+2...
my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17));
my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23));
my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31));
my ($RR0,$RR1,$RR2,$SS1,$SS2) = map("%ymm$_",(6..10));
.type	poly1305_blocks_vpmadd52_8x,\@function,4
poly1305_blocks_vpmadd52_8x:
	jz	.Lno_data_vpmadd52_8x	# too short
	mov	64($ctx),%r8		# peek on power of the key
	vmovdqa64	.Lx_mask44(%rip),$mask44
	vmovdqa64	.Lx_mask42(%rip),$mask42
	test	%r8,%r8			# is power value impossible?
	js	.Linit_vpmadd52		# if it is, then init R[4]
	vmovq	0($ctx),%x#$H0		# load current hash value
	vmovq	8($ctx),%x#$H1
	vmovq	16($ctx),%x#$H2
.Lblocks_vpmadd52_8x:
################################################################
# first we calculate more key powers
	vmovdqu64	128($ctx),$R2	# load 1-3-2-4 powers
	vmovdqu64	160($ctx),$S1
	vmovdqu64	64($ctx),$R0
	vmovdqu64	96($ctx),$R1
	vpsllq	\$2,$R2,$S2		# S2 = R2*5*4
	vpbroadcastq	%x#$R2,$RR2	# broadcast 4th power
	vpbroadcastq	%x#$R0,$RR0
	vpbroadcastq	%x#$R1,$RR1
	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$RR2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$RR2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$RR2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$RR2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$RR2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$RR2,$R0,$D2hi
	vpmadd52luq	$RR0,$R0,$D0lo
	vpmadd52huq	$RR0,$R0,$D0hi
	vpmadd52luq	$RR0,$R1,$D1lo
	vpmadd52huq	$RR0,$R1,$D1hi
	vpmadd52luq	$RR0,$R2,$D2lo
	vpmadd52huq	$RR0,$R2,$D2hi
	vpmadd52luq	$RR1,$S2,$D0lo
	vpmadd52huq	$RR1,$S2,$D0hi
	vpmadd52luq	$RR1,$R0,$D1lo
	vpmadd52huq	$RR1,$R0,$D1hi
	vpmadd52luq	$RR1,$R1,$D2lo
	vpmadd52huq	$RR1,$R1,$D2hi
################################################################
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$RR0
	vpaddq	$tmp,$D0hi,$D0hi
	vpaddq	$D0hi,$D1lo,$D1lo
	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$RR1
	vpaddq	$tmp,$D1hi,$D1hi
	vpaddq	$D1hi,$D2lo,$D2lo
	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$RR2
	vpaddq	$tmp,$D2hi,$D2hi
	vpaddq	$D2hi,$RR0,$RR0
	vpsllq	\$2,$D2hi,$D2hi
	vpaddq	$D2hi,$RR0,$RR0
	vpsrlq	\$44,$RR0,$tmp		# additional step
	vpandq	$mask44,$RR0,$RR0
	vpaddq	$tmp,$RR1,$RR1
################################################################
# At this point Rx holds 1324 powers, RRx - 5768, and the goal
# is 15263748, which reflects how data is loaded...
	vpunpcklqdq	$R2,$RR2,$T2	# 3748
	vpunpckhqdq	$R2,$RR2,$R2	# 1526
	vpunpcklqdq	$R0,$RR0,$T0
	vpunpckhqdq	$R0,$RR0,$R0
	vpunpcklqdq	$R1,$RR1,$T1
	vpunpckhqdq	$R1,$RR1,$R1
######## switch to %zmm
map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
map(s/%y/%z/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2);
	vshufi64x2	\$0x44,$R2,$T2,$RR2	# 15263748
	vshufi64x2	\$0x44,$R0,$T0,$RR0
	vshufi64x2	\$0x44,$R1,$T1,$RR1
	vmovdqu64	16*0($inp),$T2	# load data
	vmovdqu64	16*4($inp),$T3
	vpsllq	\$2,$RR2,$SS2		# S2 = R2*5*4
	vpsllq	\$2,$RR1,$SS1		# S1 = R1*5*4
	vpaddq	$RR2,$SS2,$SS2
	vpaddq	$RR1,$SS1,$SS1
	vpsllq	\$2,$SS2,$SS2
	vpsllq	\$2,$SS1,$SS1
	vpbroadcastq	$padbit,$PAD
	vpbroadcastq	%x#$mask44,$mask44
	vpbroadcastq	%x#$mask42,$mask42
	vpbroadcastq	%x#$SS1,$S1	# broadcast 8th power
	vpbroadcastq	%x#$SS2,$S2
	vpbroadcastq	%x#$RR0,$R0
	vpbroadcastq	%x#$RR1,$R1
	vpbroadcastq	%x#$RR2,$R2
	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3
	# at this point 64-bit lanes are ordered as 73625140
	vpsrlq	\$24,$T3,$T2		# splat the data
	vpaddq	$T2,$H2,$H2		# accumulate input
	vpandq	$mask44,$T1,$T0
	vpandq	$mask44,$T1,$T1
	jz	.Ltail_vpmadd52_8x
	jmp	.Loop_vpmadd52_8x
	#vpaddq	$T2,$H2,$H2		# accumulate input
	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$S1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$S1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$S2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$S2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$R0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$R0,$D2hi
	vmovdqu64	16*0($inp),$T2	# load data
	vmovdqu64	16*4($inp),$T3
	vpmadd52luq	$H0,$R0,$D0lo
	vpmadd52huq	$H0,$R0,$D0hi
	vpmadd52luq	$H0,$R1,$D1lo
	vpmadd52huq	$H0,$R1,$D1hi
	vpmadd52luq	$H0,$R2,$D2lo
	vpmadd52huq	$H0,$R2,$D2hi
	vpunpcklqdq	$T3,$T2,$T1	# transpose data
	vpunpckhqdq	$T3,$T2,$T3
	vpmadd52luq	$H1,$S2,$D0lo
	vpmadd52huq	$H1,$S2,$D0hi
	vpmadd52luq	$H1,$R0,$D1lo
	vpmadd52huq	$H1,$R0,$D1hi
	vpmadd52luq	$H1,$R1,$D2lo
	vpmadd52huq	$H1,$R1,$D2hi
################################################################
# partial reduction (interleaved with data splat)
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi
	vpaddq	$D0hi,$D1lo,$D1lo
	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi
	vpandq	$mask44,$T1,$T0
	vpaddq	$D1hi,$D2lo,$D2lo
	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi
	vpaddq	$T2,$H2,$H2		# accumulate input
	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi
	vpaddq	$D2hi,$H0,$H0
	vpandq	$mask44,$T1,$T1
	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0
	sub	\$8,$len		# len-=128
	jnz	.Loop_vpmadd52_8x
	#vpaddq	$T2,$H2,$H2		# accumulate input
	vpxorq	$D0lo,$D0lo,$D0lo
	vpmadd52luq	$H2,$SS1,$D0lo
	vpxorq	$D0hi,$D0hi,$D0hi
	vpmadd52huq	$H2,$SS1,$D0hi
	vpxorq	$D1lo,$D1lo,$D1lo
	vpmadd52luq	$H2,$SS2,$D1lo
	vpxorq	$D1hi,$D1hi,$D1hi
	vpmadd52huq	$H2,$SS2,$D1hi
	vpxorq	$D2lo,$D2lo,$D2lo
	vpmadd52luq	$H2,$RR0,$D2lo
	vpxorq	$D2hi,$D2hi,$D2hi
	vpmadd52huq	$H2,$RR0,$D2hi
	vpmadd52luq	$H0,$RR0,$D0lo
	vpmadd52huq	$H0,$RR0,$D0hi
	vpmadd52luq	$H0,$RR1,$D1lo
	vpmadd52huq	$H0,$RR1,$D1hi
	vpmadd52luq	$H0,$RR2,$D2lo
	vpmadd52huq	$H0,$RR2,$D2hi
	vpmadd52luq	$H1,$SS2,$D0lo
	vpmadd52huq	$H1,$SS2,$D0hi
	vpmadd52luq	$H1,$RR0,$D1lo
	vpmadd52huq	$H1,$RR0,$D1hi
	vpmadd52luq	$H1,$RR1,$D2lo
	vpmadd52huq	$H1,$RR1,$D2hi
################################################################
# horizontal addition
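	# (same folding as in the 4x tail, with an extra step: after the
	# in-lane folds, vextracti64x4 brings the upper 256 bits of each
	# %zmm sum down so the final additions can proceed on %ymm)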
	vpsrldq	\$8,$D0lo,$T0
	vpsrldq	\$8,$D0hi,$H0
	vpsrldq	\$8,$D1lo,$T1
	vpsrldq	\$8,$D1hi,$H1
	vpaddq	$T0,$D0lo,$D0lo
	vpaddq	$H0,$D0hi,$D0hi
	vpsrldq	\$8,$D2lo,$T2
	vpsrldq	\$8,$D2hi,$H2
	vpaddq	$T1,$D1lo,$D1lo
	vpaddq	$H1,$D1hi,$D1hi
	vpermq	\$0x2,$D0lo,$T0
	vpermq	\$0x2,$D0hi,$H0
	vpaddq	$T2,$D2lo,$D2lo
	vpaddq	$H2,$D2hi,$D2hi
	vpermq	\$0x2,$D1lo,$T1
	vpermq	\$0x2,$D1hi,$H1
	vpaddq	$T0,$D0lo,$D0lo
	vpaddq	$H0,$D0hi,$D0hi
	vpermq	\$0x2,$D2lo,$T2
	vpermq	\$0x2,$D2hi,$H2
	vpaddq	$T1,$D1lo,$D1lo
	vpaddq	$H1,$D1hi,$D1hi
	vextracti64x4	\$1,$D0lo,%y#$T0
	vextracti64x4	\$1,$D0hi,%y#$H0
	vpaddq	$T2,$D2lo,$D2lo
	vpaddq	$H2,$D2hi,$D2hi
	vextracti64x4	\$1,$D1lo,%y#$T1
	vextracti64x4	\$1,$D1hi,%y#$H1
	vextracti64x4	\$1,$D2lo,%y#$T2
	vextracti64x4	\$1,$D2hi,%y#$H2
######## switch back to %ymm
map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2);
map(s/%z/%y/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi);
map(s/%z/%y/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD);
	vpaddq	$T0,$D0lo,${D0lo}{%k1}{z}
	vpaddq	$H0,$D0hi,${D0hi}{%k1}{z}
	vpaddq	$T1,$D1lo,${D1lo}{%k1}{z}
	vpaddq	$H1,$D1hi,${D1hi}{%k1}{z}
	vpaddq	$T2,$D2lo,${D2lo}{%k1}{z}
	vpaddq	$H2,$D2hi,${D2hi}{%k1}{z}
################################################################
	vpsrlq	\$44,$D0lo,$tmp
	vpsllq	\$8,$D0hi,$D0hi
	vpandq	$mask44,$D0lo,$H0
	vpaddq	$tmp,$D0hi,$D0hi
	vpaddq	$D0hi,$D1lo,$D1lo
	vpsrlq	\$44,$D1lo,$tmp
	vpsllq	\$8,$D1hi,$D1hi
	vpandq	$mask44,$D1lo,$H1
	vpaddq	$tmp,$D1hi,$D1hi
	vpaddq	$D1hi,$D2lo,$D2lo
	vpsrlq	\$42,$D2lo,$tmp
	vpsllq	\$10,$D2hi,$D2hi
	vpandq	$mask42,$D2lo,$H2
	vpaddq	$tmp,$D2hi,$D2hi
	vpaddq	$D2hi,$H0,$H0
	vpsllq	\$2,$D2hi,$D2hi
	vpaddq	$D2hi,$H0,$H0
	vpsrlq	\$44,$H0,$tmp		# additional step
	vpandq	$mask44,$H0,$H0
################################################################
	vmovq	%x#$H0,0($ctx)
	vmovq	%x#$H1,8($ctx)
	vmovq	%x#$H2,16($ctx)
.Lno_data_vpmadd52_8x:
.size	poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
.type	poly1305_emit_base2_44,\@function,3
poly1305_emit_base2_44:
	mov	0($ctx),%r8		# load hash value
	add	\$5,%r8			# compare to modulus
	shr	\$2,%r10		# did 130-bit value overflow?
	add	0($nonce),%rax		# accumulate nonce
	mov	%rax,0($mac)		# write result
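	# (in full, a scalar sketch of the final step: t = h + 5; if t
	# overflows 2^130 then t, being h - (2^130-5), is the canonical
	# residue and is selected in place of h; the low 128 bits plus
	# the nonce, taken mod 2^128, form the tag)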
.size	poly1305_emit_base2_44,.-poly1305_emit_base2_44
{ # chacha20-poly1305 helpers
my ($out,$inp,$otp,$len)=$win64 ? ("%rcx","%rdx","%r8", "%r9") :  # Win64 order
                                  ("%rdi","%rsi","%rdx","%rcx"); # Unix order
.globl	xor128_encrypt_n_pad
.type	xor128_encrypt_n_pad,\@abi-omnipotent
xor128_encrypt_n_pad:
	mov	$len,%r10		# put len aside
	shr	\$4,$len		# len / 16
	movdqu	($inp,$otp),%xmm0
	movdqu	%xmm0,($out,$otp)
	and	\$15,%r10		# len % 16
.size	xor128_encrypt_n_pad,.-xor128_encrypt_n_pad
.globl	xor128_decrypt_n_pad
.type	xor128_decrypt_n_pad,\@abi-omnipotent
xor128_decrypt_n_pad:
	mov	$len,%r10		# put len aside
	shr	\$4,$len		# len / 16
	movdqu	($inp,$otp),%xmm0
	movdqu	%xmm1,($out,$otp)
	and	\$15,%r10		# len % 16
	mov	($inp,$otp),%r11b
.size	xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip
	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData
	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail
	mov	152($context),%rax	# pull context->Rsp
	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
	jmp	.Lcommon_seh_tail
.size	se_handler,.-se_handler
.type	avx_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip
	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData
	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail
	mov	152($context),%rax	# pull context->Rsp
	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail
	mov	208($context),%rax	# pull context->R11
	lea	512($context),%rdi	# &context.Xmm6
	.long	0xa548f3fc		# cld; rep movsq
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi
	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)/8, qword count for movsq
	.long	0xa548f3fc		# cld; rep movsq
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)
	mov	\$1,%eax		# ExceptionContinueSearch
.size	avx_handler,.-avx_handler
	.rva	.LSEH_begin_poly1305_init_x86_64
	.rva	.LSEH_end_poly1305_init_x86_64
	.rva	.LSEH_info_poly1305_init_x86_64
	.rva	.LSEH_begin_poly1305_blocks_x86_64
	.rva	.LSEH_end_poly1305_blocks_x86_64
	.rva	.LSEH_info_poly1305_blocks_x86_64
	.rva	.LSEH_begin_poly1305_emit_x86_64
	.rva	.LSEH_end_poly1305_emit_x86_64
	.rva	.LSEH_info_poly1305_emit_x86_64
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_1
	.rva	.LSEH_info_poly1305_blocks_avx_2
	.rva	.LSEH_end_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_3
	.rva	.LSEH_begin_poly1305_emit_avx
	.rva	.LSEH_end_poly1305_emit_avx
	.rva	.LSEH_info_poly1305_emit_avx
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_poly1305_blocks_avx2
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_1
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_2
	.rva	.LSEH_end_poly1305_blocks_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_3
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_poly1305_blocks_avx512
	.rva	.LSEH_end_poly1305_blocks_avx512
	.rva	.LSEH_info_poly1305_blocks_avx512
.LSEH_info_poly1305_init_x86_64:
	.rva	.LSEH_begin_poly1305_init_x86_64,.LSEH_begin_poly1305_init_x86_64
.LSEH_info_poly1305_blocks_x86_64:
	.rva	.Lblocks_body,.Lblocks_epilogue
.LSEH_info_poly1305_emit_x86_64:
	.rva	.LSEH_begin_poly1305_emit_x86_64,.LSEH_begin_poly1305_emit_x86_64
$code.=<<___ if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
	.rva	.Lblocks_avx_body,.Lblocks_avx_epilogue		# HandlerData[]
.LSEH_info_poly1305_blocks_avx_2:
	.rva	.Lbase2_64_avx_body,.Lbase2_64_avx_epilogue	# HandlerData[]
.LSEH_info_poly1305_blocks_avx_3:
	.rva	.Ldo_avx_body,.Ldo_avx_epilogue			# HandlerData[]
.LSEH_info_poly1305_emit_avx:
	.rva	.LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
$code.=<<___ if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
	.rva	.Lblocks_avx2_body,.Lblocks_avx2_epilogue	# HandlerData[]
.LSEH_info_poly1305_blocks_avx2_2:
	.rva	.Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue	# HandlerData[]
.LSEH_info_poly1305_blocks_avx2_3:
	.rva	.Ldo_avx2_body,.Ldo_avx2_epilogue		# HandlerData[]
$code.=<<___ if ($avx>2);
.LSEH_info_poly1305_blocks_avx512:
	.rva	.Ldo_avx512_body,.Ldo_avx512_epilogue		# HandlerData[]
	last if (!s/^#/\/\// and !/^$/);
foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/%r([a-z]+)#d/%e$1/g;
	s/%r([0-9]+)#d/%r$1d/g;
	s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;
	s/(^\.type.*),[0-9]+$/$1/;
	s/(^\.type.*),\@abi-omnipotent$/$1,\@function/;